Isn't plain HashMap good enough? It feels like it can do everything! But... when multiple threads use the same HashMap at once, things can go wrong. If you don't believe it, try put: have 20 threads put 20 different values, and you may find the map does not end up with 20 key-value pairs. That happens when, say, two of the threads compute the same index from their keys' hashes and both try to insert into the same bucket. Time to show what is really going on!
Say we have two threads, one called Daha and one called Erha, and each of them holds a value: Daha has A, Erha has B.
The two brothers are quite the coincidence: they both want to put their value, A and B, into the very same place, slot 2.
Daha goes first and sees that slot 2 is empty (just as he is about to put A in, his time slice runs out and the CPU switches to Erha's thread). That parenthetical is the crucial part: from here on Daha is effectively blind, because in his memory slot 2 is still empty.
Erha's thread then puts B into that slot without any trouble.
Erha drops B off and leaves. Now Daha's thread wakes up, and since Daha still remembers slot 2 as empty, he simply writes his A in and overwrites B, instead of chaining A onto the list behind B!!
That outcome is wrong.
The correct behavior should look like this.
So HashMap is not thread-safe. If we use ConcurrentHashMap instead, we get the correct result shown in the picture above.
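To make that concrete, here is a small experiment of my own (the class and helper names below are made up for the demo, not from any library): 20 threads each put one distinct key into a shared map. With a plain HashMap an entry can be silently lost when two threads race on the same bucket (you may need several runs, or more threads, to catch it); with ConcurrentHashMap the count is always 20.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

public class LostPutDemo {
    // Start 20 threads that each put one distinct key into the given map,
    // then report how many entries survived.
    static int run(Map<Integer, Integer> map) throws InterruptedException {
        CountDownLatch start = new CountDownLatch(1);
        CountDownLatch done = new CountDownLatch(20);
        for (int i = 0; i < 20; i++) {
            final int key = i;
            new Thread(() -> {
                try {
                    start.await();       // release all threads at the same moment
                    map.put(key, key);   // concurrent put on a shared map
                } catch (InterruptedException ignored) {
                } finally {
                    done.countDown();
                }
            }).start();
        }
        start.countDown();
        done.await();
        return map.size();
    }

    public static void main(String[] args) throws InterruptedException {
        // Plain HashMap: entries can be lost when two threads hit the same bin.
        System.out.println("HashMap size: " + run(new HashMap<>()));
        // ConcurrentHashMap: always ends with 20 entries.
        System.out.println("ConcurrentHashMap size: " + run(new ConcurrentHashMap<>()));
    }
}

How ConcurrentHashMap avoids the overwrite is exactly what its putVal below shows: an empty bin is claimed with a CAS, and a non-empty bin is updated while holding a lock on its head node.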
final V putVal(K key, V value, boolean onlyIfAbsent) {
    if (key == null || value == null) throw new NullPointerException();
    int hash = spread(key.hashCode());
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            // first put: lazily initialize the array
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            // the bin for this key's hash is empty: CAS the new node in
            if (casTabAt(tab, i, null,
                         new Node<K,V>(hash, key, value, null)))
                break;                   // no lock when adding to empty bin
        }
        else if ((fh = f.hash) == MOVED)
            // the table is being resized: help migrate, then retry
            tab = helpTransfer(tab, f);
        else {
            V oldVal = null;
            synchronized (f) {           // lock only the head node of this bin
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        // plain linked list: walk it looking for the key
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                // reached the tail: append the new node
                                pred.next = new Node<K,V>(hash, key,
                                                          value, null);
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) {
                        // red-black tree bin: insert via the tree
                        Node<K,V> p;
                        binCount = 2;
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    // the list grew too long: convert this bin to a tree
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal;
                break;
            }
        }
    }
    // bump the element count and check whether a resize is needed
    addCount(1L, binCount);
    return null;
}
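A quick external sanity check of those branches (my own tiny sketch, not part of the source above): putIfAbsent goes through the same putVal with onlyIfAbsent = true, so an existing value is returned instead of being overwritten, while a plain put replaces it under the bin lock and also returns the old value.

import java.util.concurrent.ConcurrentHashMap;

public class PutValDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, String> map = new ConcurrentHashMap<>();

        // Empty bin: putVal CAS-installs a new node without locking and returns null.
        System.out.println(map.put("k", "A"));          // null

        // Same bin, onlyIfAbsent = true: the existing value wins, "A" comes back.
        System.out.println(map.putIfAbsent("k", "B"));  // A

        // onlyIfAbsent = false: the value is replaced under the bin's lock.
        System.out.println(map.put("k", "C"));          // A
        System.out.println(map.get("k"));               // C
    }
}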
Initialization: initTable()
private final Node<K,V>[] initTable() {
    Node<K,V>[] tab; int sc;
    while ((tab = table) == null || tab.length == 0) {
        // sizeCtl also came up in the earlier comparison of the two HashMaps
        if ((sc = sizeCtl) < 0)
            // another thread is initializing: voluntarily give up the CPU and retry
            Thread.yield(); // lost initialization race; just spin
        else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
            // SIZECTL is the memory offset of the sizeCtl field inside the object;
            // compareAndSwapInt sets sizeCtl to -1 only if it still equals sc.
            // A thread that loses this race sees sizeCtl < 0 above and yields
            // until the winner has finished initializing.
            try {
                if ((tab = table) == null || tab.length == 0) {
                    int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                    @SuppressWarnings("unchecked")
                    Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                    // create the new array and publish it
                    table = tab = nt;
                    // 0.75 * n: the threshold at which the table will resize
                    sc = n - (n >>> 2);
                }
            } finally {
                sizeCtl = sc;
            }
            break;
        }
    }
    return tab;
}
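The pattern in initTable, one thread wins a CAS, builds the table and publishes it while the losers yield and re-check, can be sketched outside the JDK with an AtomicInteger standing in for sizeCtl. This is only a simplified illustration of that idea with made-up names; the real code CASes the sizeCtl field directly through Unsafe.

import java.util.concurrent.atomic.AtomicInteger;

public class LazyInitSketch {
    private static final int DEFAULT_CAPACITY = 16;

    private volatile Object[] table;                            // stands in for Node<K,V>[] table
    private final AtomicInteger sizeCtl = new AtomicInteger(0); // stands in for sizeCtl

    Object[] initTable() {
        Object[] tab;
        while ((tab = table) == null || tab.length == 0) {
            int sc = sizeCtl.get();
            if (sc < 0) {
                Thread.yield();                          // someone else is initializing; give up the CPU
            } else if (sizeCtl.compareAndSet(sc, -1)) {  // won the race: sc -> -1
                try {
                    if ((tab = table) == null || tab.length == 0) {
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        tab = new Object[n];
                        table = tab;                     // publish the new table
                        sc = n - (n >>> 2);              // 0.75 * n, the next resize threshold
                    }
                } finally {
                    sizeCtl.set(sc);                     // store the threshold, ending the "initializing" state
                }
                break;
            }
        }
        return tab;
    }
}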
The resize trigger: addCount(1L, binCount)
private final void addCount(long x, int check) {
    CounterCell[] as; long b, s;
    // fast path: CAS baseCount; on contention fall back to the CounterCell stripes
    if ((as = counterCells) != null ||
        !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
        CounterCell a; long v; int m;
        boolean uncontended = true;
        if (as == null || (m = as.length - 1) < 0 ||
            (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
            !(uncontended =
              U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
            fullAddCount(x, uncontended);
            return;
        }
        if (check <= 1)
            return;
        s = sumCount();
    }
    if (check >= 0) {
        Node<K,V>[] tab, nt; int n, sc;
        // keep going while the element count s has reached the threshold sc
        // and the table can still grow
        while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
               (n = tab.length) < MAXIMUM_CAPACITY) {
            int rs = resizeStamp(n);
            if (sc < 0) {
                // a resize is already running: either bail out or join and help
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                    transferIndex <= 0)
                    break;
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                    transfer(tab, nt);
            }
            else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                         (rs << RESIZE_STAMP_SHIFT) + 2))
                // won the CAS: this thread starts the resize for real
                transfer(tab, null);
            s = sumCount();
        }
    }
}
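The baseCount plus CounterCell[] bookkeeping that addCount falls back to is the same striped-counter idea the public java.util.concurrent.atomic.LongAdder class exposes: try one CAS on a shared base first, and under contention spread increments across cells and sum everything on read. A small illustration using LongAdder itself (my own example, not the map's internal code):

import java.util.concurrent.atomic.LongAdder;
import java.util.stream.IntStream;

public class StripedCountDemo {
    public static void main(String[] args) {
        LongAdder count = new LongAdder();

        // Many threads incrementing at once: under contention the adds land in
        // separate cells instead of all CASing one field, which is the same
        // trick as baseCount + CounterCell[] above.
        IntStream.range(0, 1000).parallel().forEach(i -> count.increment());

        // sum() folds the base and every cell together, like sumCount() does.
        System.out.println(count.sum());   // 1000
    }
}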
The transfer function contains this code:
private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
    int n = tab.length, stride;
    // stride: how many bins each thread migrates at a time. NCPU is the number of
    // logical processors (8 on a 4-core/8-thread machine). The range is subdivided
    // only if each chunk would be at least MIN_TRANSFER_STRIDE (16) bins;
    // otherwise a single thread handles the whole table.
    if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
        stride = MIN_TRANSFER_STRIDE; // subdivide range
    if (nextTab == null) {            // initiating
        try {
            // allocate the new array at twice the old size
            @SuppressWarnings("unchecked")
            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
            nextTab = nt;
        } catch (Throwable ex) {      // try to cope with OOME
            sizeCtl = Integer.MAX_VALUE;
            return;
        }
        nextTable = nextTab;
        // transferIndex records the old size so migration can walk backwards from
        // the end towards 0. Walking forward would be awkward because n changes
        // on every resize, while counting down to 0 works the same every time,
        // which keeps the code reusable.
        transferIndex = n;
    }
    int nextn = nextTab.length;
    ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
    boolean advance = true;
    boolean finishing = false; // to ensure sweep before committing nextTab
    // this for loop never terminates on its own, so the exit conditions live inside it
    for (int i = 0, bound = 0;;) {
        Node<K,V> f; int fh;
        while (advance) {
            int nextIndex, nextBound;
            if (--i >= bound || finishing)
                // still inside this thread's range: compare --i with the lower bound
                advance = false;
            else if ((nextIndex = transferIndex) <= 0) {
                i = -1;
                advance = false;
            }
            else if (U.compareAndSwapInt
                     (this, TRANSFERINDEX, nextIndex,
                      nextBound = (nextIndex > stride ?
                                   nextIndex - stride : 0))) {
                // claim the next chunk: bound is the smallest index of the chunk
                bound = nextBound;
                // i is the largest index; bins are migrated from i downwards
                i = nextIndex - 1;
                advance = false;
            }
        }
        if (i < 0 || i >= n || i + n >= nextn) {
            int sc;
            if (finishing) {
                nextTable = null;
                table = nextTab;
                sizeCtl = (n << 1) - (n >>> 1);
                return;
            }
            if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                    return;
                finishing = advance = true;
                i = n; // recheck before commit
            }
        }
        else if ((f = tabAt(tab, i)) == null)
            advance = casTabAt(tab, i, null, fwd);
        else if ((fh = f.hash) == MOVED)
            advance = true; // already processed
        else {
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    Node<K,V> ln, hn;
                    if (fh >= 0) {
                        // fh is the hash of the head node of this bin.
                        // The core of the migration: hash & n decides whether a node
                        // stays in the "low" list (index i) or goes to the "high"
                        // list (index i + n).
                        int runBit = fh & n;
                        Node<K,V> lastRun = f;
                        for (Node<K,V> p = f.next; p != null; p = p.next) {
                            int b = p.hash & n;
                            if (b != runBit) {
                                runBit = b;
                                // lastRun: start of the trailing run of nodes that
                                // all go to the same side and can be reused as-is
                                lastRun = p;
                            }
                        }
                        if (runBit == 0) {
                            ln = lastRun;   // low list gets the nodes whose bit is 0
                            hn = null;
                        }
                        else {
                            hn = lastRun;   // high list gets the nodes whose bit is 1
                            ln = null;
                        }
                        for (Node<K,V> p = f; p != lastRun; p = p.next) {
                            int ph = p.hash; K pk = p.key; V pv = p.val;
                            if ((ph & n) == 0)
                                ln = new Node<K,V>(ph, pk, pv, ln);
                            else
                                hn = new Node<K,V>(ph, pk, pv, hn);
                        }
                        setTabAt(nextTab, i, ln);      // low list stays at index i
                        setTabAt(nextTab, i + n, hn);  // high list moves to index i + n
                        setTabAt(tab, i, fwd);         // mark the old bin as moved
                        advance = true;
                    }
                    else if (f instanceof TreeBin) {
                        // tree bin: split it into low and high halves the same way
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> lo = null, loTail = null;
                        TreeNode<K,V> hi = null, hiTail = null;
                        int lc = 0, hc = 0;
                        for (Node<K,V> e = t.first; e != null; e = e.next) {
                            int h = e.hash;
                            TreeNode<K,V> p = new TreeNode<K,V>
                                (h, e.key, e.val, null, null);
                            if ((h & n) == 0) {
                                if ((p.prev = loTail) == null)
                                    lo = p;
                                else
                                    loTail.next = p;
                                loTail = p;
                                ++lc;
                            }
                            else {
                                if ((p.prev = hiTail) == null)
                                    hi = p;
                                else
                                    hiTail.next = p;
                                hiTail = p;
                                ++hc;
                            }
                        }
                        // convert back to a plain list if a half is small enough
                        ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                            (hc != 0) ? new TreeBin<K,V>(lo) : t;
                        hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                            (lc != 0) ? new TreeBin<K,V>(hi) : t;
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                }
            }
        }
    }
}
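The key fact behind the ln/hn split above: because the table doubles from n to 2n and n is a power of two, a node with hash h either stays at its old index i (when (h & n) == 0) or moves to exactly i + n (when that bit is set), so each old bucket only ever splits into those two destinations. A stand-alone check of that claim (my own sketch, not JDK code):

public class SplitDemo {
    public static void main(String[] args) {
        int n = 16;                         // old table length (a power of two)
        int[] hashes = {5, 21, 37, 53};     // all land in bucket 5 of the old table

        for (int h : hashes) {
            int oldIndex = h & (n - 1);     // index in the old table
            int newIndex = h & (2 * n - 1); // index after the table doubles
            // (h & n) == 0  -> stays at oldIndex      (the "low" list, ln)
            // (h & n) != 0  -> moves to oldIndex + n  (the "high" list, hn)
            System.out.printf("h=%d old=%d new=%d (h & n)=%d%n",
                              h, oldIndex, newIndex, h & n);
        }
    }
}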
https://blog.csdn.net/qq_40262372/article/details/112669338
https://blog.csdn.net/qq_40262372/article/details/112556249