int DEFAULT_CAPACITY = 16; // default table length
int TREEIFY_THRESHOLD = 8; // first condition for turning a bin's singly linked list into a doubly linked list plus red-black tree
int MIN_TREEIFY_CAPACITY = 64; // second condition for turning a bin's singly linked list into a doubly linked list plus red-black tree
/**
* Table initialization and resizing control. When negative, the
* table is being initialized or resized: -1 for initialization,
* else -(1 + the number of active resizing threads). Otherwise,
* when table is null, holds the initial table size to use upon
* creation, or 0 for default. After initialization, holds the
* next element count value upon which to resize the table.
*
* I cannot see the "-(1 + the number of active resizing threads)" in the code: during a resize, sizeCtl actually holds (resizeStamp(n) << RESIZE_STAMP_SHIFT) + 1 + the number of active resizing threads, which is negative but not literally that formula.
*/
volatile int sizeCtl;
/**
* The next table to use; non-null only while resizing.
*/
volatile Node[] nextTable;
/**
* The number of bits used for generation stamp in sizeCtl.
* Must be at least 6 for 32bit arrays.
*/
int RESIZE_STAMP_BITS = 16;
/**
* The bit shift for recording size stamp in sizeCtl.
*/
int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS; // RESIZE_STAMP_SHIFT = 16
/** Number of CPUS, to place bounds on some sizings */
int NCPU = Runtime.getRuntime().availableProcessors();
/**
* Minimum number of rebinnings per transfer step. Ranges are
* subdivided to allow multiple resizer threads. This value
* serves as a lower bound to avoid resizers encountering
* excessive memory contention. The value should be at least
* DEFAULT_CAPACITY.
*/
int MIN_TRANSFER_STRIDE = 16;
/**
* The next table index (plus one) to split while resizing.
*/
volatile int transferIndex; // the key to splitting the migration work among multiple threads during a resize
/**
* Table of counter cells. When non-null, size is a power of 2.
*
* Think of these as ballot boxes in an election: with a single box everyone queues in one long line, but with several boxes there are several shorter queues that are counted separately and summed at the end, which is much faster.
*/
volatile CounterCell[] counterCells;
/**
* Base counter value, used mainly when there is no contention,
* but also as a fallback during table initialization
* races. Updated via CAS.
*/
volatile long baseCount;
/**
* Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
*
* Only the thread that CASes cellsBusy from 0 to 1 may initialize or enlarge counterCells, or install new cells into it.
*/
volatile int cellsBusy;
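To make the ballot-box analogy above concrete, here is a minimal, hypothetical striped counter in the same spirit as baseCount/counterCells (the real code uses CounterCell objects, the cellsBusy spinlock and per-thread probe hashing rather than a fixed AtomicLong array):

import java.util.concurrent.atomic.AtomicLong;

final class StripedCounter {
    private final AtomicLong base = new AtomicLong();     // plays the role of baseCount
    private final AtomicLong[] cells = new AtomicLong[4]; // plays the role of counterCells ("ballot boxes")

    StripedCounter() {
        for (int i = 0; i < cells.length; i++) cells[i] = new AtomicLong();
    }

    void add(long x) {
        long b = base.get();
        if (!base.compareAndSet(b, b + x)) { // contended: fall back to a per-thread cell
            int idx = (int) (Thread.currentThread().getId() & (cells.length - 1));
            cells[idx].addAndGet(x);
        }
    }

    long sum() { // like sumCount(): base plus every cell
        long s = base.get();
        for (AtomicLong c : cells) s += c.get();
        return s;
    }
}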
/**
* Returns the stamp bits for resizing a table of size n.
* Must be negative when shifted left by RESIZE_STAMP_SHIFT.
*/
static final int resizeStamp(int n) {
return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
}
/**
* Returns the number of zero bits preceding the highest-order
* ("leftmost") one-bit in the two's complement binary representation
* of the specified {@code int} value. Returns 32 if the
* specified value has no one-bits in its two's complement representation,
* in other words if it is equal to zero.
*
* Note that this method is closely related to the logarithm base 2.
* For all positive {@code int} values x:
*
* - floor(log2(x)) = {@code 31 - numberOfLeadingZeros(x)}
*
* - ceil(log2(x)) = {@code 32 - numberOfLeadingZeros(x - 1)}
*
*
* @param i the value whose number of leading zeros is to be computed
* @return the number of zero bits preceding the highest-order
* ("leftmost") one-bit in the two's complement binary representation
* of the specified {@code int} value, or 32 if the value
* is equal to zero.
* @since 1.5
*/
Integer.numberOfLeadingZeros(16) = 27 (binary 0000 0000 0000 0000 0000 0000 0001 1011); for table lengths n from DEFAULT_CAPACITY = 16 up to MAXIMUM_CAPACITY = 2^30, Integer.numberOfLeadingZeros(n) lies in [1, 27]
n = 16: 0000 0000 0000 0000 0000 0000 0001 0000
RESIZE_STAMP_BITS - 1 = 15
1 << (RESIZE_STAMP_BITS - 1) = 1 << 15: 0000 0000 0000 0000 1000 0000 0000 0000
rs = resizeStamp(16): 0000 0000 0000 0000 1000 0000 0001 1011 // 32795
rs << RESIZE_STAMP_SHIFT: 1000 0000 0001 1011 0000 0000 0000 0000 // the sign bit is now 1, so the result is negative
reading that two's-complement pattern: invert and add 1 => magnitude 0111 1111 1110 0101 0000 0000 0000 0000 = 2145714176, so rs << RESIZE_STAMP_SHIFT = -2145714176
(rs << RESIZE_STAMP_SHIFT) + 2: 1000 0000 0001 1011 0000 0000 0000 0010 // -2145714174
(rs << RESIZE_STAMP_SHIFT) + 2 - 1 = (rs << RESIZE_STAMP_SHIFT) + 1: 1000 0000 0001 1011 0000 0000 0000 0001 // -2145714175
rs = resizeStamp(16) = 0000 0000 0000 0000 1000 0000 0001 1011
sc = (rs << RESIZE_STAMP_SHIFT) + 2 = 1000 0000 0001 1011 0000 0000 0000 0010
sc >>> RESIZE_STAMP_SHIFT: unsigned right shift by 16 => 0000 0000 0000 0000 1000 0000 0001 1011 = rs
"(sc >>> RESIZE_STAMP_SHIFT) == rs" therefore means a resize of a table of length n is currently in progress
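The bit patterns above can be checked with a small standalone program (resizeStamp and the two constants are re-declared here just for the demo):

public class ResizeStampDemo {
    static final int RESIZE_STAMP_BITS = 16;
    static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

    static int resizeStamp(int n) {
        return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
    }

    public static void main(String[] args) {
        int rs = resizeStamp(16);
        int sc = (rs << RESIZE_STAMP_SHIFT) + 2;
        System.out.println(rs);                                // 32795
        System.out.println(sc);                                // -2145714174
        System.out.println(Integer.toBinaryString(sc));        // 10000000000110110000000000000010
        System.out.println((sc >>> RESIZE_STAMP_SHIFT) == rs); // true: a resize of a length-16 table is in progress
    }
}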
/**
* Spreads (XORs) higher bits of hash to lower and also forces top
* bit to 0. …… <= from this sentence alone we know the return value is always non-negative
*/
static final int spread(int h) {
return (h ^ (h >>> 16)) & HASH_BITS; // HASH_BITS = 0x7fffffff = 2147483647 = 0111 1111 1111 1111 1111 1111 1111 1111
}
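A quick, hypothetical demonstration of what spread() buys (HASH_BITS and spread() are copied here so the snippet runs on its own):

public class SpreadDemo {
    static final int HASH_BITS = 0x7fffffff;

    static int spread(int h) {
        return (h ^ (h >>> 16)) & HASH_BITS;
    }

    public static void main(String[] args) {
        int h = 0xABCD0000;                         // a hash whose low 16 bits are all zero
        int s = spread(h);
        System.out.println(Integer.toHexString(s)); // 2bcdabcd: the low bits now depend on the high bits
        System.out.println(s >= 0);                 // true: spread() never returns a negative value
        System.out.println((16 - 1) & h);           // 0: without spreading, every such hash lands in bin 0
        System.out.println((16 - 1) & s);           // 13: after spreading, the high bits influence the bin index
    }
}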
/**
* Key-value entry. This class is never exported out as a
* user-mutable Map.Entry (i.e., one supporting setValue; see
* MapEntry below), but can be used for read-only traversals used
* in bulk tasks. Subclasses of Node with a negative hash field <= key point: subclasses of Node may carry a negative hash, but a plain Node's hash is always non-negative
* are special, and contain null keys and values (but are never
* exported). Otherwise, keys and vals are never null.
*/
static class Node implements Map.Entry {
……
}
final V putVal(K key, V value, boolean onlyIfAbsent) { // onlyIfAbsent = false
if (key == null || value == null) throw new NullPointerException();
int hash = spread(key.hashCode());
int binCount = 0;
for (Node[] tab = table;;) {
Node f; // the head node of the bin selected by the hash
int n, i, fh; // n is the table length, i is the bin index derived from the hash, fh is the hash of the bin's head Node
if (tab == null || (n = tab.length) == 0)
tab = initTable(); // initialize the table
else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
// no hash collision: the bin is empty
if (casTabAt(tab, i, null,
new Node(hash, key, value, null)))
break; // no lock when adding to empty bin
}
else if ((fh = f.hash) == MOVED) // check whether this ConcurrentHashMap is currently being resized
tab = helpTransfer(tab, f); // the current thread tries to help with the resize
else {
// hash collision: the bin already has a head node
V oldVal = null;
synchronized (f) {
if (tabAt(tab, i) == f) { // make sure the head node has not been removed by another thread
// the head node is still there
if (fh >= 0) { // per spread() and the Node class comment, a plain Node's hash is always non-negative, while its special subclasses may have negative hashes
// singly linked list
binCount = 1; // tracks the length of the list
for (Node e = f;; ++binCount) {
K ek; // the key of the current Node
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
// found a Node with an equal key
oldVal = e.val;
if (!onlyIfAbsent)
e.val = value; // replace the value
break;
}
Node pred = e;
if ((e = e.next) == null) {
// append a new Node at the tail of the list
pred.next = new Node(hash, key,
value, null);
break;
}
}
}
else if (f instanceof TreeBin) { // per the Node class comment, subclasses of Node may have negative hashes
// red-black tree
Node p;
binCount = 2; // could be any integer greater than 1 here; see addCount
if ((p = ((TreeBin)f).putTreeVal(hash, key,
value)) != null) {
// found a Node with an equal key
oldVal = p.val;
if (!onlyIfAbsent)
p.val = value;
}
}
}
}
// we have left the synchronized block
if (binCount != 0) {
if (binCount >= TREEIFY_THRESHOLD)
treeifyBin(tab, i); // try to turn the list into a red-black tree; note that no lock is held here
if (oldVal != null)
return oldVal;
break;
}
}
}
addCount(1L, binCount);
return null;
}
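For reference, the two public entry points differ only in the onlyIfAbsent flag they pass to putVal; a short usage example (the demo class name is made up):

import java.util.concurrent.ConcurrentHashMap;

public class PutValDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        System.out.println(map.put("k", 1));         // null: the bin was empty, a new Node was CASed in
        System.out.println(map.put("k", 2));         // 1: existing value replaced (onlyIfAbsent = false)
        System.out.println(map.putIfAbsent("k", 3)); // 2: existing value kept (onlyIfAbsent = true)
        System.out.println(map.get("k"));            // 2
    }
}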
private final Node[] initTable() {
Node[] tab; int sc;
while ((tab = table) == null || tab.length == 0) {
if ((sc = sizeCtl) < 0)
// another thread is initializing the table
Thread.yield(); // lost initialization race; just spin
else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if ((tab = table) == null || tab.length == 0) { // double-check that the table is still uninitialized
int n = (sc > 0) ? sc : DEFAULT_CAPACITY; // initial table length; assume the default of 16
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = tab = nt;
sc = n - (n >>> 2); // sc equals three quarters of the table length, i.e. 12
}
} finally {
sizeCtl = sc; // per the sizeCtl comment, sizeCtl also acts as the resize threshold: as in HashMap, the table is resized once its occupancy reaches 75%; see addCount
}
break;
}
}
return tab;
}
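A one-liner confirms that the threshold stored in sizeCtl is three quarters of the capacity:

public class ThresholdDemo {
    public static void main(String[] args) {
        for (int n = 16; n <= 256; n <<= 1)
            System.out.println(n + " -> " + (n - (n >>> 2))); // 16 -> 12, 32 -> 24, 64 -> 48, 128 -> 96, 256 -> 192
    }
}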
private final void treeifyBin(Node[] tab, int index) {
Node b; // the head node of the bin
int n, sc; // n is the table length; sc is never used here
if (tab != null) {
if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
tryPresize(n << 1); // grow the table instead; the singly linked list is not converted into a doubly linked list or red-black tree
else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
// convert the singly linked list into a doubly linked list plus red-black tree
synchronized (b) {
if (tabAt(tab, index) == b) { // e.g. make sure the head node has not been removed by another thread
// the head node is still there
TreeNode hd = null, tl = null;
for (Node e = b; e != null; e = e.next) {
TreeNode p =
new TreeNode(e.hash, e.key, e.val,
null, null);
if ((p.prev = tl) == null)
hd = p;
else
tl.next = p;
tl = p;
}
setTabAt(tab, index, new TreeBin(hd));
}
}
}
}
}
/**
* Assumption: the first resize, triggered from treeifyBin, with size = 32
*/
private final void tryPresize(int size) {
int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
tableSizeFor(size + (size >>> 1) + 1); // c = tableSizeFor(32 + 16 + 1) = tableSizeFor(49) = 64
int sc;
while ((sc = sizeCtl) >= 0) { // sc = sizeCtl = 12
Node[] tab = table;
int n; // n is the table length => n = 16
if (tab == null || (n = tab.length) == 0) {
// only reached when putAll calls this method; like initTable this just initializes the table, then the while loop continues and triggers the resize
n = (sc > c) ? sc : c;
if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if (table == tab) {
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = nt;
sc = n - (n >>> 2);
}
} finally {
sizeCtl = sc;
}
}
}
else if (c <= sc || n >= MAXIMUM_CAPACITY)
break;
else if (tab == table) {
int rs = resizeStamp(n); // rs is determined by the current table length n; each distinct n yields a distinct rs, so every resize gets its own rs. For n = 16, rs = 0000 0000 0000 0000 1000 0000 0001 1011
if (sc < 0) { // sc = 12 > 0 => this branch is not taken
// In my view no thread can ever enter this branch,
// because sc is a local variable assigned only in the while condition, and the loop body is entered only when sc >= 0, so sc cannot be negative here
Node[] nt;
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
transferIndex <= 0)
break;
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt);
}
else if (U.compareAndSwapInt(this, SIZECTL, sc,
(rs << RESIZE_STAMP_SHIFT) + 2)) // for n = 16, sizeCtl becomes 1000 0000 0001 1011 0000 0000 0000 0010 (-2145714174) => sc < 0 on the next read
// Since this is the first resize, if several threads call this method only one wins this CAS and gets here (sizeCtl is now negative)
// the other threads exit the while loop (they reload the latest sizeCtl into sc) and return to treeifyBin
transfer(tab, null);
}
}
}
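The corrected value of c above can be verified with tableSizeFor, which rounds up to the next power of two (the method body is reproduced here only for the demo):

public class TryPresizeDemo {
    static final int MAXIMUM_CAPACITY = 1 << 30;

    static int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    public static void main(String[] args) {
        int size = 32;                                 // the n << 1 passed in by treeifyBin when n = 16
        int c = tableSizeFor(size + (size >>> 1) + 1); // 32 + 16 + 1 = 49
        System.out.println(c);                         // 64
    }
}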
/**
* Assumption: ConcurrentHashMap used in a single-threaded environment, first resize, table length 16, 1 CPU, and only 1 thread executes this method
*/
private final void transfer(Node[] tab, Node[] nextTab) {
int n = tab.length, stride; // n = 16; what does stride mean? => hard to tell in a single-threaded run; it only becomes clear with multiple threads
if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) // NCPU = 1, stride = 16
stride = MIN_TRANSFER_STRIDE; // subdivide range => stride = 16
if (nextTab == null) { // initiating
// executed only once per resize
try {
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
nextTab = nt;
} catch (Throwable ex) { // try to cope with OOME
sizeCtl = Integer.MAX_VALUE;
return;
}
nextTable = nextTab;
transferIndex = n; // transferIndex = 16
}
int nextn = nextTab.length; // nextn = 32
ForwardingNode fwd = new ForwardingNode(nextTab); // fwd.hash = MOVED => used to mark bins that have already been migrated
boolean advance = true;
boolean finishing = false; // to ensure sweep before committing nextTab
for (int i = 0, bound = 0;;) {
Node f; int fh;
while (advance) {
int nextIndex, nextBound;
if (--i >= bound || finishing) // 1st for iteration: i = -1, bound = 0; 2nd: i = 14, bound = 0; ...; 17th: i = -1, bound = 0; 18th (start of the "recheck before commit" pass): i = 15, bound = 0; ...; 34th: i = -1, bound = 0
advance = false;
else if ((nextIndex = transferIndex) <= 0) { // only the 1st iteration (nextIndex = transferIndex = 16) and the 17th (nextIndex = transferIndex = 0) get this far; in the 34th, finishing is already true, so the previous condition stops the while loop
// only the 17th iteration enters here
i = -1;
advance = false;
}
else if (U.compareAndSwapInt
(this, TRANSFERINDEX, nextIndex,
nextBound = (nextIndex > stride ?
nextIndex - stride : 0))) { // only the 1st iteration gets here: transferIndex = nextBound = 0
bound = nextBound; // 1st iteration: bound = 0
i = nextIndex - 1; // 1st iteration: i = 15
advance = false;
}
}
if (i < 0 || i >= n || i + n >= nextn) { // 1st iteration: i = 15, not taken; 2nd: i = 14, not taken; ...; 16th: i = 0, not taken; 17th: i = -1 < 0, taken; 18th: i = 15, not taken; ...; 33rd: i = 0, not taken; 34th: i = -1 < 0, taken
int sc;
if (finishing) { // finishing does not become true this early; keep reading
// entered in the 34th iteration
// once finishing became true, the "recheck before commit" pass ran; now the resize can be committed
nextTable = null;
table = nextTab;
sizeCtl = (n << 1) - (n >>> 1);
return;
}
// sizeCtl = 1000 0000 0001 1011 0000 0000 0000 0010 // -2145714174
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
// entered in the 17th iteration
// sc = 1000 0000 0001 1011 0000 0000 0000 0010
// sizeCtl = sc - 1 = 1000 0000 0001 1011 0000 0000 0000 0001
// sc - 2 = 1000 0000 0001 1011 0000 0000 0000 0000
// resizeStamp(16) << RESIZE_STAMP_SHIFT = 1000 0000 0001 1011 0000 0000 0000 0000
if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT) // here sc - 2 == resizeStamp(16) << RESIZE_STAMP_SHIFT, so the current thread is the last one to finish migrating (we assumed a single thread from the start)
return;
// the current thread is the last one to finish migrating, so it must recheck the whole table before committing
finishing = advance = true;
i = n; // recheck before commit => i = 16; the 17th iteration ends here
}
}
else if ((f = tabAt(tab, i)) == null)
advance = casTabAt(tab, i, null, fwd);
else if ((fh = f.hash) == MOVED)
advance = true; // already processed => taken from the 18th iteration on, because of the "recheck before commit" pass
else {
synchronized (f) {
if (tabAt(tab, i) == f) { // e.g. make sure the head node has not been removed by another thread
// the head node is still there
Node ln, hn;
if (fh >= 0) {
// non-negative hash => f is a plain Node, not a subclass => singly linked list node => split the bin into a low part and a high part
int runBit = fh & n;
Node lastRun = f;
for (Node p = f.next; p != null; p = p.next) {
int b = p.hash & n;
if (b != runBit) {
runBit = b;
lastRun = p;
}
}
if (runBit == 0) {
// low part (stays at index i)
ln = lastRun;
hn = null;
}
else {
// high part (moves to index i + n)
hn = lastRun;
ln = null;
}
for (Node p = f; p != lastRun; p = p.next) {
int ph = p.hash; K pk = p.key; V pv = p.val;
if ((ph & n) == 0)
ln = new Node(ph, pk, pv, ln);
else
hn = new Node(ph, pk, pv, hn);
}
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
setTabAt(tab, i, fwd);
advance = true;
}
else if (f instanceof TreeBin) {
// negative hash and f is a subclass of Node => TreeBin (doubly linked TreeNode list + red-black tree)
TreeBin t = (TreeBin)f;
TreeNode lo = null, loTail = null; // low part
TreeNode hi = null, hiTail = null; // high part
int lc = 0, hc = 0;
for (Node e = t.first; e != null; e = e.next) {
int h = e.hash;
TreeNode p = new TreeNode
(h, e.key, e.val, null, null);
if ((h & n) == 0) {
if ((p.prev = loTail) == null)
lo = p;
else
loTail.next = p;
loTail = p;
++lc;
}
else {
if ((p.prev = hiTail) == null)
hi = p;
else
hiTail.next = p;
hiTail = p;
++hc;
}
}
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
(hc != 0) ? new TreeBin(lo) : t;
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
(lc != 0) ? new TreeBin(hi) : t;
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
setTabAt(tab, i, fwd);
advance = true;
}
}
}
}
}
}
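The low/high split inside the synchronized block boils down to a single bit test: an entry in bin i stays at i when (hash & n) == 0 and moves to i + n otherwise. A small sketch with made-up hashes:

public class SplitDemo {
    public static void main(String[] args) {
        int n = 16;                     // old table length
        int[] hashes = {3, 19, 35, 51}; // all land in bin 3 while the table length is 16
        for (int h : hashes) {
            int oldIndex = h & (n - 1);
            int newIndex = (h & n) == 0 ? oldIndex : oldIndex + n;
            System.out.println("hash=" + h + " old bin=" + oldIndex + " new bin=" + newIndex);
        }
        // hash=3  -> old bin 3, new bin 3  (low part, goes into ln)
        // hash=19 -> old bin 3, new bin 19 (high part, goes into hn)
        // hash=35 -> old bin 3, new bin 3  (low part)
        // hash=51 -> old bin 3, new bin 19 (high part)
    }
}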
/**
* Assumption: ConcurrentHashMap used by multiple threads, 7th resize, table length 1024, 4 CPUs, and exactly 4 threads execute this method
*
* 1st resize: table length n = 2^4 = 16, n >>> 3 = 2, (n >>> 3) / NCPU = 2/4 = 0 (clamped up to MIN_TRANSFER_STRIDE = 16)
* 2nd resize: n = 2^5 = 32, n >>> 3 = 4, 4/4 = 1 (clamped to 16)
* 3rd resize: n = 2^6 = 64, n >>> 3 = 8, 8/4 = 2 (clamped to 16)
* 4th resize: n = 2^7 = 128, n >>> 3 = 16, 16/4 = 4 (clamped to 16)
* 5th resize: n = 2^8 = 256, n >>> 3 = 32, 32/4 = 8 (clamped to 16)
* 6th resize: n = 2^9 = 512, n >>> 3 = 64, 64/4 = 16 (already MIN_TRANSFER_STRIDE, so stride = 16)
* 7th resize: n = 2^10 = 1024, n >>> 3 = 128, 128/4 = 32, so stride = 32
*/
private final void transfer(Node[] tab, Node[] nextTab) {
int n = tab.length, stride; // n = 1024; stride is the number of bins each thread claims per transfer step
if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) // NCPU = 4, n >>> 3 = 1024 >>> 3 = 128, stride = 32
stride = MIN_TRANSFER_STRIDE; // subdivide range
if (nextTab == null) { // initiating
// executed only once per resize
try {
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
nextTab = nt;
} catch (Throwable ex) { // try to cope with OOME
sizeCtl = Integer.MAX_VALUE;
return;
}
nextTable = nextTab;
transferIndex = n; // transferIndex = 1024
}
int nextn = nextTab.length; // nextn = 2048
ForwardingNode fwd = new ForwardingNode(nextTab); // fwd.hash = MOVED => used to mark bins that have already been migrated
boolean advance = true;
boolean finishing = false; // to ensure sweep before committing nextTab
for (int i = 0, bound = 0;;) {
Node f; int fh;
while (advance) {
int nextIndex, nextBound;
if (--i >= bound || finishing) // "--i" walks the bin index downward; once "--i >= bound" becomes false the thread has finished its current batch of bins
advance = false;
else if ((nextIndex = transferIndex) <= 0) { // each of the four threads reads the latest transferIndex as nextIndex when it gets here
i = -1;
advance = false;
}
else if (U.compareAndSwapInt
(this, TRANSFERINDEX, nextIndex,
nextBound = (nextIndex > stride ?
nextIndex - stride : 0))) {
// thread 1: nextIndex = 1024, stride = 32, transferIndex = nextBound = 992
// thread 2: nextIndex = 992, stride = 32, transferIndex = nextBound = 960
// thread 3: nextIndex = 960, stride = 32, transferIndex = nextBound = 928
// thread 4: nextIndex = 928, stride = 32, transferIndex = nextBound = 896
bound = nextBound; // thread 1: bound = 992; thread 2: bound = 960; thread 3: bound = 928; thread 4: bound = 896
i = nextIndex - 1; // thread 1: i = 1023; thread 2: i = 991; thread 3: i = 959; thread 4: i = 927
// thread 1 is responsible for bins 1023 down to 992
// thread 2 is responsible for bins 991 down to 960
// thread 3 is responsible for bins 959 down to 928
// thread 4 is responsible for bins 927 down to 896
// as soon as a thread finishes its batch, it claims a new batch based on the latest transferIndex
advance = false;
}
}
if (i < 0 || i >= n || i + n >= nextn) {
int sc;
if (finishing) {
nextTable = null;
table = nextTab;
sizeCtl = (n << 1) - (n >>> 1);
return;
}
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
return;
finishing = advance = true;
i = n; // recheck before commit
}
}
else if ((f = tabAt(tab, i)) == null)
advance = casTabAt(tab, i, null, fwd); // an empty bin needs no migration, but the forwarding node still tells other threads that this map is resizing and invites them to help; e.g. a thread running putVal may then call helpTransfer
else if ((fh = f.hash) == MOVED)
advance = true; // already processed => exists purely for the "recheck before commit" pass
else {
synchronized (f) {
if (tabAt(tab, i) == f) { // e.g. make sure the head node has not been removed by another thread
// the head node is still there
Node ln, hn;
if (fh >= 0) {
// non-negative hash => f is a plain Node, not a subclass => singly linked list node => split the bin into a low part and a high part
int runBit = fh & n;
Node lastRun = f;
for (Node p = f.next; p != null; p = p.next) {
int b = p.hash & n;
if (b != runBit) {
runBit = b;
lastRun = p;
}
}
if (runBit == 0) {
// low part (stays at index i)
ln = lastRun;
hn = null;
}
else {
// high part (moves to index i + n)
hn = lastRun;
ln = null;
}
for (Node p = f; p != lastRun; p = p.next) {
int ph = p.hash; K pk = p.key; V pv = p.val;
if ((ph & n) == 0)
ln = new Node(ph, pk, pv, ln);
else
hn = new Node(ph, pk, pv, hn);
}
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
setTabAt(tab, i, fwd); // after migrating this bin, "fwd.hash = MOVED" tells other threads that this map is resizing and invites them to help; e.g. a thread running putVal may then call helpTransfer
advance = true;
}
else if (f instanceof TreeBin) {
// negative hash and f is a subclass of Node => TreeBin (doubly linked TreeNode list + red-black tree)
TreeBin t = (TreeBin)f;
TreeNode lo = null, loTail = null; // low part
TreeNode hi = null, hiTail = null; // high part
int lc = 0, hc = 0;
for (Node e = t.first; e != null; e = e.next) { // splits the doubly linked TreeNode list into a "low" list and a "high" list; the existing red-black tree itself is not split
int h = e.hash;
TreeNode p = new TreeNode
(h, e.key, e.val, null, null);
if ((h & n) == 0) {
if ((p.prev = loTail) == null)
lo = p;
else
loTail.next = p;
loTail = p;
++lc;
}
else {
if ((p.prev = hiTail) == null)
hi = p;
else
hiTail.next = p;
hiTail = p;
++hc;
}
}
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
(hc != 0) ? new TreeBin(lo) : t; // if the low TreeNode list is short enough it is untreeified back into a plain singly linked list; otherwise a new red-black tree is built from it (or the old TreeBin is reused when the high part is empty)
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
(lc != 0) ? new TreeBin(hi) : t; // if the high TreeNode list is short enough it is untreeified back into a plain singly linked list; otherwise a new red-black tree is built from it (or the old TreeBin is reused when the low part is empty)
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
setTabAt(tab, i, fwd); // after migrating this bin, "fwd.hash = MOVED" tells other threads that this map is resizing and invites them to help; e.g. a thread running putVal may then call helpTransfer
advance = true;
}
}
}
}
}
}
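The stride values listed in the comment before this method can be reproduced with a few lines (assuming NCPU = 4):

public class StrideDemo {
    static final int MIN_TRANSFER_STRIDE = 16;

    static int stride(int n, int ncpu) {
        int s = (ncpu > 1) ? (n >>> 3) / ncpu : n;
        return (s < MIN_TRANSFER_STRIDE) ? MIN_TRANSFER_STRIDE : s;
    }

    public static void main(String[] args) {
        for (int n = 16; n <= 1024; n <<= 1)
            System.out.println("n=" + n + " stride=" + stride(n, 4));
        // n=16 .. n=512 -> stride=16 (clamped to MIN_TRANSFER_STRIDE), n=1024 -> stride=32
    }
}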
/**
* Helps transfer if a resize is in progress.
*/
final Node[] helpTransfer(Node[] tab, Node f) {
Node[] nextTab;
int sc;
if (tab != null && (f instanceof ForwardingNode) &&
(nextTab = ((ForwardingNode)f).nextTable) != null) {
int rs = resizeStamp(tab.length);
while (nextTab == nextTable && table == tab &&
(sc = sizeCtl) < 0) {
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || transferIndex <= 0)
break; // the resize has already been completed by other threads (or no work is left to claim); the current thread has nothing to do
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
transfer(tab, nextTab); // the current thread joins the ongoing resize
break;
}
}
return nextTab;
}
return table;
}
/**
* Adds to count, and if table is too small and not already
* resizing, initiates transfer. If already resizing, helps
* perform transfer if work is available. Rechecks occupancy
* after a transfer to see if another resize is already needed
* because resizings are lagging additions.
*
* @param x the count to add
* @param check if <0, don't check resize, if <= 1 only check if uncontended
*/
private final void addCount(long x, int check) {
CounterCell[] as; // think of these as ballot boxes: one box means one long queue, several boxes mean several shorter queues counted separately and summed at the end
long b, s; // s is the current element count of this ConcurrentHashMap
if ((as = counterCells) != null ||
!U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
// if counterCells is non-null, contention was heavy at some point; whether it still is cannot be told, but we enter this block anyway
// if counterCells is null, one CAS on baseCount is attempted: success means contention is light and we skip this block; failure means contention is heavy right now, so we enter
// in short: unless contention is clearly light, we enter this block
CounterCell a;
long v;
int m;
boolean uncontended = true; // records the outcome of the CAS
if (as == null || (m = as.length - 1) < 0 ||
(a = as[ThreadLocalRandom.getProbe() & m]) == null ||
!(uncontended =
U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
// if as is null, the previous if held only because the CAS failed, i.e. contention was heavy at that moment
// if as is non-null, pick a cell at random and try one CAS: success means contention is light, failure means it is heavy
// when as is non-null, "(m = as.length - 1) < 0" and "(a = as[ThreadLocalRandom.getProbe() & m]) == null" mainly assign m and a and act as guard clauses for the CAS
fullAddCount(x, uncontended);
return; // under heavy contention return immediately, without considering a resize
}
// from here on: uncontended = true
if (check <= 1)
return;
s = sumCount();
}
// contention is light
if (check >= 0) {
Node[] tab, nt;
int n, sc;
while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
(n = tab.length) < MAXIMUM_CAPACITY) { // compare the element count s with sizeCtl
// when sizeCtl > 0 it holds the resize threshold; when sizeCtl < 0 this ConcurrentHashMap is currently resizing
int rs = resizeStamp(n);
if (sc < 0) {
// sizeCtl is negative, so this ConcurrentHashMap is currently resizing
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
transferIndex <= 0)
break; // the resize has already been completed by other threads (or no work is left to claim); the current thread has nothing to do
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt); // the current thread helps with the resize
}
// sizeCtl is positive, so the element count s has reached the resize threshold and a resize is needed
else if (U.compareAndSwapInt(this, SIZECTL, sc,
(rs << RESIZE_STAMP_SHIFT) + 2))
transfer(tab, null); // the current thread initiates the resize
s = sumCount();
}
}
}
/**
* Assumption: this method has never been called before (counterCells == null) and several threads now execute it concurrently
*
* Summary:
* 1. After a thread fails CAS twice in a row on the chosen (non-null) cell, it tries to enlarge counterCells
* 2. Since the number of CPUs bounds how many threads can run at once, counterCells never needs to be very long
* 3. Once the length of counterCells reaches the number of CPUs, threads stop enlarging it
*/
private final void fullAddCount(long x, boolean wasUncontended) {
int h; // treat this as a thread-safe random number used to pick a cell index
if ((h = ThreadLocalRandom.getProbe()) == 0) {
ThreadLocalRandom.localInit(); // force initialization
h = ThreadLocalRandom.getProbe();
wasUncontended = true;
}
boolean collide = false; // True if last slot nonempty
for (;;) {
CounterCell[] as;
CounterCell a; // the randomly chosen cell from counterCells
int n; // length of counterCells
long v; // the value of the chosen cell, read before the CAS
if ((as = counterCells) != null && (n = as.length) > 0) {
// counterCells has already been initialized
if ((a = as[(n - 1) & h]) == null) {
// the randomly chosen slot is empty
if (cellsBusy == 0) { // Try to attach new Cell
CounterCell r = new CounterCell(x); // Optimistic create => would it be better to move this line into the "Recheck under lock" block?
if (cellsBusy == 0 &&
U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
boolean created = false;
try { // Recheck under lock
CounterCell[] rs;
int m, j;
if ((rs = counterCells) != null &&
(m = rs.length) > 0 &&
rs[j = (m - 1) & h] == null) {
// install the new cell into the chosen slot
rs[j] = r;
created = true; // the cell was successfully installed in the chosen slot
}
} finally {
cellsBusy = 0;
}
if (created)
break; // the cell was installed successfully, so we can leave the loop
continue; // Slot is now non-empty => another thread picked the same slot and won the race; the current thread starts the loop over and will now find the slot occupied
}
}
collide = false; // the chosen slot is empty but the thread could not acquire cellsBusy
}
// from here on: the randomly chosen slot already holds a cell
else if (!wasUncontended) // CAS already known to fail
// wasUncontended is false, so the thread refreshes its probe value and picks a different slot; presumably the intent is to steer threads toward less contended slots
wasUncontended = true; // Continue after rehash
else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x)) // try to CAS the chosen cell
break;
// from here on: the CAS on the chosen (non-null) cell failed
else if (counterCells != as || n >= NCPU)
// "counterCells != as" means another thread has already enlarged counterCells, so start over
// "n >= NCPU" means enlarging further is pointless, since the number of concurrently running threads is bounded by the CPU count; start over
collide = false; // At max size or stale
// from here on: no other thread has enlarged counterCells yet, and it could still be enlarged
else if (!collide)
collide = true; // the first CAS failure on the chosen (non-null) cell
// from here on: the thread has failed CAS on the chosen (non-null) cell twice in a row
else if (cellsBusy == 0 &&
U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
// the current thread enlarges counterCells
try {
if (counterCells == as) { // Expand table unless stale
// no other thread has enlarged counterCells in the meantime
CounterCell[] rs = new CounterCell[n << 1];
for (int i = 0; i < n; ++i)
rs[i] = as[i];
counterCells = rs;
}
} finally {
cellsBusy = 0;
}
collide = false;
continue; // Retry with expanded table
}
h = ThreadLocalRandom.advanceProbe(h); // refresh the probe value so a different slot is picked next time
}
// counterCells is null (not yet initialized)
else if (cellsBusy == 0 && counterCells == as &&
U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
// only one thread gets in here to initialize counterCells
boolean init = false;
try { // Initialize table
if (counterCells == as) {
CounterCell[] rs = new CounterCell[2]; // the initial length of the CounterCell array is 2
rs[h & 1] = new CounterCell(x); // only one slot is populated, holding the value x
counterCells = rs;
init = true;
}
} finally {
cellsBusy = 0;
}
if (init)
break; // leave the for loop
}
else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x)) // try to CAS baseCount instead
break; // Fall back on using base
}
}
/**
* Implementation for the four public remove/replace methods:
* Replaces node value with v, conditional upon match of cv if
* non-null. If resulting value is null, delete.
*/
final V replaceNode(Object key, V value, Object cv) { // value = null, cv = null
int hash = spread(key.hashCode());
for (Node[] tab = table;;) {
Node f;
int n, i, fh;
if (tab == null || (n = tab.length) == 0 ||
(f = tabAt(tab, i = (n - 1) & hash)) == null)
break; // the table is not initialized, or the bin is empty, so there is no entry with this key
else if ((fh = f.hash) == MOVED) // check whether this ConcurrentHashMap is currently being resized
tab = helpTransfer(tab, f); // the current thread tries to help with the resize
else {
V oldVal = null;
boolean validated = false; // indicates whether to leave the for loop
synchronized (f) {
if (tabAt(tab, i) == f) { // e.g. make sure the head node has not been removed by another thread
// the head node is still there
if (fh >= 0) { // per spread() and the Node class comment, a plain Node's hash is always non-negative, while its special subclasses may have negative hashes
// singly linked list
validated = true;
for (Node e = f, pred = null;;) {
K ek;
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
// found a Node with an equal key
V ev = e.val;
if (cv == null || cv == ev ||
(ev != null && cv.equals(ev))) {
oldVal = ev;
if (value != null)
e.val = value;
else if (pred != null)
pred.next = e.next;
else
setTabAt(tab, i, e.next);
}
break;
}
// the key does not match; keep looking down the list
pred = e;
if ((e = e.next) == null)
break; // no entry with an equal key was found
}
}
else if (f instanceof TreeBin) { // per the Node class comment, subclasses of Node may have negative hashes
// red-black tree
validated = true;
TreeBin t = (TreeBin)f;
TreeNode r, p;
if ((r = t.root) != null &&
(p = r.findTreeNode(hash, key, null)) != null) {
// found a TreeNode with an equal key
V pv = p.val;
if (cv == null || cv == pv ||
(pv != null && cv.equals(pv))) {
oldVal = pv;
if (value != null)
p.val = value;
else if (t.removeTreeNode(p))
setTabAt(tab, i, untreeify(t.first));
}
}
}
}
}
if (validated) {
if (oldVal != null) {
if (value == null)
addCount(-1L, -1);
return oldVal;
}
break;
}
}
}
return null;
}
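The assumed call at the top of this method (value = null, cv = null) corresponds to remove(key); the four public remove/replace methods map onto replaceNode(key, value, cv) as remove(k) -> (k, null, null), remove(k, v) -> (k, null, v), replace(k, v) -> (k, v, null) and replace(k, oldV, newV) -> (k, newV, oldV). A short usage example:

import java.util.concurrent.ConcurrentHashMap;

public class ReplaceNodeDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("k", 1);
        System.out.println(map.replace("k", 1, 2)); // true: cv (1) matched, value replaced with 2
        System.out.println(map.remove("k", 1));     // false: cv (1) no longer matches the current value (2)
        System.out.println(map.remove("k"));        // 2: unconditional removal; addCount(-1L, -1) is then called
        System.out.println(map.containsKey("k"));   // false
    }
}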