public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
implements ConcurrentMap<K,V>, Serializable {
由上述代码可见, ConcurrentHashMap扩展了AbstractMap类, 实现了ConcurrentMap接口和Serializable接口.
//ConcurrentHashMap类内部采用Node类存储键值对
static class Node implements Map.Entry {
final int hash;
final K key;
volatile V val; //采用volatile关键字修饰
volatile Node next; //采用volatile关键字修饰
Node(int hash, K key, V val, Node next) {
this.hash = hash;
this.key = key;
this.val = val;
this.next = next;
}
public final K getKey() { return key; }
public final V getValue() { return val; }
public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
public final String toString(){ return key + "=" + val; }
public final V setValue(V value) { //不支持修改value, 否则将会抛出异常
throw new UnsupportedOperationException();
}
public final boolean equals(Object o) {
Object k, v, u; Map.Entry,?> e;
return ((o instanceof Map.Entry) &&
(k = (e = (Map.Entry,?>)o).getKey()) != null &&
(v = e.getValue()) != null &&
(k == key || k.equals(key)) &&
(v == (u = val) || v.equals(u)));
}
//查找当前节点之后的链表,若是存在则返回相应的Node; 否则返回Null.
Node find(int h, Object k) {
Node e = this;
if (k != null) {
do {
K ek;
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
} while ((e = e.next) != null);
}
return null;
}
}
// sizeCtl controls table initialization and resizing:
//  -1                     : the table is being initialized;
//  < -1                   : the table is being resized; the high 16 bits hold
//                           the resize stamp (resizeStamp(n) << RESIZE_STAMP_SHIFT)
//                           and the low 16 bits hold (1 + number of resizing threads);
//  >= 0 and table == null : the initial capacity to use when created;
//  > 0 otherwise          : the element count at which to resize next (0.75 * capacity).
private transient volatile int sizeCtl;
// Largest possible table capacity, 2^30.
private static final int MAXIMUM_CAPACITY = 1 << 30;
// Default table capacity, 2^4 = 16.
private static final int DEFAULT_CAPACITY = 16;
// Hash value marking a ForwardingNode: the bin has been moved during a resize.
static final int MOVED = -1;
// Hash value marking the TreeBin root of a red-black tree bin.
static final int TREEBIN = -2;
// ANDed with a key's spread hash to clear the sign bit, so every normal
// node's hash is >= 0 (negative hashes are reserved for special nodes).
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
// Number of bits used to generate the resize stamp for a given table size.
private static int RESIZE_STAMP_BITS = 16;
// Shift that moves the resize stamp into the high half of sizeCtl, making it
// negative during a resize; the low half then counts 1 + resizing threads.
private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
// Maximum number of threads that can participate in a resize: only the low
// 16 bits of sizeCtl count threads, so at most 2^16 - 1.
private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
// Minimum number of consecutive bins each resizing thread transfers,
// keeping threads on disjoint ranges to limit contention.
private static final int MIN_TRANSFER_STRIDE = 16;
// During a resize, the exclusive upper bound of the next stride of the old
// table still to be claimed by a transferring thread.
private transient volatile int transferIndex;
// The new table being transferred into; non-null only while a resize is in
// progress, null at all other times.
private transient volatile Node<K,V>[] nextTable;
// The bin array; lazily initialized on first insertion. Size is a power of two.
transient volatile Node<K,V>[] table;
// Creates a new, empty map with the default initial capacity (16).
public ConcurrentHashMap() {
}
// Creates a new, empty map sized to hold the given number of elements.
public ConcurrentHashMap(int initialCapacity) {
if (initialCapacity < 0)
throw new IllegalArgumentException();
int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
MAXIMUM_CAPACITY :
tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); //round up to a power of two, with 1.5x slack for the 0.75 load factor
this.sizeCtl = cap;
}
在扩容过程中, 正在扩容的线程会将正在转移的table节点标记为ForwardingNode, 其他线程若是查找到某个节点为ForwardingNode类型节点, 则查找下一个table节点辅助进行扩容操作, ForwardingNode源代码如下:
// A node inserted into a bin of the old table while that bin is being
// transferred during a resize. Its MOVED hash tells readers and writers that
// the bin has moved; nextTable forwards lookups to the new table.
static final class ForwardingNode<K,V> extends Node<K,V> {
    final Node<K,V>[] nextTable; // the new table this bin was transferred to
    ForwardingNode(Node<K,V>[] tab) {
        super(MOVED, null, null, null);
        this.nextTable = tab;
    }
    // Looks the key up in the forwarded (new) table. Uses a labeled loop
    // instead of recursion so a chain of forwarding nodes cannot grow the stack.
    Node<K,V> find(int h, Object k) {
        outer: for (Node<K,V>[] tab = nextTable;;) {
            Node<K,V> e; int n;
            if (k == null || tab == null || (n = tab.length) == 0 ||
                (e = tabAt(tab, (n - 1) & h)) == null) // bin for hash h is empty
                return null;
            for (;;) {
                int eh; K ek;
                if ((eh = e.hash) == h &&
                    ((ek = e.key) == k || (ek != null && k.equals(ek)))) // matching key found
                    return e;
                if (eh < 0) { // special node: another ForwardingNode or a TreeBin
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K,V>)e).nextTable; // follow to the next table
                        continue outer;
                    }
                    else
                        return e.find(h, k); // delegate, e.g. TreeBin tree search
                }
                if ((e = e.next) == null)
                    return null;
            }
        }
    }
}
//扩容详细过程
private final void transfer(Node[] tab, Node[] nextTab) {
int n = tab.length, stride;
if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
stride = MIN_TRANSFER_STRIDE; // 每个线程所负责转移的数组的区间最少为MIN_TRANSFER_STRIDE=16,也就是说数组的连续16个位置都是由这个线程来进行转移,其他线程不允许接触这连续的16个位置,必须发生线程之间大量的内存冲突。换另一个角度来说,每个线程负责连续16个大小区间的数组转移。
if (nextTab == null) { // 初始化生成新的扩容数组
try {
@SuppressWarnings("unchecked")
Node[] nt = (Node[])new Node,?>[n << 1]; //新创建两倍原数组大小的新数组
nextTab = nt;
} catch (Throwable ex) { // try to cope with OOME
sizeCtl = Integer.MAX_VALUE;
return;
}
nextTable = nextTab; //nextTable为类成员变量,只有在扩容的过程中有作用,在其他时刻都是null值。nextTable指向新数组
transferIndex = n; //转移后的节点偏移量
}
int nextn = nextTab.length;
ForwardingNode fwd = new ForwardingNode(nextTab);
boolean advance = true; //遍历
boolean finishing = false; //保证在提交扩容后的新数组时,原数组中的所有元素都已经被遍历
for (int i = 0, bound = 0;;) {
Node f; int fh;
while (advance) {
int nextIndex, nextBound;
if (--i >= bound || finishing) //bound为数组区间下限值,i为当前转移数组的位置,--i处理转移下一个节点位置,从后往前处理
advance = false; //退出while循环
else if ((nextIndex = transferIndex) <= 0) { //表示原数组已经分割完了
i = -1;
advance = false; //退出while循环
}
else if (U.compareAndSwapInt
(this, TRANSFERINDEX, nextIndex,
nextBound = (nextIndex > stride ?
nextIndex - stride : 0))) { //CAS操作修改transferIndex值,代表下一个线程转移原数组的节点的位置
bound = nextBound; //设置当前线程转移原数组区间的下限值
i = nextIndex - 1; //从后往前处理
advance = false; //退出while循环
}
}
if (i < 0 || i >= n || i + n >= nextn) {
int sc;
if (finishing) { //扩容完成
nextTable = null; //将nextTable置为null,表示当前扩容过程完成
table = nextTab; //table指向扩容后的新数组
sizeCtl = (n << 1) - (n >>> 1); //将szieCtl设置为正数,设置为原数组的3/2,即新数组的3/4
return;
}
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT) //因为只有一个线程扩容时sc=resizeStamp(n)+2,所以该if语句是在最后一个线程完成扩容操作时,将finishing置为true,表示正确完成。
return;
finishing = advance = true;
i = n; // recheck before commit
}
}
else if ((f = tabAt(tab, i)) == null)
advance = casTabAt(tab, i, null, fwd); //将原数组相应位置直接设置为fwd,表示该位置已经遍历过
else if ((fh = f.hash) == MOVED)
advance = true; // 表示该数组位置已经被其他线程处理过了
else { //否则需要将原数组位置相应元素复制到新数组上
synchronized (f) { //上锁
if (tabAt(tab, i) == f) { //再次核对,防止其他线程对该hash值进行修改
Node ln, hn;
if (fh >= 0) { //说明该位置存放的是普通节点
int runBit = fh & n; //判断原数组中的节点的hash的 log(n)位为0或者1
Node lastRun = f;
for (Node p = f.next; p != null; p = p.next) {
int b = p.hash & n;
if (b != runBit) {
runBit = b;
lastRun = p;
}
}
if (runBit == 0) {
ln = lastRun; //指向链表的最后出现连续log(n)位为0的第一个节点
hn = null;
}
else {
hn = lastRun; //指向链表的最后出现连续log(n)位为1的第一个节点
ln = null;
}
for (Node p = f; p != lastRun; p = p.next) {
int ph = p.hash; K pk = p.key; V pv = p.val;
if ((ph & n) == 0)
ln = new Node(ph, pk, pv, ln);
else
hn = new Node(ph, pk, pv, hn);
}
setTabAt(nextTab, i, ln); //将hash值的 log(n) 位为0的节点链表复制到新数组对应原来数组的位置
setTabAt(nextTab, i + n, hn); //将Hash值的 log(n) 位为1的节点链表复制到新数组对应原来数组位置+n
setTabAt(tab, i, fwd); //将该数组位置设置为已处理
advance = true;
}
else if (f instanceof TreeBin) { //说明该数组位置是红黑树根节点
TreeBin t = (TreeBin)f;
TreeNode lo = null, loTail = null;
TreeNode hi = null, hiTail = null;
int lc = 0, hc = 0;
for (Node e = t.first; e != null; e = e.next) {
int h = e.hash;
TreeNode p = new TreeNode
(h, e.key, e.val, null, null);
if ((h & n) == 0) { //判断红黑树中节点的hash值的 log(n) 位为0,说明该节点应该存放到新数组中对应原数组的位置
if ((p.prev = loTail) == null)
lo = p;
else
loTail.next = p;
loTail = p;
++lc;
}
else { //判断红黑树中节点的hash值的 log(n) 位为1,说明该节点应该存放到新数组中对应原数组位置+n
if ((p.prev = hiTail) == null)
hi = p;
else
hiTail.next = p;
hiTail = p;
++hc;
}
}
//根据链表中节点的个数和UNTREEIFY_THRESHOLD进行比较,如果小于等于,则不需要将链表转换为红黑树;如果大于,则需要将链表转换为红黑树
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
(hc != 0) ? new TreeBin(lo) : t;
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
(lc != 0) ? new TreeBin(hi) : t;
setTabAt(nextTab, i, ln); //复制到新数组中
setTabAt(nextTab, i + n, hn); //复制到新数组中
setTabAt(tab, i, fwd); //将原数组中相应位置为fwd,表示该位置已经被处理过
advance = true; //继续进行遍历
}
}
}
}
}
}
// If a resize is in progress (f is the ForwardingNode found in tab), joins it
// as a helper thread; returns the table on which the caller should retry.
final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
    Node<K,V>[] nextTab; int sc;
    if (tab != null && (f instanceof ForwardingNode) &&
        (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
        int rs = resizeStamp(tab.length); // stamp identifying a resize of this table size
        while (nextTab == nextTable && table == tab &&
               (sc = sizeCtl) < 0) { // resize still in progress
            // Stop helping if the stamp no longer matches this resize, the
            // resize is completing, helpers are saturated, or no strides remain.
            // NOTE(review): `sc == rs + 1` / `sc == rs + MAX_RESIZERS` omit the
            // RESIZE_STAMP_SHIFT and can never be true; this is the known JDK 8
            // bug fixed in JDK 12 (JDK-8214427). Kept as in the original source.
            if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                sc == rs + MAX_RESIZERS || transferIndex <= 0)
                break;
            if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) { // sizeCtl + 1: one more helper thread
                transfer(tab, nextTab);
                break;
            }
        }
        return nextTab;
    }
    return table;
}
// Maps key to value, replacing any existing mapping for key; returns the
// previous value or null. Delegates to putVal with onlyIfAbsent == false.
public V put(K key, V value) {
return putVal(key, value, false);
}
由上述代码可见, put方法调用了putVal方法, putVal方法如下:
// Implementation for put and putIfAbsent. Keys and values must be non-null.
// Returns the previous value mapped to key, or null if there was none.
final V putVal(K key, V value, boolean onlyIfAbsent) {
    if (key == null || value == null) throw new NullPointerException();
    // spread XORs the hash's high 16 bits into its low 16 bits so that bucket
    // selection, which uses only low bits, is influenced by the whole hash.
    int hash = spread(key.hashCode());
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) { // retry until the insert succeeds
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable(); // lazily create the table on first insert
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) { // empty bin
            if (casTabAt(tab, i, null,
                         new Node<K,V>(hash, key, value, null))) // lock-free CAS insert at slot i
                break;
        }
        else if ((fh = f.hash) == MOVED) // f.hash == -1: bin moved, resize in progress
            tab = helpTransfer(tab, f);  // help finish the resize, then retry
        else { // non-empty bin: insert under the bin lock
            V oldVal = null;
            synchronized (f) { // lock the bin's first node
                if (tabAt(tab, i) == f) { // recheck: bin may have changed before locking
                    if (fh >= 0) { // ordinary linked-list bin
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) { // key already present
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                pred.next = new Node<K,V>(hash, key,
                                                          value, null); // append at the tail
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) { // red-black tree bin
                        Node<K,V> p;
                        binCount = 2;
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD) // chain grew long enough to treeify
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal; // replaced an existing mapping; size unchanged
                break;
            }
        }
    }
    addCount(1L, binCount); // bump the element count; may trigger a resize
    return null;
}
addCount函数的源代码如下:
// Adds x to the element count and, if check >= 0, initiates or helps a
// resize once the count reaches the threshold stored in sizeCtl.
private final void addCount(long x, int check) {
    CounterCell[] as; long b, s;
    // Fast path: CAS baseCount directly. Once counter cells exist, or on CAS
    // contention, fall through to the striped CounterCell scheme.
    if ((as = counterCells) != null ||
        !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
        CounterCell a; long v; int m;
        boolean uncontended = true;
        if (as == null || (m = as.length - 1) < 0 ||
            (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
            !(uncontended =
              U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
            fullAddCount(x, uncontended); // slow path: create or expand cells
            return;
        }
        if (check <= 1)
            return;
        s = sumCount();
    }
    if (check >= 0) { // check whether a resize is needed
        Node<K,V>[] tab, nt; int n, sc;
        while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
               (n = tab.length) < MAXIMUM_CAPACITY) {
            int rs = resizeStamp(n); // stamp identifying a resize of this table size
            if (sc < 0) { // a resize is already running: try to help
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                    transferIndex <= 0) // resize finishing, helpers saturated, or nothing left
                    break;
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) // sizeCtl + 1: one more helper thread
                    transfer(tab, nt);
            }
            else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                         (rs << RESIZE_STAMP_SHIFT) + 2)) // start resize: set sizeCtl to (rs << RESIZE_STAMP_SHIFT) + 2, i.e. negative with one active resizer
                transfer(tab, null);
            s = sumCount();
        }
    }
}
treeifyBin方法的源代码如下:
// Replaces the linked list in the bin at index with a red-black tree, unless
// the table is still small, in which case the table is resized instead.
private final void treeifyBin(Node<K,V>[] tab, int index) {
    Node<K,V> b; int n, sc;
    if (tab != null) {
        if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
            tryPresize(n << 1); // table shorter than MIN_TREEIFY_CAPACITY (64): doubling it shortens chains without treeifying
        else if ((b = tabAt(tab, index)) != null && b.hash >= 0) { // plain list bin: convert it
            synchronized (b) { // lock the bin's first node
                if (tabAt(tab, index) == b) { // recheck under the lock
                    // Rebuild the chain as a doubly linked list of TreeNodes.
                    TreeNode<K,V> hd = null, tl = null;
                    for (Node<K,V> e = b; e != null; e = e.next) {
                        TreeNode<K,V> p =
                            new TreeNode<K,V>(e.hash, e.key, e.val,
                                              null, null);
                        if ((p.prev = tl) == null)
                            hd = p;
                        else
                            tl.next = p;
                        tl = p;
                    }
                    // TreeBin builds the red-black tree from the TreeNode list
                    // and becomes the bin's new first node.
                    setTabAt(tab, index, new TreeBin<K,V>(hd));
                }
            }
        }
    }
}
tryPresize方法源代码如下:
//将原数组进行两倍扩容
private final void tryPresize(int size) {
int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
tableSizeFor(size + (size >>> 1) + 1);
int sc;
while ((sc = sizeCtl) >= 0) { //说明数组不是处于扩容状态
Node[] tab = table; int n;
if (tab == null || (n = tab.length) == 0) { //如果数组为null
n = (sc > c) ? sc : c;
if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { //将sc设置为-1,表示当前数组正在进行扩容操作
try {
if (table == tab) {
@SuppressWarnings("unchecked")
Node[] nt = (Node[])new Node,?>[n]; //生成新的数组
table = nt; //table指向新数组
sc = n - (n >>> 2); //sc保存新数组的上限值
}
} finally {
sizeCtl = sc;
}
}
}
else if (c <= sc || n >= MAXIMUM_CAPACITY)
break;
else if (tab == table) {
int rs = resizeStamp(n);
if (sc < 0) {
Node[] nt;
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
transferIndex <= 0)
break;
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) //辅助扩容操作,将sizeCtl加1,表示新增加一个线程辅助扩容
transfer(tab, nt);
}
else if (U.compareAndSwapInt(this, SIZECTL, sc,
(rs << RESIZE_STAMP_SHIFT) + 2)) //开始进行扩容,通过CAS操作将sizeCtl置为负值,代表只要一个线程在进行扩容操作。
transfer(tab, null);
}
}
}
table的初始化函数initTable过程如下:
private final Node[] initTable() {
Node[] tab; int sc;
while ((tab = table) == null || tab.length == 0) {
if ((sc = sizeCtl) < 0) //如果sizeCtl<0, 根据规定, 这代表有其他线程正在初始化或者扩容
Thread.yield(); // 暂停初始化步骤, 让出处理器, 进行旋转
else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { // 否则进行CAS操作, 将sizeCtl置为-1, 代表当前线程正在进行初始化操作
try {
if ((tab = table) == null || tab.length == 0) {
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
@SuppressWarnings("unchecked")
Node[] nt = (Node[])new Node,?>[n];
table = tab = nt;
sc = n - (n >>> 2); //减去1/4, 剩下3/4
}
} finally {
sizeCtl = sc; // 作为下一次扩容的临界值
}
break;
}
}
return tab;
}
// Returns the value mapped to key, or null if absent. Entirely lock-free:
// relies on volatile reads of the table and of Node.val/next.
public V get(Object key) {
    Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
    int h = spread(key.hashCode()); // same hash spreading as putVal
    if ((tab = table) != null && (n = tab.length) > 0 &&
        (e = tabAt(tab, (n - 1) & h)) != null) {
        if ((eh = e.hash) == h) { // the bin's first node may already match
            if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                return e.val;
        }
        else if (eh < 0) // special node: a TreeBin root or a ForwardingNode —
                         // delegate to its own find (tree search or new-table lookup)
            return (p = e.find(h, key)) != null ? p.val : null;
        while ((e = e.next) != null) { // walk the linked list
            if (e.hash == h &&
                ((ek = e.key) == key || (ek != null && key.equals(ek))))
                return e.val;
        }
    }
    return null;
}
// Returns the number of mappings, clamped to the int range. The result is an
// estimate: concurrent inserts and removals may be in flight while counting.
public int size() {
long n = sumCount(); //sum of baseCount plus all CounterCell values
return ((n < 0L) ? 0 :
(n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
(int)n);
}
// Sums the striped element count: baseCount plus the value of every
// non-null counter cell.
final long sumCount() {
    long total = baseCount;
    CounterCell[] cells = counterCells; // snapshot the cell array once
    if (cells != null) {
        for (CounterCell cell : cells) {
            if (cell != null)
                total += cell.value;
        }
    }
    return total;
}
实际上在ConcurrentHashMap内部使用了如下变量来保存map中键值对个数
// Base element count, updated via CAS; under contention the count spills
// into the striped counterCells, so the true size is baseCount + cells.
private transient volatile long baseCount;
因为在调用size()获取当前ConcurrentHashMap对象中的键值对个数时,返回的值是估算值,不是精确值,因为在查询个数的同时可能存在多个线程在进行插入、删除操作,不能将所有线程停下进行统计。