//Uses Unsafe to operate on memory directly; concurrency safety relies on the hardware-level volatile/CAS primitives.
/*
 * Atomic (volatile-read) access to the node at index i of the table array.
 * The slot address is computed as base offset + (i << element-shift).
 */
static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
}
/*
 * CAS operation: atomically replace the node at index i with v,
 * but only if the slot currently holds the expected value c.
 */
static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
Node<K,V> c, Node<K,V> v) {
return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
}
/*
 * Atomic (volatile-write) store of node v into the slot at index i.
 */
static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
}
public V put(K key, V value) {
/*
 * onlyIfAbsent:
 * false: the value is always stored (existing mapping is overwritten)
 * true:  the value is stored only when the key has no mapping yet
 */
return putVal(key, value, false);
}
final V putVal(K key, V value, boolean onlyIfAbsent) {
//不允许key/value为null,否则及时失败
if (key == null || value == null) throw new NullPointerException();
//获取key的hashCode
int hash = spread(key.hashCode());
//用来计算在这个节点总共有多少个元素,用来控制扩容或者转移为树
int binCount = 0;
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
//初始化tab
if (tab == null || (n = tab.length) == 0)
tab = initTable();
else if (
//通过哈希计算出一个表中的位置因为n是数组的长度,所以(n-1)&hash肯定不会出现数组越界
(f = tabAt(tab, i = (n - 1) & hash)) == null) {
//如果这个位置没有元素的话,则通过cas的方式尝试添加,注意这个时候是没有加锁的
if (casTabAt(tab, i, null,
new Node<K,V>(hash, key, value, null)))
break; // no lock when adding to empty bin
}
/*
* 如果检测到某个节点的hash值是MOVED,则表示正在进行数组扩张的数据复制阶段,
* 则当前线程也会参与去复制,通过允许多线程复制的功能,一次来减少数组的复制所带来的性能损失
*/
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
V oldVal = null;
synchronized (f) {
//再次取出要存储的位置的元素,跟前面取出来的比较
if (tabAt(tab, i) == f) {
//取出来的元素的hash值大于0,当转换为树之后,hash值为-2
if (fh >= 0) {
binCount = 1;
for (Node<K,V> e = f;; ++binCount) {
K ek;
//查找到值,则覆盖
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
oldVal = e.val;
if (!onlyIfAbsent)
e.val = value;
break;
}
Node<K,V> pred = e;
//找到最后,没找到值的,则新建对象.
if ((e = e.next) == null) {
//添加链表尾端
pred.next = new Node<K,V>(hash, key,
value, null);
break;
}
}
}
//对红黑树的处理
else if (f instanceof TreeBin) {
Node<K,V> p;
binCount = 2;
if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
value)) != null) {
oldVal = p.val;
if (!onlyIfAbsent)
p.val = value;
}
}
}
}
if (binCount != 0) {
//链表节点其中个数达到8,则扩张数组或转成树
if (binCount >= TREEIFY_THRESHOLD)
treeifyBin(tab, i);
if (oldVal != null)
return oldVal;
break;
}
}
}
//计数
addCount(1L, binCount);
return null;
}
private final Node<K,V>[] initTable() {
Node<K,V>[] tab; int sc;
while ((tab = table) == null || tab.length == 0) {
//sizeCtl starts at 0; a negative value means another thread is initializing or resizing the table,
//so yield the current thread and let others run.
if ((sc = sizeCtl) < 0)
Thread.yield(); // lost initialization race; just spin
else if (
/*
SIZECTL: memory offset of the sizeCtl field,
sc: expected value,
-1: value to install (marks "initializing")
*/
U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if ((tab = table) == null || tab.length == 0) {
//Use the requested capacity if one was recorded in sizeCtl, else the default (16)
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = tab = nt;
sc = n - (n >>> 2);
}
} finally {
//After initialization, sizeCtl becomes 3/4 of the array length (the resize threshold)
sizeCtl = sc;
}
break;
}
}
return tab;
}
/**
 * Replaces all linked nodes in bin at given index unless table is
 * too small, in which case resizes instead.
 * If the array length is < 64 (MIN_TREEIFY_CAPACITY), double the capacity;
 * otherwise convert the bin's list into a red-black tree.
 */
private final void treeifyBin(Node<K,V>[] tab, int index) {
Node<K,V> b; int n, sc;
if (tab != null) {
if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
//Table still small: resize instead of treeifying
tryPresize(n << 1);
else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
synchronized (b) {
if (tabAt(tab, index) == b) {
TreeNode<K,V> hd = null, tl = null;
for (Node<K,V> e = b; e != null; e = e.next) {
TreeNode<K,V> p =
new TreeNode<K,V>(e.hash, e.key, e.val,
null, null);
//Convert the Node list into a TreeNode list; the head stays in the same position
if ((p.prev = tl) == null)
hd = p;
else
tl.next = p;
tl = p;
}
//Wrap the TreeNode list in a TreeBin, which internally builds the red-black tree
setTabAt(tab, index, new TreeBin<K,V>(hd));
}
}
}
}
}
private final void tryPresize(int size) {
//If the requested size is >= half the maximum, clamp to MAXIMUM_CAPACITY;
int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
//otherwise round 1.5*size+1 up to the nearest power of two
tableSizeFor(size + (size >>> 1) + 1);
int sc;
while ((sc = sizeCtl) >= 0) {
Node<K,V>[] tab = table; int n;
//Table not yet initialized
//(this path is taken e.g. from putAll)
if (tab == null || (n = tab.length) == 0) {
n = (sc > c) ? sc : c;
//Set SIZECTL to -1 to mark "initializing"
if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
//Double-check that the table has not changed
if (table == tab) {
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = nt;
//sc = 3/4 * n (the new resize threshold)
sc = n - (n >>> 2);
}
} finally {
sizeCtl = sc;
}
}
}
//Target capacity <= current threshold, or the table is already at the maximum: stop
else if (c <= sc || n >= MAXIMUM_CAPACITY)
break;
else if (tab == table) {
int rs = resizeStamp(n);
//Negative sizeCtl: a resize is already in progress
if (sc < 0) {
Node<K,V>[] nt;
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
transferIndex <= 0)
break;
//Increment the transfer thread count; the current thread joins the transfer
//(during a transfer, sc tracks the number of threads working on it)
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt);
}
//No initialization or resize in progress: start the resize ourselves
else if (U.compareAndSwapInt(this, SIZECTL, sc,
(rs << RESIZE_STAMP_SHIFT) + 2))
transfer(tab, null);
}
}
}
private static final int tableSizeFor(int c) {
/*
Returns the smallest power of two >= c.
Subtracting 1 first ensures the result is >= the original value when c is
already a power of two. For example binary 1000 (decimal 8): operating on it
directly would yield 10000 (16), which is wrong; after subtracting 1 we get
111, and the bit-smearing below restores 1000, i.e. 8.
*/
int n = c - 1;
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
int n = tab.length, stride;
//MIN_TRANSFER_STRIDE = 16 bounds how finely the work is split across threads:
//each CPU handles at least 16 slots, so for a 16-element array only one thread does the copy
if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
stride = MIN_TRANSFER_STRIDE; // subdivide range
//Only the first thread to enter (nextTab == null) allocates the new array
if (nextTab == null) { // initiating
try {
@SuppressWarnings("unchecked")
//Double the array capacity
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
nextTab = nt;
} catch (Throwable ex) { // try to cope with OOME
sizeCtl = Integer.MAX_VALUE;
return;
}
//Publish the new array so other threads can help
nextTable = nextTab;
transferIndex = n;
}
int nextn = nextTab.length;
/*
 * fwd is the per-resize marker node used for coordination: once a slot is
 * empty or fully transferred it is set to fwd (an empty placeholder node).
 */
ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
//Whether to keep advancing to the next (lower) slot
boolean advance = true;
//Before committing, rescan the array once to make sure every slot is done
boolean finishing = false; // to ensure sweep before committing nextTab
for (int i = 0, bound = 0;;) {
Node<K,V> f; int fh;
while (advance) {
int nextIndex, nextBound;
if (--i >= bound || finishing)
advance = false;
else if ((nextIndex = transferIndex) <= 0) {
i = -1;
advance = false;
}
else if (U.compareAndSwapInt
(this, TRANSFERINDEX, nextIndex,
nextBound = (nextIndex > stride ?
nextIndex - stride : 0))) {
//Claim the range [nextBound, nextIndex) for this thread
bound = nextBound;
i = nextIndex - 1;
advance = false;
}
}
if (i < 0 || i >= n || i + n >= nextn) {
int sc;
//Transfer fully finished: commit
if (finishing) {
nextTable = null;
//Swap nextTab in as the live table
table = nextTab;
//New threshold: 0.75 of the doubled capacity
sizeCtl = (n << 1) - (n >>> 1);
return;
}
//Decrement the worker-thread count and return if we were not the last one
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
return;
finishing = advance = true;
i = n; // recheck before commit
}
}
else if ((f = tabAt(tab, i)) == null)
//Replace a null slot with the ForwardingNode (hash == MOVED)
advance = casTabAt(tab, i, null, fwd);
else if ((fh = f.hash) == MOVED)
//Slot already handled by another thread
advance = true; // already processed
else {
synchronized (f) {
//Double-checked locking on the bin head
if (tabAt(tab, i) == f) {
Node<K,V> ln, hn;
//hash >= 0: plain linked-list node
if (fh >= 0) {
//Bit == 0 keeps the node at the same index in the new array; otherwise it moves to index i+n
int runBit = fh & n;
Node<K,V> lastRun = f;
/*
After this loop runBit is the hash&n value of the final run:
every node from lastRun onward shares the same hash&n, so that
tail can be reused as-is without copying its nodes.
*/
for (Node<K,V> p = f.next; p != null; p = p.next) {
int b = p.hash & n;
if (b != runBit) {
runBit = b;
lastRun = p;
}
}
if (runBit == 0) {
ln = lastRun;
hn = null;
}
else {
hn = lastRun;
ln = null;
}
//Prepend the remaining nodes (in reverse order) onto the ln or hn list
for (Node<K,V> p = f; p != lastRun; p = p.next) {
int ph = p.hash; K pk = p.key; V pv = p.val;
if ((ph & n) == 0)
ln = new Node<K,V>(ph, pk, pv, ln);
else
hn = new Node<K,V>(ph, pk, pv, hn);
}
//Low list stays at the same index
setTabAt(nextTab, i, ln);
//High list moves to index i + n
setTabAt(nextTab, i + n, hn);
//Install fwd so other threads skip this slot
setTabAt(tab, i, fwd);
advance = true;
}
else if (f instanceof TreeBin) {
TreeBin<K,V> t = (TreeBin<K,V>)f;
TreeNode<K,V> lo = null, loTail = null;
TreeNode<K,V> hi = null, hiTail = null;
int lc = 0, hc = 0;
for (Node<K,V> e = t.first; e != null; e = e.next) {
int h = e.hash;
TreeNode<K,V> p = new TreeNode<K,V>
(h, e.key, e.val, null, null);
if ((h & n) == 0) {
if ((p.prev = loTail) == null)
lo = p;
else
loTail.next = p;
loTail = p;
++lc;
}
else {
if ((p.prev = hiTail) == null)
hi = p;
else
hiTail.next = p;
hiTail = p;
++hc;
}
}
/*
 * After splitting the tree, check how many nodes each half has;
 * if a half has <= 6 (UNTREEIFY_THRESHOLD) nodes, convert it back
 * to a plain linked list.
 */
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
(hc != 0) ? new TreeBin<K,V>(lo) : t;
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
(lc != 0) ? new TreeBin<K,V>(hi) : t;
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
setTabAt(tab, i, fwd);
advance = true;
}
}
}
}
}
}
final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
Node<K,V>[] nextTab; int sc;
if (tab != null && (f instanceof ForwardingNode) &&
(nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
int rs = resizeStamp(tab.length);
while (nextTab == nextTable && table == tab &&
(sc = sizeCtl) < 0) {
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || transferIndex <= 0)
break;
//Increment the worker count and join the transfer
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
transfer(tab, nextTab);
break;
}
}
return nextTab;
}
return table;
}
public V get(Object key) {
Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
int h = spread(key.hashCode());
if ((tab = table) != null && (n = tab.length) > 0 &&
(e = tabAt(tab, (n - 1) & h)) != null) {
//If the first node matches the key, return its value directly
if ((eh = e.hash) == h) {
if ((ek = e.key) == key || (ek != null && key.equals(ek)))
return e.val;
}
//Negative hash: ForwardingNode or TreeBin — delegate to its find()
else if (eh < 0)
return (p = e.find(h, key)) != null ? p.val : null;
//Otherwise walk the linked list
while ((e = e.next) != null) {
if (e.hash == h &&
((ek = e.key) == key || (ek != null && key.equals(ek))))
return e.val;
}
}
return null;
}
//Removes the mapping for the key; implemented as a replace with a null value.
public V remove(Object key) {
return replaceNode(key, null, null);
}
/**
 * {@inheritDoc}
 *
 * @throws NullPointerException if any of the arguments are null
 * Updates the mapping only if the key is currently mapped to oldValue.
 */
public boolean replace(K key, V oldValue, V newValue) {
if (key == null || oldValue == null || newValue == null)
throw new NullPointerException();
return replaceNode(key, newValue, oldValue) != null;
}
final V replaceNode(Object key, V value, Object cv) {
int hash = spread(key.hashCode());
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
//Nothing to do if the table or the bin is empty
if (tab == null || (n = tab.length) == 0 ||
(f = tabAt(tab, i = (n - 1) & hash)) == null)
break;
//A resize is in progress: help with the transfer first
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
V oldVal = null;
boolean validated = false;
synchronized (f) {
//Double-checked locking (DCL) on the bin head
if (tabAt(tab, i) == f) {
//Linked-list bin
if (fh >= 0) {
validated = true;
for (Node<K,V> e = f, pred = null;;) {
K ek;
//Match the key
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
V ev = e.val;
//Match the expected value (cv == null means unconditional)
if (cv == null || cv == ev ||
(ev != null && cv.equals(ev))) {
oldVal = ev;
//Non-null value: replace
if (value != null)
e.val = value;
//value == null means remove: unlink a middle/tail node
else if (pred != null)
pred.next = e.next;
//Removing the first node of the bin
else
setTabAt(tab, i, e.next);
}
break;
}
//Remember the previous node so removal can unlink
pred = e;
//Reached the tail without a match: stop
if ((e = e.next) == null)
break;
}
}
//Red-black tree bin
else if (f instanceof TreeBin) {
validated = true;
TreeBin<K,V> t = (TreeBin<K,V>)f;
TreeNode<K,V> r, p;
if ((r = t.root) != null &&
(p = r.findTreeNode(hash, key, null)) != null) {
V pv = p.val;
if (cv == null || cv == pv ||
(pv != null && cv.equals(pv))) {
oldVal = pv;
if (value != null)
p.val = value;
else if (t.removeTreeNode(p))
setTabAt(tab, i, untreeify(t.first));
}
}
}
}
}
if (validated) {
if (oldVal != null) {
if (value == null)
//Removal: decrement the element count
addCount(-1L, -1);
return oldVal;
}
break;
}
}
}
return null;
}
什么时候扩容?当某个链表的节点数达到 8(TREEIFY_THRESHOLD)且数组长度小于 64 时会先扩容;当元素总数超过 sizeCtl 阈值时也会扩容。
ConcurrentHashMap 没有对整个 map 加锁来实现独占访问,因此客户端无法通过自行加锁来组合出新的原子操作——例如"若不存在则添加"(putIfAbsent)这类复合操作必须由容器本身提供。
JDK1.8放弃分段锁
段Segment继承了重入锁ReentrantLock,有了锁的功能,每个锁控制的是一段,当每个Segment越来越大时,锁的粒度就变得有些大了。
分段锁的优势在于保证在操作不同段 map 的时候可以并发执行,操作同段 map 的时候,进行锁的竞争和等待。这相对于直接对整个map同步synchronized是有优势的。
缺点在于分成很多段时会比较浪费内存空间(不连续,碎片化); 操作map时竞争同一个分段锁的概率非常小时,分段锁反而会造成更新等操作的长时间等待; 当某个段很大时,分段锁的性能会下降。
jdk1.8的map实现
和hashmap一样,jdk 1.8中ConcurrentHashmap采用的底层数据结构为数组+链表+红黑树的形式。数组可以扩容,链表可以转化为红黑树。
为什么不用ReentrantLock而用synchronized ?
减少内存开销:如果使用ReentrantLock则需要节点继承AQS来获得同步支持,增加内存开销,而1.8中只有头节点需要进行同步。
内部优化:synchronized则是JVM直接支持的,JVM能够在运行时作出相应的优化措施:锁粗化、锁消除、锁自旋等等。
多个线程又是如何同步处理的呢?
写入时复制:只要正确发布一个事实不可变对象,在访问该对象时就不再需要进一步同步;每次修改时,都会创建并重新发布一个新的容器副本,从而实现事实上的不可变性(线程安全)。
使用场景:迭代>修改,事件通知系统(注册和注销事件监听器操作少于接收事件通知的操作)
//Constructor: initializes the backing store to an empty Object array.
public CopyOnWriteArrayList() {
setArray(new Object[0]);
}
public boolean add(E e) {
final ReentrantLock lock = this.lock;
//Lock: all mutations are serialized
lock.lock();
try {
Object[] elements = getArray();
int len = elements.length;
//Copy the array into a new one with capacity + 1
Object[] newElements = Arrays.copyOf(elements, len + 1);
newElements[len] = e;
//Publish the new array, replacing the old one
setArray(newElements);
return true;
} finally {
//Unlock
lock.unlock();
}
}
public E remove(int index) {
final ReentrantLock lock = this.lock;
//Lock: all mutations are serialized
lock.lock();
try {
//Snapshot the array and the value being removed
Object[] elements = getArray();
int len = elements.length;
E oldValue = get(elements, index);
int numMoved = len - index - 1;
//Removing the last element: a single truncating copy suffices
if (numMoved == 0)
setArray(Arrays.copyOf(elements, len - 1));
else {
//Otherwise two copies: the prefix and the suffix around index
Object[] newElements = new Object[len - 1];
System.arraycopy(elements, 0, newElements, 0, index);
System.arraycopy(elements, index + 1, newElements, index,
numMoved);
setArray(newElements);
}
return oldValue;
} finally {
//Unlock
lock.unlock();
}
}
//Lock-free read: delegates to the private indexed accessor on the current snapshot.
public E get(int index) {
return get(getArray(), index);
}
//Unchecked indexed read from a given array snapshot.
private E get(Object[] a, int index) {
return (E) a[index];
}
public E set(int index, E element) {
final ReentrantLock lock = this.lock;
lock.lock();
try {
Object[] elements = getArray();
E oldValue = get(elements, index);
if (oldValue != element) {
int len = elements.length;
//Modify a copy of the array
Object[] newElements = Arrays.copyOf(elements, len);
newElements[index] = element;
//Publish the copy as the current array
setArray(newElements);
} else {
// Not quite a no-op; ensures volatile write semantics
//Re-set the same array to preserve happens-before guarantees
setArray(elements);
}
return oldValue;
} finally {
lock.unlock();
}
}
应用场景:读多写少
如:黑名单,监听器
读上没加锁,所以支持大量并发,但涉及修改的时候,有2个数组副本,当数组过大,则造成多余的内存消耗.
CopyOnWrite容器只能保证数据的最终一致性,不能保证数据的实时一致性。之所以只能保证最终一致,是因为每次涉及到修改都会copy一个副本然后回写,最终的结果是一致的,但是在copy途中如果有读操作,那么就会造成数据不一致问题.
一个基于链表的阻塞队列。此队列按 FIFO(先进先出)排序元素
public LinkedBlockingQueue() {
//Default capacity: effectively unbounded
this(Integer.MAX_VALUE);
}
public LinkedBlockingQueue(int capacity) {
if (capacity <= 0) throw new IllegalArgumentException();
this.capacity = capacity;
//Initialize head and last to the same dummy node (its item is always null)
last = head = new Node<E>(null);
}
//put inserts the element at the tail of the queue, waiting for space if necessary.
public void put(E e) throws InterruptedException {
if (e == null) throw new NullPointerException();
// Note: convention in all put/take/etc is to preset local var
// holding count negative to indicate failure unless set.
int c = -1;
//Create the new node
Node<E> node = new Node<E>(e);
//Acquire the put lock
final ReentrantLock putLock = this.putLock;
final AtomicInteger count = this.count;
//Acquire the lock unless the current thread is interrupted
putLock.lockInterruptibly();
try {
//Queue full: wait until space becomes available
while (count.get() == capacity) {
notFull.await();
}
//Enqueue the value
enqueue(node);
c = count.getAndIncrement();
if (c + 1 < capacity)
//Still room left: wake one more waiting producer
notFull.signal();
} finally {
//Release the lock
putLock.unlock();
}
// Queue went from empty to one element: wake a consumer
if (c == 0)
signalNotEmpty();
}
public boolean offer(E e) {
if (e == null) throw new NullPointerException();
//At capacity: return false immediately instead of blocking
final AtomicInteger count = this.count;
if (count.get() == capacity)
return false;
int c = -1;
Node<E> node = new Node<E>(e);
final ReentrantLock putLock = this.putLock;
//Non-blocking variant, so a plain lock() is enough
putLock.lock();
try {
//Re-check capacity under the lock, enqueue, then wake another producer if room remains
if (count.get() < capacity) {
enqueue(node);
c = count.getAndIncrement();
if (c + 1 < capacity)
notFull.signal();
}
} finally {
putLock.unlock();
}
// Queue went from empty to one element: wake a consumer
if (c == 0)
signalNotEmpty();
return c >= 0;
}
public boolean offer(E e, long timeout, TimeUnit unit)
throws InterruptedException {
if (e == null) throw new NullPointerException();
long nanos = unit.toNanos(timeout);
int c = -1;
final ReentrantLock putLock = this.putLock;
final AtomicInteger count = this.count;
//Acquire the put lock interruptibly
putLock.lockInterruptibly();
try {
//Loop while the queue is at capacity
while (count.get() == capacity) {
//Timed out: give up
if (nanos <= 0)
return false;
//Wait until signalled, interrupted, or the remaining time elapses
nanos = notFull.awaitNanos(nanos);
}
enqueue(new Node<E>(e));
c = count.getAndIncrement();
//Still room left: wake one more waiting producer
if (c + 1 < capacity)
notFull.signal();
} finally {
putLock.unlock();
}
// Queue went from empty to one element: wake a consumer
if (c == 0)
signalNotEmpty();
return true;
}
//Retrieves and removes the head of the queue, waiting until an element is available.
public E take() throws InterruptedException {
E x;
int c = -1;
final AtomicInteger count = this.count;
final ReentrantLock takeLock = this.takeLock;
//Interruptible lock acquisition
takeLock.lockInterruptibly();
try {
//Queue empty: block until an element arrives
while (count.get() == 0) {
notEmpty.await();
}
//Dequeue the head value
x = dequeue();
c = count.getAndDecrement();
// More elements remain: wake the next consumer
if (c > 1)
notEmpty.signal();
} finally {
takeLock.unlock();
}
// Queue was full before this take: wake a producer
if (c == capacity)
signalNotFull();
return x;
}
private E dequeue() {
// assert takeLock.isHeldByCurrentThread();
// assert head.item == null;
//head is a dummy node whose item is always null
Node<E> h = head;
Node<E> first = h.next;
// Point the old head's next at itself so it can be GC'd (and so
// iterators can detect a consumed node)
h.next = h; // help GC
// Advance head to the next node
head = first;
//Take the new head's value
E x = first.item;
//The new head becomes the dummy: clear its item
first.item = null;
return x;
}
public E poll() {
final AtomicInteger count = this.count;
//Empty queue: return null without locking
if (count.get() == 0)
return null;
E x = null;
int c = -1;
final ReentrantLock takeLock = this.takeLock;
takeLock.lock();
try {
if (count.get() > 0) {
x = dequeue();
c = count.getAndDecrement();
//More elements remain: wake the next consumer
if (c > 1)
notEmpty.signal();
}
} finally {
takeLock.unlock();
}
//Queue was full before this poll: wake a producer
if (c == capacity)
signalNotFull();
return x;
}
//Returns (without removing) the head element, or null if the queue is empty.
public E peek() {
if (count.get() == 0)
return null;
final ReentrantLock takeLock = this.takeLock;
takeLock.lock();
try {
Node<E> first = head.next;
if (first == null)
return null;
else
return first.item;
} finally {
takeLock.unlock();
}
}
public boolean remove(Object o) {
//null never matches: return immediately
if (o == null) return false;
//Take BOTH locks (put and take), so no insertion or removal can run concurrently
fullyLock();
try {
for (Node<E> trail = head, p = trail.next;
p != null;
trail = p, p = p.next) {
//Found a matching value: unlink it
if (o.equals(p.item)) {
unlink(p, trail);
return true;
}
}
return false;
} finally {
//Release both locks
fullyUnlock();
}
}
void unlink(Node<E> p, Node<E> trail) {
// assert isFullyLocked();
// p.next is not changed, to allow iterators that are
// traversing p to maintain their weak-consistency guarantee.
p.item = null;
//p.next is deliberately left intact: nulling it would break iterators still traversing p
trail.next = p.next;
if (last == p)
last = trail;
// If the queue was full before this removal, there is now space: wake a producer
if (count.getAndDecrement() == capacity)
notFull.signal();
}
当执行迭代器的nextNode的时候,如果同时发现有执行take操作,因为当前head.next指向了自己,
//Iterator helper: advances past nodes consumed by concurrent take()s
//(dequeue makes the old head point at itself, which is detected below).
private Node<E> nextNode(Node<E> p) {
for (;;) {
Node<E> s = p.next;
//A self-link means p was dequeued: restart from the current head's successor
if (s == p)
return head.next;
if (s == null || s.item != null)
return s;
p = s;
}
}
底层阻塞队列FIFO.内部由两个ReentrantLock来实现出入队列的线程安全,由各自的Condition对象的await和signal来实现等待和唤醒功能。
默认容量无界,且底层链表,所以执行插入和删除效率比较高.且2把锁维护新增删除,所以并发有所提高.
public ArrayBlockingQueue(int capacity, boolean fair) {
if (capacity <= 0)
throw new IllegalArgumentException();
this.items = new Object[capacity];
//A single lock guards both producers and consumers
lock = new ReentrantLock(fair);
notEmpty = lock.newCondition();
notFull = lock.newCondition();
}
public void put(E e) throws InterruptedException {
//null values are not allowed
checkNotNull(e);
final ReentrantLock lock = this.lock;
//Interruptible lock acquisition
lock.lockInterruptibly();
try {
//Queue at capacity: wait until a consumer makes room
while (count == items.length)
notFull.await();
enqueue(e);
} finally {
//Release the lock
lock.unlock();
}
}
private void enqueue(E x) {
// assert lock.getHoldCount() == 1;
// assert items[putIndex] == null;
final Object[] items = this.items;
items[putIndex] = x;
//Wrap the put index back to 0 when it reaches the end of the array (circular buffer)
if (++putIndex == items.length)
putIndex = 0;
//One more element available for consumption
count++;
//Wake a waiting consumer
notEmpty.signal();
}
public boolean offer(E e) {
checkNotNull(e);
final ReentrantLock lock = this.lock;
lock.lock();
try {
//Queue at capacity: return false instead of blocking
if (count == items.length)
return false;
else {
enqueue(e);
return true;
}
} finally {
lock.unlock();
}
}
public E take() throws InterruptedException {
final ReentrantLock lock = this.lock;
lock.lockInterruptibly();
try {
//Queue empty: wait until a producer adds an element
while (count == 0)
notEmpty.await();
//Remove and return the head element
return dequeue();
} finally {
lock.unlock();
}
}
private E dequeue() {
// assert lock.getHoldCount() == 1;
// assert items[takeIndex] != null;
final Object[] items = this.items;
@SuppressWarnings("unchecked")
//Read the element at the take index
E x = (E) items[takeIndex];
//Clear the slot
items[takeIndex] = null;
//Wrap the take index back to 0 at the end of the array (circular buffer)
if (++takeIndex == items.length)
takeIndex = 0;
count--;
//Keep active iterators consistent with the removal
if (itrs != null)
itrs.elementDequeued();
//Wake a waiting producer
notFull.signal();
return x;
}
void elementDequeued() {
// assert lock.getHoldCount() == 1;
//Queue became empty: reset iterator bookkeeping
if (count == 0)
queueIsEmpty();
else if (takeIndex == 0)
//The take index wrapped around to 0: notify iterators
takeIndexWrapped();
}
public boolean remove(Object o) {
if (o == null) return false;
final Object[] items = this.items;
final ReentrantLock lock = this.lock;
lock.lock();
try {
if (count > 0) {
final int putIndex = this.putIndex;
int i = takeIndex;
//Scan the live elements from takeIndex to putIndex, wrapping to 0 at the array boundary
do {
//Match found: remove it
if (o.equals(items[i])) {
removeAt(i);
return true;
}
//Wrap-around check
if (++i == items.length)
i = 0;
} while (i != putIndex);
}
return false;
} finally {
lock.unlock();
}
}
void removeAt(final int removeIndex) {
// assert lock.getHoldCount() == 1;
// assert items[removeIndex] != null;
// assert removeIndex >= 0 && removeIndex < items.length;
final Object[] items = this.items;
//Removing the element at the current take index is just a normal dequeue
if (removeIndex == takeIndex) {
// removing front item; just advance
//Clear the slot
items[takeIndex] = null;
//Advance takeIndex (wrapping at the array end)
if (++takeIndex == items.length)
takeIndex = 0;
count--;
//Keep active iterators consistent
if (itrs != null)
itrs.elementDequeued();
} else {
// an "interior" remove
// slide over all others up through putIndex.
final int putIndex = this.putIndex;
for (int i = removeIndex;;) {
int next = i + 1;
//Wrap to index 0 at the array boundary
if (next == items.length)
next = 0;
//Not at the put boundary yet: shift the next element down one slot
if (next != putIndex) {
items[i] = items[next];
i = next;
} else {
//Reached the boundary: clear the vacated slot and update putIndex
items[i] = null;
this.putIndex = i;
break;
}
}
//One fewer element
count--;
//Keep active iterators consistent
if (itrs != null)
itrs.removedAt(removeIndex);
}
notFull.signal();
}
有界阻塞队列。此队列按 FIFO(先进先出)原则对元素进行排序。
初始化必须设置容量,value不允许为空
全局一个锁处理,相对并发比较低.
因为底层数组,所以修改查询快.
public void set(T value) {
//The current thread's ThreadLocalMap binds this ThreadLocal instance to value.
//Get the current thread
Thread t = Thread.currentThread();
//Get the map attached to the current thread
ThreadLocalMap map = getMap(t);
if (map != null)
//Bind this ThreadLocal to the value
map.set(this, value);
else
//No map yet: create it with this initial binding
createMap(t, value);
}
//Creates the thread's ThreadLocalMap with an initial (this, firstValue) entry.
void createMap(Thread t, T firstValue) {
t.threadLocals = new ThreadLocalMap(this, firstValue);
}
ThreadLocalMap(ThreadLocal<?> firstKey, Object firstValue) {
//Initialize a table with capacity 16
table = new Entry[INITIAL_CAPACITY];
//Compute the slot index from the ThreadLocal's hash code
int i = firstKey.threadLocalHashCode & (INITIAL_CAPACITY - 1);
//Entry extends WeakReference: when a ThreadLocal is reachable only through
//weak references (no strong references remain), GC will reclaim it.
table[i] = new Entry(firstKey, firstValue);
//Record the initial size
size = 1;
//Set the resize threshold: capacity * 2/3
setThreshold(INITIAL_CAPACITY);
}
//Hash-table entry whose key (the ThreadLocal) is held weakly; a null get()
//means the key was garbage-collected and the entry is stale.
static class Entry extends WeakReference<ThreadLocal<?>> {
/** The value associated with this ThreadLocal. */
Object value;
Entry(ThreadLocal<?> k, Object v) {
super(k);
value = v;
}
}
private void set(ThreadLocal<?> key, Object value) {
// We don't use a fast path as with get() because it is at
// least as common to use set() to create new entries as
// it is to replace existing ones, in which case, a fast
// path would fail more often than not.
Entry[] tab = table;
int len = tab.length;
int i = key.threadLocalHashCode & (len-1);
/*
Linear-probe through the entries:
 - if the key is found, overwrite and return;
 - if a stale (GC'd) entry is found, reuse its slot.
*/
for (Entry e = tab[i];
e != null;
e = tab[i = nextIndex(i, len)]) {
ThreadLocal<?> k = e.get();
//Same ThreadLocal: overwrite its value and return
if (k == key) {
e.value = value;
return;
}
//Key was garbage-collected: reuse this stale slot
if (k == null) {
replaceStaleEntry(key, value, i);
return;
}
}
//Reached a null slot: the key is not present in the table
//so store a fresh entry at tab[i]
tab[i] = new Entry(key, value);
int sz = ++size;
//Purge stale entries; if nothing was purged and the threshold is hit, rehash
if (!cleanSomeSlots(i, sz) && sz >= threshold)
//Resize (after expunging all stale entries)
rehash();
}
private void replaceStaleEntry(ThreadLocal<?> key, Object value,
int staleSlot) {
Entry[] tab = table;
int len = tab.length;
Entry e;
// Back up to check for prior stale entry in current run.
// We clean out whole runs at a time to avoid continual
// incremental rehashing due to garbage collector freeing
// up refs in bunches (i.e., whenever the collector runs).
//Index of the first stale entry to expunge
int slotToExpunge = staleSlot;
//Scan backwards from the stale slot through the contiguous run for earlier stale entries
for (int i = prevIndex(staleSlot, len);
(e = tab[i]) != null;
i = prevIndex(i, len))
if (e.get() == null)
slotToExpunge = i;
//At this point slotToExpunge is in one of two states:
//slotToExpunge == staleSlot: no stale entry was found before staleSlot in the run
//slotToExpunge != staleSlot: an earlier stale entry exists in the run
// Find either the key or trailing null slot of run, whichever
// occurs first
//Scan forwards through the run
for (int i = nextIndex(staleSlot, len);
(e = tab[i]) != null;
i = nextIndex(i, len)) {
ThreadLocal<?> k = e.get();
// If we find key, then we need to swap it
// with the stale entry to maintain hash table order.
// The newly stale slot, or any other stale slot
// encountered above it, can then be sent to expungeStaleEntry
// to remove or rehash all of the other entries in run.
//Key found: swap it into the stale slot so the live entry sits at staleSlot
if (k == key) {
e.value = value;
tab[i] = tab[staleSlot];
tab[staleSlot] = e;
// Start expunge at preceding stale entry if it exists
//If no earlier stale slot was found, start expunging from i (the swapped-out stale entry)
if (slotToExpunge == staleSlot)
slotToExpunge = i;
//Expunge stale entries that follow
cleanSomeSlots(
//expungeStaleEntry removes the stale entry and returns the next null slot index
expungeStaleEntry(slotToExpunge), len);
return;
}
// If we didn't find stale entry on backward scan, the
// first stale entry seen while scanning for key is the
// first still present in the run.
//k == null means this entry is stale; record it if no earlier stale slot was found
if (k == null && slotToExpunge == staleSlot)
slotToExpunge = i;
}
// If key not found, put new entry in stale slot
//Key not present: store a fresh entry in the stale slot
tab[staleSlot].value = null;
tab[staleSlot] = new Entry(key, value);
// If there are any other stale entries in run, expunge them
//If another stale entry exists in the run, clean it up too
if (slotToExpunge != staleSlot)
cleanSomeSlots(expungeStaleEntry(slotToExpunge), len);
}
private int expungeStaleEntry(int staleSlot) {
Entry[] tab = table;
int len = tab.length;
// expunge entry at staleSlot
//Clear the stale entry's value and the slot itself
tab[staleSlot].value = null;
tab[staleSlot] = null;
//One fewer live entry in the table
size--;
// Rehash until we encounter null
Entry e;
int i;
//Walk forwards through the run
for (i = nextIndex(staleSlot, len);
(e = tab[i]) != null;
i = nextIndex(i, len)) {
ThreadLocal<?> k = e.get();
//Remove any further stale entries encountered
if (k == null) {
e.value = null;
tab[i] = null;
size--;
} else {
int h = k.threadLocalHashCode & (len - 1);
//Recompute the entry's home index; if it differs from i, rehash it
if (h != i) {
tab[i] = null;
// Unlike Knuth 6.4 Algorithm R, we must scan until
// null because multiple entries could have been stale.
//Find the next free slot starting at h;
//this moves e as close to its home index as possible, keeping get/set lookups fast
while (tab[h] != null)
h = nextIndex(h, len);
//Store e at its new position
tab[h] = e;
}
}
}
//Return the index of the next null slot after the run
return i;
}
private boolean cleanSomeSlots(int i, int n) {
boolean removed = false;
Entry[] tab = table;
int len = tab.length;
//Probe forward; whenever a stale entry is found, expunge it
do {
i = nextIndex(i, len);
Entry e = tab[i];
if (e != null && e.get() == null) {
//Found garbage: reset n to the table length to extend the scan
n = len;
removed = true;
i = expungeStaleEntry(i);
}
} while ( (n >>>= 1) != 0);//unsigned right shift bounds the scan to log2(n) probes
return removed;
}
private void rehash() {
//First expunge every stale entry
expungeStaleEntries();
// Use lower threshold for doubling to avoid hysteresis
//threshold - threshold/4 = (2/3*len) - (2/3*len)/4 = 1/2*len,
//so resize when size >= 1/2 * len
if (size >= threshold - threshold / 4)
resize();
}
//Expunges every stale entry in the whole table.
private void expungeStaleEntries() {
Entry[] tab = table;
int len = tab.length;
for (int j = 0; j < len; j++) {
Entry e = tab[j];
if (e != null && e.get() == null)
expungeStaleEntry(j);
}
}
private void resize() {
Entry[] oldTab = table;
int oldLen = oldTab.length;
//Double the capacity
int newLen = oldLen * 2;
Entry[] newTab = new Entry[newLen];
int count = 0;
for (int j = 0; j < oldLen; ++j) {
Entry e = oldTab[j];
if (e != null) {
ThreadLocal<?> k = e.get();
//Entry became stale in the meantime: drop it
if (k == null) {
e.value = null; // Help the GC
} else {
//Linear-probe to the first free slot from the entry's new home index
int h = k.threadLocalHashCode & (newLen - 1);
while (newTab[h] != null)
h = nextIndex(h, newLen);
newTab[h] = e;
count++;
}
}
}
//Install the new threshold, size, and table
setThreshold(newLen);
size = count;
table = newTab;
}
public T get() {
//Get the current thread
Thread t = Thread.currentThread();
//Get the current thread's map
ThreadLocalMap map = getMap(t);
if (map != null) {
//Look up the value bound to this ThreadLocal
ThreadLocalMap.Entry e = map.getEntry(this);
if (e != null) {
@SuppressWarnings("unchecked")
//Found: return the value
T result = (T)e.value;
return result;
}
}
//No map or no entry: initialize and return the initial value
return setInitialValue();
}
private T setInitialValue() {
//Compute the initial value (null unless initialValue() is overridden)
T value = initialValue();
//Get the current thread
Thread t = Thread.currentThread();
//Get its map
ThreadLocalMap map = getMap(t);
if (map != null)
//Map exists: store the value
map.set(this, value);
else
//No map yet: create one with this binding
createMap(t, value);
return value;
}
private Entry getEntry(ThreadLocal<?> key) {
//Compute the home index
int i = key.threadLocalHashCode & (table.length - 1);
Entry e = table[i];
//Direct hit: return the entry
if (e != null && e.get() == key)
return e;
else
//Miss: the entry is either stale or sits in a later probe slot
return getEntryAfterMiss(key, i, e);
}
private Entry getEntryAfterMiss(ThreadLocal<?> key, int i, Entry e) {
Entry[] tab = table;
int len = tab.length;
while (e != null) {
ThreadLocal<?> k = e.get();
//Key matched: return the entry
if (k == key)
return e;
//Stale entry: expunge it (this also compacts the run)
if (k == null)
expungeStaleEntry(i);
else
//Live non-matching entry: continue probing
i = nextIndex(i, len);
e = tab[i];
}
return null;
}
public void remove() {
//Get the current thread's map
ThreadLocalMap m = getMap(Thread.currentThread());
if (m != null)
//Remove the value bound to this ThreadLocal
m.remove(this);
}
private void remove(ThreadLocal<?> key) {
Entry[] tab = table;
int len = tab.length;
int i = key.threadLocalHashCode & (len-1);
//Linear-probe the entries
for (Entry e = tab[i];
e != null;
e = tab[i = nextIndex(i, len)]) {
//Remove the value bound to this ThreadLocal
if (e.get() == key) {
//Clear the weak referent
e.clear();
expungeStaleEntry(i);
return;
}
}
}
扩容时机,当前集合个数超过len*3/4
Entry继承WeakReference,也就是说当一个对象仅仅被weak reference指向,而没有任何其他strong reference指向的时候, 如果GC运行, 那么这个对象就会被回收.
底层数组存储ThreadLocal和val,采用hashcode获取索引
为什么采用弱引用?因为当采用强引用的时候,如果ThreadLocal为空的时候,GC依旧不会清空存储的值,会造成内存泄漏
在threadLocal的生命周期里(set,getEntry,remove)里,都会针对key为null的脏entry进行处理。
一个无界阻塞队列,它使用与类 PriorityQueue 相同的顺序规则,并且提供了阻塞获取操作。
其中每个插入操作必须等待另一个线程的对应移除操作
可缩放的并发 ConcurrentNavigableMap 实现。映射可以根据键的自然顺序进行排序,也可以根据创建映射时所提供的 Comparator 进行排序,具体取决于使用的构造方法。
原子域更新器