Contents
I. Class fields
II. Static methods
III. Constructors
IV. Map interface implementation
/**
* The default initial capacity is 16; it must be a power of 2
*/
static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16
/**
* Maximum capacity
*/
static final int MAXIMUM_CAPACITY = 1 << 30;
/**
* Default load factor
*/
static final float DEFAULT_LOAD_FACTOR = 0.75f;
/**
* When the number of nodes in a bucket exceeds 8, the bucket's storage structure is converted from a singly linked list into a red-black tree
*/
static final int TREEIFY_THRESHOLD = 8;
/**
* During a resize, when the number of nodes in a bucket drops below 6, the red-black tree is converted back into a singly linked list
*/
static final int UNTREEIFY_THRESHOLD = 6;
/**
* The minimum table capacity for converting a bucket from a list into a tree; below this capacity, an overcrowded bucket is handled by resizing the table instead
*/
static final int MIN_TREEIFY_CAPACITY = 64;
/* ---------------- Fields -------------- */
/**
* The array that stores the Nodes; its length is always a power of 2
*/
transient Node<K,V>[] table;
/**
* Cached Set view holding the entries
*/
transient Set<Map.Entry<K,V>> entrySet;
/**
* The number of key-value pairs contained
*/
transient int size;
/**
* Counts structural modifications; used to fail fast when the map is modified while being iterated
*/
transient int modCount;
/**
* The size threshold at which the table is resized: the actual capacity computed from the initial capacity, multiplied by the load factor
*/
int threshold;
/**
* Load factor
*/
final float loadFactor;
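To see how these fields relate at runtime, here is a small hedged sketch (the class name CapacityDemo and helper readField are made up for this example) that reads the internal table and threshold fields via reflection; on Java 16 and later this assumes the JVM is started with --add-opens java.base/java.util=ALL-UNNAMED.
import java.lang.reflect.Field;
import java.util.HashMap;

public class CapacityDemo {
    // Reads a private field of java.util.HashMap via reflection (assumes the module is opened on recent JDKs)
    static Object readField(HashMap<?, ?> map, String name) throws Exception {
        Field f = HashMap.class.getDeclaredField(name);
        f.setAccessible(true);
        return f.get(map);
    }

    public static void main(String[] args) throws Exception {
        HashMap<Integer, Integer> map = new HashMap<>();
        for (int i = 0; i < 13; i++) {
            map.put(i, i);
        }
        Object[] table = (Object[]) readField(map, "table");
        // With the default capacity 16 and load factor 0.75, the 13th entry pushes size past
        // threshold = 12, so the table has already been doubled to 32
        System.out.println("table length = " + table.length);                // expected: 32
        System.out.println("threshold    = " + readField(map, "threshold")); // expected: 24
    }
}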
/**
* Computes the hash of the key; the bucket index is later derived from this hash modulo the table capacity (implemented as (n - 1) & hash, since the capacity is a power of 2)
*/
static final int hash(Object key) {
int h;
return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
}
Why perform the bit manipulation above? First, in practice a HashMap's capacity rarely exceeds 2^16 (65536 buckets); maps that large are usually moved into a caching component, so the low 16 bits of the key's hash matter most. Second, hashCode() returns an int of at most 32 bits; shifting it right by 16 with zero fill (h >>> 16) and XOR-ing with the original value (same bits give 0, different bits give 1) lets both the high 16 bits and the low 16 bits take part in the computation. Compared with using only the low 16 bits, this efficiently avoids cases where different hash values produce the same index after the modulo, spreads the keys more evenly, and improves lookup efficiency. Consider the following example:
keyA hashCode      0000 0000 0000 1100 1010 1111 0000 1000
keyB hashCode      0000 0000 0000 0011 1010 1111 0000 1000
HashMap capacity n 0000 0000 0000 0000 0010 0000 0000 0000
n-1                0000 0000 0000 0000 0001 1111 1111 1111
When only the low 16 bits take part, both hash codes map to the same index (hash & (n - 1)):
0000 0000 0000 0000 0000 1111 0000 1000
When the high 16 bits also take part:
keyA h>>>16        0000 0000 0000 0000 0000 0000 0000 1100
XOR                0000 0000 0000 1100 1010 1111 0000 0100
index              0000 0000 0000 0000 0000 1111 0000 0100
keyB h>>>16        0000 0000 0000 0000 0000 0000 0000 0011
XOR                0000 0000 0000 0011 1010 1111 0000 1011
index              0000 0000 0000 0000 0000 1111 0000 1011
The two indices now differ.
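The arithmetic above can be reproduced with a few lines of Java; the hash codes below are the made-up values from the example, not real keys, and HashSpreadDemo/spread are names invented for this sketch.
public class HashSpreadDemo {
    // Same perturbation as HashMap.hash(): XOR the high 16 bits into the low 16 bits
    static int spread(int h) {
        return h ^ (h >>> 16);
    }

    public static void main(String[] args) {
        int n = 1 << 13;                              // table capacity 8192, as in the example
        int keyA = 0b0000_0000_0000_1100_1010_1111_0000_1000;
        int keyB = 0b0000_0000_0000_0011_1010_1111_0000_1000;

        // Using the raw hash codes, both keys collide on the same bucket
        System.out.println((keyA & (n - 1)) + " vs " + (keyB & (n - 1)));                  // 3848 vs 3848
        // After spreading the high bits, the buckets differ
        System.out.println((spread(keyA) & (n - 1)) + " vs " + (spread(keyB) & (n - 1)));  // 3844 vs 3851
    }
}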
/**
* Computes the actual table capacity; cap is the user-supplied initial capacity, and the result is the smallest power of 2 greater than or equal to it, e.g. 10 yields 16
*/
static final int tableSizeFor(int cap) {
int n = cap - 1;
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
//n can be at most 2^31 - 1, so first check whether it has reached 2^30; only add 1 if it has not, otherwise the result would exceed the maximum int value
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
Trace the computation for a sample input:
initial value (cap)
00000000 11000000 00000000 00010011
n = cap - 1
00000000 11000000 00000000 00010010
>>>1
00000000 01100000 00000000 00001001
|=
00000000 11100000 00000000 00011011
>>>2
00000000 00111000 00000000 00000110
|=
00000000 11111000 00000000 00011111
>>>4
00000000 00001111 10000000 00000001
|=
00000000 11111111 10000000 00011111
>>>8
00000000 00000000 11111111 10000000
|=
00000000 11111111 11111111 10011111
>>>16
00000000 00000000 00000000 11111111
|=
00000000 11111111 11111111 11111111
n+1
00000001 00000000 00000000 00000000
From the trace above you can see that the low bits of the initial value are actually irrelevant; what matters is its highest set bit. Each right shift followed by OR keeps filling 1s behind that highest 1: the first shift by 1 fills 1 one, the second shift by 2 fills 2 ones, the third shift by 4 fills 4 ones, the fourth shift by 8 fills 8 ones, and the final shift by 16 fills 16 ones. Once every bit behind the highest 1 is already 1, the remaining shifts change nothing. Adding 1 at the end produces the result. Why is shifting by at most 16 enough? Because an int has 32 bits, the combined shifts can fill up to 31 ones, so even when the highest 1 sits in the top bit, every bit below it still gets filled.
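As a sanity check, here is a standalone copy of the routine (class and method names chosen for this sketch) run against a few inputs, including the value traced above.
public class TableSizeForDemo {
    static final int MAXIMUM_CAPACITY = 1 << 30;

    // Same logic as HashMap.tableSizeFor: smallest power of 2 >= cap
    static int tableSizeFor(int cap) {
        int n = cap - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    public static void main(String[] args) {
        System.out.println(tableSizeFor(10));         // 16
        System.out.println(tableSizeFor(16));         // 16 (already a power of 2)
        System.out.println(tableSizeFor(0x00C00013)); // 0x01000000, matching the trace above
        System.out.println(tableSizeFor(1));          // 1
    }
}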
/**
* Returns x's Class if x implements Comparable (more precisely Comparable<C> where C is x's own class), otherwise returns null
*/
static Class<?> comparableClassFor(Object x) {
if (x instanceof Comparable) {
Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
if ((c = x.getClass()) == String.class) // bypass checks
return c;
//get the interfaces implemented by this class
if ((ts = c.getGenericInterfaces()) != null) {
for (int i = 0; i < ts.length; ++i) {
//the class implements Comparable and the interface's type argument is the class itself
if (((t = ts[i]) instanceof ParameterizedType) &&
((p = (ParameterizedType)t).getRawType() ==
Comparable.class) &&
(as = p.getActualTypeArguments()) != null &&
as.length == 1 && as[0] == c) // type arg is c
return c;
}
}
}
return null;
}
/**
* Compares k with x; requires that k implements Comparable and that kc is k's Class (returns 0 if x is null or x's Class is not kc)
*/
@SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
static int compareComparables(Class<?> kc, Object k, Object x) {
return (x == null || x.getClass() != kc ? 0 :
((Comparable)k).compareTo(x));
}
For the ParameterizedType interface see https://www.cnblogs.com/linghu-java/p/8067886.html, and refer to the following test cases:
import org.junit.Test;

import java.lang.reflect.*;
import java.util.*;

public class TypeTest<T> {
private Map<String, Integer> map;
private List<? extends Number> numbers;
private List<? super HashMap> numbers2;
private List<T> list;
private List<T>[] arrays;
@Test
public void name() {
List<String> list = new ArrayList<>();
list.add("test");
//get the interfaces implemented by this class
Type[] types=list.getClass().getGenericInterfaces();
for(Type type:types){
System.out.println(type);
if(type instanceof ParameterizedType){
ParameterizedType parameterizedType=(ParameterizedType) type;
System.out.println("param:"+ Arrays.toString(parameterizedType.getActualTypeArguments()));
System.out.println("param:"+parameterizedType.getRawType());
System.out.println("param:"+parameterizedType.getOwnerType());
System.out.println("param:"+parameterizedType.getTypeName());
}
}
}
@Test
public void test3() throws Exception{
Field field=TypeTest.class.getDeclaredField("map");
//obtain the field's parameterized type from the class field
ParameterizedType parameterizedType=(ParameterizedType) field.getGenericType();
System.out.println(Arrays.toString(parameterizedType.getActualTypeArguments()));
System.out.println(parameterizedType.getTypeName());
System.out.println(parameterizedType.getRawType());
System.out.println(parameterizedType.getOwnerType());
}
@Test
public void test4() throws Exception {
Field field=TypeTest.class.getDeclaredField("numbers");
ParameterizedType parameterizedType=(ParameterizedType) field.getGenericType();
System.out.println(parameterizedType.getRawType());
Type[] types=parameterizedType.getActualTypeArguments();
WildcardType wildcardType=(WildcardType) types[0];
System.out.println(Arrays.toString(wildcardType.getLowerBounds()));
System.out.println(Arrays.toString(wildcardType.getUpperBounds()));
}
@Test
public void test5() throws Exception {
Field field=TypeTest.class.getDeclaredField("list");
ParameterizedType parameterizedType=(ParameterizedType) field.getGenericType();
System.out.println(parameterizedType.getRawType());
Type[] types=parameterizedType.getActualTypeArguments();
TypeVariable typeVariable=(TypeVariable) types[0];
System.out.println(Arrays.toString(typeVariable.getBounds()));
System.out.println(typeVariable.getName());
}
@Test
public void test6() throws Exception {
Field field=TypeTest.class.getDeclaredField("arrays");
GenericArrayType genericArrayType=(GenericArrayType) field.getGenericType();
Type type=genericArrayType.getGenericComponentType();
System.out.println(type);
ParameterizedType parameterizedType=(ParameterizedType) type;
System.out.println(Arrays.toString(parameterizedType.getActualTypeArguments()));
System.out.println(parameterizedType.getRawType());
}
}
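Tying the reflection API back to comparableClassFor: the check only succeeds for classes that declare Comparable of themselves. The following hedged sketch mirrors that logic outside of HashMap; isSelfComparable, SelfKey and OddKey are made-up names for this example.
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;

public class ComparableCheckDemo {
    // Mirrors HashMap.comparableClassFor: true only for classes declaring Comparable<Self>
    static boolean isSelfComparable(Object x) {
        if (!(x instanceof Comparable)) return false;
        Class<?> c = x.getClass();
        if (c == String.class) return true;
        for (Type t : c.getGenericInterfaces()) {
            if (t instanceof ParameterizedType) {
                ParameterizedType p = (ParameterizedType) t;
                Type[] as = p.getActualTypeArguments();
                if (p.getRawType() == Comparable.class && as.length == 1 && as[0] == c) {
                    return true;
                }
            }
        }
        return false;
    }

    // Declares Comparable of itself, so the check succeeds
    static class SelfKey implements Comparable<SelfKey> {
        public int compareTo(SelfKey o) { return 0; }
    }

    // Comparable, but of String rather than of itself, so the check fails
    static class OddKey implements Comparable<String> {
        public int compareTo(String o) { return 0; }
    }

    public static void main(String[] args) {
        System.out.println(isSelfComparable("abc"));          // true
        System.out.println(isSelfComparable(new SelfKey()));  // true
        System.out.println(isSelfComparable(new OddKey()));   // false
        System.out.println(isSelfComparable(42));             // true: Integer implements Comparable<Integer>
    }
}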
public HashMap(int initialCapacity, float loadFactor) {
if (initialCapacity < 0)
throw new IllegalArgumentException("Illegal initial capacity: " +
initialCapacity);
if (initialCapacity > MAXIMUM_CAPACITY)
initialCapacity = MAXIMUM_CAPACITY;
if (loadFactor <= 0 || Float.isNaN(loadFactor))
throw new IllegalArgumentException("Illegal load factor: " +
loadFactor);
this.loadFactor = loadFactor;
//compute the actual capacity from the initial capacity (temporarily stored in threshold)
this.threshold = tableSizeFor(initialCapacity);
}
public HashMap(int initialCapacity) {
this(initialCapacity, DEFAULT_LOAD_FACTOR);
}
public HashMap() {
this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
}
public HashMap(Map<? extends K, ? extends V> m) {
this.loadFactor = DEFAULT_LOAD_FACTOR;
putMapEntries(m, false);
}
When no capacity is specified, threshold is left uninitialized; the load factor, however, is initialized in every case.
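A quick usage sketch of these constructors (ConstructorDemo is a scratch class; the exception message follows from the code shown above):
import java.util.HashMap;
import java.util.Map;

public class ConstructorDemo {
    public static void main(String[] args) {
        // Both capacity constructors delegate to (initialCapacity, loadFactor); 10 is rounded up to 16 internally
        Map<String, Integer> a = new HashMap<>(10);
        Map<String, Integer> b = new HashMap<>(10, 0.5f);
        Map<String, Integer> d = new HashMap<>();        // defaults: capacity 16, load factor 0.75

        // Copy constructor: load factor defaults to 0.75 and entries are inserted via putMapEntries
        a.put("x", 1);
        Map<String, Integer> c = new HashMap<>(a);
        System.out.println(c);                           // {x=1}

        // Illegal arguments are rejected up front
        try {
            new HashMap<String, Integer>(-1);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());          // Illegal initial capacity: -1
        }
    }
}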
1. The put method: adding a key/value pair
public V put(K key, V value) {
return putVal(hash(key), key, value, false, true);
}
/**
* Inserts an entry
* @param hash the hash of the key
* @param key
* @param value
* @param onlyIfAbsent if true, do not change the existing value when the key is already present
* @param evict if false, the table is in creation mode
* @return the previous value of the key, or null if the key was not present
*/
final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
boolean evict) {
Node<K,V>[] tab; Node<K,V> p; int n, i;
//if the table has not been initialized yet, initialize it via resize()
if ((tab = table) == null || (n = tab.length) == 0)
n = (tab = resize()).length;
//(n - 1) & hash is equivalent to hash modulo n, since n is a power of 2
//if the target bucket is empty
if ((p = tab[i = (n - 1) & hash]) == null)
//place the new entry into the bucket
tab[i] = newNode(hash, key, value, null);
else {
//the target bucket already holds elements
Node<K,V> e; K k;
//check whether the head element of the bucket matches the target key
if (p.hash == hash &&
((k = p.key) == key || (key != null && key.equals(k))))
e = p;
//the bucket holds a tree structure
else if (p instanceof TreeNode)
e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
else {
//the bucket holds a linked list; binCount tracks the list length
for (int binCount = 0; ; ++binCount) {
//all elements of the list have been traversed
if ((e = p.next) == null) {
p.next = newNode(hash, key, value, null);
//if the list length exceeds the threshold, convert it into a tree
if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
treeifyBin(tab, hash);
break;
}
//check whether the current list element matches the target key
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k))))
break;
p = e;
}
}
//the key already exists
if (e != null) {
V oldValue = e.value;
//set the new value
if (!onlyIfAbsent || oldValue == null)
e.value = value;
//run the access callback
afterNodeAccess(e);
return oldValue;
}
}
//a new key was inserted, so both modCount and size are incremented
++modCount;
//resize once size exceeds the threshold
if (++size > threshold)
resize();
//run the insertion callback
afterNodeInsertion(evict);
return null;
}
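A short usage sketch of the semantics described above (PutDemo is a scratch class); putIfAbsent is the public entry point for the onlyIfAbsent = true path:
import java.util.HashMap;
import java.util.Map;

public class PutDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();

        // First insertion of a key: no previous value, so put returns null
        System.out.println(map.put("a", 1));            // null
        // Overwriting an existing key returns the old value
        System.out.println(map.put("a", 2));            // 1

        // putIfAbsent goes through putVal with onlyIfAbsent = true:
        // the existing non-null value is kept
        System.out.println(map.putIfAbsent("a", 99));   // 2
        System.out.println(map.get("a"));               // 2

        // A null key is allowed; hash(null) is 0, so it lands in bucket 0
        map.put(null, 0);
        System.out.println(map.get(null));              // 0
    }
}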
2. The get method: looking up by key
/**
* A null return value does not necessarily mean the key is absent; the value mapped to the key may itself be null
*/
public V get(Object key) {
Node<K,V> e;
return (e = getNode(hash(key), key)) == null ? null : e.value;
}
final Node<K,V> getNode(int hash, Object key) {
Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
//check that the table is not null and the head element of the target bucket is not null
if ((tab = table) != null && (n = tab.length) > 0 &&
(first = tab[(n - 1) & hash]) != null) {
//check whether the first node is the target key
if (first.hash == hash &&
((k = first.key) == key || (key != null && key.equals(k))))
return first;
//if the first node does not match, examine its successor
if ((e = first.next) != null) {
//the node is a TreeNode
if (first instanceof TreeNode)
return ((TreeNode<K,V>)first).getTreeNode(hash, key);
//otherwise it is a plain singly linked list; walk the nodes until the target key is found
do {
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k))))
return e;
} while ((e = e.next) != null);
}
}
return null;
}
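The comment on get can be verified from the outside; containsKey also goes through getNode, which is what distinguishes the two cases (GetDemo is a scratch class):
import java.util.HashMap;
import java.util.Map;

public class GetDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.put("present", null);

        // get returns null both for a missing key and for a key mapped to null...
        System.out.println(map.get("present"));            // null
        System.out.println(map.get("missing"));            // null

        // ...so containsKey (which also uses getNode) is needed to tell the two cases apart
        System.out.println(map.containsKey("present"));    // true
        System.out.println(map.containsKey("missing"));    // false
    }
}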
3. The resize method: initializing the table or doubling its capacity
/**
* Initializes the table or resizes it; every resize doubles the current capacity
*
* @return the table
*/
final Node<K,V>[] resize() {
Node<K,V>[] oldTab = table;
int oldCap = (oldTab == null) ? 0 : oldTab.length;
int oldThr = threshold;
int newCap, newThr = 0;
if (oldCap > 0) {
//the maximum capacity has already been reached
if (oldCap >= MAXIMUM_CAPACITY) {
threshold = Integer.MAX_VALUE;
return oldTab;
}
//if twice the current capacity is still below the maximum and the old capacity is at least the default initial capacity, double the threshold as well
else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
oldCap >= DEFAULT_INITIAL_CAPACITY)
newThr = oldThr << 1; // double threshold
}
//a capacity has been set but the table has not been initialized yet
else if (oldThr > 0) // initial capacity was placed in threshold
newCap = oldThr;
//no initial capacity was set, so initialize with the defaults
else { // zero initial threshold signifies using defaults
newCap = DEFAULT_INITIAL_CAPACITY;
newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
}
//compute newThr from newCap
if (newThr == 0) {
float ft = (float)newCap * loadFactor;
newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
(int)ft : Integer.MAX_VALUE);
}
//allocate the Node array of size newCap
threshold = newThr;
@SuppressWarnings({"rawtypes","unchecked"})
Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
table = newTab;
//redistribute the old entries into the doubled table
if (oldTab != null) {
for (int j = 0; j < oldCap; ++j) {
Node<K,V> e;
if ((e = oldTab[j]) != null) {
oldTab[j] = null;
//the bucket contains only one element
if (e.next == null)
//recompute its index against the new capacity; the index may or may not change
newTab[e.hash & (newCap - 1)] = e;
//the bucket holds a red-black tree
else if (e instanceof TreeNode)
((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
//the bucket holds a linked list
else { // preserve order
Node<K,V> loHead = null, loTail = null;
Node<K,V> hiHead = null, hiTail = null;
Node<K,V> next;
//walk the list and split it into two lists
do {
next = e.next;
//since the capacity is always a power of 2, e.hash & oldCap is either 0 or oldCap:
//when it is 0, the index recomputed with the new capacity stays the same;
//when it is oldCap, the new index is the old index plus the old capacity.
//For example, when growing from 16 to 32: hash 23 gives index 7 at capacity 16 and index 23 at capacity 32, while hash 7 gives index 7 at both capacities
if ((e.hash & oldCap) == 0) {
if (loTail == null)
loHead = e;
else
loTail.next = e;
loTail = e;
}
else {
if (hiTail == null)
hiHead = e;
else
hiTail.next = e;
hiTail = e;
}
} while ((e = next) != null);
//attach the low list at the original index and the high list at index + oldCap
if (loTail != null) {
loTail.next = null;
newTab[j] = loHead;
}
if (hiTail != null) {
hiTail.next = null;
newTab[j + oldCap] = hiHead;
}
}
}
}
}
return newTab;
}
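The lo/hi split rule in the comments above can be checked with plain arithmetic, reusing the hash values 7 and 23 from the example (ResizeIndexDemo is just a scratch class):
public class ResizeIndexDemo {
    public static void main(String[] args) {
        int oldCap = 16;
        int newCap = 32;

        for (int hash : new int[]{7, 23}) {
            int oldIndex = hash & (oldCap - 1);
            int newIndex = hash & (newCap - 1);
            // (hash & oldCap) == 0  -> the entry stays at oldIndex ("lo" list)
            // (hash & oldCap) != 0  -> the entry moves to oldIndex + oldCap ("hi" list)
            boolean stays = (hash & oldCap) == 0;
            System.out.println("hash=" + hash
                    + " oldIndex=" + oldIndex
                    + " newIndex=" + newIndex
                    + " stays=" + stays);
        }
        // Output:
        // hash=7  oldIndex=7 newIndex=7  stays=true
        // hash=23 oldIndex=7 newIndex=23 stays=false
    }
}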
4. The remove method: deleting a key
public V remove(Object key) {
Node<K,V> e;
return (e = removeNode(hash(key), key, null, false, true)) == null ?
null : e.value;
}
final Node<K,V> removeNode(int hash, Object key, Object value,
boolean matchValue, boolean movable) {
Node<K,V>[] tab; Node<K,V> p; int n, index;
//check that the target bucket is not null
if ((tab = table) != null && (n = tab.length) > 0 &&
(p = tab[index = (n - 1) & hash]) != null) {
Node<K,V> node = null, e; K k; V v;
//locate the element for the given key; the lookup logic is the same as in getNode
if (p.hash == hash &&
((k = p.key) == key || (key != null && key.equals(k))))
node = p;
else if ((e = p.next) != null) {
if (p instanceof TreeNode)
node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
else {
do {
if (e.hash == hash &&
((k = e.key) == key ||
(key != null && key.equals(k)))) {
node = e;
break;
}
p = e;
} while ((e = e.next) != null);
}
}
//an element was found for the target key
if (node != null && (!matchValue || (v = node.value) == value ||
(value != null && value.equals(v)))) {
//tree structure
if (node instanceof TreeNode)
((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
//the node is the head of the bucket
else if (node == p)
tab[index] = node.next;
//linked list: p is the predecessor of node
else
p.next = node.next;
++modCount;
--size;
//run the removal callback
afterNodeRemoval(node);
return node;
}
}
return null;
}
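A usage sketch of remove (RemoveDemo is a scratch class); the two-argument remove(key, value) is the public entry point for the matchValue = true path:
import java.util.HashMap;
import java.util.Map;

public class RemoveDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.put("a", 1);

        // remove(key) returns the value that was removed, or null if the key was absent
        System.out.println(map.remove("a"));        // 1
        System.out.println(map.remove("a"));        // null

        // remove(key, value) goes through removeNode with matchValue = true:
        // the entry is only removed when the current value matches
        map.put("b", 2);
        System.out.println(map.remove("b", 99));    // false, value does not match
        System.out.println(map.remove("b", 2));     // true
    }
}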
5. The treeifyBin method: converting a singly linked list into a red-black tree
final void treeifyBin(Node<K,V>[] tab, int hash) {
int n, index; Node<K,V> e;
//if the capacity is below 64, just resize instead of treeifying
if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
resize();
//check that the target bucket is not empty
else if ((e = tab[index = (n - 1) & hash]) != null) {
//hd is the head element, tl is the previous element in the list
TreeNode<K,V> hd = null, tl = null;
do {
//convert the original Node into a TreeNode
TreeNode<K,V> p = replacementTreeNode(e, null);
if (tl == null)
hd = p;
else {
p.prev = tl;
tl.next = p;
}
tl = p;
} while ((e = e.next) != null);
//turn the linked list of TreeNodes into an actual red-black tree
if ((tab[index] = hd) != null)
hd.treeify(tab);
}
}
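Treeification is hard to observe directly without reflection, but it is easy to trigger. The sketch below uses a made-up BadKey class whose hashCode is constant, so all entries collide in one bucket; once more than 8 nodes accumulate (with table capacity at least 64) the bin is converted to a tree, and lookups keep working either way.
import java.util.HashMap;
import java.util.Map;

public class TreeifyDemo {
    // Hypothetical key type: a constant hashCode forces every key into one bucket;
    // implementing Comparable lets the red-black tree order the colliding keys
    static class BadKey implements Comparable<BadKey> {
        final int id;
        BadKey(int id) { this.id = id; }
        @Override public int hashCode() { return 42; }
        @Override public boolean equals(Object o) {
            return o instanceof BadKey && ((BadKey) o).id == id;
        }
        @Override public int compareTo(BadKey o) { return Integer.compare(id, o.id); }
        @Override public String toString() { return "BadKey(" + id + ")"; }
    }

    public static void main(String[] args) {
        Map<BadKey, Integer> map = new HashMap<>(64); // capacity >= MIN_TREEIFY_CAPACITY
        for (int i = 0; i < 20; i++) {
            map.put(new BadKey(i), i);   // more than TREEIFY_THRESHOLD collisions in one bucket
        }
        // Lookups still work; internally the bucket has been converted from a list to a tree
        System.out.println(map.get(new BadKey(13)));   // 13
        System.out.println(map.size());                // 20
    }
}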
6. The HashIterator implementation. This class is the parent of KeyIterator, ValueIterator and EntryIterator, which back the corresponding KeySet, Values and EntrySet views and thereby provide iteration over keys, values and Map.Entry objects.
abstract class HashIterator {
Node<K,V> next; // next entry to return
Node<K,V> current; // current entry
int expectedModCount; // for fast-fail
int index; // current slot
HashIterator() {
expectedModCount = modCount;
Node<K,V>[] t = table;
current = next = null;
index = 0;
if (t != null && size > 0) {
//advance to the first non-empty bucket
do {} while (index < t.length && (next = t[index++]) == null);
}
}
public final boolean hasNext() {
return next != null;
}
final Node<K,V> nextNode() {
Node<K,V>[] t;
Node<K,V> e = next;
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
if (e == null)
throw new NoSuchElementException();
//first check whether the current node has a successor, i.e. keep walking within the same bucket; if not, move on to the next non-empty bucket
//note that the current node is returned here while the next node to visit is located
if ((next = (current = e).next) == null && (t = table) != null) {
do {} while (index < t.length && (next = t[index++]) == null);
}
return e;
}
public final void remove() {
Node<K,V> p = current;
if (p == null)
throw new IllegalStateException();
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
current = null;
K key = p.key;
removeNode(hash(key), key, null, false, false);
//expectedModCount is updated here, so removing through the iterator does not trigger the fast-fail check
expectedModCount = modCount;
}
}
final class KeyIterator extends HashIterator
implements Iterator<K> {
public final K next() { return nextNode().key; }
}
final class ValueIterator extends HashIterator
implements Iterator<V> {
public final V next() { return nextNode().value; }
}
final class EntryIterator extends HashIterator
implements Iterator<Map.Entry<K,V>> {
public final Map.Entry<K,V> next() { return nextNode(); }
}
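The fail-fast behaviour and the iterator's own remove can be observed externally (FailFastDemo is a scratch class for this sketch):
import java.util.ConcurrentModificationException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class FailFastDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.put("a", 1);
        map.put("b", 2);
        map.put("c", 3);

        // Structurally modifying the map directly while iterating changes modCount,
        // so the next call to nextNode() fails fast
        try {
            for (String k : map.keySet()) {
                if (k.equals("a")) {
                    map.remove("b");
                }
            }
        } catch (ConcurrentModificationException e) {
            System.out.println("fail-fast: " + e);
        }

        // Iterator.remove() resynchronizes expectedModCount, so it is safe
        map.put("b", 2);
        for (Iterator<Map.Entry<String, Integer>> it = map.entrySet().iterator(); it.hasNext(); ) {
            if (it.next().getKey().equals("b")) {
                it.remove();
            }
        }
        System.out.println(map);   // {a=1, c=3}
    }
}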
Java 8 also introduced the Spliterator interface, which indicates that the class can process its data in parallel as a stream; see
https://blog.csdn.net/lh513828570/article/details/56673804. The implementation pattern is largely boilerplate.
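For example, a parallel stream over the entry set is driven by these spliterators (SpliteratorDemo is a scratch class):
import java.util.HashMap;
import java.util.Map;

public class SpliteratorDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        for (int i = 0; i < 100; i++) {
            map.put("k" + i, i);
        }

        // parallelStream() obtains a Spliterator from the entry set and splits the work across threads
        int sum = map.entrySet()
                     .parallelStream()
                     .mapToInt(Map.Entry::getValue)
                     .sum();
        System.out.println(sum);   // 4950

        // The spliterator can also be used directly
        System.out.println(map.keySet().spliterator().estimateSize());   // 100
    }
}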
7. Map serialization. HashMap overrides the default implementation; note that readObject must read data in the same order that writeObject wrote it.
private void writeObject(java.io.ObjectOutputStream s)
throws IOException {
int buckets = capacity();
//write the non-transient fields (loadFactor and threshold)
s.defaultWriteObject();
s.writeInt(buckets);
s.writeInt(size);
internalWriteEntries(s);
}
// Called only from writeObject, to ensure compatible ordering.
void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
Node<K,V>[] tab;
if (size > 0 && (tab = table) != null) {
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next) {
//walk the table and write each element's key and value separately into the object stream
s.writeObject(e.key);
s.writeObject(e.value);
}
}
}
}
/**
* Reconstitute the {@code HashMap} instance from a stream (i.e.,
* deserialize it).
*/
private void readObject(java.io.ObjectInputStream s)
throws IOException, ClassNotFoundException {
// read the non-transient fields (loadFactor and threshold) from the stream
s.defaultReadObject();
//re-initialize the HashMap
reinitialize();
//validate the load factor
if (loadFactor <= 0 || Float.isNaN(loadFactor))
throw new InvalidObjectException("Illegal load factor: " +
loadFactor);
//read buckets, i.e. the capacity of the original map, and discard it
s.readInt();
// read the size of the original map
int mappings = s.readInt();
if (mappings < 0)
throw new InvalidObjectException("Illegal mappings count: " +
mappings);
else if (mappings > 0) { // (if zero, use defaults)
//compute the capacity from the number of mappings and a clamped load factor
float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
float fc = (float)mappings / lf + 1.0f;
int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
DEFAULT_INITIAL_CAPACITY :
(fc >= MAXIMUM_CAPACITY) ?
MAXIMUM_CAPACITY :
tableSizeFor((int)fc));
float ft = (float)cap * lf;
threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
(int)ft : Integer.MAX_VALUE);
// Check Map.Entry[].class since it's the nearest public type to
// what we're actually creating.
SharedSecrets.getJavaOISAccess().checkArray(s, Map.Entry[].class, cap);
//initialize the table
@SuppressWarnings({"rawtypes","unchecked"})
Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
table = tab;
// read each key and value one by one
for (int i = 0; i < mappings; i++) {
@SuppressWarnings("unchecked")
K key = (K) s.readObject();
@SuppressWarnings("unchecked")
V value = (V) s.readObject();
putVal(hash(key), key, value, false, false);
}
}
}
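A round-trip through Java serialization exercises the two methods above (SerializationDemo is a scratch class):
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.HashMap;

public class SerializationDemo {
    public static void main(String[] args) throws Exception {
        HashMap<String, Integer> original = new HashMap<>();
        original.put("a", 1);
        original.put("b", 2);

        // writeObject: non-transient fields, then capacity, size and each key/value pair
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
            oos.writeObject(original);
        }

        // readObject: rebuilds the table and re-inserts every entry via putVal
        try (ObjectInputStream ois = new ObjectInputStream(
                new ByteArrayInputStream(bos.toByteArray()))) {
            @SuppressWarnings("unchecked")
            HashMap<String, Integer> copy = (HashMap<String, Integer>) ois.readObject();
            System.out.println(copy);                      // {a=1, b=2}
            System.out.println(copy.equals(original));     // true
        }
    }
}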