JDK 1.8 HashMap Source Code Analysis
public class TestHashMap<K,V> extends AbstractMap<K,V> implements Map<K,V>, Cloneable, Serializable { //序列号 private static final long serialVersionUID = 362498820763181265L; /** * map是键值对的存储方式 , 链表加数组的存储结构 . 1<<4 = 16即是数组的默认初始容量 . * */ static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16 /** * 最大容量 : */ static final int MAXIMUM_CAPACITY = 1 << 30; /** * 默认加载因子 */ static final float DEFAULT_LOAD_FACTOR = 0.75f; /** * 当桶(bucket)上的结点数大于这个值时会转成红黑树 */ static final int TREEIFY_THRESHOLD = 8; /** * 当bucket[桶]上的节点数小于6时将红黑树结构转为链表 */ static final int UNTREEIFY_THRESHOLD = 6; /** * 最小树形化容量阈值:即 当哈希表中的容量 > 该值时,才允许树形化链表 (即 将链表 转换成红黑树) * 否则,若桶内元素太多时,则直接扩容,而不是树形化 * 为了避免进行扩容、树形化选择的冲突,这个值不能小于 4 * TREEIFY_THRESHOLD */ static final int MIN_TREEIFY_CAPACITY = 64; /** * 泛型 : 代表存储的键值对 * @param <K> 键的类型 * @param <V> 值的类型 */ static class Node<K,V> implements Map.Entry<K,V> { final int hash; final K key; V value; Node<K,V> next; Node(int hash, K key, V value, Node<K,V> next) { this.hash = hash; this.key = key; this.value = value; this.next = next; } public final K getKey() { return key; } public final V getValue() { return value; } public final String toString() { return key + "=" + value; } public final int hashCode() { return Objects.hashCode(key) ^ Objects.hashCode(value); } public final V setValue(V newValue) { V oldValue = value; value = newValue; return oldValue; } public final boolean equals(Object o) { if (o == this) return true; if (o instanceof Map.Entry) { Map.Entry<?,?> e = (Map.Entry<?,?>)o; if (Objects.equals(key, e.getKey()) && Objects.equals(value, e.getValue())) return true; } return false; } } /* ---------------- Static utilities -------------- */ /** * 计算存储的数据的键的hash . 为避免计算key值时出现碰撞 , 针对hash进行重写 .在原有objects求hash的基础上 , 对key的hash值幂等于右移key的hash值的16位 * @param key * @return */ static final int hash(Object key) { int h; // 两个值做异或,最终相同的可能性很大 return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16); } /** * 判断对象是否实现compare接口 , 若实现 : 返回运营的对象本身类型 . 否则 : 返回null * @param x * @return */ static Class<?> comparableClassFor(Object x) { if (x instanceof Comparable) { Class<?> c; Type[] ts, as; Type t; ParameterizedType p; if ((c = x.getClass()) == String.class) // bypass checks return c; if ((ts = c.getGenericInterfaces()) != null) { for (int i = 0; i < ts.length; ++i) { if (((t = ts[i]) instanceof ParameterizedType) && ((p = (ParameterizedType)t).getRawType() == Comparable.class) && (as = p.getActualTypeArguments()) != null && as.length == 1 && as[0] == c) // type arg is c return c; } } } return null; } /** * Returns k.compareTo(x) if x matches kc (k's screened comparable * class), else 0. * 如果x的类型是kc,返回k.compareTo(x)的比较结果 * 如果x为空,或者类型不是kc,返回0 */ @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable static int compareComparables(Class<?> kc, Object k, Object x) { return (x == null || x.getClass() != kc ? 0 : ((Comparable)k).compareTo(x)); } /** * Returns a power of two size for the given target capacity. * 大于输入参数且最近的2的整数次幂的数。最大值为1<< 30 . */ static final int tableSizeFor(int cap) { int n = cap - 1; n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16; return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; } /* ---------------- Fields -------------- */ /** * 存储元素的数组,总是2的幂次倍 */ transient Node<K,V>[] table; /** * 存放具体元素的集 : 为键值对映射关系 */ transient Set<Map.Entry<K,V>> entrySet; /** * The number of key-value mappings contained in this map. * 集合的长度 */ transient int size; /** * 修改次数 : hashMap本身是线程不安全的 . 
这个属性是在迭代map集合时 , * 在迭代的过程中有其他线程对该集合进行修改(增/删/改)时 .会抛出异常,一定程度上保证线程修改的可见性 */ transient int modCount; /** * The next size value at which to resize (capacity * load factor). * 临界值 当实际大小(容量*填充因子)超过临界值时,会进行扩容 * @serial */ int threshold; /** * The load factor for the hash table. * 填充因子 * @serial */ float loadFactor; /* ---------------- Public operations -------------- */ /** * 构造器: 自定义默认加载容量,及填充因子 * @param initialCapacity * @param loadFactor */ public HashMap(int initialCapacity, float loadFactor) { if (initialCapacity < 0) throw new IllegalArgumentException("Illegal initial capacity: " + initialCapacity); if (initialCapacity > MAXIMUM_CAPACITY) initialCapacity = MAXIMUM_CAPACITY; if (loadFactor <= 0 || Float.isNaN(loadFactor)) throw new IllegalArgumentException("Illegal load factor: " + loadFactor); this.loadFactor = loadFactor; this.threshold = tableSizeFor(initialCapacity); } /** * 构造器 : 自定义默认加载容量 * @param initialCapacity */ public HashMap(int initialCapacity) { this(initialCapacity, DEFAULT_LOAD_FACTOR); } /** * 无参构造 */ public HashMap() { this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted } /** * 构造器 : 把原有的集合数据放到里面 */ public HashMap(Map<? extends K, ? extends V> m) { this.loadFactor = DEFAULT_LOAD_FACTOR; putMapEntries(m, false); } /** * 函数将集合m的所有元素存入本HashMap实例中。 * */ final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) { //s为m的实际元素个数 int s = m.size(); if (s > 0) { // 判断table是否已经初始化 if (table == null) { // pre-size // 未初始化 : 基于s定义table初始化大小: 不允许超出最大容量 float ft = ((float)s / loadFactor) + 1.0F; int t = ((ft < (float)MAXIMUM_CAPACITY) ? (int)ft : MAXIMUM_CAPACITY); // 计算得到的t大于阈值,则初始化阈值 if (t > threshold) threshold = tableSizeFor(t); } //table已初始化,并且m元素个数大于阈值,进行扩容处理 else if (s > threshold) resize(); //将m中的所有元素添加至HashMap中 for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) { K key = e.getKey(); V value = e.getValue(); putVal(hash(key), key, value, false, evict); } } } /** * map集合长度----数组的长度 * @return */ public int size() { return size; } /** * 判断是否为空集合 * @return */ public boolean isEmpty() { return size == 0; } /** * 根据K获取value * @param key * @return */ public V get(Object key) { //临时变量 :节点e Node<K,V> e; //1. 根据key获取hash值 //2. 根据key的hash值及key对象获取对应的节点Node //3. 把获取到的节点对象赋值给e //4. 判断当前节点e是否为null .为null : 直接返回null . 不为null : 获取当前节点对应的value值 return (e = getNode(hash(key), key)) == null ? null : e.value; } /** * 实现了map类中的get方法及相关方法 * @param hash key的hash值 : 调用的上面的hash方法 : key.hashCdoe() ^ key.hashCdoe >>> 16 : 右移16位[且忽略正负符号] * @param key key对象 * @return 当前节点Node对象 , 或者为null或者不存在 */ final Node<K,V> getNode(int hash, Object key) { //临时变量 : 节点数组对象 Node<K,V>[] tab; //节点对象 : first: 第一个节点 . e : 节点对象 Node<K,V> first, e; int n; K k; //1. 临时变量赋值 : tab= table . n = table.length . first = tab[(n - 1) & hash] //2. 判断 : table数组不为null 且 table数组长度 > 0, 即有数据 且第一个节点不为null if ((tab = table) != null && (n = tab.length) > 0 && (first = tab[(n - 1) & hash]) != null) { //针对key对象进行判断是否相同 . hash值 & 对象本身内容 & 是否同一个对象 //1 .满足以上非空判断情况下 , 判断first节点的hash值[为插入数据时计算所得]是否等于当前要存储的key的hash值 //2 .给临时变量赋值 : k = first.key 判断节点first的key对象是否同要存储的key对象相同 即为同一个对象 //3 .key对象不为null且内容相同 if (first.hash == hash && ((k = first.key) == key || (key != null && key.equals(k)))) return first; //若不满足以上情况. 则获取first下一个节点并对e进行初始化赋值 if ((e = first.next) != null) {//判断first下一个节点不为null //判断是否为红黑树结构 if (first instanceof TreeNode) //若为红黑树结构 : 1. first节点强转为TreeNode<K , V>对象,并根据hash(key) & key获取节点对象并返回 return ((TreeNode<K,V>)first).getTreeNode(hash, key); //不是红黑树结构 : 即当前仍是链表结构 . 
则在e节点不为null的情况下 循环获取Node节点e do { //当节点e满足key相同 , 直接返回节点e if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) return e; } while ((e = e.next) != null); } } return null; } /** * 判断key是否存在 : 通过key获取节点 . 当节点不为null的情况下 . 证明已存在 . 这个过程中不判断value值 * @param key * @return */ public boolean containsKey(Object key) { return getNode(hash(key), key) != null; } /** * 存储键值对数据 * @param key 键 * @param value 值 * @return */ public V put(K key, V value) { return putVal(hash(key), key, value, false, true); } /** * 存键值对的方法 * @param hash 键的hash值 * @param key 键 * @param value 值 * @param onlyIfAbsent 是否替换oldValue * @param evict * @return */ final V putVal(int hash, K key, V value, boolean onlyIfAbsent, boolean evict) { Node<K,V>[] tab; Node<K,V> p; int n, i; //如果 : table未初始化或者长度为0时 if ((tab = table) == null || (n = tab.length) == 0) //进行扩容操作 n = (tab = resize()).length; //TODO : 给节点P进行初始化赋值: p = tab[i = (n - 1) & hash] //如果当前数组不为空 且在数组中 根据key的hash值 & n-1对应的下标处的节点为null // 生成新的节点 , 并把value放入桶中 , 此时这个节点是在数组中而不是链表中 if ((p = tab[i = (n - 1) & hash]) == null) //生成新的节点并把数据放入桶中 tab[i] = newNode(hash, key, value, null); //如果当前数组即不为空且在数组中当前hash值 & n-1 对应的下标的节点不为null时 , // 往数组中该下标对应的 链表或者红黑树中插入value值 else { Node<K,V> e; K k; //1. 比较节点P的hash值与要存储的key的hash值是否相同 , //2. 对局部变量k = p.key 比较节点P的key对象是否同要存储的key是相同的 , 或者要存储的key不为null 且 key对象即为节点P的key对象 if (p.hash == hash && ((k = p.key) == key || (key != null && key.equals(k)))) //两个条件满足的情况下 , 将该数组的第一个元素赋值给e : e = p e = p; //如果两个条件不满足 , 即key不相等 , 则判断是否为红黑树结构 else if (p instanceof TreeNode) //若是红黑树结构 : 则把对应的键值对数据放入tree中 e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value); //如果两个条件均不满足且不是红黑树结构 else { //则循环链表数据 , 在链表最末插入节点 for (int binCount = 0; ; ++binCount) { //对e进行赋值 , 且链表中对比节点p的下一个节点是否为null if ((e = p.next) == null) { //若为null , 初始化创建新的节点,并存储目标数据 p.next = newNode(hash, key, value, null); //判断当前循环次数是否大于7 . 是 : 则将链表转换为红黑树结构 if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st treeifyBin(tab, hash); //停止并跳出循环 break; } //e[也就是节点p的下一个节点]节点不为null时 ,判断e的hash值即key是否相同 if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) //若相同 , 则停止并跳出循环 break; //若p的下一个节点的key与要存储的key不相同,则对节点P重新赋值 : 此时 : p = p.next; p = e; } } //节点e不为null , 即目标key-value的key在map中已存在 if (e != null) { // existing mapping for key //取出上一个value值 V oldValue = e.value; //onlyIfAbsent : true : 不替换当前value值 , false : 替换当前value值 . //若替换当前value值 或者当前value值为null if (!onlyIfAbsent || oldValue == null) //替换e节点处的value值为目标要存储的value值 e.value = value; //访问后回调函数 afterNodeAccess(e); //返回旧的value值 : jar包中putval方法不可调用 return oldValue; } } //集合修改次数+1 ++modCount; //1. 当前集合长度size+1 2. 若size> 当前加载因子 扩容 if (++size > threshold) resize(); //集合处理后回调 : 主要是在linkedHashMap中应用 . hashmap本身并无实际意义 afterNodeInsertion(evict); return null; } /** * 扩容 * @return 返回扩容后的Node数组 */ final Node<K,V>[] resize() { //现有的table Node<K,V>[] oldTab = table; //oldCap原有table容量 : 原table为null? Y : oldCap = 0; N : oldCap = table.length int oldCap = (oldTab == null) ? 0 : oldTab.length; //临界值 int oldThr = threshold; //新的table容量即长度 . 新的临界值 int newCap, newThr = 0; //若table不为null if (oldCap > 0) { //若原有table容量已达到最大容量 : 对临界值进行赋值为 : 2 ^ 31 -1 ,此时不再进行扩容操作 . if (oldCap >= MAXIMUM_CAPACITY) { threshold = Integer.MAX_VALUE; return oldTab; } //若没有达到最大容量 . 
则newCap = oldCap * 2[即左移一位] //若此时newCap < 最大容量 且 oldCap >= 默认初始容量 : 1 << 16 else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY && oldCap >= DEFAULT_INITIAL_CAPACITY) //则针对临界值 * 2 newThr = oldThr << 1; //这时候若做批量插入 : 则容易造成map内存溢出 } //若table长度=0 判断原有临界值是否初始化赋值 else if (oldThr > 0) //此时 : 临界值不变 newCap = oldThr; //原本临界值没有赋值 : 即该map集合此时为空的集合 else { //数组长度为默认的初始化长度 1<< 16 newCap = DEFAULT_INITIAL_CAPACITY; //临界值为默认初始化长度*加载因子 . 即临界值 = 1 << 16 * 0.75f newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY); } //若此时 : 新的临界值为0 if (newThr == 0) { float ft = (float)newCap * loadFactor; //此时变更后的集合长度小于最大容量 且 此时数组容量 * 填充因子 也没有达到最大值 ? Y : 新的临界值为ft N : 1<<31 -1 newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ? (int)ft : Integer.MAX_VALUE); } //给全局变量 : 临界值重新赋值 threshold = newThr; //根据得到的集合长度newCap 生成新的Node数组 Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap]; //且赋值给全局变量table table = newTab; //若原有table不为null if (oldTab != null) { //根据原有数组长度进行循环 , 把旧数据拷贝到新的数组中 for (int j = 0; j < oldCap; ++j) { Node<K,V> e;//临时节点变量e if ((e = oldTab[j]) != null) {//若当前节点不为null : e进行初始化赋值 //给旧数组中对应的节点赋予null oldTab[j] = null; if (e.next == null)//针对数组中下标对应的当前链表[或红黑树] : 若节点e的下一个节点为null newTab[e.hash & (newCap - 1)] = e;//对新的数组对应的下标位置进行初始化赋值 else if (e instanceof TreeNode)//判断旧的节点是一个树节点,则对树进行操作 ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);// else {//为链表结构 : 针对链表结构进行操作 //一堆临时变量 Node<K,V> loHead = null, loTail = null; Node<K,V> hiHead = null, hiTail = null; Node<K,V> next; do {//循环当前链表. 通过e.next的方式 next = e.next; //e.hash & oldCap执行与运算符 . 参考二进制 . 取出小的数据的 // 在计算机中&运算符速度较快 . 为0则证明有对应的下标 if ((e.hash & oldCap) == 0) { //若此时loTail仍为null if (loTail == null) loHead = e;//即链表结构头节点为e else //不然 : 把节点e存储为loTail的next节点 loTail.next = e; //loTail重新赋值为e : loTail = e; } else {//位运算得到不为0 .则在远table中无 [这两处的位运算的判断目的没搞很明白 , 有明白的指导一下?????] if (hiTail == null) hiHead = e; else hiTail.next = e; hiTail = e; } } while ((e = next) != null); //TODO: 把处理过的链表或者红黑树结构放入数组对应的下标处 if (loTail != null) { loTail.next = null; newTab[j] = loHead; } if (hiTail != null) { hiTail.next = null; newTab[j + oldCap] = hiHead; } } } } } return newTab; } /** * 将数组中对应的下标处的链表转为红黑树结构 * @param tab * @param hash */ final void treeifyBin(Node<K,V>[] tab, int hash) { int n, index; Node<K,V> e; if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY) resize();//若数组此时为空 , 或者数组的长度小于64[树形化结构的最小值] . 扩容 else if ((e = tab[index = (n - 1) & hash]) != null) { //根据hash值得到数组中下标对应的节点对象 . 若不为null TreeNode<K,V> hd = null, tl = null; do { TreeNode<K,V> p = replacementTreeNode(e, null);//创建新的树节点p , 该操作即为下面的操作 // TreeNode<K,V> p = new TreeNode<K,V>(e.hash, e.key, e.value, null); //若树节点t1为null if (tl == null) hd = p;//则对hd进行初始化赋值p else {//不然则 : 对树节点的prev和next进行初始化赋值 p.prev = tl; tl.next = p;//TODO: 这个赋值 : 感觉没啥意义 , 有理解的希望分享下 } //t1重新初始化赋值 tl = p; } while ((e = e.next) != null); //若数组中下标对应的节点不为null if ((tab[index] = hd) != null) hd.treeify(tab);//则对链表进行树形化结构处理 } } /** * 批量插入 * @param m */ public void putAll(Map<? extends K, ? extends V> m) { putMapEntries(m, true); } /** * 根据K移除 * @param key * @return */ public V remove(Object key) { Node<K,V> e; return (e = removeNode(hash(key), key, null, false, true)) == null ? null : e.value; } /** * 移除节点 : * @param hash key的hash值 * @param key 键 * @param value 值: 实际调用中无实际意义 * @param matchValue : 作用:区别remove(Key key)与remove(Key key,Value value) * 如果matchValue为false,remove(Key key)则删除与key值相等的节点,否则不删除 * @param movable :false : remove()中不移除其他节点 . 
否则移除 * @return */ final Node<K,V> removeNode(int hash, Object key, Object value, boolean matchValue, boolean movable) { Node<K,V>[] tab; Node<K,V> p; int n, index; if ((tab = table) != null && (n = tab.length) > 0 && (p = tab[index = (n - 1) & hash]) != null) {//数组不为空且key所对应的下标的节点对象不为null Node<K,V> node = null, e; K k; V v; //目标key的对比 if (p.hash == hash && ((k = p.key) == key || (key != null && key.equals(k)))) node = p;//初始化赋值 else if ((e = p.next) != null) {//e初始化赋值 ., 且节点p的next节点不为null if (p instanceof TreeNode)//若本身结构为树结构 node = ((TreeNode<K,V>)p).getTreeNode(hash, key);//从树种取出Node对象 else {//不为树结构 , 则循环链表判断key给node初始化赋值 do { if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) { node = e; break; } p = e; } while ((e = e.next) != null); } } //若key存在 , 且删除与key相同的节点 if (node != null && (!matchValue || (v = node.value) == value || (value != null && value.equals(v)))) { if (node instanceof TreeNode)//移除树节点 ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable); else if (node == p) tab[index] = node.next;//替换节点对象 else p.next = node.next; ++modCount;//map的操作次数 . 乐观锁 : 一定程度上保证了线程安全 . --size;//size-1 //节点移除后的回调方法 afterNodeRemoval(node); return node; } } return null; } /** * 清空数组 */ public void clear() { Node<K,V>[] tab; modCount++; if ((tab = table) != null && size > 0) { size = 0; for (int i = 0; i < tab.length; ++i) tab[i] = null; } } /** * 是否存在value值 * @param value * @return */ public boolean containsValue(Object value) { Node<K,V>[] tab; V v; if ((tab = table) != null && size > 0) { for (int i = 0; i < tab.length; ++i) { for (Node<K,V> e = tab[i]; e != null; e = e.next) { if ((v = e.value) == value || (value != null && value.equals(v))) return true; } } } return false; } /** * 获取集合所有的key的集合 获取继承自abstractMap的参数 . * 若为null则 : 通过内部类KeySet 执行初始化赋值操作 * @return */ public Set<K> keySet() { Set<K> ks = keySet; if (ks == null) { ks = new KeySet(); keySet = ks; } return ks; } /** * 内部类KeySet : 即集合中键的集合 */ final class KeySet extends AbstractSet<K> { //数组/集合长度 public final int size() { return size; } //清空方法 public final void clear() { TestHashMap.this.clear(); } //迭代器:根据key public final Iterator<K> iterator() { return new KeyIterator(); } //判断是否存在 public final boolean contains(Object o) { return containsKey(o); } //根据key移除 , 并移除相同key的节点数据 public final boolean remove(Object key) { return removeNode(hash(key), key, null, false, true) != null; } //分割迭代器 public final Spliterator<K> spliterator() { return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0); } /** * 1.8新增迭代器 : 支持除map集合之外的其他集合类型 : list & set * action: 可以在action内部定义规则 : 同lamda表达式类似 * demo1: * map.forEach((key,value1)->{ * if(key != null && key > 1) { * System.out.println(key + ":" + value1); * } * }); * demo2 * Consumer<Map.Entry<Integer , String>> action = new Consumer<Map.Entry<Integer, String>>() { * @Override * public void accept(Map.Entry<Integer, String> integerStringEntry) { * if(integerStringEntry.getKey() != null && integerStringEntry.getKey() < 2) { * System.out.println(integerStringEntry.getKey() + " = " + integerStringEntry.getValue()); * } * } * }; * map.entrySet().forEach(action); * demo3 : * Consumer<Integer> action2 = new Consumer<Integer>() { * @Override * public void accept(Integer integer) { * if(integer != null && integer > 1) { * System.out.println(integer); * } * } * }; * map.keySet().forEach(action2); * * @param action */ public final void forEach(Consumer<? 
super K> action) { Node<K,V>[] tab; if (action == null) throw new NullPointerException(); if (size > 0 && (tab = table) != null) { int mc = modCount; for (int i = 0; i < tab.length; ++i) { for (Node<K,V> e = tab[i]; e != null; e = e.next) action.accept(e.key); } if (modCount != mc) throw new ConcurrentModificationException(); } } } /** * 获取所有的value值集合 * @return */ public Collection<V> values() { Collection<V> vs = values; if (vs == null) { vs = new Values(); values = vs; } return vs; } /** * AbstractCollection中的方法 */ final class Values extends AbstractCollection<V> { public final int size() { return size; } public final void clear() { HashMap.this.clear(); } public final Iterator<V> iterator() { return new ValueIterator(); } public final boolean contains(Object o) { return containsValue(o); } public final Spliterator<V> spliterator() { return new ValueSpliterator<K , V>(TestHashMap.this, 0, -1, 0, 0); } //迭代方法 public final void forEach(Consumer<? super V> action) { Node<K,V>[] tab; if (action == null) throw new NullPointerException(); if (size > 0 && (tab = table) != null) { int mc = modCount; for (int i = 0; i < tab.length; ++i) { for (Node<K,V> e = tab[i]; e != null; e = e.next) action.accept(e.value); } if (modCount != mc) throw new ConcurrentModificationException(); } } } /** * key-value 映射 * @return */ public Set<Map.Entry<K,V>> entrySet() { Set<Map.Entry<K,V>> es; return (es = entrySet) == null ? (entrySet = new EntrySet()) : es; } /** * key-value映射时初始化的对象 */ final class EntrySet extends AbstractSet<Map.Entry<K,V>> { public final int size() { return size; } public final void clear() { TestHashMap.this.clear(); } //迭代器 : 自定义迭代规则 : 获取节点中的数据 public final Iterator<Map.Entry<K,V>> iterator() { return new EntryIterator(); } public final boolean contains(Object o) { if (!(o instanceof Map.Entry)) return false; Map.Entry<?,?> e = (Map.Entry<?,?>) o; Object key = e.getKey(); Node<K,V> candidate = getNode(hash(key), key); return candidate != null && candidate.equals(e); } public final boolean remove(Object o) { if (o instanceof Map.Entry) { Map.Entry<?,?> e = (Map.Entry<?,?>) o; Object key = e.getKey(); Object value = e.getValue(); return removeNode(hash(key), key, value, true, true) != null; } return false; } public final Spliterator<Map.Entry<K,V>> spliterator() { return new EntrySpliterator<>(TestHashMap.this, 0, -1, 0, 0); } public final void forEach(Consumer<? super Map.Entry<K,V>> action) { Node<K,V>[] tab; if (action == null) throw new NullPointerException(); if (size > 0 && (tab = table) != null) { int mc = modCount; for (int i = 0; i < tab.length; ++i) { for (Node<K,V> e = tab[i]; e != null; e = e.next) action.accept(e); } if (modCount != mc) throw new ConcurrentModificationException(); } } } /** * 重写的方法1.7之前也存在 : 若key存在 : 则返回对应value . 不存在 : 返回defaultValue * @param key * @param defaultValue * @return */ @Override public V getOrDefault(Object key, V defaultValue) { Node<K,V> e; return (e = getNode(hash(key), key)) == null ? 
defaultValue : e.value; } /** * 如果匹配不到则增加key-value,返回null,如果匹配到Node,如果oldValue不等于null则不进行value覆盖,返回oldValue * @param key * @param value * @return */ @Override public V putIfAbsent(K key, V value) { return putVal(hash(key), key, value, true, true); } /** * 根据key匹配node,如果value也相同则删除 * @param key * @param value * @return */ @Override public boolean remove(Object key, Object value) { return removeNode(hash(key), key, value, true, true) != null; } /** * 替换 : 当key对应的value相同时才执行成功 * @param key * @param oldValue * @param newValue * @return */ @Override public boolean replace(K key, V oldValue, V newValue) { Node<K,V> e; V v; if ((e = getNode(hash(key), key)) != null && ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) { e.value = newValue; afterNodeAccess(e); return true; } return false; } /** * 直接进行替换 : key不存在 : 返回false . 存在 : 直接替换 * @param key * @param value * @return */ @Override public V replace(K key, V value) { Node<K,V> e; if ((e = getNode(hash(key), key)) != null) { V oldValue = e.value; e.value = value; afterNodeAccess(e); return oldValue; } return null; } /** * 根据key做匹配Node,(匹配不到则新建然后重排)如果Node有value,则直接返回oldValue, * 如果没有value则根据Function接口的apply方法获取value,返回value。 * map.computeIfAbsent("key", k -> new Object()); * @param key * @param mappingFunction * @return */ @Override public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) { if (mappingFunction == null) throw new NullPointerException(); int hash = hash(key); Node<K,V>[] tab; Node<K,V> first; int n, i; int binCount = 0; TreeNode<K,V> t = null; Node<K,V> old = null; if (size > threshold || (tab = table) == null || (n = tab.length) == 0) n = (tab = resize()).length; if ((first = tab[i = (n - 1) & hash]) != null) { if (first instanceof TreeNode) //如果已经转为树,按照树的规则进行处理 old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key); else { Node<K,V> e = first; K k; //查找整个链表,找到对应的key do { if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) { old = e; break; } ++binCount; } while ((e = e.next) != null); } V oldValue; if (old != null && (oldValue = old.value) != null) { afterNodeAccess(old); return oldValue; } } //根据重写逻辑计算返回value V v = mappingFunction.apply(key); if (v == null) { return null; } else if (old != null) { old.value = v; afterNodeAccess(old); return v; } else if (t != null) t.putTreeVal(this, tab, hash, key, v); else { //如果匹配不到则table加入数据 tab[i] = newNode(hash, key, v, first); if (binCount >= TREEIFY_THRESHOLD - 1) treeifyBin(tab, hash); } ++modCount; ++size; afterNodeInsertion(true); return v; } /** * /V computeIfPresent(K key,BiFunction remappingFunction): * 根据key做匹配,如果匹配不上则返回null, * 匹配上根据BiFunction的apply方法获取value,返回value。 * BiFunction接口的apply的入参为key、oldValue, * 调用computeIfPresent时重写Function接口可以根据key和oldValue进行逻辑处理, * apply的返回值如果为null则删除该节点,否则即为要存储的value。 * @param key * @param remappingFunction * @return */ public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? 
extends V> remappingFunction) { if (remappingFunction == null) throw new NullPointerException(); Node<K,V> e; V oldValue; int hash = hash(key); if ((e = getNode(hash, key)) != null && (oldValue = e.value) != null) { //使用key和原value作为入参 V v = remappingFunction.apply(key, oldValue); if (v != null) { e.value = v; afterNodeAccess(e); return v; } else //apply的返回值如果为null则删除该节点, removeNode(hash, key, null, false, true); } return null; } /** * 根据key做匹配,根据BiFunction的apply返回做存储的value。 * 匹配到Node做value替换,匹配不到新增node。 * apply的返回值如果为null则删除该节点,否则即为要存储的value。 * @param key * @param remappingFunction * @return */ @Override public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { if (remappingFunction == null) throw new NullPointerException(); int hash = hash(key); Node<K,V>[] tab; Node<K,V> first; int n, i; int binCount = 0; TreeNode<K,V> t = null; Node<K,V> old = null; if (size > threshold || (tab = table) == null || (n = tab.length) == 0) n = (tab = resize()).length; if ((first = tab[i = (n - 1) & hash]) != null) { if (first instanceof TreeNode) old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key); else { Node<K,V> e = first; K k; do { if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) { old = e; break; } ++binCount; } while ((e = e.next) != null); } } V oldValue = (old == null) ? null : old.value; V v = remappingFunction.apply(key, oldValue); if (old != null) { if (v != null) { old.value = v; afterNodeAccess(old); } else removeNode(hash, key, null, false, true); } else if (v != null) { if (t != null) t.putTreeVal(this, tab, hash, key, v); else { tab[i] = newNode(hash, key, v, first); if (binCount >= TREEIFY_THRESHOLD - 1) treeifyBin(tab, hash); } ++modCount; ++size; afterNodeInsertion(true); } return v; } /** * 功能大部分与compute相同,不同之处在于BiFunction中apply的参数, * 入参为oldValue、value,调用merge时根据两个value进行逻辑处理并返回value。 * @param key * @param value * @param remappingFunction * @return */ @Override public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { if (value == null) throw new NullPointerException(); if (remappingFunction == null) throw new NullPointerException(); int hash = hash(key); Node<K,V>[] tab; Node<K,V> first; int n, i; int binCount = 0; TreeNode<K,V> t = null; Node<K,V> old = null; if (size > threshold || (tab = table) == null || (n = tab.length) == 0) n = (tab = resize()).length; if ((first = tab[i = (n - 1) & hash]) != null) { if (first instanceof TreeNode) //若为树结构 , 获取旧的节点 old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key); else { Node<K,V> e = first; K k; //循环链表获取key do { if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) { old = e; break; } ++binCount; } while ((e = e.next) != null); } } if (old != null) { V v; //旧节点value不为null,则根据apply方法得到新的value值 , 为null , 直接赋值 if (old.value != null) v = remappingFunction.apply(old.value, value); else v = value; //此时value不为null . 对old节点value做覆盖 if (v != null) { old.value = v; afterNodeAccess(old); } else //若此时old节点value仍为null , 则删除该节点 . 即当 : oldvalue和传入的value都为null时 . 删除节点 removeNode(hash, key, null, false, true); return v; } //入参value不为null if (value != null) { if (t != null) //且为树结构时 . 替换value值 t.putTreeVal(this, tab, hash, key, value); else { //为链表结构时 , 创建新的节点对象 tab[i] = newNode(hash, key, value, first); //若此时的链表数据已达到链表转树结构的阈值 , 转为树结构 if (binCount >= TREEIFY_THRESHOLD - 1) treeifyBin(tab, hash); } ++modCount; ++size; afterNodeInsertion(true); } return value; } //迭代 @Override public void forEach(BiConsumer<? super K, ? 
super V> action) { Node<K,V>[] tab; if (action == null) throw new NullPointerException(); if (size > 0 && (tab = table) != null) { int mc = modCount; for (int i = 0; i < tab.length; ++i) { for (Node<K,V> e = tab[i]; e != null; e = e.next) action.accept(e.key, e.value); } if (modCount != mc) throw new ConcurrentModificationException(); } } //调用此方法时重写BiFunction的Object apply(Object o, Object o2)方法, // 其中o为key,o2为value,根据重写方法逻辑进行重新赋值。 @Override public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { Node<K,V>[] tab; if (function == null) throw new NullPointerException(); if (size > 0 && (tab = table) != null) { int mc = modCount; for (int i = 0; i < tab.length; ++i) { for (Node<K,V> e = tab[i]; e != null; e = e.next) { e.value = function.apply(e.key, e.value); } } if (modCount != mc) throw new ConcurrentModificationException(); } } /* ------------------------------------------------------------ */ // Cloning and serialization /** * 实现cloneable接口,重排map结构 , 目前不可调用 * @return */ @SuppressWarnings("unchecked") @Override public Object clone() { HashMap<K,V> result; try { result = (HashMap<K,V>)super.clone(); } catch (CloneNotSupportedException e) { // this shouldn't happen, since we are Cloneable throw new InternalError(e); } result.reinitialize(); result.putMapEntries(this, false); return result; } /** * 这些方法只在序列化map集合时启用 : 初始化填充引起 * @return */ final float loadFactor() { return loadFactor; } //初始化容量值:即数组长度 final int capacity() { return (table != null) ? table.length : (threshold > 0) ? threshold : DEFAULT_INITIAL_CAPACITY; } /** * Save the state of the <tt>HashMap</tt> instance to a stream (i.e., * serialize it). * * @serialData The <i>capacity</i> of the HashMap (the length of the * bucket array) is emitted (int), followed by the * <i>size</i> (an int, the number of key-value * mappings), followed by the key (Object) and value (Object) * for each key-value mapping. The key-value mappings are * emitted in no particular order. */ private void writeObject(java.io.ObjectOutputStream s) throws IOException { int buckets = capacity(); // Write out the threshold, loadfactor, and any hidden stuff s.defaultWriteObject(); s.writeInt(buckets); s.writeInt(size); internalWriteEntries(s); } /** * Reconstitute the {@code HashMap} instance from a stream (i.e., * deserialize it). */ private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { // Read in the threshold (ignored), loadfactor, and any hidden stuff s.defaultReadObject(); reinitialize(); if (loadFactor <= 0 || Float.isNaN(loadFactor)) throw new InvalidObjectException("Illegal load factor: " + loadFactor); s.readInt(); // Read and ignore number of buckets int mappings = s.readInt(); // Read number of mappings (size) if (mappings < 0) throw new InvalidObjectException("Illegal mappings count: " + mappings); else if (mappings > 0) { // (if zero, use defaults) // Size the table using given load factor only if within // range of 0.25...4.0 float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f); float fc = (float)mappings / lf + 1.0f; int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ? DEFAULT_INITIAL_CAPACITY : (fc >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : tableSizeFor((int)fc)); float ft = (float)cap * lf; threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ? 
(int)ft : Integer.MAX_VALUE); @SuppressWarnings({"rawtypes","unchecked"}) Node<K,V>[] tab = (Node<K,V>[])new Node[cap]; table = tab; // Read the keys and values, and put the mappings in the HashMap for (int i = 0; i < mappings; i++) { @SuppressWarnings("unchecked") K key = (K) s.readObject(); @SuppressWarnings("unchecked") V value = (V) s.readObject(); putVal(hash(key), key, value, false, false); } } } /* ------------------------------------------------------------ */ // iterators abstract class HashIterator { Node<K,V> next; // 下一个节点 Node<K,V> current; // 当前节点 int expectedModCount; // 修改次数 : 避免迭代时有其他线程修改了此map集合数据 int index; //当前插槽 /** * 构造器 */ HashIterator() { expectedModCount = modCount; Node<K,V>[] t = table; current = next = null; index = 0; if (t != null && size > 0) { // advance to first entry do {} while (index < t.length && (next = t[index++]) == null); } } /** * 是否有下一个 * @return */ public final boolean hasNext() { return next != null; } /** * 下一个节点 * @return */ final Node<K,V> nextNode() { Node<K,V>[] t; Node<K,V> e = next; if (modCount != expectedModCount) throw new ConcurrentModificationException(); if (e == null) throw new NoSuchElementException(); if ((next = (current = e).next) == null && (t = table) != null) { do {} while (index < t.length && (next = t[index++]) == null); } return e; } /** * 移除 */ public final void remove() { Node<K,V> p = current; //非空和 if (p == null) throw new IllegalStateException(); if (modCount != expectedModCount) throw new ConcurrentModificationException(); current = null; K key = p.key; removeNode(hash(key), key, null, false, false); expectedModCount = modCount; } } final class KeyIterator extends HashIterator implements Iterator<K> { public final K next() { return nextNode().key; } } final class ValueIterator extends HashIterator implements Iterator<V> { public final V next() { return nextNode().value; } } final class EntryIterator extends HashIterator implements Iterator<Map.Entry<K,V>> { public final Map.Entry<K,V> next() { return nextNode(); } } /* ------------------------------------------------------------ */ // spliterators static class HashMapSpliterator<K,V> { //要遍历的map集合对象 final HashMap<K,V> map; //当前正在遍历的节点 Node<K,V> current; //当前迭代器正在遍历的桶索引 int index; //当前迭代器遍历的桶索引的上限 int fence; //元素个数 int est; // 期望操作数,用于多线程情况下,如果多个线程同时对 HashMap 进行读写, // 那么这个期望操作数 expectedModCount 和 HashMap 的 modCount 就会不一致, // 这时候抛个异常出来,称为“快速失败” int expectedModCount; /** * 初始化 * @param m * @param origin * @param fence * @param est * @param expectedModCount */ HashMapSpliterator(HashMap<K,V> m, int origin, int fence, int est, int expectedModCount) { this.map = m; this.index = origin; this.fence = fence; this.est = est; this.expectedModCount = expectedModCount; } // 获取栅栏?不不不,这个方法的作用是获取一个当前迭代器的一个迭代范围,例如返回的值是 4,那么遍历到第四个桶就会结束 // 如果 table 有数据的话,貌似返回的值永远都是一样的 final int getFence() { int hi; if ((hi = fence) < 0) { HashMap<K,V> m = map; est = m.size; expectedModCount = m.modCount; Node<K,V>[] tab = m.table; hi = fence = (tab == null) ? 
0 : tab.length; } return hi; } // 获取当前迭代器需要遍历的元素个数 public final long estimateSize() { getFence(); // force init return (long) est; } } /** * 针对map集合的键的迭代器 * @param <K> * @param <V> */ static final class KeySpliterator<K,V> extends HashMapSpliterator<K,V> implements Spliterator<K> { /** * 初始化 * @param m 集合本身 * @param origin * @param fence 当前map桶中数组的长度 * @param est 集合的元素个数 * @param expectedModCount 修改次数 : 为快速失败 */ KeySpliterator(HashMap<K,V> m, int origin, int fence, int est, int expectedModCount) { super(m, origin, fence, est, expectedModCount); } /** * 对当前迭代器进行分割 * @return */ public KeySpliterator<K,V> trySplit() { // 这里的分割方法只是把当前迭代器的开始索引和最后索引除以二而已 int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; return (lo >= mid || current != null) ? null : new KeySpliterator<>(map, lo, index = mid, est >>>= 1, expectedModCount); } // 在当前迭代器遍历范围遍历一遍 public void forEachRemaining(Consumer<? super K> action) { int i, hi, mc; if (action == null) throw new NullPointerException(); HashMap<K,V> m = map; Node<K,V>[] tab = m.table; if ((hi = fence) < 0) { mc = expectedModCount = m.modCount; hi = fence = (tab == null) ? 0 : tab.length; } else mc = expectedModCount; if (tab != null && tab.length >= hi && (i = index) >= 0 && (i < (index = hi) || current != null)) { Node<K,V> p = current; current = null; do { if (p == null) p = tab[i++]; else { action.accept(p.key); p = p.next; } } while (p != null || i < hi); if (m.modCount != mc) throw new ConcurrentModificationException(); } } // 会遍历迭代器遍历的范围之内的元素,当找到第一个非空元素的时候就会停止遍历 public boolean tryAdvance(Consumer<? super K> action) { int hi; if (action == null) throw new NullPointerException(); Node<K,V>[] tab = map.table; if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { while (current != null || index < hi) { if (current == null) current = tab[index++]; else { K k = current.key; current = current.next; action.accept(k); if (map.modCount != expectedModCount) throw new ConcurrentModificationException(); return true; } } } return false; } public int characteristics() { return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) | Spliterator.DISTINCT; } } static final class ValueSpliterator<K,V> extends HashMapSpliterator<K,V> implements Spliterator<V> { ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est, int expectedModCount) { super(m, origin, fence, est, expectedModCount); } public ValueSpliterator<K,V> trySplit() { int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; return (lo >= mid || current != null) ? null : new ValueSpliterator<>(map, lo, index = mid, est >>>= 1, expectedModCount); } public void forEachRemaining(Consumer<? super V> action) { int i, hi, mc; if (action == null) throw new NullPointerException(); HashMap<K,V> m = map; Node<K,V>[] tab = m.table; if ((hi = fence) < 0) { mc = expectedModCount = m.modCount; hi = fence = (tab == null) ? 0 : tab.length; } else mc = expectedModCount; if (tab != null && tab.length >= hi && (i = index) >= 0 && (i < (index = hi) || current != null)) { Node<K,V> p = current; current = null; do { if (p == null) p = tab[i++]; else { action.accept(p.value); p = p.next; } } while (p != null || i < hi); if (m.modCount != mc) throw new ConcurrentModificationException(); } } public boolean tryAdvance(Consumer<? 
super V> action) { int hi; if (action == null) throw new NullPointerException(); Node<K,V>[] tab = map.table; if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { while (current != null || index < hi) { if (current == null) current = tab[index++]; else { V v = current.value; current = current.next; action.accept(v); if (map.modCount != expectedModCount) throw new ConcurrentModificationException(); return true; } } } return false; } public int characteristics() { return (fence < 0 || est == map.size ? Spliterator.SIZED : 0); } } static final class EntrySpliterator<K,V> extends HashMapSpliterator<K,V> implements Spliterator<Map.Entry<K,V>> { EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est, int expectedModCount) { super(m, origin, fence, est, expectedModCount); } public EntrySpliterator<K,V> trySplit() { int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; return (lo >= mid || current != null) ? null : new EntrySpliterator<>(map, lo, index = mid, est >>>= 1, expectedModCount); } public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) { int i, hi, mc; if (action == null) throw new NullPointerException(); HashMap<K,V> m = map; Node<K,V>[] tab = m.table; if ((hi = fence) < 0) { mc = expectedModCount = m.modCount; hi = fence = (tab == null) ? 0 : tab.length; } else mc = expectedModCount; if (tab != null && tab.length >= hi && (i = index) >= 0 && (i < (index = hi) || current != null)) { Node<K,V> p = current; current = null; do { if (p == null) p = tab[i++]; else { action.accept(p); p = p.next; } } while (p != null || i < hi); if (m.modCount != mc) throw new ConcurrentModificationException(); } } public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) { int hi; if (action == null) throw new NullPointerException(); Node<K,V>[] tab = map.table; if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { while (current != null || index < hi) { if (current == null) current = tab[index++]; else { Node<K,V> e = current; current = current.next; action.accept(e); if (map.modCount != expectedModCount) throw new ConcurrentModificationException(); return true; } } } return false; } public int characteristics() { return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) | Spliterator.DISTINCT; } } /* ------------------------------------------------------------ */ // LinkedHashMap支持的类的 //链表中 : 创建新的节点对象 , 把oldValue后移以为 , 当前value为第一个节点 Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) { return new Node<>(hash, key, value, next); } //链表结构中 : 根据p节点的key的hash值和key对象本身查找对应节点 替换当前节点即节点的next指向 Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) { return new Node<>(p.hash, p.key, p.value, next); } //创建红黑树节点 TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) { return new TreeNode<>(hash, key, value, next); } //为treeifyBin方法定义的 : 替换红黑树结构中的node节点 TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) { return new TreeNode<>(p.hash, p.key, p.value, next); } /** * 重置状态值为初始化状态数据 */ void reinitialize() { table = null; entrySet = null; keySet = null; values = null; modCount = 0; threshold = 0; size = 0; } //集合处理后回调 : 主要是在linkedHashMap中应用 . 
hashmap本身并无实际意义 void afterNodeAccess(Node<K,V> p) { } void afterNodeInsertion(boolean evict) { } void afterNodeRemoval(Node<K,V> p) { } //只是为了确保有一定顺序 void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException { Node<K,V>[] tab; if (size > 0 && (tab = table) != null) { for (int i = 0; i < tab.length; ++i) { for (Node<K,V> e = tab[i]; e != null; e = e.next) { s.writeObject(e.key); s.writeObject(e.value); } } } } /* ------------------------------------------------------------ */ // Tree bins /** * 红黑树结构的节点对象 * @param <K> 键 - key * @param <V> 值 - value */ static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> { TreeNode<K,V> parent; //父节点 TreeNode<K,V> left; //左子树 TreeNode<K,V> right; //右子树 TreeNode<K,V> prev; ////前方结点 : 增加了该属性使得链表成为双向的 . 删除后需要取消连接 //颜色属性 : //1. 每个节点要么黑要么红 //2. 所有的红黑树的根节点都是黑色 //3. 每个叶子节点[NIL]是黑色的 //4. 如果一个节点是红色 , 则它的子节点必须是黑色 . 即父子节点不能同时为红色 //5. 从一个节点到该节点的子孙节点的所有路径上包含相同数目的黑节点[这一点是保证树结构平衡的关键] , // 这个属性被称为黑高,记作bh(x) //6. 一个节点的左子节点的所有元素都要小于该节点 . 节点的右子元素都要大于该节点 : // 这是构成二叉搜索树的基本要求,而红黑树就是从这个延伸过来的 boolean red; /** * 构造器 * @param hash key的hash值 * @param key 键 * @param val 值 * @param next 下一个节点 */ TreeNode(int hash, K key, V val, Node<K,V> next) { super(hash, key, val, next); } /** * 获取根节点 : 通过无限循环获取节点的父节点 . 只有根节点没有父节点 . * @return */ final TreeNode<K,V> root() { for (TreeNode<K,V> r = this, p;;) { if ((p = r.parent) == null) return r; r = p; } } /** * 确确保给出的根结点是箱中的第一个结点也就是直接位于table上, * 原本的第一个结点若不是root则将root从链表中剪下放到第一个结点的前方 * @param tab 当前数组 * @param root 参数节点 * @param <K> 节点key * @param <V> 节点value */ static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) { int n; //若入参节点不为null且数组不为null且数组length >0 if (root != null && tab != null && (n = tab.length) > 0) { //n-1为当前下标 , 和root的hash值相同则为对应节点下标 int index = (n - 1) & root.hash; //取出第一个节点 TreeNode<K,V> first = (TreeNode<K,V>)tab[index]; //若第一个节点不是root if (root != first) { Node<K,V> rn; //下标index处的节点替换为root tab[index] = root; //取出root的前一个节点 TreeNode<K,V> rp = root.prev; if ((rn = root.next) != null)//rn = root.next 若root节点的下一个节点非空 ((TreeNode<K,V>)rn).prev = rp;////rn的前指针指向root的前一个结点 if (rp != null) rp.next = rn;//若rp非空 , 即rp的下一个节点指向root的下一个节点 if (first != null) first.prev = root;//原第一个节点不为null . 则原第一个节点的前指针指向root root.next = first;//结合上一部操作 , root的子节点指向first root.prev = null;//root为第一个节点即根节点 } //处理后回调 assert checkInvariants(root); } } /** * 从根结点p开始根据hash和key值寻找指定的结点。kc是key的class * @param h 入参key的hash值 * @param k 键 * @param kc key的class * @return */ final TreeNode<K,V> find(int h, Object k, Class<?> kc) { TreeNode<K,V> p = this;//该方法调用时this是根结点 do { int ph, dir; K pk; TreeNode<K,V> pl = p.left, pr = p.right, q; if ((ph = p.hash) > h) p = pl;//p.hash>参数hash时 , 即根节点的hash值大于参数h时 ,移向左子树 else if (ph < h) p = pr;//即根节点的hash值大于参数h时 ,移向右子树 else if ((pk = p.key) == k || (k != null && k.equals(pk))) return p;//当前根节点时 , 直接返回 else if (pl == null) p = pr;//若左子节点元素为null , 转向右子树 else if (pr == null) p = pl;//若右子节点元素为null , 转向左子树 else if ((kc != null || (kc = comparableClassFor(k)) != null) && (dir = compareComparables(kc, k, pk)) != 0) //key的class不为null或(kc与K不是相同的CLASS且当前根节点的key的class与kc与key的class相同)时 , 不相同 ,compare结果为0 //若key的类型不相同 : 转向左子节点 ,否则转向右节点 p = (dir < 0) ? pl : pr; else if ((q = pr.find(h, k, kc)) != null) //根据key即class对比向右子树查找节点 , 若存在不为null的元素 , 直接返回 //这里开始的条件仅当输入k=null的时候才会进入,先检查右子树再检查左子树 return q; else p = pl;//不然转向左子树 , 继续循环 , 直至树元素为null } while (p != null); return null; } /** * 从根结点寻找h和k符合的结点 */ final TreeNode<K,V> getTreeNode(int h, Object k) { return ((parent != null) ? 
root() : this).find(h, k, null); } /** * 在插入结点的key值k和父结点的key值pk无法比较出大小时,用于比较k和pk的hash值大小。 * @param a * @param b * @return */ static int tieBreakOrder(Object a, Object b) { int d; if (a == null || b == null || (d = a.getClass().getName(). compareTo(b.getClass().getName())) == 0) d = (System.identityHashCode(a) <= System.identityHashCode(b) ? -1 : 1); return d; } /** * 根据链表生成树了,遍历链表获取结点,一个个插入到红黑树中,根节点除外 . * 每次插入从根开始根据hash值寻找到叶结点位置进行插入,插入一个结点后调用一次 * @return root of tree */ final void treeify(Node<K,V>[] tab) { TreeNode<K,V> root = null; for (TreeNode<K,V> x = this, next; x != null; x = next) {//当前数组的下标处开始循环链表 , 到元素为null结束 next = (TreeNode<K,V>)x.next;//将下一个节点转为tree结构 x.left = x.right = null;//左右节点指向为null先 if (root == null) {//根结点一定是黑色的 x.parent = null; x.red = false; root = x; } else {//不然 : 根据hash值找到叶子节点并设置指向 K k = x.key; int h = x.hash; Class<?> kc = null; for (TreeNode<K,V> p = root;;) { int dir, ph; K pk = p.key; if ((ph = p.hash) > h) dir = -1;//p.hash>h则dir=-1 else if (ph < h) dir = 1;//p.hash<h则dir=1 else if ((kc == null && (kc = comparableClassFor(k)) == null) || (dir = compareComparables(kc, k, pk)) == 0) //k是不可比较的类或者k和p.key相等,kc==null这个条件只是为了给kc初始化 //比较k和p.k的hash值大小,k大dir=-1,p.key大则dir=1,pk是父亲的hash,k是要插入结点的hash dir = tieBreakOrder(k, pk); TreeNode<K,V> xp = p; if ((p = (dir <= 0) ? p.left : p.right) == null) { x.parent = xp;//设置父节点为p if (dir <= 0)//若x的hash值小于等于p的hash值时尝试插入到左子树 xp.left = x; else xp.right = x;;//x的hash值大于p的hash值时尝试插入到右子树 //检查x位置的红黑树性质是否需要修复 root = balanceInsertion(root, x); break; } } } } //确保当前的root是直接落在table数组上 moveRootToFront(tab, root); } /** * Returns a list of non-TreeNodes replacing those linked from this node. *把树转为链表,由于replacementNode这个方法会生成新的Node, * 所以产生的新链表不再具有树的信息了,原本的TreeNode被gc了。 */ final Node<K,V> untreeify(HashMap<K,V> map) { Node<K,V> hd = null, tl = null;//hd是头部,tl是尾部 for (Node<K,V> q = this; q != null; q = q.next) { Node<K,V> p = map.replacementNode(q, null);//根据q产生一个新的结点,next=null,hash key value和q相等 if (tl == null) hd = p;//第一个结点产生时头部指向它 else tl.next = p; tl = p; } return hd; } /** * 往树结构中插入节点 */ final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab, int h, K k, V v) { Class<?> kc = null; boolean searched = false; TreeNode<K,V> root = (parent != null) ? root() : this; for (TreeNode<K,V> p = root;;) { int dir, ph; K pk; if ((ph = p.hash) > h) dir = -1; else if (ph < h) dir = 1; else if ((pk = p.key) == k || (k != null && k.equals(pk))) return p; else if ((kc == null && (kc = comparableClassFor(k)) == null) || (dir = compareComparables(kc, k, pk)) == 0) { if (!searched) { TreeNode<K,V> q, ch; searched = true; if (((ch = p.left) != null && (q = ch.find(h, k, kc)) != null) || ((ch = p.right) != null && (q = ch.find(h, k, kc)) != null)) return q; } dir = tieBreakOrder(k, pk); } TreeNode<K,V> xp = p; if ((p = (dir <= 0) ? p.left : p.right) == null) { Node<K,V> xpn = xp.next; TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn); if (dir <= 0) xp.left = x; else xp.right = x; xp.next = x; x.parent = x.prev = xp; if (xpn != null) ((TreeNode<K,V>)xpn).prev = x; moveRootToFront(tab, balanceInsertion(root, x)); return null; } } } /** * 移除tree节点 . 只是把指向变更 . 
若 * @param map * @param tab * @param movable */ final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab, boolean movable) { int n; if (tab == null || (n = tab.length) == 0) return; int index = (n - 1) & hash; TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl; TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev; if (pred == null) tab[index] = first = succ; else pred.next = succ; if (succ != null) succ.prev = pred; if (first == null) return; if (root.parent != null) root = root.root(); if (root == null || root.right == null || (rl = root.left) == null || rl.left == null) { tab[index] = first.untreeify(map); // too small return; } TreeNode<K,V> p = this, pl = left, pr = right, replacement; if (pl != null && pr != null) { TreeNode<K,V> s = pr, sl; while ((sl = s.left) != null) // find successor s = sl; boolean c = s.red; s.red = p.red; p.red = c; // swap colors TreeNode<K,V> sr = s.right; TreeNode<K,V> pp = p.parent; if (s == pr) { // p was s's direct parent p.parent = s; s.right = p; } else { TreeNode<K,V> sp = s.parent; if ((p.parent = sp) != null) { if (s == sp.left) sp.left = p; else sp.right = p; } if ((s.right = pr) != null) pr.parent = s; } p.left = null; if ((p.right = sr) != null) sr.parent = p; if ((s.left = pl) != null) pl.parent = s; if ((s.parent = pp) == null) root = s; else if (p == pp.left) pp.left = s; else pp.right = s; if (sr != null) replacement = sr; else replacement = p; } else if (pl != null) replacement = pl; else if (pr != null) replacement = pr; else replacement = p; if (replacement != p) { TreeNode<K,V> pp = replacement.parent = p.parent; if (pp == null) root = replacement; else if (p == pp.left) pp.left = replacement; else pp.right = replacement; p.left = p.right = p.parent = null; } TreeNode<K,V> r = p.red ? 
root : balanceDeletion(root, replacement); if (replacement == p) { // detach TreeNode<K,V> pp = p.parent; p.parent = null; if (pp != null) { if (p == pp.left) pp.left = null; else if (p == pp.right) pp.right = null; } } if (movable) moveRootToFront(tab, r); } /** * 将树从给定的结点分裂成低位和高位的两棵树,若新树结点太少则转为线性链表。只有resize时会调用 * * @param map the map * @param tab the table for recording bin heads 存储链表头的hash表 * @param index the index of the table being split 需要分裂的表下标位置 * @param bit the bit of hash to split on 分裂时分到高位和低位的依据参数,实际使用时输入的是扩展之前旧数组的大小 */ final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) { TreeNode<K,V> b = this; // Relink into lo and hi lists, preserving order TreeNode<K,V> loHead = null, loTail = null;//低位头尾指针 TreeNode<K,V> hiHead = null, hiTail = null;//高位头尾指针 int lc = 0, hc = 0;//低位和高位的结点个数统计 for (TreeNode<K,V> e = b, next; e != null; e = next) {//e从this开始遍历直到next为null next = (TreeNode<K,V>)e.next; e.next = null; //这段决定了该结点被分到低位还是高位,依据算式是e.hash mod bit, // 由于bit是扩展前数组的大小,所以一定是2的指数次幂,所以bit一定只有一个高位是1其余全是0 //这个算式实际是判断e.hash新多出来的有效位是0还是1,若是0则分去低位树,是1则分去高位树 if ((e.hash & bit) == 0) { if ((e.prev = loTail) == null) loHead = e; else loTail.next = e; loTail = e; ++lc; } else { if ((e.prev = hiTail) == null) hiHead = e; else hiTail.next = e; hiTail = e; ++hc; } } if (loHead != null) { if (lc <= UNTREEIFY_THRESHOLD) tab[index] = loHead.untreeify(map);//分裂后的低位树结点太少转为线性链表 else { tab[index] = loHead; if (hiHead != null) //若高位树为null则代表整棵树全保留在了低位,树没有变化所以不用进行后面的treeify loHead.treeify(tab); } } if (hiHead != null) {//这段与上面对于低位部分的分析相对应 if (hc <= UNTREEIFY_THRESHOLD) tab[index + bit] = hiHead.untreeify(map); else { tab[index + bit] = hiHead; if (loHead != null) hiHead.treeify(tab); } } } /* ------------------------------------------------------------ */ // Red-black tree methods, all adapted from CLR /** * 左旋操作 : 如 : 有父节点 : x ,x的左子树x1 ; 右子树y , y的左子树y1 , 右子树y2 . * 此时x < y 则左旋操作为 : x节点降为y的左子树 , 原本x1< x 故x1仍为x的左子树 . * y > x .y1 > x , y1 < y , y2 > y . 故 : y2 仍为y的右子树 . 而y1为x的右子树 . * 若x为根节点 , 则为黑色 , y节点仍为黑色 * @param root 右子树 * @param p 右子树的父节点 * @param <K> * @param <V> * @return */ static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root, TreeNode<K,V> p) { TreeNode<K,V> r, pp, rl; if (p != null && (r = p.right) != null) { if ((rl = p.right = r.left) != null) rl.parent = p; if ((pp = r.parent = p.parent) == null) (root = r).red = false; else if (pp.left == p) pp.left = r; else pp.right = r; r.left = p; p.parent = r; } return root; } /** * 右旋 : 原理参照上面左旋 . 
方向相反而已 * @param root * @param p * @param <K> * @param <V> * @return */ static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root, TreeNode<K,V> p) { TreeNode<K,V> l, pp, lr; if (p != null && (l = p.left) != null) { if ((lr = p.left = l.right) != null) lr.parent = p; if ((pp = l.parent = p.parent) == null) (root = l).red = false; else if (pp.right == p) pp.right = l; else pp.left = l; l.right = p; p.parent = l; } return root; } /** * 插入后用于维持红黑树性质的修复操作, * @param root * @param x * @param <K> * @param <V> * @return */ static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root, TreeNode<K,V> x) { x.red = true;//插入的结点设为红色 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) { if ((xp = x.parent) == null) { x.red = false;//x的父亲为null代表x是根结点,x改黑色直接结束 return x; } else if (!xp.red || (xpp = xp.parent) == null) return root;//若x的父结点为黑色或者x的父亲为根结点(实际上根应该是黑色)插入红色结点不影响红黑树性质 if (xp == (xppl = xpp.left)) { if ((xppr = xpp.right) != null && xppr.red) { //xppr为x的叔叔,且叔叔为红色,x的叔叔和父亲改为红色,x的爷爷改为黑色,x指针上移到爷爷的位置 xppr.red = false; xp.red = false; xpp.red = true; x = xpp; } else { if (x == xp.right) { //情况2,x的叔叔是黑色且x是右儿子。对x上升至父亲后执行一次左旋 root = rotateLeft(root, x = xp); xpp = (xp = x.parent) == null ? null : xp.parent; } if (xp != null) { //情况3,x的叔叔是黑色且x是左儿子。x的父亲改黑色,x的爷爷改红色后对x的爷爷进行右旋 xp.red = false; if (xpp != null) { xpp.red = true; root = rotateRight(root, xpp); } } } } else {//以下为对称的操作 if (xppl != null && xppl.red) { xppl.red = false; xp.red = false; xpp.red = true; x = xpp; } else { if (x == xp.left) { root = rotateRight(root, x = xp); xpp = (xp = x.parent) == null ? null : xp.parent; } if (xp != null) { xp.red = false; if (xpp != null) { xpp.red = true; root = rotateLeft(root, xpp); } } } } } } /** * 删除操作需要寻找一个后驱结点来顶替原结点的位置, * 在结点无儿子时删除后不需做其他调整,结点只有一个儿子时那个儿子是后驱, * 否则右子树中的最小结点作为后驱。 * @param root * @param x * @param <K> * @param <V> * @return */ static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root, TreeNode<K,V> x) { for (TreeNode<K,V> xp, xpl, xpr;;) { if (x == null || x == root) return root; else if ((xp = x.parent) == null) { x.red = false; return x; } else if (x.red) { x.red = false; return root; } else if ((xpl = xp.left) == x) { if ((xpr = xp.right) != null && xpr.red) { xpr.red = false; xp.red = true; root = rotateLeft(root, xp); xpr = (xp = x.parent) == null ? null : xp.right; } if (xpr == null) x = xp; else { TreeNode<K,V> sl = xpr.left, sr = xpr.right; if ((sr == null || !sr.red) && (sl == null || !sl.red)) { xpr.red = true; x = xp; } else { if (sr == null || !sr.red) { if (sl != null) sl.red = false; xpr.red = true; root = rotateRight(root, xpr); xpr = (xp = x.parent) == null ? null : xp.right; } if (xpr != null) { xpr.red = (xp == null) ? false : xp.red; if ((sr = xpr.right) != null) sr.red = false; } if (xp != null) { xp.red = false; root = rotateLeft(root, xp); } x = root; } } } else { // symmetric if (xpl != null && xpl.red) { xpl.red = false; xp.red = true; root = rotateRight(root, xp); xpl = (xp = x.parent) == null ? null : xp.left; } if (xpl == null) x = xp; else { TreeNode<K,V> sl = xpl.left, sr = xpl.right; if ((sl == null || !sl.red) && (sr == null || !sr.red)) { xpl.red = true; x = xp; } else { if (sl == null || !sl.red) { if (sr != null) sr.red = false; xpl.red = true; root = rotateLeft(root, xpl); xpl = (xp = x.parent) == null ? null : xp.left; } if (xpl != null) { xpl.red = (xp == null) ? 
false : xp.red; if ((sl = xpl.left) != null) sl.red = false; } if (xp != null) { xp.red = false; root = rotateRight(root, xp); } x = root; } } } } } /** * Recursive invariant check */ static <K,V> boolean checkInvariants(TreeNode<K,V> t) { TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right, tb = t.prev, tn = (TreeNode<K,V>)t.next; if (tb != null && tb.next != t) return false; if (tn != null && tn.prev != t) return false; if (tp != null && t != tp.left && t != tp.right) return false; if (tl != null && (tl.parent != t || tl.hash > t.hash)) return false; if (tr != null && (tr.parent != t || tr.hash < t.hash)) return false; if (t.red && tl != null && tl.red && tr != null && tr.red) return false; if (tl != null && !checkInvariants(tl)) return false; if (tr != null && !checkInvariants(tr)) return false; return true; } } }
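
To complement the walkthrough above, a few self-contained sketches follow; class names and sample values in them are made up for illustration. The first one mirrors `hash(Object key)`, which XORs the key's `hashCode()` with its own upper 16 bits before the bucket index is taken as `(n - 1) & hash`. The XOR folds the high 16 bits into the low 16 bits, so that high-order bits still influence the index even while the table is small and the `(n - 1)` mask would otherwise discard them.

```java
public class HashSpreadDemo {

    // Mirrors HashMap.hash(Object): XOR the hashCode with its upper 16 bits so the
    // high-order bits still influence the bucket index when the table is small.
    static int spread(Object key) {
        int h;
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }

    public static void main(String[] args) {
        int n = 16;                        // table length, always a power of two
        Integer k1 = 0x0001_0001;          // hashCodes differ only in the high 16 bits
        Integer k2 = 0x0002_0001;

        // Raw hashCode: (n - 1) masks off the high bits, so both keys land in bucket 1.
        System.out.println((k1.hashCode() & (n - 1)) + " / " + (k2.hashCode() & (n - 1))); // 1 / 1

        // Spread hash: the folded-in high bits separate the two keys.
        System.out.println((spread(k1) & (n - 1)) + " / " + (spread(k2) & (n - 1)));        // 0 / 3
    }
}
```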
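
`tableSizeFor(int cap)` rounds the requested capacity up to the nearest power of two, capped at `MAXIMUM_CAPACITY`. The sketch below copies the same bit-smearing trick into a hypothetical standalone class so its behaviour can be printed: after the cascade of shifts, every bit below the highest set bit of `cap - 1` is 1, so `n + 1` is the next power of two.

```java
public class TableSizeForDemo {

    static final int MAXIMUM_CAPACITY = 1 << 30;

    // Same bit-smearing trick as HashMap.tableSizeFor(int).
    static int tableSizeFor(int cap) {
        int n = cap - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    public static void main(String[] args) {
        for (int cap : new int[] {1, 2, 3, 15, 16, 17, 1000}) {
            System.out.println(cap + " -> " + tableSizeFor(cap)); // 3 -> 4, 17 -> 32, 1000 -> 1024 ...
        }
    }
}
```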
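
During `resize()`, each bucket's chain is split into a "lo" list and a "hi" list using `(e.hash & oldCap) == 0`. Because the new capacity is exactly `oldCap << 1`, the new index `hash & (newCap - 1)` can differ from the old index only in the single bit that `oldCap` contributes; if that bit is 0 the node keeps its old index `j`, otherwise it moves to `j + oldCap`. A small sketch with made-up hash values that all share old bucket 5 demonstrates this:

```java
public class ResizeSplitDemo {

    public static void main(String[] args) {
        int oldCap = 16;
        int newCap = oldCap << 1;

        // Hypothetical hash values that all map to bucket 5 of the old 16-slot table.
        int[] hashes = {5, 21, 37, 53};

        for (int h : hashes) {
            int oldIndex = h & (oldCap - 1);       // index before the resize
            int newIndex = h & (newCap - 1);       // index after the resize
            boolean staysLow = (h & oldCap) == 0;  // tests the one extra bit the doubled mask exposes

            System.out.printf("hash=%2d  old=%d  new=%2d  -> %s%n",
                    h, oldIndex, newIndex, staysLow ? "stays at j" : "moves to j + oldCap");
        }
    }
}
```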
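
`modCount` is not a lock and gives no thread-safety guarantee; it only supports best-effort fail-fast detection, so iterators and the bulk `forEach` methods throw `ConcurrentModificationException` when they notice a structural modification that did not go through the iterator itself. A minimal demonstration against the real `java.util.HashMap`:

```java
import java.util.ConcurrentModificationException;
import java.util.HashMap;
import java.util.Map;

public class FailFastDemo {

    public static void main(String[] args) {
        Map<Integer, String> map = new HashMap<>();
        for (int i = 0; i < 4; i++) {
            map.put(i, "v" + i);
        }

        try {
            for (Integer key : map.keySet()) {
                if (key == 1) {
                    // Structural modification bumps modCount; the iterator's next() call
                    // then sees modCount != expectedModCount and fails fast.
                    map.remove(key);
                }
            }
        } catch (ConcurrentModificationException e) {
            System.out.println("fail-fast: " + e);
        }

        // removeIf deletes through the iterator itself, so no exception is thrown.
        map.keySet().removeIf(k -> k == 3);
        System.out.println(map);
    }
}
```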
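
The default-method overrides discussed above (`getOrDefault`, `putIfAbsent`, `computeIfAbsent`, `compute`, `merge`) were added in Java 8 and are easiest to remember from their observable behaviour. A short usage sketch against `java.util.HashMap`, with arbitrary word-count style keys:

```java
import java.util.HashMap;
import java.util.Map;

public class ComputeMergeDemo {

    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();

        // computeIfAbsent: the mapping function runs only when the key is missing
        // (or mapped to null); a null result leaves the map unchanged.
        counts.computeIfAbsent("a", k -> 1);
        counts.computeIfAbsent("a", k -> 100);       // ignored, "a" is already present
        System.out.println(counts);                  // {a=1}

        // merge: stores the given value when the key is absent, otherwise combines
        // the old and new values; a null result removes the entry.
        counts.merge("a", 1, Integer::sum);          // a -> 2
        counts.merge("b", 1, Integer::sum);          // b -> 1
        System.out.println(counts);                  // {a=2, b=1}

        counts.merge("b", 1, (oldV, newV) -> null);  // remapping returns null -> "b" is removed
        System.out.println(counts.containsKey("b")); // false

        // putIfAbsent / getOrDefault round out the Java 8 additions.
        counts.putIfAbsent("a", 99);                 // no effect, "a" already mapped
        System.out.println(counts.getOrDefault("c", 0)); // 0
    }
}
```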
