ConcurrentHashMap (Java 8) Source Code Analysis
1. Constants and Member Fields
private static final int MAXIMUM_CAPACITY = 1 << 30; // same as HashMap
private static final int DEFAULT_CAPACITY = 16; // same as HashMap
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; // largest array size, used by toArray-style methods
static final int TREEIFY_THRESHOLD = 8; // same as HashMap
static final int UNTREEIFY_THRESHOLD = 6; // same as HashMap
static final int MIN_TREEIFY_CAPACITY = 64; // same as HashMap
private static final float LOAD_FACTOR = 0.75f; // rarely used directly -> see transfer: sizeCtl = (n << 1) - (n >>> 1)
private static final int DEFAULT_CONCURRENCY_LEVEL = 16; // concurrency level of the old Segment-based implementation, kept for compatibility
private static final int MIN_TRANSFER_STRIDE = 16; // minimum number of bins claimed per transfer step during resizing (see transfer)
private static int RESIZE_STAMP_BITS = 16; // see resizeStamp
private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1; // 2^16 - 1
private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS; // see addCount
static final int MOVED     = -1; // hash of a ForwardingNode
static final int TREEBIN   = -2; // hash of a TreeBin (red-black tree bin)
static final int RESERVED  = -3; // hash of a transient reservation node
static final int HASH_BITS = 0x7fffffff; // usable bits of a normal node hash (see spread)
static final int NCPU = Runtime.getRuntime().availableProcessors(); // number of CPUs (see transfer and fullAddCount)

private static final ObjectStreamField[] serialPersistentFields = {
    new ObjectStreamField("segments", Segment[].class),
    new ObjectStreamField("segmentMask", Integer.TYPE),
    new ObjectStreamField("segmentShift", Integer.TYPE)
};

transient volatile Node<K,V>[] table; // the bucket array (separate chaining)
private transient volatile Node<K,V>[] nextTable; // the new table while a resize is in progress (temporary)
private transient volatile long baseCount; // base node count (see addCount)
private transient volatile int sizeCtl; // resize threshold when positive, similar to threshold in HashMap; negative values mark initialization/resizing
private transient volatile int transferIndex; // next bin index to claim for migration; a thread that wins the CAS on transferIndex migrates the preceding stride bins (see transfer)
private transient volatile int cellsBusy; // spinlock guarding counterCells (see fullAddCount)
private transient volatile CounterCell[] counterCells; // per-thread increments on top of baseCount (see fullAddCount)

private static final sun.misc.Unsafe U;
private static final long SIZECTL;
private static final long TRANSFERINDEX;
private static final long BASECOUNT;
private static final long CELLSBUSY;
private static final long CELLVALUE;
private static final long ABASE;
private static final int ASHIFT;

static {
    try {
        U = sun.misc.Unsafe.getUnsafe();
        Class<?> k = ConcurrentHashMap.class;
        SIZECTL = U.objectFieldOffset(k.getDeclaredField("sizeCtl"));
        TRANSFERINDEX = U.objectFieldOffset(k.getDeclaredField("transferIndex"));
        BASECOUNT = U.objectFieldOffset(k.getDeclaredField("baseCount"));
        CELLSBUSY = U.objectFieldOffset(k.getDeclaredField("cellsBusy"));
        Class<?> ck = CounterCell.class;
        CELLVALUE = U.objectFieldOffset(ck.getDeclaredField("value"));
        Class<?> ak = Node[].class;
        ABASE = U.arrayBaseOffset(ak); // offset of the first element in a Node[] (see tabAt)
        int scale = U.arrayIndexScale(ak); // size of each array element in bytes
        if ((scale & (scale - 1)) != 0)
            throw new Error("data type scale not a power of two");
        ASHIFT = 31 - Integer.numberOfLeadingZeros(scale); // 2^ASHIFT == scale, used to turn an index into a byte offset (see tabAt)
    } catch (Exception e) {
        throw new Error(e);
    }
}
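
A quick side note on why LOAD_FACTOR is "rarely used directly": the 0.75 thresholds are computed with shifts instead of a floating-point multiply. Below is a minimal sketch (the class name is mine, for illustration only) that checks the two shift formulas used by initTable and transfer:

// A standalone sketch verifying the shift arithmetic the map uses instead of LOAD_FACTOR:
//   n - (n >>> 2)        == 0.75 * n        (threshold set in initTable)
//   (n << 1) - (n >>> 1) == 0.75 * (2 * n)  (threshold set in transfer after doubling)
public class ThresholdMath {
    public static void main(String[] args) {
        for (int n = 16; n <= 1024; n <<= 1) {
            int initThreshold = n - (n >>> 2);          // initTable: capacity * 0.75
            int afterResize   = (n << 1) - (n >>> 1);   // transfer: (2 * capacity) * 0.75
            System.out.printf("n=%d  init=%d  afterDoubling=%d%n", n, initThreshold, afterResize);
            assert initThreshold == (int) (0.75 * n);
            assert afterResize   == (int) (0.75 * (2 * n));
        }
    }
}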
2. Getting an Element
1' If the bin the key maps to is empty, return null
2' If the first node of the bin is the node being looked up, return its value
3' If the hash of the bin's first node is < 0, the bin is either a red-black tree or has already been migrated to the new table
  1'' If the bin is a TreeBin (red-black tree), traverse its doubly linked list while a writer or waiter is present on the tree, otherwise search the red-black tree
  2'' If the bin has already been migrated to the new table, follow the ForwardingNode into the new table and search there
4' If the hash of the bin's first node is >= 0, the bin is a singly linked list, so traverse the list
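
The bin index used above comes from spread(key.hashCode()), which the excerpts below call but never show. For reference, the JDK 8 implementation mixes the high bits of the hash into the low bits and clears the sign bit, so a normal node's hash never collides with MOVED/TREEBIN/RESERVED. Wrapped in a tiny demo class here so it runs standalone (HASH_BITS copied from the constants above):

public class SpreadDemo {
    static final int HASH_BITS = 0x7fffffff;

    static int spread(int h) {
        return (h ^ (h >>> 16)) & HASH_BITS; // the JDK 8 implementation of spread()
    }

    public static void main(String[] args) {
        String key = "ConcurrentHashMap";
        int n = 16;                                           // table capacity (power of two)
        int h = spread(key.hashCode());
        System.out.println("spread hash = " + h);             // always >= 0
        System.out.println("bin index  = " + ((n - 1) & h));  // same indexing rule as HashMap
    }
}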
// the elements inside the array are not volatile themselves, so they are read via Unsafe with volatile semantics
static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) { // read table[i]
    return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
}

public V get(Object key) {
    Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
    int h = spread(key.hashCode());
    if ((tab = table) != null && (n = tab.length) > 0 && (e = tabAt(tab, (n - 1) & h)) != null) { // the bin is not empty; e is its first node
        if ((eh = e.hash) == h) { // Node: hash >= 0
            if ((ek = e.key) == key || (ek != null && key.equals(ek))) // the first node is the one we are looking for
                return e.val;
        }
        else if (eh < 0) // ForwardingNode: MOVED(-1) || TreeBin: TREEBIN(-2)
            return (p = e.find(h, key)) != null ? p.val : null; // ForwardingNode.find || TreeBin.find
        while ((e = e.next) != null) { // traverse the linked list of Nodes
            if (e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek))))
                return e.val;
        }
    }
    return null;
}

static class Node<K,V> implements Map.Entry<K,V> {
    final int hash;
    final K key;
    volatile V val; // volatile
    volatile Node<K,V> next; // volatile
    Node(int hash, K key, V val, Node<K,V> next); ... ...
}

static final class ForwardingNode<K,V> extends Node<K,V> {
    final Node<K,V>[] nextTable;
    ForwardingNode(Node<K,V>[] tab); // hash = MOVED, nextTable is set to the new table

    Node<K,V> find(int h, Object k) {
        outer: for (Node<K,V>[] tab = nextTable;;) {
            Node<K,V> e; int n;
            if (k == null || tab == null || (n = tab.length) == 0 || (e = tabAt(tab, (n - 1) & h)) == null) // the bin is empty
                return null;
            for (;;) {
                int eh; K ek;
                if ((eh = e.hash) == h && ((ek = e.key) == k || (ek != null && k.equals(ek)))) // e is the node we are looking for
                    return e;
                if (eh < 0) { // ForwardingNode: MOVED(-1) || TreeBin: TREEBIN(-2)
                    if (e instanceof ForwardingNode) { // keep following forwarding nodes
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        continue outer;
                    }
                    else
                        return e.find(h, k); // TreeBin.find (red-black tree lookup)
                }
                if ((e = e.next) == null) // traverse the linked list of Nodes
                    return null;
            }
        }
    }
}
// you may want to brush up on red-black trees and read-write locks before reading this class
static final class TreeBin<K,V> extends Node<K,V> {
    TreeNode<K,V> root; // root of the red-black tree
    volatile TreeNode<K,V> first; // head of the doubly linked list (volatile)
    volatile Thread waiter; // waiting thread
    volatile int lockState; // lock state (volatile)
    static final int WRITER = 1; // set while holding the write lock
    static final int WAITER = 2; // set while a writer is waiting for the lock
    static final int READER = 4; // increment value for acquiring the read lock

    TreeBin(TreeNode<K,V> b); // hash = TREEBIN; builds the red-black tree on top of the TreeNode doubly linked list

    final Node<K,V> find(int h, Object k) {
        if (k != null) {
            for (Node<K,V> e = first; e != null; ) {
                int s; K ek;
                if (((s = lockState) & (WAITER|WRITER)) != 0) { // a waiter or writer is present
                    if (e.hash == h && ((ek = e.key) == k || (ek != null && k.equals(ek)))) // so traverse the TreeNode doubly linked list instead
                        return e;
                    e = e.next;
                }
                else if (U.compareAndSwapInt(this, LOCKSTATE, s, s + READER)) { // acquire the read lock (reader count++)
                    TreeNode<K,V> r, p;
                    try {
                        p = ((r = root) == null ? null : r.findTreeNode(h, k, null)); // red-black tree lookup
                    } finally {
                        Thread w;
                        if (U.getAndAddInt(this, LOCKSTATE, -READER) == (READER|WAITER) && // release the read lock (reader count--): we were the last reader && a waiter exists
                            (w = waiter) != null) // the waiting thread is set
                            LockSupport.unpark(w); // wake up the waiter
                    }
                    return p;
                }
            }
        }
        return null;
    }

    final TreeNode<K,V> putTreeVal(int h, K k, V v) {
        ... ...
        else {
            lockRoot(); // acquire the write lock
            try {
                root = balanceInsertion(root, x);
            } finally {
                unlockRoot(); // release the write lock
            }
        }
        ... ...
    }

    final boolean removeTreeNode(TreeNode<K,V> p) {
        ... ...
        lockRoot(); // acquire the write lock
        try {
            ... ...
            root = (p.red) ? r : balanceDeletion(r, replacement);
            ... ...
        } finally {
            unlockRoot(); // release the write lock
        }
        ... ...
    }

    ... ...
}
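
The lockState field above is a tiny hand-rolled read-write lock: readers bump the count by READER via CAS (find), while a writer (lockRoot, called from putTreeVal/removeTreeNode) parks with the WAITER bit set until the last reader wakes it. Below is a simplified, self-contained model of that protocol assuming the same WRITER/WAITER/READER encoding; class and method names are mine, not the JDK's:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.LockSupport;

// Simplified model of TreeBin's lockState protocol.
class TinyTreeBinLock {
    static final int WRITER = 1, WAITER = 2, READER = 4;
    private final AtomicInteger lockState = new AtomicInteger();
    private volatile Thread waiter;

    // find() path: succeed only when no writer holds or waits for the lock
    boolean tryReadLock() {
        int s = lockState.get();
        return (s & (WAITER | WRITER)) == 0 && lockState.compareAndSet(s, s + READER);
    }

    // the last reader out wakes a parked writer
    void readUnlock() {
        Thread w;
        if (lockState.getAndAdd(-READER) == (READER | WAITER) && (w = waiter) != null)
            LockSupport.unpark(w);
    }

    // lockRoot/contendedLock path: spin, register as the waiter, then park until readers drain
    void writeLock() {
        if (lockState.compareAndSet(0, WRITER))
            return;
        boolean waiting = false;
        for (int s;;) {
            if (((s = lockState.get()) & ~WAITER) == 0) {        // no readers and no writer left
                if (lockState.compareAndSet(s, WRITER)) {
                    if (waiting) waiter = null;
                    return;
                }
            } else if ((s & WAITER) == 0) {                      // announce that a writer is waiting
                if (lockState.compareAndSet(s, s | WAITER)) {
                    waiting = true;
                    waiter = Thread.currentThread();
                }
            } else if (waiting) {
                LockSupport.park(this);                          // woken by the last readUnlock
            }
        }
    }

    void writeUnlock() {                                          // unlockRoot
        lockState.set(0);
    }
}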
3. Adding an Element
1' If the table has not been allocated yet, initialize it
2' If the bin the key maps to is empty, CAS the new node in as the bin's first node
3' If the table is being resized (the bin's first node is a ForwardingNode), help with the resize, then add the element once it is done
4' Otherwise lock the bin and insert the node into it (red-black tree / singly linked list)
5' Increment the node count (which may trigger a resize)
// the elements inside the array are not volatile themselves, so they are written via Unsafe with volatile/CAS semantics
static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) { // write table[i]
    U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
}

static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i, Node<K,V> c, Node<K,V> v) { // CAS table[i]
    return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
}

public V put(K key, V value) {
    return putVal(key, value, false);
}

final V putVal(K key, V value, boolean onlyIfAbsent) {
    if (key == null || value == null) throw new NullPointerException();
    int hash = spread(key.hashCode());
    int binCount = 0; // number of nodes in the bin's list before the insert; fixed at 2 for a red-black tree
    for (Node<K,V>[] tab = table;;) {
        // we come back here after a failed CAS, or while binCount is still 0
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0) // the table has not been allocated yet
            tab = initTable(); // allocate the table
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) { // the bin is empty
            if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value, null))) // CAS the new node in as the bin's first node
                // CAS succeeded
                break;
            // CAS failed
        }
        else if ((fh = f.hash) == MOVED) // f is a ForwardingNode (f.hash == -1): a resize is in progress and this bin has already been migrated to the new table
            tab = helpTransfer(tab, f); // help migrate bins
        else { // the bin is not empty
            V oldVal = null;
            synchronized (f) { // lock the bin
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) { // linked list
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            if (e.hash == hash && ((ek = e.key) == key || (ek != null && key.equals(ek)))) { // the node already exists
                                oldVal = e.val;
                                if (!onlyIfAbsent) // overwrite?
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                pred.next = new Node<K,V>(hash, key, value, null); // append the new node at the tail of the list
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) { // red-black tree, f.hash == TREEBIN(-2)
                        Node<K,V> p;
                        binCount = 2;
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key, value)) != null) { // insert into the red-black tree
                            // the node already exists
                            oldVal = p.val;
                            if (!onlyIfAbsent) // overwrite?
                                p.val = value;
                        }
                    }
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i); // convert the linked list into a red-black tree
                if (oldVal != null) // no new node was added
                    return oldVal; // return the old value
                break;
            }
            // binCount is still 0
        }
    }
    addCount(1L, binCount); // increment the node count, possibly triggering a resize
    return null;
}

private final Node<K,V>[] initTable() {
    Node<K,V>[] tab; int sc;
    while ((tab = table) == null || tab.length == 0) { // the table has not been allocated yet
        if ((sc = sizeCtl) < 0) /* read sizeCtl */
            Thread.yield(); // another thread is already initializing; back off
        else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { /* CAS sizeCtl = -1 */
            // CAS(sizeCtl) succeeded
            try {
                if ((tab = table) == null || tab.length == 0) { // the table is still not allocated
                    int n = (sc > 0) ? sc : DEFAULT_CAPACITY; // the initial capacity was stashed in sizeCtl, otherwise 16
                    Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                    table = tab = nt;
                    sc = n - (n >>> 2); // compute the threshold (capacity * 0.75)
                }
            } finally {
                sizeCtl = sc; // store the threshold back into sizeCtl
            }
            break;
        }
    }
    return tab;
}
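
For completeness, put and putIfAbsent both delegate to putVal and differ only in the onlyIfAbsent flag. A tiny usage sketch of the resulting semantics:

import java.util.concurrent.ConcurrentHashMap;

// put(k, v) overwrites an existing mapping, putIfAbsent(k, v) keeps it.
// Both return the previous value (or null if there was none).
public class PutDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        System.out.println(map.put("a", 1));           // null  (new mapping, addCount(1, binCount) runs)
        System.out.println(map.put("a", 2));           // 1     (overwritten, count unchanged)
        System.out.println(map.putIfAbsent("a", 3));   // 2     (existing value kept)
        System.out.println(map.get("a"));              // 2
    }
}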
4. Counting Nodes
Incrementing the node count (addCount) involves two steps:
1' update the count: a base count plus a set of per-cell increments
2' resize if needed: migrate bins in groups
public int size() { // under heavy concurrent modification, size() is only an estimate
    long n = sumCount();
    return ((n < 0L) ? 0 : (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : (int)n);
}
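
Note that size() clamps the long estimate to an int; Java 8 also added mappingCount(), which returns the same sumCount() estimate as a long and is the form the Javadoc recommends for potentially huge maps. A tiny sketch:

import java.util.concurrent.ConcurrentHashMap;

// size() truncates the estimated count to int; mappingCount() returns it as a long.
public class CountDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<Integer, Integer> map = new ConcurrentHashMap<>();
        for (int i = 0; i < 1000; i++) map.put(i, i);
        System.out.println(map.size());          // 1000
        System.out.println(map.mappingCount());  // 1000, but typed as long
    }
}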
@sun.misc.Contended static final class CounterCell {
    volatile long value;
    CounterCell(long x) { value = x; }
}
final long sumCount() { // estimated total node count = baseCount + every CounterCell.value
    CounterCell[] as = counterCells; CounterCell a;
    long sum = baseCount;
    if (as != null) {
        for (int i = 0; i < as.length; ++i) {
            if ((a = as[i]) != null)
                sum += a.value;
        }
    }
    return sum;
}
static final int resizeStamp(int n) {
    return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
}
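
resizeStamp(n) builds the "generation stamp" that addCount packs into the high 16 bits of sizeCtl while a resize is running; the low 16 bits then track how many threads are transferring. A small worked example (class name is mine) of that encoding for n = 16:

// Worked example of the sizeCtl encoding during a resize (constants copied from above).
// For a table of size n, the stamp is Integer.numberOfLeadingZeros(n) | (1 << 15);
// the first resizer CASes sizeCtl to (stamp << 16) + 2, and every helper adds 1.
public class ResizeStampDemo {
    static final int RESIZE_STAMP_BITS = 16;
    static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

    static int resizeStamp(int n) {
        return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
    }

    public static void main(String[] args) {
        int n = 16;                                   // current table size
        int rs = resizeStamp(n);                      // 27 | 0x8000 = 0x801B
        int sc = (rs << RESIZE_STAMP_SHIFT) + 2;      // sizeCtl written by the first resizer
        System.out.printf("stamp=0x%04X  sizeCtl=0x%08X (%d)%n", rs, sc, sc);
        // sizeCtl is negative while resizing; its high 16 bits recover the stamp,
        // and (low 16 bits) - 1 is the number of threads currently transferring.
        System.out.println((sc >>> RESIZE_STAMP_SHIFT) == rs);   // true
        System.out.println((sc & 0xFFFF) - 1);                   // 1 active resizer
    }
}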
private final void addCount(long x, int check) {
    CounterCell[] as; long b, s;
    // if contention on the map is low, counterCells may stay null forever;
    // once some thread loses a CAS on baseCount, counterCells is created and increments go into the cells
    if ((as = counterCells) != null || // counterCells already exists
        !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) { // CAS baseCount += x (baseCount alone is then the total count)
        // counterCells exists || CAS(baseCount) failed
        CounterCell a; long v; int m;
        boolean uncontended = true;
        if (as == null || (m = as.length - 1) < 0 || // counterCells is empty
            (a = as[ThreadLocalRandom.getProbe() & m]) == null || // this thread's cell slot is empty
            !(uncontended = U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) { // CAS CounterCell.value += x
            // counterCells is empty || the slot is empty || CAS(CounterCell.value) failed
            fullAddCount(x, uncontended);
            return; // skip the resize check
        }
        if (check <= 1) // put: check(binCount) <= 1 || remove/clear: check < 0
            return; // skip the resize check
        s = sumCount(); // estimate the total node count
    }
    if (check >= 0) { // put: check(binCount) >= 0
        Node<K,V>[] tab, nt; int n, sc;
        while (s >= (long)(sc = sizeCtl) // s >= resize threshold                               /* read sizeCtl */
            && (tab = table) != null && (n = tab.length) < MAXIMUM_CAPACITY) {
            int rs = resizeStamp(n);
            if (sc < 0) { // a resize is already in progress
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || sc == rs + MAX_RESIZERS ||
                    (nt = nextTable) == null || // the resize has not been set up yet
                    transferIndex <= 0) // not set up yet || all bins have been claimed
                    break;
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) // one more resizing thread  /* CAS sizeCtl += 1 */
                    // CAS(sizeCtl) succeeded
                    transfer(tab, nt); // help the resize until it finishes
            }
            // start a resize (single thread)
            else if (U.compareAndSwapInt(this, SIZECTL, sc, (rs << RESIZE_STAMP_SHIFT) + 2)) /* CAS sizeCtl = (stamp << 16) + 2 */
                // CAS(sizeCtl) succeeded
                transfer(tab, null); // resize
            s = sumCount();
        }
    }
}
// honestly, I did not really want to read this one
private final void fullAddCount(long x, boolean wasUncontended) {
    int h;
    if ((h = ThreadLocalRandom.getProbe()) == 0) {
        ThreadLocalRandom.localInit();
        h = ThreadLocalRandom.getProbe();
        wasUncontended = true;
    }
    boolean collide = false;
    for (;;) {
        CounterCell[] as; CounterCell a; int n; long v;
        if ((as = counterCells) != null && (n = as.length) > 0) { // counterCells exists
            if ((a = as[(n - 1) & h]) == null) { // this thread's cell slot is empty
                if (cellsBusy == 0) { // counterCells is not locked                             /* check cellsBusy == 0 */
                    CounterCell r = new CounterCell(x);
                    if (cellsBusy == 0 && U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { /* CAS cellsBusy = 1 */ // lock counterCells
                        // CAS(cellsBusy) succeeded
                        boolean created = false;
                        try {
                            CounterCell[] rs; int m, j;
                            if ((rs = counterCells) != null && (m = rs.length) > 0 && rs[j = (m - 1) & h] == null) {
                                rs[j] = r; // install the new CounterCell
                                created = true;
                            }
                        } finally {
                            cellsBusy = 0; // unlock counterCells
                        }
                        if (created) // the CounterCell was installed
                            break;
                        continue;
                    }
                }
                // failed to install a CounterCell..................................................................................1
                collide = false; // do not expand the cell array after the next CAS(CounterCell.value) failure
            }
            else if (!wasUncontended) // the previous CAS(CounterCell.value) already failed.........................................2
                wasUncontended = true;
            else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x)) // CAS CounterCell.value += x..........................3
                break;
            else if (counterCells != as || n >= NCPU) // the cell array was replaced || counterCells.length >= number of CPUs.......4
                collide = false; // do not expand the cell array after the next CAS(CounterCell.value) failure
            else if (!collide)
                collide = true; // expand the cell array after the next CAS(CounterCell.value) failure
            else if (cellsBusy == 0 && U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { // CAS cellsBusy = 1 (lock counterCells)
                // CAS(cellsBusy) succeeded
                try { // double the size of counterCells
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[n << 1];
                        for (int i = 0; i < n; ++i) // copy the existing cells
                            rs[i] = as[i];
                        counterCells = rs;
                    }
                } finally {
                    cellsBusy = 0; // unlock counterCells
                }
                collide = false; // do not expand the cell array after the next CAS(CounterCell.value) failure
                continue; // try again
            }
            // failed to install a CounterCell: counterCells was locked || CAS(cellsBusy) failed....................................1
            // the previous CAS(CounterCell.value) failed.............................................................................2
            // this CAS(CounterCell.value) failed.....................................................................................3
            // no expansion: the cell array was replaced || counterCells.length >= number of CPUs....................................4
            // failed to expand counterCells: it was locked || CAS(cellsBusy) failed.................................................5
            h = ThreadLocalRandom.advanceProbe(h); // pick a new probe (hash) for this thread
        }
        // counterCells does not exist yet: CAS cellsBusy = 1 (lock counterCells)
        else if (cellsBusy == 0 && counterCells == as && U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
            // CAS(cellsBusy) succeeded
            boolean init = false;
            try { // allocate counterCells with an initial size of 2
                if (counterCells == as) {
                    CounterCell[] rs = new CounterCell[2];
                    rs[h & 1] = new CounterCell(x);
                    counterCells = rs;
                    init = true;
                }
            } finally {
                cellsBusy = 0; // unlock counterCells
            }
            if (init) // the array was allocated
                break;
        }
        // counterCells does not exist and we failed to lock it: cellsBusy == 1 || CAS(cellsBusy) failed
        else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x)) // fall back to CAS baseCount += x
            break;
    }
}
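
Taken together, baseCount + counterCells is essentially an inlined LongAdder: uncontended updates CAS a single base field, contended updates are spread across cells that are summed on read. A stripped-down model of the same idea (no cell resizing, no @Contended padding; all names are mine):

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicLongArray;

// Stripped-down model of baseCount + counterCells:
// try the base counter first; on CAS failure, spread increments over a fixed set of cells.
class StripedCounter {
    private final AtomicLong base = new AtomicLong();          // plays the role of baseCount
    private final AtomicLongArray cells =
            new AtomicLongArray(Runtime.getRuntime().availableProcessors()); // plays the role of counterCells

    void add(long x) {
        long b = base.get();
        if (!base.compareAndSet(b, b + x)) {                   // contended: fall back to a cell
            int idx = ThreadLocalRandom.current().nextInt(cells.length());
            cells.addAndGet(idx, x);                           // ConcurrentHashMap retries a CAS here instead
        }
    }

    long sum() {                                               // same idea as sumCount(): base + all cells
        long s = base.get();
        for (int i = 0; i < cells.length(); i++)
            s += cells.get(i);
        return s;                                              // only an estimate while writers are active
    }
}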
5. Resizing
1' Group-based migration: multiple threads may join the migration, each claiming a group of bins
2' Reverse-order migration: transferIndex walks from n down to 0
3' ForwardingNode: once a bin has been migrated, its slot in the old table is set to a forwarding node
final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
    Node<K,V>[] nextTab; int sc;
    if (tab != null && (f instanceof ForwardingNode) && // a resize is in progress
        (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) { // and it has been set up
        int rs = resizeStamp(tab.length);
        while (nextTab == nextTable && table == tab && (sc = sizeCtl) < 0) { // still resizing      /* read sizeCtl */
            if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                sc == rs + MAX_RESIZERS || transferIndex <= 0)
                break;
            if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) { // one more resizing thread        /* CAS sizeCtl += 1 */
                // CAS(sizeCtl) succeeded
                transfer(tab, nextTab);
                break;
            }
        }
        return nextTab;
    }
    return table;
}
private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
    int n = tab.length, stride;
    if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) // group size: at least 16 bins, at most the whole table
        stride = MIN_TRANSFER_STRIDE;
    if (nextTab == null) { // set up the resize (single thread)
        try {
            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1]; // the new table, twice as large
            nextTab = nt;
        } catch (Throwable ex) {
            sizeCtl = Integer.MAX_VALUE;
            return;
        }
        nextTable = nextTab;
        transferIndex = n; // current migration position (walking backwards); 0 means the whole table has been claimed
    }
    int nextn = nextTab.length; // n << 1
    ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab); // forwarding node -> the new table
    boolean advance = true;
    boolean finishing = false; // set by the last thread to leave the resize, which then finishes it
    for (int i = 0, bound = 0;;) { // the current group covers [bound, i]
        Node<K,V> f; int fh;
        while (advance) {
            // a failed CAS on transferIndex comes back here
            int nextIndex, nextBound;
            if (--i >= bound || finishing) // the current group is not fully migrated yet || the whole table is done, wrap up
                advance = false;
            else if ((nextIndex = transferIndex) <= 0) { // no group left to claim                 /* read transferIndex */
                i = -1; // push i out of range
                advance = false;
            }
            // claim the next group: i = nextIndex - 1, bound = nextBound
            else if (U.compareAndSwapInt(this, TRANSFERINDEX, nextIndex,                           /* CAS transferIndex = nextIndex - stride, or 0 */
                      nextBound = (nextIndex > stride ? nextIndex - stride : 0))) {
                // CAS(transferIndex) succeeded
                bound = nextBound; // lower bound of the group
                i = nextIndex - 1; // upper bound of the group
                advance = false;
            }
        }
        // migrate the bins in tab[bound .. i] (in reverse order)
        if (i < 0 || i >= n || i + n >= nextn) { // i out of range: every group has been claimed
            int sc;
            if (finishing) { // the whole table has been migrated; finish the resize
                nextTable = null; // clear the temporary table
                table = nextTab; // install the new table
                sizeCtl = (n << 1) - (n >>> 1); // threshold: 0.75 * new capacity
                return;
            }
            if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) { // CAS sizeCtl -= 1: one resizing thread leaves
                // CAS(sizeCtl) succeeded
                if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT) // are we the last thread to leave the resize?
                    return; // no
                // yes
                finishing = advance = true; // wrap up
                i = n; // recheck the whole table once before finishing
            }
        }
        else if ((f = tabAt(tab, i)) == null) // the bin is empty
            advance = casTabAt(tab, i, null, fwd); // install the forwarding node; may fail if another thread inserts into tab[i] concurrently
        else if ((fh = f.hash) == MOVED) // the bin already holds a forwarding node (hash == MOVED): the CAS above succeeded || the bin was already migrated
            advance = true; // move on to the next index
        else { // the bin is not empty: split it and migrate it
            synchronized (f) { // lock the bin
                if (tabAt(tab, i) == f) {
                    Node<K,V> ln, hn; // low list (ln) and high list (hn) after the split
                    if (fh >= 0) { // split and migrate a linked list
                        int runBit = fh & n;
                        Node<K,V> lastRun = f;
                        for (Node<K,V> p = f.next; p != null; p = p.next) {
                            int b = p.hash & n;
                            if (b != runBit) {
                                runBit = b;
                                lastRun = p;
                            }
                        }
                        if (runBit == 0) {
                            ln = lastRun;
                            hn = null;
                        }
                        else {
                            hn = lastRun;
                            ln = null;
                        }
                        for (Node<K,V> p = f; p != lastRun; p = p.next) {
                            int ph = p.hash; K pk = p.key; V pv = p.val;
                            if ((ph & n) == 0)
                                ln = new Node<K,V>(ph, pk, pv, ln);
                            else
                                hn = new Node<K,V>(ph, pk, pv, hn);
                        }
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd); // the bin is migrated; install the forwarding node at tab[i]
                        advance = true;
                    }
                    else if (f instanceof TreeBin) { // split and migrate a red-black tree
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> lo = null, loTail = null; // low doubly linked list
                        TreeNode<K,V> hi = null, hiTail = null; // high doubly linked list
                        int lc = 0, hc = 0;
                        for (Node<K,V> e = t.first; e != null; e = e.next) {
                            int h = e.hash;
                            TreeNode<K,V> p = new TreeNode<K,V>(h, e.key, e.val, null, null);
                            if ((h & n) == 0) {
                                if ((p.prev = loTail) == null)
                                    lo = p;
                                else
                                    loTail.next = p;
                                loTail = p;
                                ++lc;
                            }
                            else {
                                if ((p.prev = hiTail) == null)
                                    hi = p;
                                else
                                    hiTail.next = p;
                                hiTail = p;
                                ++hc;
                            }
                        }
                        ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) : // small enough: fall back to a plain linked list
                            (hc != 0) ? new TreeBin<K,V>(lo) : t; // rebuild a red-black tree on the doubly linked list (treeify) || reuse the old TreeBin unchanged
                        hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) : // small enough: fall back to a plain linked list
                            (lc != 0) ? new TreeBin<K,V>(hi) : t; // rebuild a red-black tree on the doubly linked list (treeify) || reuse the old TreeBin unchanged
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd); // the bin is migrated; install the forwarding node at tab[i]
                        advance = true;
                    }
                }
            }
        }
    }
}
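
Because the table always doubles and stays a power of two, every node in old bin i ends up either in bin i (the low list) or bin i + n (the high list) of the new table, decided by the single bit hash & n. A tiny sketch with arbitrarily chosen hashes:

// Demonstrates the low/high split used by transfer: with old capacity n, the bit (hash & n)
// decides whether an entry stays at index i or moves to index i + n in the doubled table.
public class SplitDemo {
    public static void main(String[] args) {
        int n = 16;                               // old table capacity
        int[] hashes = {5, 21, 37, 53};           // all land in old bin 5 (h & 15 == 5)
        for (int h : hashes) {
            int oldIndex = h & (n - 1);           // index in the old table
            int newIndex = ((h & n) == 0) ? oldIndex : oldIndex + n; // low list vs high list
            System.out.printf("hash=%2d  old bin=%d  new bin=%d%n", h, oldIndex, newIndex);
        }
        // Output: 5 -> 5, 21 -> 21, 37 -> 5, 53 -> 21 (for n = 16)
    }
}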
6. Removing an Element
1' If the bin the key maps to is empty, return
2' If the table is being resized (the bin's first node is a ForwardingNode), help with the resize, then remove the element once it is done
3' Lock the bin and remove the node from it (red-black tree / singly linked list)
4' Decrement the node count
public V remove(Object key) {
    return replaceNode(key, null, null);
}

final V replaceNode(Object key, V value, Object cv) { // value != null means replace instead of remove; cv is the expected current value, if any
    int hash = spread(key.hashCode());
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0 || (f = tabAt(tab, i = (n - 1) & hash)) == null) // the bin is empty
            break;
        else if ((fh = f.hash) == MOVED) // f is a ForwardingNode (f.hash == -1): a resize is in progress and this bin has already been migrated to the new table
            tab = helpTransfer(tab, f); // help migrate bins
        else { // the bin is not empty
            V oldVal = null;
            boolean validated = false;
            synchronized (f) { // lock the bin
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) { // linked list
                        validated = true;
                        for (Node<K,V> e = f, pred = null;;) {
                            K ek;
                            if (e.hash == hash && ((ek = e.key) == key || (ek != null && key.equals(ek)))) { // found the node to remove (or replace)
                                V ev = e.val;
                                if (cv == null || cv == ev || (ev != null && cv.equals(ev))) {
                                    oldVal = ev;
                                    if (value != null) // replace the value
                                        e.val = value;
                                    else if (pred != null) // remove a node that is not the first in the list
                                        pred.next = e.next;
                                    else // remove the first node of the list
                                        setTabAt(tab, i, e.next);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null)
                                break;
                        }
                    }
                    else if (f instanceof TreeBin) { // red-black tree, f.hash == TREEBIN(-2)
                        validated = true;
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> r, p;
                        if ((r = t.root) != null &&
                            (p = r.findTreeNode(hash, key, null)) != null) { // found the node p to remove (or replace) in the red-black tree
                            V pv = p.val;
                            if (cv == null || cv == pv || (pv != null && cv.equals(pv))) {
                                oldVal = pv;
                                if (value != null)
                                    p.val = value;
                                else if (t.removeTreeNode(p)) // remove p from the tree; true means the tree is now small enough to become a list
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
            }
            if (validated) {
                if (oldVal != null) { // a node was removed (or its value replaced)
                    if (value == null) // removed rather than replaced
                        addCount(-1L, -1); // decrement the node count
                    return oldVal;
                }
                break;
            }
            // nothing was removed
        }
    }
    return null;
}
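
replaceNode is also the common backend for remove(key, value), replace(key, value) and replace(key, oldValue, newValue); the value and cv parameters select the behaviour. A short usage sketch of how the public methods map onto it:

import java.util.concurrent.ConcurrentHashMap;

// How the public removal/replacement methods map onto replaceNode(key, value, cv):
//   remove(k)            -> replaceNode(k, null, null)   : unconditional remove
//   remove(k, v)         -> replaceNode(k, null, v)      : remove only if the current value equals v
//   replace(k, v)        -> replaceNode(k, v, null)      : replace only if a mapping exists
//   replace(k, old, new) -> replaceNode(k, new, old)     : replace only if the current value equals old
public class RemoveDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        System.out.println(map.remove("a", 2));      // false: value does not match, nothing removed
        System.out.println(map.replace("a", 1, 2));  // true:  1 -> 2
        System.out.println(map.remove("a"));         // 2:     unconditional remove, addCount(-1, -1) runs
        System.out.println(map.size());              // 0
    }
}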