并发编程---ConcurrentHashMap源码解析

    ConcurrentHashMap是Java中为了解决HashMap非线程安全、无法安全用于高并发场景而设计的线程安全哈希表实现。

    ConcurrentHashMap的类结构

public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
    implements ConcurrentMap<K,V>, Serializable {
      ......
}

    ConcurrentHashMap的主要成员变量

// Maximum table capacity: must be a power of two <= 1<<30.
private static final int MAXIMUM_CAPACITY = 1 << 30;
// Default initial table capacity (a power of two).
private static final int DEFAULT_CAPACITY = 16;
// Largest possible array size for toArray-style methods (headroom for VM array header).
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// Default concurrency level; unused in this implementation, kept for serialization compatibility.
private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
// Load factor; used only for initial sizing in constructors (the actual resize threshold is n - n/4).
private static final float LOAD_FACTOR = 0.75f;
// Bin length at which a linked list is converted to a red-black tree.
static final int TREEIFY_THRESHOLD = 8;
// Bin length below which a tree is converted back to a linked list (checked during resize).
static final int UNTREEIFY_THRESHOLD = 6;
// Minimum table capacity before bins may be treeified; smaller tables are resized instead.
static final int MIN_TREEIFY_CAPACITY = 64;
// Minimum number of bins a resizing thread claims per transfer step.
private static final int MIN_TRANSFER_STRIDE = 16;
// Number of bits used for the generation stamp stored in sizeCtl during a resize.
private static int RESIZE_STAMP_BITS = 16;
// Maximum number of threads that can help with a resize.
private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
// Shift used to place the resize stamp into the upper bits of sizeCtl.
private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
// Special node hash values; normal node hashes are always non-negative (see spread()).
static final int MOVED     = -1; // hash for forwarding nodes
static final int TREEBIN   = -2; // hash for roots of trees
static final int RESERVED  = -3; // hash for transient reservations
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
/** Number of CPUS, to place bounds on some sizings */
static final int NCPU = Runtime.getRuntime().availableProcessors();
// The bin array; lazily created on first insertion, length is always a power of two.
transient volatile Node<K,V>[] table;
// The next array used during a resize; non-null only while resizing (twice the old size).
private transient volatile Node<K,V>[] nextTable;
// Base element count, updated via CAS when there is no contention on counterCells.
private transient volatile long baseCount;
/**
 * Table initialization and resizing control.
 * Negative means initialization or resizing is in progress:
 *   -1 while the table is being initialized;
 *   during a resize, the upper RESIZE_STAMP_BITS bits hold a resize stamp and
 *   the lower bits track (1 + active resizing threads) — the popular reading
 *   "-N means N-1 resizing threads" is only an approximation of this encoding.
 * Zero or positive means the table is not yet initialized; the value is the
 * initial capacity to use, or, once created, the next resize threshold.
 */
private transient volatile int sizeCtl;
// Next table index (plus one) still to be split among resizing threads.
private transient volatile int transferIndex;
// Spinlock (acquired via CAS) guarding creation/resizing of counterCells.
private transient volatile int cellsBusy;
// Striped counter cells used instead of baseCount when CAS contention occurs.
private transient volatile CounterCell[] counterCells;

// Unsafe mechanics: raw field/array offsets (resolved in the static block below
// via sun.misc.Unsafe.objectFieldOffset) used by the CAS and volatile helpers.
private static final sun.misc.Unsafe U;
private static final long SIZECTL;        // offset of sizeCtl
private static final long TRANSFERINDEX;  // offset of transferIndex
private static final long BASECOUNT;      // offset of baseCount
private static final long CELLSBUSY;      // offset of cellsBusy
private static final long CELLVALUE;      // offset of CounterCell.value
private static final long ABASE;          // base offset of Node[] element storage
private static final int ASHIFT;          // log2 of the Node[] element scale
static {
        // Resolve Unsafe and the field/array offsets used by the CAS helpers.
        try {
            U = sun.misc.Unsafe.getUnsafe();
            Class<?> k = ConcurrentHashMap.class;
            SIZECTL = U.objectFieldOffset
                (k.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (k.getDeclaredField("transferIndex"));
            BASECOUNT = U.objectFieldOffset
                (k.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (k.getDeclaredField("cellsBusy"));
            Class<?> ck = CounterCell.class;
            CELLVALUE = U.objectFieldOffset
                (ck.getDeclaredField("value"));
            Class<?> ak = Node[].class;
            ABASE = U.arrayBaseOffset(ak);
            // The element scale must be a power of two so an index can be
            // turned into a byte offset with a single shift (see tabAt).
            int scale = U.arrayIndexScale(ak);
            if ((scale & (scale - 1)) != 0)
                throw new Error("data type scale not a power of two");
            // ASHIFT = log2(scale); slot i lives at (i << ASHIFT) + ABASE.
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (Exception e) {
            throw new Error(e);
        }
    }

    ConcurrentHashMap中的主要内部类

    Node:

static class Node<K,V> implements Map.Entry<K,V> {
        // Basic hash-bin node: immutable hash/key, volatile value and next link
        // so readers can traverse bins without locking.
        final int hash;
        final K key;
        volatile V val;
        volatile Node<K,V> next;

        Node(int hash, K key, V val, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.val = val;
            this.next = next;
        }

        public final K getKey()   { return key; }
        public final V getValue() { return val; }

        // Map.Entry contract: key hash XOR value hash.
        public final int hashCode() {
            return key.hashCode() ^ val.hashCode();
        }

        public final String toString() {
            return key + "=" + val;
        }

        // Entries handed out by this map are not writable through setValue.
        public final V setValue(V value) {
            throw new UnsupportedOperationException();
        }

        public final boolean equals(Object o) {
            if (!(o instanceof Map.Entry))
                return false;
            Map.Entry<?,?> entry = (Map.Entry<?,?>) o;
            Object otherKey = entry.getKey();
            Object otherVal = entry.getValue();
            if (otherKey == null || otherVal == null)
                return false;
            V snapshot = val; // read the volatile value exactly once
            return (otherKey == key || otherKey.equals(key)) &&
                   (otherVal == snapshot || otherVal.equals(snapshot));
        }

        /**
         * Virtualized support for map.get(); overridden in subclasses.
         * Walks this bin's list for a node matching the given hash and key.
         */
        Node<K,V> find(int h, Object k) {
            if (k != null) {
                for (Node<K,V> cur = this; cur != null; cur = cur.next) {
                    K curKey;
                    if (cur.hash == h &&
                        ((curKey = cur.key) == k ||
                         (curKey != null && k.equals(curKey))))
                        return cur;
                }
            }
            return null;
        }
}

    由于这里Node和HashMap中的Node基本一致,所以不再赘述。

    ForwardingNode:继承自Node,hash值固定为-1(MOVED)。扩容时它被放到旧数组已迁移完的桶位上,内部保存nextTable的引用,把落在该桶上的查询转发到新数组。

static final class ForwardingNode<K,V> extends Node<K,V> {
        // Placed at the head of an already-transferred bin during a resize;
        // its hash is MOVED (-1) and lookups are forwarded to the new table.
        final Node<K,V>[] nextTable;
        ForwardingNode(Node<K,V>[] tab) {
            super(MOVED, null, null, null);
            this.nextTable = tab;
        }

        Node<K,V> find(int h, Object k) {
            // loop to avoid arbitrarily deep recursion on forwarding nodes
            outer: for (Node<K,V>[] tab = nextTable;;) {
                Node<K,V> e; int n;
                // Null key, missing table, or empty bin: no match possible.
                if (k == null || tab == null || (n = tab.length) == 0 ||
                    (e = tabAt(tab, (n - 1) & h)) == null)
                    return null;
                for (;;) {
                    int eh; K ek;
                    if ((eh = e.hash) == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                    // Negative hash: a special node, not a plain list node.
                    if (eh < 0) {
                        // Another forwarding node (resizes can chain):
                        // restart the scan on its newer table instead of recursing.
                        if (e instanceof ForwardingNode) {
                            tab = ((ForwardingNode<K,V>)e).nextTable;
                            continue outer;
                        }
                        else
                            // TreeBin / reservation: delegate to its own find().
                            return e.find(h, k);
                    }
                    if ((e = e.next) == null)
                        return null;
                }
            }
        }
    }

    ReservationNode:继承于Node,哈希值为-3。

static final class ReservationNode<K,V> extends Node<K,V> {
    // Placeholder node (hash RESERVED = -3) used to claim a bin while a value
    // is being computed (computeIfAbsent / compute); it never stores data.
    ReservationNode() {
        super(RESERVED, null, null, null);
    }

    // A reservation can never match a key.
    Node<K,V> find(int h, Object k) {
        return null;
    }
}

    ConcurrentHashMap的主要构造函数

// Creates an empty map whose table can hold initialCapacity elements
// without needing to resize.
public ConcurrentHashMap(int initialCapacity) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException();
        // Derive the table size: roughly 1.5 * initialCapacity + 1, rounded up
        // to the next power of two by tableSizeFor; once the request reaches
        // half of MAXIMUM_CAPACITY the cap is pinned to MAXIMUM_CAPACITY.
        int cap;
        if (initialCapacity >= (MAXIMUM_CAPACITY >>> 1))
            cap = MAXIMUM_CAPACITY;
        else
            cap = tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1);
        // The table itself is created lazily; until then sizeCtl carries the
        // capacity to allocate on first insertion.
        this.sizeCtl = cap;
}

// Creates a map with the default capacity hint and copies all mappings from m.
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        // Table allocation is deferred; sizeCtl just records the default size.
        this.sizeCtl = DEFAULT_CAPACITY;
        putAll(m);
}

// Capacity + load-factor constructor; delegates to the full constructor
// with a concurrency level of 1.
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1);
}

// Full constructor: capacity, load factor, and estimated concurrency level.
public ConcurrentHashMap(int initialCapacity,float loadFactor, int concurrencyLevel) {
        // Note: !(loadFactor > 0.0f) also rejects NaN, which loadFactor <= 0.0f would not.
        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        // Use at least as many bins as estimated concurrent updating threads.
        if (initialCapacity < concurrencyLevel)
            initialCapacity = concurrencyLevel;
        // Treat initialCapacity as the expected element count and work the
        // table size back from it through the load factor.
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
        int cap;
        if (size >= (long)MAXIMUM_CAPACITY)
            cap = MAXIMUM_CAPACITY;
        else
            cap = tableSizeFor((int)size);
        // Stash the computed capacity until the table is created lazily.
        this.sizeCtl = cap;
}

    我们发现构造函数中都只是对sizeCtl进行了初始化,其余成员变量,比如table数组,均没有初始化,而是等到第一次put操作时进行初始化。

    ConcurrentHashMap的主要方法

    我们先来看看ConcurrentHashMap的一些基础方法。

    散列计算:int spread(int h):对key的hashCode值进行散列计算。

// Spreads higher bits of the key's hashCode into the lower bits and clears
// the sign bit (negative hashes are reserved for special node types).
static final int spread(int h) {
        int mixed = h ^ (h >>> 16);
        return mixed & HASH_BITS;
}

     原子操作方法:tabAt、casTabAt、setTabAt。这是三个原子操作,用于对指定位置的节点进行操作。正是这些原子操作保证了ConcurrentHashMap的线程安全。

// Volatile read of the Node in slot i of tab; U is the shared sun.misc.Unsafe
// instance, so readers always see the latest published bin head.
static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        long offset = ((long)i << ASHIFT) + ABASE;
        return (Node<K,V>)U.getObjectVolatile(tab, offset);
}

// Atomically installs v into slot i of tab, but only if the slot still holds
// the expected node c. A concurrent writer that changed the slot first makes
// this CAS fail, so the caller retries — this is what makes lock-free
// insertion into empty bins safe without overwriting another thread's update.
static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        long offset = ((long)i << ASHIFT) + ABASE;
        return U.compareAndSwapObject(tab, offset, c, v);
}

// Unconditional volatile write of v into slot i of tab, publishing the new
// bin head to all reader threads.
static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        long offset = ((long)i << ASHIFT) + ABASE;
        U.putObjectVolatile(tab, offset, v);
}

    初始化数组:Node<K,V>[] initTable()

private final Node<K,V>[] initTable() {
        // Lazily creates the table on first use. Exactly one thread wins the
        // CAS on sizeCtl (setting it to -1) and allocates; losers spin-yield
        // until the table becomes visible.
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null || tab.length == 0) {
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    // Re-check: another thread may have finished initializing
                    // between our read of table and winning the CAS.
                    if ((tab = table) == null || tab.length == 0) {
                        // A positive sizeCtl carries the capacity requested by
                        // the constructor; otherwise use the default.
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = tab = nt;
                        // Next resize threshold: n - n/4, i.e. 0.75 * n.
                        sc = n - (n >>> 2);
                    }
                } finally {
                    // Publish the threshold (or restore sc if we lost the re-check).
                    sizeCtl = sc;
                }
                break;
            }
        }
        return tab;
}

获取:V get(Object key)

public V get(Object key) {
        // Lock-free lookup: correctness relies only on the volatile reads of
        // table and of each bin head (via tabAt), never on locking.
        // tab: current bin array; e: current node; p: match from special find;
        // n: table length; eh: e's hash; ek: e's key.
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        // Spread the key's hashCode exactly as the write path does.
        int h = spread(key.hashCode());
        // Locate the bin at index (n - 1) & h; bail out if the table is not
        // yet initialized or the bin is empty.
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            // Fast path: the first node of the bin matches.
            if ((eh = e.hash) == h) {
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            // Negative hash marks a special node (forwarding node during a
            // resize, tree bin, or reservation): delegate to its find().
            else if (eh < 0)
                return (p = e.find(h, key)) != null ? p.val : null;
            // Plain bin: walk the linked list.
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }

猜你喜欢

转载自my.oschina.net/u/3765527/blog/1832989