Coverage Summary for Class: Striped64 (com.google.common.cache)

Class            Method, %      Line, %
Striped64        0% (0/8)       0% (0/75)
Striped64$1      0% (0/2)       0% (0/7)
Striped64$Cell   0% (0/3)       0% (0/9)
Total            0% (0/13)      0% (0/91)


/*
 * Written by Doug Lea with assistance from members of JCP JSR-166
 * Expert Group and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

/*
 * Source:
 * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.9
 */

package com.google.common.cache;

import com.google.common.annotations.GwtIncompatible;
import java.util.Random;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * A package-local class holding common representation and mechanics for classes supporting dynamic
 * striping on 64bit values. The class extends Number so that concrete subclasses must publicly do
 * so.
 */
@GwtIncompatible
abstract class Striped64 extends Number {
  /*
   * This class maintains a lazily-initialized table of atomically
   * updated variables, plus an extra "base" field. The table size
   * is a power of two. Indexing uses masked per-thread hash codes.
   * Nearly all declarations in this class are package-private,
   * accessed directly by subclasses.
   *
   * Table entries are of class Cell; a variant of AtomicLong padded
   * to reduce cache contention on most processors. Padding is
   * overkill for most Atomics because they are usually irregularly
   * scattered in memory and thus don't interfere much with each
   * other. But Atomic objects residing in arrays will tend to be
   * placed adjacent to each other, and so will most often share
   * cache lines (with a huge negative performance impact) without
   * this precaution.
   *
   * In part because Cells are relatively large, we avoid creating
   * them until they are needed. When there is no contention, all
   * updates are made to the base field. Upon first contention (a
   * failed CAS on base update), the table is initialized to size 2.
   * The table size is doubled upon further contention until
   * reaching the nearest power of two greater than or equal to the
   * number of CPUS. Table slots remain empty (null) until they are
   * needed.
   *
   * A single spinlock ("busy") is used for initializing and
   * resizing the table, as well as populating slots with new Cells.
   * There is no need for a blocking lock; when the lock is not
   * available, threads try other slots (or the base). During these
   * retries, there is increased contention and reduced locality,
   * which is still better than alternatives.
   *
   * Per-thread hash codes are initialized to random values.
   * Contention and/or table collisions are indicated by failed
   * CASes when performing an update operation (see method
   * retryUpdate). Upon a collision, if the table size is less than
   * the capacity, it is doubled in size unless some other thread
   * holds the lock. If a hashed slot is empty, and lock is
   * available, a new Cell is created. Otherwise, if the slot
   * exists, a CAS is tried. Retries proceed by "double hashing",
   * using a secondary hash (Marsaglia XorShift) to try to find a
   * free slot.
   *
   * The table size is capped because, when there are more threads
   * than CPUs, supposing that each thread were bound to a CPU,
   * there would exist a perfect hash function mapping threads to
   * slots that eliminates collisions. When we reach capacity, we
   * search for this mapping by randomly varying the hash codes of
   * colliding threads. Because search is random, and collisions
   * only become known via CAS failures, convergence can be slow,
   * and because threads are typically not bound to CPUS forever,
   * may not occur at all. However, despite these limitations,
   * observed contention rates are typically low in these cases.
   *
   * It is possible for a Cell to become unused when threads that
   * once hashed to it terminate, as well as in the case where
   * doubling the table causes no thread to hash to it under
   * expanded mask. We do not try to detect or remove such cells,
   * under the assumption that for long-running instances, observed
   * contention levels will recur, so the cells will eventually be
   * needed again; and for short-lived ones, it does not matter.
   */

  /**
   * Padded variant of AtomicLong supporting only raw accesses plus CAS. The value field is placed
   * between pads, hoping that the JVM doesn't reorder them.
   *
   * <p>JVM intrinsics note: It would be possible to use a release-only form of CAS here, if it were
   * provided.
   */
  static final class Cell {
    volatile long p0, p1, p2, p3, p4, p5, p6;
    volatile long value;
    volatile long q0, q1, q2, q3, q4, q5, q6;

    Cell(long x) {
      value = x;
    }

    final boolean cas(long cmp, long val) {
      return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val);
    }

    // Unsafe mechanics
    private static final sun.misc.Unsafe UNSAFE;
    private static final long valueOffset;

    static {
      try {
        UNSAFE = getUnsafe();
        Class<?> ak = Cell.class;
        valueOffset = UNSAFE.objectFieldOffset(ak.getDeclaredField("value"));
      } catch (Exception e) {
        throw new Error(e);
      }
    }
  }

  /**
   * ThreadLocal holding a single-slot int array holding hash code. Unlike the JDK8 version of this
   * class, we use a suboptimal int[] representation to avoid introducing a new type that can impede
   * class-unloading when ThreadLocals are not removed.
   */
  static final ThreadLocal<int[]> threadHashCode = new ThreadLocal<>();

  /** Generator of new random hash codes */
  static final Random rng = new Random();

  /** Number of CPUS, to place bound on table size */
  static final int NCPU = Runtime.getRuntime().availableProcessors();

  /** Table of cells. When non-null, size is a power of 2. */
  transient volatile Cell @Nullable [] cells;

  /**
   * Base value, used mainly when there is no contention, but also as a fallback during table
   * initialization races. Updated via CAS.
   */
  transient volatile long base;

  /** Spinlock (locked via CAS) used when resizing and/or creating Cells. */
  transient volatile int busy;

  /** Package-private default constructor */
  Striped64() {}

  /** CASes the base field. */
  final boolean casBase(long cmp, long val) {
    return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val);
  }

  /** CASes the busy field from 0 to 1 to acquire lock. */
  final boolean casBusy() {
    return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1);
  }

  /**
   * Computes the function of current and new value. Subclasses should open-code this update
   * function for most uses, but the virtualized form is needed within retryUpdate.
   *
   * @param currentValue the current value (of either base or a cell)
   * @param newValue the argument from a user update call
   * @return result of the update function
   */
  abstract long fn(long currentValue, long newValue);

  /**
   * Handles cases of updates involving initialization, resizing, creating new Cells, and/or
   * contention. See above for explanation. This method suffers the usual non-modularity problems of
   * optimistic retry code, relying on rechecked sets of reads.
   *
   * @param x the value
   * @param hc the hash code holder
   * @param wasUncontended false if CAS failed before call
   */
  final void retryUpdate(long x, int[] hc, boolean wasUncontended) {
    int h;
    if (hc == null) {
      threadHashCode.set(hc = new int[1]); // Initialize randomly
      int r = rng.nextInt(); // Avoid zero to allow xorShift rehash
      h = hc[0] = (r == 0) ? 1 : r;
    } else h = hc[0];
    boolean collide = false; // True if last slot nonempty
    for (; ; ) {
      Cell[] as;
      Cell a;
      int n;
      long v;
      if ((as = cells) != null && (n = as.length) > 0) {
        if ((a = as[(n - 1) & h]) == null) {
          if (busy == 0) { // Try to attach new Cell
            Cell r = new Cell(x); // Optimistically create
            if (busy == 0 && casBusy()) {
              boolean created = false;
              try { // Recheck under lock
                Cell[] rs;
                int m, j;
                if ((rs = cells) != null && (m = rs.length) > 0 && rs[j = (m - 1) & h] == null) {
                  rs[j] = r;
                  created = true;
                }
              } finally {
                busy = 0;
              }
              if (created) break;
              continue; // Slot is now non-empty
            }
          }
          collide = false;
        } else if (!wasUncontended) // CAS already known to fail
          wasUncontended = true; // Continue after rehash
        else if (a.cas(v = a.value, fn(v, x))) break;
        else if (n >= NCPU || cells != as) collide = false; // At max size or stale
        else if (!collide) collide = true;
        else if (busy == 0 && casBusy()) {
          try {
            if (cells == as) { // Expand table unless stale
              Cell[] rs = new Cell[n << 1];
              for (int i = 0; i < n; ++i) rs[i] = as[i];
              cells = rs;
            }
          } finally {
            busy = 0;
          }
          collide = false;
          continue; // Retry with expanded table
        }
        h ^= h << 13; // Rehash
        h ^= h >>> 17;
        h ^= h << 5;
        hc[0] = h; // Record index for next time
      } else if (busy == 0 && cells == as && casBusy()) {
        boolean init = false;
        try { // Initialize table
          if (cells == as) {
            Cell[] rs = new Cell[2];
            rs[h & 1] = new Cell(x);
            cells = rs;
            init = true;
          }
        } finally {
          busy = 0;
        }
        if (init) break;
      } else if (casBase(v = base, fn(v, x))) break; // Fall back on using base
    }
  }

  /** Sets base and all cells to the given value. */
  final void internalReset(long initialValue) {
    Cell[] as = cells;
    base = initialValue;
    if (as != null) {
      int n = as.length;
      for (int i = 0; i < n; ++i) {
        Cell a = as[i];
        if (a != null) a.value = initialValue;
      }
    }
  }

  // Unsafe mechanics
  private static final sun.misc.Unsafe UNSAFE;
  private static final long baseOffset;
  private static final long busyOffset;

  static {
    try {
      UNSAFE = getUnsafe();
      Class<?> sk = Striped64.class;
      baseOffset = UNSAFE.objectFieldOffset(sk.getDeclaredField("base"));
      busyOffset = UNSAFE.objectFieldOffset(sk.getDeclaredField("busy"));
    } catch (Exception e) {
      throw new Error(e);
    }
  }

  /**
   * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. Replace with a simple call
   * to Unsafe.getUnsafe when integrating into a jdk.
   *
   * @return a sun.misc.Unsafe
   */
  private static sun.misc.Unsafe getUnsafe() {
    try {
      return sun.misc.Unsafe.getUnsafe();
    } catch (SecurityException tryReflectionInstead) {
    }
    try {
      return java.security.AccessController.doPrivileged(
          new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
            @Override
            public sun.misc.Unsafe run() throws Exception {
              Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
              for (java.lang.reflect.Field f : k.getDeclaredFields()) {
                f.setAccessible(true);
                Object x = f.get(null);
                if (k.isInstance(x)) return k.cast(x);
              }
              throw new NoSuchFieldError("the Unsafe");
            }
          });
    } catch (java.security.PrivilegedActionException e) {
      throw new RuntimeException("Could not initialize intrinsics", e.getCause());
    }
  }
}
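
For orientation, the sketch below shows how a concrete subclass might drive this machinery, in the spirit of jsr166e's LongAdder: the uncontended fast path CASes the base field, and only when a cells table already exists or that CAS fails does the update try the per-thread hashed Cell and, failing that, fall through to retryUpdate. The class name SumAdder is hypothetical, and the sketch assumes it lives in the same package so the package-private members are visible; it is an illustration of the intended usage pattern, not part of the file listed above.

package com.google.common.cache;

/**
 * Hypothetical subclass sketch (modeled on jsr166e's LongAdder); not part of the listing above.
 */
final class SumAdder extends Striped64 {

  /** The update function: plain addition of the current value and the increment. */
  @Override
  long fn(long currentValue, long newValue) {
    return currentValue + newValue;
  }

  /** Adds the given value, striping across Cells only under contention. */
  public void add(long x) {
    Cell[] as;
    long b, v;
    int[] hc;
    Cell a;
    int n;
    // Fast path: no cells table yet and an uncontended CAS on base succeeds.
    if ((as = cells) != null || !casBase(b = base, b + x)) {
      boolean uncontended = true;
      // Try the per-thread hashed Cell; on any miss or CAS failure,
      // fall back to the slow path that initializes/resizes the table.
      if ((hc = threadHashCode.get()) == null
          || as == null
          || (n = as.length) < 1
          || (a = as[(n - 1) & hc[0]]) == null
          || !(uncontended = a.cas(v = a.value, v + x))) {
        retryUpdate(x, hc, uncontended);
      }
    }
  }

  /** Returns the current sum; a moving snapshot, not atomic with respect to concurrent adds. */
  public long sum() {
    long sum = base;
    Cell[] as = cells;
    if (as != null) {
      for (Cell a : as) {
        if (a != null) sum += a.value;
      }
    }
    return sum;
  }

  // Number methods, required because Striped64 extends Number; all delegate to sum().
  @Override
  public long longValue() {
    return sum();
  }

  @Override
  public int intValue() {
    return (int) sum();
  }

  @Override
  public float floatValue() {
    return (float) sum();
  }

  @Override
  public double doubleValue() {
    return (double) sum();
  }
}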