Coverage Summary for Class: Striped (com.google.common.util.concurrent)

Class                                      Method, %      Line, %
Striped                                    0% (0/14)      0% (0/34)
Striped$1                                  0% (0/2)       0% (0/2)
Striped$2                                  0% (0/2)       0% (0/2)
Striped$3                                  0% (0/2)       0% (0/2)
Striped$4                                  0% (0/2)       0% (0/2)
Striped$5                                  0% (0/2)       0% (0/2)
Striped$6                                  0% (0/2)       0% (0/2)
Striped$CompactStriped                     0% (0/4)       0% (0/8)
Striped$LargeLazyStriped                   0% (0/3)       0% (0/13)
Striped$PaddedLock                         0% (0/1)       0% (0/1)
Striped$PaddedSemaphore                    0% (0/1)       0% (0/1)
Striped$PowerOfTwoStriped                  0% (0/3)       0% (0/6)
Striped$SmallLazyStriped                   0% (0/4)       0% (0/25)
Striped$SmallLazyStriped$ArrayReference    0% (0/1)       0% (0/2)
Striped$WeakSafeCondition                  0% (0/2)       0% (0/4)
Striped$WeakSafeLock                       0% (0/3)       0% (0/5)
Striped$WeakSafeReadWriteLock              0% (0/3)       0% (0/4)
Total                                      0% (0/51)      0% (0/115)


/*
 * Copyright (C) 2011 The Guava Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */

package com.google.common.util.concurrent;

import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.MapMaker;
import com.google.common.math.IntMath;
import com.google.common.primitives.Ints;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.math.RoundingMode;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * A striped {@code Lock/Semaphore/ReadWriteLock}. This offers the underlying lock striping similar
 * to that of {@code ConcurrentHashMap} in a reusable form, and extends it for semaphores and
 * read-write locks. Conceptually, lock striping is the technique of dividing a lock into many
 * <i>stripes</i>, increasing the granularity of a single lock and allowing independent operations
 * to lock different stripes and proceed concurrently, instead of creating contention for a single
 * lock.
 *
 * <p>The guarantee provided by this class is that equal keys lead to the same lock (or semaphore),
 * i.e. {@code if (key1.equals(key2))} then {@code striped.get(key1) == striped.get(key2)} (assuming
 * {@link Object#hashCode()} is correctly implemented for the keys). Note that if {@code key1} is
 * <strong>not</strong> equal to {@code key2}, it is <strong>not</strong> guaranteed that {@code
 * striped.get(key1) != striped.get(key2)}; the elements might nevertheless be mapped to the same
 * lock. The lower the number of stripes, the higher the probability of this happening.
 *
 * <p>There are three flavors of this class: {@code Striped<Lock>}, {@code Striped<Semaphore>}, and
 * {@code Striped<ReadWriteLock>}.
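 *
 * <p>For instance, a {@code Striped<Lock>} can guard per-key critical sections. A hypothetical
 * usage sketch ({@code accountId} and {@code updateAccount} are illustrative names, not part of
 * this API):
 *
 * <pre>{@code
 * Striped<Lock> stripedLock = Striped.lock(64);
 * Lock lock = stripedLock.get(accountId); // equal keys always map to the same lock
 * lock.lock();
 * try {
 *   updateAccount(accountId);
 * } finally {
 *   lock.unlock();
 * }
 * }</pre>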
 *
 * <p>For each type, two implementations are offered: {@linkplain #lock(int) strong} and
 * {@linkplain #lazyWeakLock(int) weak} {@code Striped<Lock>}, {@linkplain #semaphore(int, int)
 * strong} and {@linkplain #lazyWeakSemaphore(int, int) weak} {@code Striped<Semaphore>}, and
 * {@linkplain #readWriteLock(int) strong} and {@linkplain #lazyWeakReadWriteLock(int) weak} {@code
 * Striped<ReadWriteLock>}. <i>Strong</i> means that all stripes (locks/semaphores) are initialized
 * eagerly, and are not reclaimed unless {@code Striped} itself is reclaimable. <i>Weak</i> means
 * that locks/semaphores are created lazily, and they are allowed to be reclaimed if nobody is
 * holding on to them. This is useful, for example, if one wants to create a {@code Striped<Lock>}
 * of many locks, but worries that in most cases only a small portion of these would be in use.
 *
 * <p>Prior to this class, one might be tempted to use {@code Map<K, Lock>}, where {@code K}
 * represents the task. This maximizes concurrency by having each unique key mapped to a unique
 * lock, but also maximizes memory footprint. On the other extreme, one could use a single lock for
 * all tasks, which minimizes memory footprint but also minimizes concurrency. Instead of choosing
 * either of these extremes, {@code Striped} allows the user to trade between required concurrency
 * and memory footprint. For example, if a set of tasks is CPU-bound, one could easily create a
 * very compact {@code Striped<Lock>} of {@code availableProcessors() * 4} stripes, instead of
 * possibly thousands of locks that could be created in a {@code Map<K, Lock>} structure.
 *
 * @author Dimitris Andreou
 * @since 13.0
 */
@Beta
@GwtIncompatible
@ElementTypesAreNonnullByDefault
public abstract class Striped<L> {
  /**
   * If there are at least this many stripes, we assume the memory usage of a ConcurrentMap will be
   * smaller than a large array. (This assumes that in the lazy case, most stripes are unused. As
   * always, if many stripes are in use, a non-lazy striped makes more sense.)
   */
  private static final int LARGE_LAZY_CUTOFF = 1024;

  private Striped() {}

  /**
   * Returns the stripe that corresponds to the passed key. It is always guaranteed that if {@code
   * key1.equals(key2)}, then {@code get(key1) == get(key2)}.
   *
   * @param key an arbitrary, non-null key
   * @return the stripe that the passed key corresponds to
   */
  public abstract L get(Object key);

  /**
   * Returns the stripe at the specified index. Valid indexes range from 0, inclusive, to {@code
   * size()}, exclusive.
   *
   * @param index the index of the stripe to return; must be in {@code [0...size())}
   * @return the stripe at the specified index
   */
  public abstract L getAt(int index);

  /**
   * Returns the index to which the given key is mapped, so that {@code getAt(indexFor(key)) ==
   * get(key)}.
   */
  abstract int indexFor(Object key);

  /** Returns the total number of stripes in this instance. */
  public abstract int size();

  /**
   * Returns the stripes that correspond to the passed objects, in ascending (as per {@link
   * #getAt(int)}) order. Thus, threads that use the stripes in the order returned by this method
   * are guaranteed not to deadlock each other.
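   *
   * <p>A hypothetical lock-all/unlock-all sketch ({@code stripedLock}, {@code keys}, and {@code
   * processBatch} are illustrative names):
   *
   * <pre>{@code
   * Iterable<Lock> locks = stripedLock.bulkGet(keys);
   * for (Lock lock : locks) {
   *   lock.lock(); // locks arrive in index order, so concurrent callers cannot deadlock
   * }
   * try {
   *   processBatch(keys);
   * } finally {
   *   for (Lock lock : locks) {
   *     lock.unlock();
   *   }
   * }
   * }</pre>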
   *
   * <p>It should be noted that using a {@code Striped<L>} with relatively few stripes, and {@code
   * bulkGet(keys)} with a relatively large number of keys can cause an excessive number of shared
   * stripes (much like the birthday paradox, where much fewer than anticipated birthdays are needed
   * for a pair of them to match). Please consider carefully the implications of the number of
   * stripes, the intended concurrency level, and the typical number of keys used in a {@code
   * bulkGet(keys)} operation. See <a href="http://www.mathpages.com/home/kmath199.htm">Balls in
   * Bins model</a> for mathematical formulas that can be used to estimate the probability of
   * collisions.
   *
   * @param keys arbitrary non-null keys
   * @return the stripes corresponding to the objects (one for each object, derived by delegating to
   *     {@link #get(Object)}; may contain duplicates), in increasing index order.
   */
  public Iterable<L> bulkGet(Iterable<?> keys) {
    // Initially using the array to store the keys, then reusing it to store the respective L's
    final Object[] array = Iterables.toArray(keys, Object.class);
    if (array.length == 0) {
      return ImmutableList.of();
    }
    int[] stripes = new int[array.length];
    for (int i = 0; i < array.length; i++) {
      stripes[i] = indexFor(array[i]);
    }
    Arrays.sort(stripes);
    // optimize for runs of identical stripes
    int previousStripe = stripes[0];
    array[0] = getAt(previousStripe);
    for (int i = 1; i < array.length; i++) {
      int currentStripe = stripes[i];
      if (currentStripe == previousStripe) {
        array[i] = array[i - 1];
      } else {
        array[i] = getAt(currentStripe);
        previousStripe = currentStripe;
      }
    }
    /*
     * Note that the returned Iterable holds references to the returned stripes, to avoid
     * error-prone code like:
     *
     * Striped<Lock> stripedLock = Striped.lazyWeakXXX(...);
     * Iterable<Lock> locks = stripedLock.bulkGet(keys);
     * for (Lock lock : locks) {
     *   lock.lock();
     * }
     * operation();
     * for (Lock lock : locks) {
     *   lock.unlock();
     * }
     *
     * If we only held the int[] stripes, translating it on the fly to L's, the original locks
     * might be garbage collected after locking them, ending up in a huge mess.
     */
    @SuppressWarnings("unchecked") // we carefully replaced all keys with their respective L's
    List<L> asList = (List<L>) Arrays.asList(array);
    return Collections.unmodifiableList(asList);
  }

  // Static factories

  /**
   * Creates a {@code Striped<L>} with eagerly initialized, strongly referenced locks. Every lock is
   * obtained from the passed supplier.
   *
   * @param stripes the minimum number of stripes (locks) required
   * @param supplier a {@code Supplier<L>} object to obtain locks from
   * @return a new {@code Striped<L>}
   */
  static <L> Striped<L> custom(int stripes, Supplier<L> supplier) {
    return new CompactStriped<>(stripes, supplier);
  }

  /**
   * Creates a {@code Striped<Lock>} with eagerly initialized, strongly referenced locks. Every lock
   * is reentrant.
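   *
   * <p>Note that the returned instance may hold more stripes than requested, since the stripe
   * count is rounded up to a power of two. A small sketch of this (behavior inferred from the
   * implementation below):
   *
   * <pre>{@code
   * Striped<Lock> locks = Striped.lock(10);
   * // locks.size() == 16, the smallest power of two >= 10
   * }</pre>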
   *
   * @param stripes the minimum number of stripes (locks) required
   * @return a new {@code Striped<Lock>}
   */
  public static Striped<Lock> lock(int stripes) {
    return custom(
        stripes,
        new Supplier<Lock>() {
          @Override
          public Lock get() {
            return new PaddedLock();
          }
        });
  }

  /**
   * Creates a {@code Striped<Lock>} with lazily initialized, weakly referenced locks. Every lock is
   * reentrant.
   *
   * @param stripes the minimum number of stripes (locks) required
   * @return a new {@code Striped<Lock>}
   */
  public static Striped<Lock> lazyWeakLock(int stripes) {
    return lazy(
        stripes,
        new Supplier<Lock>() {
          @Override
          public Lock get() {
            return new ReentrantLock(false);
          }
        });
  }

  private static <L> Striped<L> lazy(int stripes, Supplier<L> supplier) {
    return stripes < LARGE_LAZY_CUTOFF
        ? new SmallLazyStriped<L>(stripes, supplier)
        : new LargeLazyStriped<L>(stripes, supplier);
  }

  /**
   * Creates a {@code Striped<Semaphore>} with eagerly initialized, strongly referenced semaphores,
   * with the specified number of permits.
   *
   * @param stripes the minimum number of stripes (semaphores) required
   * @param permits the number of permits in each semaphore
   * @return a new {@code Striped<Semaphore>}
   */
  public static Striped<Semaphore> semaphore(int stripes, final int permits) {
    return custom(
        stripes,
        new Supplier<Semaphore>() {
          @Override
          public Semaphore get() {
            return new PaddedSemaphore(permits);
          }
        });
  }

  /**
   * Creates a {@code Striped<Semaphore>} with lazily initialized, weakly referenced semaphores,
   * with the specified number of permits.
   *
   * @param stripes the minimum number of stripes (semaphores) required
   * @param permits the number of permits in each semaphore
   * @return a new {@code Striped<Semaphore>}
   */
  public static Striped<Semaphore> lazyWeakSemaphore(int stripes, final int permits) {
    return lazy(
        stripes,
        new Supplier<Semaphore>() {
          @Override
          public Semaphore get() {
            return new Semaphore(permits, false);
          }
        });
  }

  /**
   * Creates a {@code Striped<ReadWriteLock>} with eagerly initialized, strongly referenced
   * read-write locks. Every lock is reentrant.
   *
   * @param stripes the minimum number of stripes (locks) required
   * @return a new {@code Striped<ReadWriteLock>}
   */
  public static Striped<ReadWriteLock> readWriteLock(int stripes) {
    return custom(stripes, READ_WRITE_LOCK_SUPPLIER);
  }

  /**
   * Creates a {@code Striped<ReadWriteLock>} with lazily initialized, weakly referenced read-write
   * locks. Every lock is reentrant.
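   *
   * <p>A hypothetical read-side sketch ({@code cache} and {@code key} are illustrative names):
   *
   * <pre>{@code
   * Striped<ReadWriteLock> rwLocks = Striped.lazyWeakReadWriteLock(256);
   *
   * Object value;
   * Lock readLock = rwLocks.get(key).readLock();
   * readLock.lock();
   * try {
   *   value = cache.get(key);
   * } finally {
   *   readLock.unlock();
   * }
   * }</pre>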
   *
   * @param stripes the minimum number of stripes (locks) required
   * @return a new {@code Striped<ReadWriteLock>}
   */
  public static Striped<ReadWriteLock> lazyWeakReadWriteLock(int stripes) {
    return lazy(stripes, WEAK_SAFE_READ_WRITE_LOCK_SUPPLIER);
  }

  private static final Supplier<ReadWriteLock> READ_WRITE_LOCK_SUPPLIER =
      new Supplier<ReadWriteLock>() {
        @Override
        public ReadWriteLock get() {
          return new ReentrantReadWriteLock();
        }
      };

  private static final Supplier<ReadWriteLock> WEAK_SAFE_READ_WRITE_LOCK_SUPPLIER =
      new Supplier<ReadWriteLock>() {
        @Override
        public ReadWriteLock get() {
          return new WeakSafeReadWriteLock();
        }
      };

  /**
   * ReadWriteLock implementation whose read and write locks retain a reference back to this lock.
   * Otherwise, a reference to just the read lock or just the write lock would not suffice to ensure
   * the {@code ReadWriteLock} is retained.
   */
  private static final class WeakSafeReadWriteLock implements ReadWriteLock {
    private final ReadWriteLock delegate;

    WeakSafeReadWriteLock() {
      this.delegate = new ReentrantReadWriteLock();
    }

    @Override
    public Lock readLock() {
      return new WeakSafeLock(delegate.readLock(), this);
    }

    @Override
    public Lock writeLock() {
      return new WeakSafeLock(delegate.writeLock(), this);
    }
  }

  /** Lock object that ensures a strong reference is retained to a specified object. */
  private static final class WeakSafeLock extends ForwardingLock {
    private final Lock delegate;

    @SuppressWarnings("unused")
    private final WeakSafeReadWriteLock strongReference;

    WeakSafeLock(Lock delegate, WeakSafeReadWriteLock strongReference) {
      this.delegate = delegate;
      this.strongReference = strongReference;
    }

    @Override
    Lock delegate() {
      return delegate;
    }

    @Override
    public Condition newCondition() {
      return new WeakSafeCondition(delegate.newCondition(), strongReference);
    }
  }

  /** Condition object that ensures a strong reference is retained to a specified object. */
  private static final class WeakSafeCondition extends ForwardingCondition {
    private final Condition delegate;

    @SuppressWarnings("unused")
    private final WeakSafeReadWriteLock strongReference;

    WeakSafeCondition(Condition delegate, WeakSafeReadWriteLock strongReference) {
      this.delegate = delegate;
      this.strongReference = strongReference;
    }

    @Override
    Condition delegate() {
      return delegate;
    }
  }

  private abstract static class PowerOfTwoStriped<L> extends Striped<L> {
    /** Capacity (power of two) minus one, for fast mod evaluation */
    final int mask;

    PowerOfTwoStriped(int stripes) {
      Preconditions.checkArgument(stripes > 0, "Stripes must be positive");
      this.mask = stripes > Ints.MAX_POWER_OF_TWO ? ALL_SET : ceilToPowerOfTwo(stripes) - 1;
    }

    @Override
    final int indexFor(Object key) {
      int hash = smear(key.hashCode());
      return hash & mask;
    }

    @Override
    public final L get(Object key) {
      return getAt(indexFor(key));
    }
  }

  /**
   * Implementation of Striped where 2^k stripes are represented as an array of the same length,
   * eagerly initialized.
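   *
   * <p>For example, requesting 10 stripes yields an array of length 16 (the next power of two),
   * and {@code indexFor} picks a slot by masking the smeared hash: {@code smear(key.hashCode()) &
   * 15}.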
   */
  private static class CompactStriped<L> extends PowerOfTwoStriped<L> {
    /** Size is a power of two. */
    private final Object[] array;

    private CompactStriped(int stripes, Supplier<L> supplier) {
      super(stripes);
      Preconditions.checkArgument(stripes <= Ints.MAX_POWER_OF_TWO, "Stripes must be <= 2^30");

      this.array = new Object[mask + 1];
      for (int i = 0; i < array.length; i++) {
        array[i] = supplier.get();
      }
    }

    @SuppressWarnings("unchecked") // we only put L's in the array
    @Override
    public L getAt(int index) {
      return (L) array[index];
    }

    @Override
    public int size() {
      return array.length;
    }
  }

  /**
   * Implementation of Striped where up to 2^k stripes can be represented, using an
   * AtomicReferenceArray of size 2^k. To map a user key into a stripe, we take a k-bit slice of the
   * user key's (smeared) hashCode(). The stripes are lazily initialized and are weakly referenced.
   */
  @VisibleForTesting
  static class SmallLazyStriped<L> extends PowerOfTwoStriped<L> {
    final AtomicReferenceArray<@Nullable ArrayReference<? extends L>> locks;
    final Supplier<L> supplier;
    final int size;
    final ReferenceQueue<L> queue = new ReferenceQueue<L>();

    SmallLazyStriped(int stripes, Supplier<L> supplier) {
      super(stripes);
      this.size = (mask == ALL_SET) ? Integer.MAX_VALUE : mask + 1;
      this.locks = new AtomicReferenceArray<>(size);
      this.supplier = supplier;
    }

    @Override
    public L getAt(int index) {
      if (size != Integer.MAX_VALUE) {
        Preconditions.checkElementIndex(index, size());
      } // else no check necessary, all index values are valid
      ArrayReference<? extends L> existingRef = locks.get(index);
      L existing = existingRef == null ? null : existingRef.get();
      if (existing != null) {
        return existing;
      }
      L created = supplier.get();
      ArrayReference<L> newRef = new ArrayReference<L>(created, index, queue);
      while (!locks.compareAndSet(index, existingRef, newRef)) {
        // we raced, we need to re-read and try again
        existingRef = locks.get(index);
        existing = existingRef == null ? null : existingRef.get();
        if (existing != null) {
          return existing;
        }
      }
      drainQueue();
      return created;
    }

    // N.B. Draining the queue is only necessary to ensure that we don't accumulate empty references
    // in the array. We could skip this if we decide we don't care about holding on to Reference
    // objects indefinitely.
    private void drainQueue() {
      Reference<? extends L> ref;
      while ((ref = queue.poll()) != null) {
        // We only ever register ArrayReferences with the queue so this is always safe.
        ArrayReference<? extends L> arrayRef = (ArrayReference<? extends L>) ref;
        // Try to clear out the array slot, n.b. if we fail that is fine, in either case the
        // arrayRef will be out of the array after this step.
        locks.compareAndSet(arrayRef.index, arrayRef, null);
      }
    }

    @Override
    public int size() {
      return size;
    }

    private static final class ArrayReference<L> extends WeakReference<L> {
      final int index;

      ArrayReference(L referent, int index, ReferenceQueue<L> queue) {
        super(referent, queue);
        this.index = index;
      }
    }
  }

  /**
   * Implementation of Striped where up to 2^k stripes can be represented, using a ConcurrentMap
   * where the key domain is [0..2^k). To map a user key into a stripe, we take a k-bit slice of the
   * user key's (smeared) hashCode(). The stripes are lazily initialized and are weakly referenced.
   */
  @VisibleForTesting
  static class LargeLazyStriped<L> extends PowerOfTwoStriped<L> {
    final ConcurrentMap<Integer, L> locks;
    final Supplier<L> supplier;
    final int size;

    LargeLazyStriped(int stripes, Supplier<L> supplier) {
      super(stripes);
      this.size = (mask == ALL_SET) ? Integer.MAX_VALUE : mask + 1;
      this.supplier = supplier;
      this.locks = new MapMaker().weakValues().makeMap();
    }

    @Override
    public L getAt(int index) {
      if (size != Integer.MAX_VALUE) {
        Preconditions.checkElementIndex(index, size());
      } // else no check necessary, all index values are valid
      L existing = locks.get(index);
      if (existing != null) {
        return existing;
      }
      L created = supplier.get();
      existing = locks.putIfAbsent(index, created);
      return MoreObjects.firstNonNull(existing, created);
    }

    @Override
    public int size() {
      return size;
    }
  }

  /** A bit mask where all bits are set. */
  private static final int ALL_SET = ~0;

  private static int ceilToPowerOfTwo(int x) {
    return 1 << IntMath.log2(x, RoundingMode.CEILING);
  }

  /*
   * This method was written by Doug Lea with assistance from members of JCP JSR-166 Expert Group
   * and released to the public domain, as explained at
   * http://creativecommons.org/licenses/publicdomain
   *
   * As of 2010/06/11, this method is identical to the (package private) hash method in OpenJDK 7's
   * java.util.HashMap class.
   */
  // Copied from java/com/google/common/collect/Hashing.java
  private static int smear(int hashCode) {
    hashCode ^= (hashCode >>> 20) ^ (hashCode >>> 12);
    return hashCode ^ (hashCode >>> 7) ^ (hashCode >>> 4);
  }

  private static class PaddedLock extends ReentrantLock {
    /*
     * Padding from 40 into 64 bytes, same size as cache line. Might be beneficial to add a fourth
     * long here, to minimize chance of interference between consecutive locks, but I couldn't
     * observe any benefit from that.
     */
    long unused1;
    long unused2;
    long unused3;

    PaddedLock() {
      super(false);
    }
  }

  private static class PaddedSemaphore extends Semaphore {
    // See the PaddedLock comment above.
    long unused1;
    long unused2;
    long unused3;

    PaddedSemaphore(int permits) {
      super(permits, false);
    }
  }
}