Coverage Summary for Class: LocalCache (com.google.common.cache)
| Class | Method, % | Line, % |
|---|---|---|
| LocalCache | 0% (0/76) | 0% (0/346) |
| LocalCache$1 | 0% (0/8) | 0% (0/8) |
| LocalCache$2 | 0% (0/6) | 0% (0/6) |
| LocalCache$AbstractCacheSet | 0% (0/6) | 0% (0/6) |
| LocalCache$AbstractReferenceEntry | 0% (0/18) | 0% (0/18) |
| LocalCache$AccessQueue | 0% (0/10) | 0% (0/37) |
| LocalCache$AccessQueue$1 | 0% (0/6) | 0% (0/8) |
| LocalCache$AccessQueue$2 | 0% (0/2) | 0% (0/3) |
| LocalCache$ComputingValueReference | 0% (0/2) | 0% (0/2) |
| LocalCache$EntryFactory | 0% (0/5) | 0% (0/23) |
| LocalCache$EntryFactory$1 | 0% (0/2) | 0% (0/2) |
| LocalCache$EntryFactory$2 | 0% (0/3) | 0% (0/5) |
| LocalCache$EntryFactory$3 | 0% (0/3) | 0% (0/5) |
| LocalCache$EntryFactory$4 | 0% (0/3) | 0% (0/6) |
| LocalCache$EntryFactory$5 | 0% (0/2) | 0% (0/2) |
| LocalCache$EntryFactory$6 | 0% (0/3) | 0% (0/5) |
| LocalCache$EntryFactory$7 | 0% (0/3) | 0% (0/5) |
| LocalCache$EntryFactory$8 | 0% (0/3) | 0% (0/6) |
| LocalCache$EntryIterator | 0% (0/2) | 0% (0/2) |
| LocalCache$EntrySet | 0% (0/6) | 0% (0/17) |
| LocalCache$HashIterator | 0% (0/8) | 0% (0/41) |
| LocalCache$KeyIterator | 0% (0/2) | 0% (0/2) |
| LocalCache$KeySet | 0% (0/4) | 0% (0/4) |
| LocalCache$LoadingSerializationProxy | 0% (0/8) | 0% (0/10) |
| LocalCache$LoadingValueReference | 0% (0/17) | 0% (0/47) |
| LocalCache$LoadingValueReference$1 | 0% (0/2) | 0% (0/3) |
| LocalCache$LocalLoadingCache | 0% (0/7) | 0% (0/9) |
| LocalCache$LocalManualCache | 0% (0/16) | 0% (0/23) |
| LocalCache$LocalManualCache$1 | 0% (0/2) | 0% (0/2) |
| LocalCache$ManualSerializationProxy | 0% (0/6) | 0% (0/40) |
| LocalCache$NullEntry | 0% (0/11) | 0% (0/12) |
| LocalCache$Segment | 0% (0/60) | 0% (0/768) |
| LocalCache$Segment$1 | 0% (0/2) | 0% (0/6) |
| LocalCache$SoftValueReference | 0% (0/7) | 0% (0/8) |
| LocalCache$Strength | 0% (0/1) | 0% (0/4) |
| LocalCache$Strength$1 | 0% (0/3) | 0% (0/5) |
| LocalCache$Strength$2 | 0% (0/3) | 0% (0/5) |
| LocalCache$Strength$3 | 0% (0/3) | 0% (0/5) |
| LocalCache$StrongAccessEntry | 0% (0/7) | 0% (0/10) |
| LocalCache$StrongAccessWriteEntry | 0% (0/13) | 0% (0/19) |
| LocalCache$StrongEntry | 0% (0/6) | 0% (0/10) |
| LocalCache$StrongValueReference | 0% (0/8) | 0% (0/9) |
| LocalCache$StrongWriteEntry | 0% (0/7) | 0% (0/10) |
| LocalCache$ValueIterator | 0% (0/2) | 0% (0/2) |
| LocalCache$Values | 0% (0/10) | 0% (0/10) |
| LocalCache$WeakAccessEntry | 0% (0/7) | 0% (0/10) |
| LocalCache$WeakAccessWriteEntry | 0% (0/13) | 0% (0/19) |
| LocalCache$WeakEntry | 0% (0/18) | 0% (0/21) |
| LocalCache$WeakValueReference | 0% (0/7) | 0% (0/8) |
| LocalCache$WeakWriteEntry | 0% (0/7) | 0% (0/10) |
| LocalCache$WeightedSoftValueReference | 0% (0/3) | 0% (0/4) |
| LocalCache$WeightedStrongValueReference | 0% (0/2) | 0% (0/3) |
| LocalCache$WeightedWeakValueReference | 0% (0/3) | 0% (0/4) |
| LocalCache$WriteQueue | 0% (0/10) | 0% (0/37) |
| LocalCache$WriteQueue$1 | 0% (0/6) | 0% (0/8) |
| LocalCache$WriteQueue$2 | 0% (0/2) | 0% (0/3) |
| LocalCache$WriteThroughEntry | 0% (0/7) | 0% (0/14) |
| Total | 0% (0/469) | 0% (0/1717) |
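Every method and line of LocalCache and its inner classes is reported as uncovered. Because LocalCache is package-private, it is normally reached only through the public CacheBuilder API; the sketch below is illustrative (the class name, key/value types, and builder settings are not part of the report) and shows the kind of call sequence that would drive coverage into LocalCache, LocalCache$Segment, and LocalCache$LocalLoadingCache.

```java
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.concurrent.TimeUnit;

public class LocalCacheSmokeTest {
  public static void main(String[] args) {
    // LocalCache itself is package-private; CacheBuilder.build(...) constructs it.
    LoadingCache<String, Integer> cache =
        CacheBuilder.newBuilder()
            .maximumSize(100)                       // exercises size-based eviction paths
            .expireAfterAccess(1, TimeUnit.MINUTES) // exercises access-queue / expiry paths
            .recordStats()
            .build(
                new CacheLoader<String, Integer>() {
                  @Override
                  public Integer load(String key) {
                    return key.length();
                  }
                });

    cache.getUnchecked("hello"); // miss, triggers a load
    cache.getUnchecked("hello"); // hit, served from the segment table
    System.out.println(cache.stats()); // hitCount=1, missCount=1 after the two lookups
  }
}
```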
```java
/*
 * Copyright (C) 2009 The Guava Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */

package com.google.common.cache;

import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.cache.CacheBuilder.NULL_TICKER;
import static com.google.common.cache.CacheBuilder.UNSET_INT;
import static com.google.common.util.concurrent.Futures.transform;
import static com.google.common.util.concurrent.MoreExecutors.directExecutor;
import static com.google.common.util.concurrent.Uninterruptibles.getUninterruptibly;
import static java.util.concurrent.TimeUnit.NANOSECONDS;

import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Equivalence;
import com.google.common.base.Stopwatch;
import com.google.common.base.Ticker;
import com.google.common.cache.AbstractCache.SimpleStatsCounter;
import com.google.common.cache.AbstractCache.StatsCounter;
import com.google.common.cache.CacheBuilder.NullListener;
import com.google.common.cache.CacheBuilder.OneWeigher;
import com.google.common.cache.CacheLoader.InvalidCacheLoadException;
import com.google.common.cache.CacheLoader.UnsupportedLoadingOperationException;
import com.google.common.cache.LocalCache.AbstractCacheSet;
import com.google.common.collect.AbstractSequentialIterator;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.primitives.Ints;
import com.google.common.util.concurrent.ExecutionError;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.UncheckedExecutionException;
import com.google.common.util.concurrent.Uninterruptibles;
import com.google.errorprone.annotations.concurrent.GuardedBy;
import com.google.j2objc.annotations.RetainedWith;
import com.google.j2objc.annotations.Weak;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;
import java.util.AbstractCollection;
import java.util.AbstractMap;
import java.util.AbstractQueue;
import java.util.AbstractSet;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiFunction;
import java.util.function.BiPredicate;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * The concurrent hash map implementation built by {@link CacheBuilder}.
 *
 * <p>This implementation is heavily derived from revision 1.96 of <a
 * href="http://tinyurl.com/ConcurrentHashMap">ConcurrentHashMap.java</a>.
 *
 * @author Charles Fry
 * @author Bob Lee ({@code com.google.common.collect.MapMaker})
 * @author Doug Lea ({@code ConcurrentHashMap})
 */
@SuppressWarnings("GoodTime") // lots of violations (nanosecond math)
@GwtCompatible(emulated = true)
class LocalCache<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V> {

  /*
   * The basic strategy is to subdivide the table among Segments, each of which itself is a
   * concurrently readable hash table. The map supports non-blocking reads and concurrent writes
   * across different segments.
   *
   * If a maximum size is specified, a best-effort bounding is performed per segment, using a
   * page-replacement algorithm to determine which entries to evict when the capacity has been
   * exceeded.
   *
   * The page replacement algorithm's data structures are kept casually consistent with the map. The
   * ordering of writes to a segment is sequentially consistent. An update to the map and recording
   * of reads may not be immediately reflected on the algorithm's data structures. These structures
   * are guarded by a lock and operations are applied in batches to avoid lock contention. The
   * penalty of applying the batches is spread across threads so that the amortized cost is slightly
   * higher than performing just the operation without enforcing the capacity constraint.
   *
   * This implementation uses a per-segment queue to record a memento of the additions, removals,
   * and accesses that were performed on the map. The queue is drained on writes and when it exceeds
   * its capacity threshold.
   *
   * The Least Recently Used page replacement algorithm was chosen due to its simplicity, high hit
   * rate, and ability to be implemented with O(1) time complexity. The initial LRU implementation
   * operates per-segment rather than globally for increased implementation simplicity. We expect
   * the cache hit rate to be similar to that of a global LRU algorithm.
   */

  // Constants

  /**
   * The maximum capacity, used if a higher value is implicitly specified by either of the
   * constructors with arguments. MUST be a power of two {@code <= 1<<30} to ensure that entries are
   * indexable using ints.
   */
  static final int MAXIMUM_CAPACITY = 1 << 30;

  /** The maximum number of segments to allow; used to bound constructor arguments. */
  static final int MAX_SEGMENTS = 1 << 16; // slightly conservative

  /** Number of (unsynchronized) retries in the containsValue method. */
  static final int CONTAINS_VALUE_RETRIES = 3;

  /**
   * Number of cache access operations that can be buffered per segment before the cache's recency
   * ordering information is updated. This is used to avoid lock contention by recording a memento
   * of reads and delaying a lock acquisition until the threshold is crossed or a mutation occurs.
   *
   * <p>This must be a (2^n)-1 as it is used as a mask.
   */
  static final int DRAIN_THRESHOLD = 0x3F;

  /**
   * Maximum number of entries to be drained in a single cleanup run. This applies independently to
   * the cleanup queue and both reference queues.
   */
  // TODO(fry): empirically optimize this
  static final int DRAIN_MAX = 16;

  // Fields

  static final Logger logger = Logger.getLogger(LocalCache.class.getName());

  /**
   * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose
   * the segment.
   */
  final int segmentMask;

  /**
   * Shift value for indexing within segments. Helps prevent entries that end up in the same segment
   * from also ending up in the same bucket.
   */
  final int segmentShift;

  /** The segments, each of which is a specialized hash table. */
  final Segment<K, V>[] segments;

  /** The concurrency level. */
  final int concurrencyLevel;

  /** Strategy for comparing keys. */
  final Equivalence<Object> keyEquivalence;

  /** Strategy for comparing values. */
  final Equivalence<Object> valueEquivalence;

  /** Strategy for referencing keys. */
  final Strength keyStrength;

  /** Strategy for referencing values. */
  final Strength valueStrength;

  /** The maximum weight of this map. UNSET_INT if there is no maximum. */
  final long maxWeight;

  /** Weigher to weigh cache entries. */
  final Weigher<K, V> weigher;

  /** How long after the last access to an entry the map will retain that entry. */
  final long expireAfterAccessNanos;

  /** How long after the last write to an entry the map will retain that entry. */
  final long expireAfterWriteNanos;

  /** How long after the last write an entry becomes a candidate for refresh. */
  final long refreshNanos;

  /** Entries waiting to be consumed by the removal listener. */
  // TODO(fry): define a new type which creates event objects and automates the clear logic
  final Queue<RemovalNotification<K, V>> removalNotificationQueue;

  /**
   * A listener that is invoked when an entry is removed due to expiration or garbage collection of
   * soft/weak entries.
   */
  final RemovalListener<K, V> removalListener;

  /** Measures time in a testable way. */
  final Ticker ticker;

  /** Factory used to create new entries. */
  final EntryFactory entryFactory;

  /**
   * Accumulates global cache statistics. Note that there are also per-segments stats counters which
   * must be aggregated to obtain a global stats view.
   */
  final StatsCounter globalStatsCounter;

  /** The default cache loader to use on loading operations. */
  final @Nullable CacheLoader<? super K, V> defaultLoader;

  /**
   * Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
   */
  LocalCache(
      CacheBuilder<? super K, ? super V> builder, @Nullable CacheLoader<? super K, V> loader) {
    concurrencyLevel = Math.min(builder.getConcurrencyLevel(), MAX_SEGMENTS);

    keyStrength = builder.getKeyStrength();
    valueStrength = builder.getValueStrength();

    keyEquivalence = builder.getKeyEquivalence();
    valueEquivalence = builder.getValueEquivalence();

    maxWeight = builder.getMaximumWeight();
    weigher = builder.getWeigher();
    expireAfterAccessNanos = builder.getExpireAfterAccessNanos();
    expireAfterWriteNanos = builder.getExpireAfterWriteNanos();
    refreshNanos = builder.getRefreshNanos();

    removalListener = builder.getRemovalListener();
    removalNotificationQueue =
        (removalListener == NullListener.INSTANCE)
            ? LocalCache.<RemovalNotification<K, V>>discardingQueue()
            : new ConcurrentLinkedQueue<RemovalNotification<K, V>>();

    ticker = builder.getTicker(recordsTime());
    entryFactory = EntryFactory.getFactory(keyStrength, usesAccessEntries(), usesWriteEntries());
    globalStatsCounter = builder.getStatsCounterSupplier().get();
    defaultLoader = loader;

    int initialCapacity = Math.min(builder.getInitialCapacity(), MAXIMUM_CAPACITY);
    if (evictsBySize() && !customWeigher()) {
      initialCapacity = (int) Math.min(initialCapacity, maxWeight);
    }

    // Find the lowest power-of-two segmentCount that exceeds concurrencyLevel, unless
    // maximumSize/Weight is specified in which case ensure that each segment gets at least 10
    // entries. The special casing for size-based eviction is only necessary because that eviction
    // happens per segment instead of globally, so too many segments compared to the maximum size
    // will result in random eviction behavior.
    int segmentShift = 0;
    int segmentCount = 1;
    while (segmentCount < concurrencyLevel && (!evictsBySize() || segmentCount * 20 <= maxWeight)) {
      ++segmentShift;
      segmentCount <<= 1;
    }
    this.segmentShift = 32 - segmentShift;
    segmentMask = segmentCount - 1;

    this.segments = newSegmentArray(segmentCount);

    int segmentCapacity = initialCapacity / segmentCount;
    if (segmentCapacity * segmentCount < initialCapacity) {
      ++segmentCapacity;
    }

    int segmentSize = 1;
    while (segmentSize < segmentCapacity) {
      segmentSize <<= 1;
    }

    if (evictsBySize()) {
      // Ensure sum of segment max weights = overall max weights
      long maxSegmentWeight = maxWeight / segmentCount + 1;
      long remainder = maxWeight % segmentCount;
      for (int i = 0; i < this.segments.length; ++i) {
        if (i == remainder) {
          maxSegmentWeight--;
        }
        this.segments[i] =
            createSegment(segmentSize, maxSegmentWeight, builder.getStatsCounterSupplier().get());
      }
    } else {
      for (int i = 0; i < this.segments.length; ++i) {
        this.segments[i] =
            createSegment(segmentSize, UNSET_INT, builder.getStatsCounterSupplier().get());
      }
    }
  }

  boolean evictsBySize() {
    return maxWeight >= 0;
  }

  boolean customWeigher() {
    return weigher != OneWeigher.INSTANCE;
  }

  boolean expires() {
    return expiresAfterWrite() || expiresAfterAccess();
  }

  boolean expiresAfterWrite() {
    return expireAfterWriteNanos > 0;
  }

  boolean expiresAfterAccess() {
    return expireAfterAccessNanos > 0;
  }

  boolean refreshes() {
    return refreshNanos > 0;
  }

  boolean usesAccessQueue() {
    return expiresAfterAccess() || evictsBySize();
  }

  boolean usesWriteQueue() {
    return expiresAfterWrite();
  }
```
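The constructor's segment sizing is easiest to see with concrete numbers. The standalone sketch below is not part of LocalCache; it only mirrors the arithmetic in the constructor above, assuming the CacheBuilder defaults of concurrencyLevel = 4 and initialCapacity = 16 with no maximum weight configured. The class name and printout are illustrative only.

```java
public class SegmentSizingExample {
  public static void main(String[] args) {
    int concurrencyLevel = 4;               // CacheBuilder default
    int initialCapacity = 16;               // CacheBuilder default
    long maxWeight = -1L;                   // UNSET_INT: no maximumSize/maximumWeight
    boolean evictsBySize = maxWeight >= 0;  // false

    // Lowest power-of-two segmentCount that reaches concurrencyLevel.
    int segmentShift = 0;
    int segmentCount = 1;
    while (segmentCount < concurrencyLevel && (!evictsBySize || segmentCount * 20 <= maxWeight)) {
      ++segmentShift;
      segmentCount <<= 1;
    }
    // segmentCount = 4; the cache stores this.segmentShift = 32 - segmentShift = 30 and
    // segmentMask = segmentCount - 1 = 3, so the top two hash bits pick the segment.

    int segmentCapacity = initialCapacity / segmentCount; // 16 / 4 = 4
    if (segmentCapacity * segmentCount < initialCapacity) {
      ++segmentCapacity;                                  // round up if the division truncated
    }
    int segmentSize = 1;
    while (segmentSize < segmentCapacity) {
      segmentSize <<= 1;                                  // round up to a power of two: 4
    }

    System.out.println(segmentCount + " segments, each with an initial table size of " + segmentSize);
  }
}
```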
boolean recordsWrite() { 348 return expiresAfterWrite() || refreshes(); 349 } 350 351 boolean recordsAccess() { 352 return expiresAfterAccess(); 353 } 354 355 boolean recordsTime() { 356 return recordsWrite() || recordsAccess(); 357 } 358 359 boolean usesWriteEntries() { 360 return usesWriteQueue() || recordsWrite(); 361 } 362 363 boolean usesAccessEntries() { 364 return usesAccessQueue() || recordsAccess(); 365 } 366 367 boolean usesKeyReferences() { 368 return keyStrength != Strength.STRONG; 369 } 370 371 boolean usesValueReferences() { 372 return valueStrength != Strength.STRONG; 373 } 374 375 enum Strength { 376 /* 377 * TODO(kevinb): If we strongly reference the value and aren't loading, we needn't wrap the 378 * value. This could save ~8 bytes per entry. 379 */ 380 381 STRONG { 382 @Override 383 <K, V> ValueReference<K, V> referenceValue( 384 Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) { 385 return (weight == 1) 386 ? new StrongValueReference<K, V>(value) 387 : new WeightedStrongValueReference<K, V>(value, weight); 388 } 389 390 @Override 391 Equivalence<Object> defaultEquivalence() { 392 return Equivalence.equals(); 393 } 394 }, 395 SOFT { 396 @Override 397 <K, V> ValueReference<K, V> referenceValue( 398 Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) { 399 return (weight == 1) 400 ? new SoftValueReference<K, V>(segment.valueReferenceQueue, value, entry) 401 : new WeightedSoftValueReference<K, V>( 402 segment.valueReferenceQueue, value, entry, weight); 403 } 404 405 @Override 406 Equivalence<Object> defaultEquivalence() { 407 return Equivalence.identity(); 408 } 409 }, 410 WEAK { 411 @Override 412 <K, V> ValueReference<K, V> referenceValue( 413 Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) { 414 return (weight == 1) 415 ? new WeakValueReference<K, V>(segment.valueReferenceQueue, value, entry) 416 : new WeightedWeakValueReference<K, V>( 417 segment.valueReferenceQueue, value, entry, weight); 418 } 419 420 @Override 421 Equivalence<Object> defaultEquivalence() { 422 return Equivalence.identity(); 423 } 424 }; 425 426 /** Creates a reference for the given value according to this value strength. */ 427 abstract <K, V> ValueReference<K, V> referenceValue( 428 Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight); 429 430 /** 431 * Returns the default equivalence strategy used to compare and hash keys or values referenced 432 * at this strength. This strategy will be used unless the user explicitly specifies an 433 * alternate strategy. 434 */ 435 abstract Equivalence<Object> defaultEquivalence(); 436 } 437 438 /** Creates new entries. 
*/ 439 enum EntryFactory { 440 STRONG { 441 @Override 442 <K, V> ReferenceEntry<K, V> newEntry( 443 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 444 return new StrongEntry<>(key, hash, next); 445 } 446 }, 447 STRONG_ACCESS { 448 @Override 449 <K, V> ReferenceEntry<K, V> newEntry( 450 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 451 return new StrongAccessEntry<>(key, hash, next); 452 } 453 454 @Override 455 <K, V> ReferenceEntry<K, V> copyEntry( 456 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 457 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 458 copyAccessEntry(original, newEntry); 459 return newEntry; 460 } 461 }, 462 STRONG_WRITE { 463 @Override 464 <K, V> ReferenceEntry<K, V> newEntry( 465 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 466 return new StrongWriteEntry<>(key, hash, next); 467 } 468 469 @Override 470 <K, V> ReferenceEntry<K, V> copyEntry( 471 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 472 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 473 copyWriteEntry(original, newEntry); 474 return newEntry; 475 } 476 }, 477 STRONG_ACCESS_WRITE { 478 @Override 479 <K, V> ReferenceEntry<K, V> newEntry( 480 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 481 return new StrongAccessWriteEntry<>(key, hash, next); 482 } 483 484 @Override 485 <K, V> ReferenceEntry<K, V> copyEntry( 486 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 487 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 488 copyAccessEntry(original, newEntry); 489 copyWriteEntry(original, newEntry); 490 return newEntry; 491 } 492 }, 493 WEAK { 494 @Override 495 <K, V> ReferenceEntry<K, V> newEntry( 496 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 497 return new WeakEntry<>(segment.keyReferenceQueue, key, hash, next); 498 } 499 }, 500 WEAK_ACCESS { 501 @Override 502 <K, V> ReferenceEntry<K, V> newEntry( 503 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 504 return new WeakAccessEntry<>(segment.keyReferenceQueue, key, hash, next); 505 } 506 507 @Override 508 <K, V> ReferenceEntry<K, V> copyEntry( 509 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 510 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 511 copyAccessEntry(original, newEntry); 512 return newEntry; 513 } 514 }, 515 WEAK_WRITE { 516 @Override 517 <K, V> ReferenceEntry<K, V> newEntry( 518 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 519 return new WeakWriteEntry<>(segment.keyReferenceQueue, key, hash, next); 520 } 521 522 @Override 523 <K, V> ReferenceEntry<K, V> copyEntry( 524 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 525 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 526 copyWriteEntry(original, newEntry); 527 return newEntry; 528 } 529 }, 530 WEAK_ACCESS_WRITE { 531 @Override 532 <K, V> ReferenceEntry<K, V> newEntry( 533 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 534 return new WeakAccessWriteEntry<>(segment.keyReferenceQueue, key, hash, next); 535 } 536 537 @Override 538 <K, V> ReferenceEntry<K, V> copyEntry( 539 Segment<K, V> segment, ReferenceEntry<K, V> 
original, ReferenceEntry<K, V> newNext) { 540 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 541 copyAccessEntry(original, newEntry); 542 copyWriteEntry(original, newEntry); 543 return newEntry; 544 } 545 }; 546 547 // Masks used to compute indices in the following table. 548 549 static final int ACCESS_MASK = 1; 550 static final int WRITE_MASK = 2; 551 static final int WEAK_MASK = 4; 552 553 /** Look-up table for factories. */ 554 static final EntryFactory[] factories = { 555 STRONG, 556 STRONG_ACCESS, 557 STRONG_WRITE, 558 STRONG_ACCESS_WRITE, 559 WEAK, 560 WEAK_ACCESS, 561 WEAK_WRITE, 562 WEAK_ACCESS_WRITE, 563 }; 564 565 static EntryFactory getFactory( 566 Strength keyStrength, boolean usesAccessQueue, boolean usesWriteQueue) { 567 int flags = 568 ((keyStrength == Strength.WEAK) ? WEAK_MASK : 0) 569 | (usesAccessQueue ? ACCESS_MASK : 0) 570 | (usesWriteQueue ? WRITE_MASK : 0); 571 return factories[flags]; 572 } 573 574 /** 575 * Creates a new entry. 576 * 577 * @param segment to create the entry for 578 * @param key of the entry 579 * @param hash of the key 580 * @param next entry in the same bucket 581 */ 582 abstract <K, V> ReferenceEntry<K, V> newEntry( 583 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next); 584 585 /** 586 * Copies an entry, assigning it a new {@code next} entry. 587 * 588 * @param original the entry to copy 589 * @param newNext entry in the same bucket 590 */ 591 // Guarded By Segment.this 592 <K, V> ReferenceEntry<K, V> copyEntry( 593 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 594 return newEntry(segment, original.getKey(), original.getHash(), newNext); 595 } 596 597 // Guarded By Segment.this 598 <K, V> void copyAccessEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) { 599 // TODO(fry): when we link values instead of entries this method can go 600 // away, as can connectAccessOrder, nullifyAccessOrder. 601 newEntry.setAccessTime(original.getAccessTime()); 602 603 connectAccessOrder(original.getPreviousInAccessQueue(), newEntry); 604 connectAccessOrder(newEntry, original.getNextInAccessQueue()); 605 606 nullifyAccessOrder(original); 607 } 608 609 // Guarded By Segment.this 610 <K, V> void copyWriteEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) { 611 // TODO(fry): when we link values instead of entries this method can go 612 // away, as can connectWriteOrder, nullifyWriteOrder. 613 newEntry.setWriteTime(original.getWriteTime()); 614 615 connectWriteOrder(original.getPreviousInWriteQueue(), newEntry); 616 connectWriteOrder(newEntry, original.getNextInWriteQueue()); 617 618 nullifyWriteOrder(original); 619 } 620 } 621 622 /** A reference to a value. */ 623 interface ValueReference<K, V> { 624 /** Returns the value. Does not block or throw exceptions. */ 625 @Nullable 626 V get(); 627 628 /** 629 * Waits for a value that may still be loading. Unlike get(), this method can block (in the case 630 * of FutureValueReference). 631 * 632 * @throws ExecutionException if the loading thread throws an exception 633 * @throws ExecutionError if the loading thread throws an error 634 */ 635 V waitForValue() throws ExecutionException; 636 637 /** Returns the weight of this entry. This is assumed to be static between calls to setValue. */ 638 int getWeight(); 639 640 /** 641 * Returns the entry associated with this value reference, or {@code null} if this value 642 * reference is independent of any entry. 
643 */ 644 @Nullable 645 ReferenceEntry<K, V> getEntry(); 646 647 /** 648 * Creates a copy of this reference for the given entry. 649 * 650 * <p>{@code value} may be null only for a loading reference. 651 */ 652 ValueReference<K, V> copyFor( 653 ReferenceQueue<V> queue, @Nullable V value, ReferenceEntry<K, V> entry); 654 655 /** 656 * Notify pending loads that a new value was set. This is only relevant to loading value 657 * references. 658 */ 659 void notifyNewValue(@Nullable V newValue); 660 661 /** 662 * Returns true if a new value is currently loading, regardless of whether or not there is an 663 * existing value. It is assumed that the return value of this method is constant for any given 664 * ValueReference instance. 665 */ 666 boolean isLoading(); 667 668 /** 669 * Returns true if this reference contains an active value, meaning one that is still considered 670 * present in the cache. Active values consist of live values, which are returned by cache 671 * lookups, and dead values, which have been evicted but awaiting removal. Non-active values 672 * consist strictly of loading values, though during refresh a value may be both active and 673 * loading. 674 */ 675 boolean isActive(); 676 } 677 678 /** Placeholder. Indicates that the value hasn't been set yet. */ 679 static final ValueReference<Object, Object> UNSET = 680 new ValueReference<Object, Object>() { 681 @Override 682 public Object get() { 683 return null; 684 } 685 686 @Override 687 public int getWeight() { 688 return 0; 689 } 690 691 @Override 692 public ReferenceEntry<Object, Object> getEntry() { 693 return null; 694 } 695 696 @Override 697 public ValueReference<Object, Object> copyFor( 698 ReferenceQueue<Object> queue, 699 @Nullable Object value, 700 ReferenceEntry<Object, Object> entry) { 701 return this; 702 } 703 704 @Override 705 public boolean isLoading() { 706 return false; 707 } 708 709 @Override 710 public boolean isActive() { 711 return false; 712 } 713 714 @Override 715 public Object waitForValue() { 716 return null; 717 } 718 719 @Override 720 public void notifyNewValue(Object newValue) {} 721 }; 722 723 /** Singleton placeholder that indicates a value is being loaded. 
*/ 724 @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value 725 static <K, V> ValueReference<K, V> unset() { 726 return (ValueReference<K, V>) UNSET; 727 } 728 729 private enum NullEntry implements ReferenceEntry<Object, Object> { 730 INSTANCE; 731 732 @Override 733 public ValueReference<Object, Object> getValueReference() { 734 return null; 735 } 736 737 @Override 738 public void setValueReference(ValueReference<Object, Object> valueReference) {} 739 740 @Override 741 public ReferenceEntry<Object, Object> getNext() { 742 return null; 743 } 744 745 @Override 746 public int getHash() { 747 return 0; 748 } 749 750 @Override 751 public Object getKey() { 752 return null; 753 } 754 755 @Override 756 public long getAccessTime() { 757 return 0; 758 } 759 760 @Override 761 public void setAccessTime(long time) {} 762 763 @Override 764 public ReferenceEntry<Object, Object> getNextInAccessQueue() { 765 return this; 766 } 767 768 @Override 769 public void setNextInAccessQueue(ReferenceEntry<Object, Object> next) {} 770 771 @Override 772 public ReferenceEntry<Object, Object> getPreviousInAccessQueue() { 773 return this; 774 } 775 776 @Override 777 public void setPreviousInAccessQueue(ReferenceEntry<Object, Object> previous) {} 778 779 @Override 780 public long getWriteTime() { 781 return 0; 782 } 783 784 @Override 785 public void setWriteTime(long time) {} 786 787 @Override 788 public ReferenceEntry<Object, Object> getNextInWriteQueue() { 789 return this; 790 } 791 792 @Override 793 public void setNextInWriteQueue(ReferenceEntry<Object, Object> next) {} 794 795 @Override 796 public ReferenceEntry<Object, Object> getPreviousInWriteQueue() { 797 return this; 798 } 799 800 @Override 801 public void setPreviousInWriteQueue(ReferenceEntry<Object, Object> previous) {} 802 } 803 804 abstract static class AbstractReferenceEntry<K, V> implements ReferenceEntry<K, V> { 805 @Override 806 public ValueReference<K, V> getValueReference() { 807 throw new UnsupportedOperationException(); 808 } 809 810 @Override 811 public void setValueReference(ValueReference<K, V> valueReference) { 812 throw new UnsupportedOperationException(); 813 } 814 815 @Override 816 public ReferenceEntry<K, V> getNext() { 817 throw new UnsupportedOperationException(); 818 } 819 820 @Override 821 public int getHash() { 822 throw new UnsupportedOperationException(); 823 } 824 825 @Override 826 public K getKey() { 827 throw new UnsupportedOperationException(); 828 } 829 830 @Override 831 public long getAccessTime() { 832 throw new UnsupportedOperationException(); 833 } 834 835 @Override 836 public void setAccessTime(long time) { 837 throw new UnsupportedOperationException(); 838 } 839 840 @Override 841 public ReferenceEntry<K, V> getNextInAccessQueue() { 842 throw new UnsupportedOperationException(); 843 } 844 845 @Override 846 public void setNextInAccessQueue(ReferenceEntry<K, V> next) { 847 throw new UnsupportedOperationException(); 848 } 849 850 @Override 851 public ReferenceEntry<K, V> getPreviousInAccessQueue() { 852 throw new UnsupportedOperationException(); 853 } 854 855 @Override 856 public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) { 857 throw new UnsupportedOperationException(); 858 } 859 860 @Override 861 public long getWriteTime() { 862 throw new UnsupportedOperationException(); 863 } 864 865 @Override 866 public void setWriteTime(long time) { 867 throw new UnsupportedOperationException(); 868 } 869 870 @Override 871 public ReferenceEntry<K, V> getNextInWriteQueue() { 872 throw 
new UnsupportedOperationException(); 873 } 874 875 @Override 876 public void setNextInWriteQueue(ReferenceEntry<K, V> next) { 877 throw new UnsupportedOperationException(); 878 } 879 880 @Override 881 public ReferenceEntry<K, V> getPreviousInWriteQueue() { 882 throw new UnsupportedOperationException(); 883 } 884 885 @Override 886 public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) { 887 throw new UnsupportedOperationException(); 888 } 889 } 890 891 @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value 892 static <K, V> ReferenceEntry<K, V> nullEntry() { 893 return (ReferenceEntry<K, V>) NullEntry.INSTANCE; 894 } 895 896 static final Queue<?> DISCARDING_QUEUE = 897 new AbstractQueue<Object>() { 898 @Override 899 public boolean offer(Object o) { 900 return true; 901 } 902 903 @Override 904 public Object peek() { 905 return null; 906 } 907 908 @Override 909 public Object poll() { 910 return null; 911 } 912 913 @Override 914 public int size() { 915 return 0; 916 } 917 918 @Override 919 public Iterator<Object> iterator() { 920 return ImmutableSet.of().iterator(); 921 } 922 }; 923 924 /** Queue that discards all elements. */ 925 @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value 926 static <E> Queue<E> discardingQueue() { 927 return (Queue) DISCARDING_QUEUE; 928 } 929 930 /* 931 * Note: All of this duplicate code sucks, but it saves a lot of memory. If only Java had mixins! 932 * To maintain this code, make a change for the strong reference type. Then, cut and paste, and 933 * replace "Strong" with "Soft" or "Weak" within the pasted text. The primary difference is that 934 * strong entries store the key reference directly while soft and weak entries delegate to their 935 * respective superclasses. 936 */ 937 938 /** Used for strongly-referenced keys. */ 939 static class StrongEntry<K, V> extends AbstractReferenceEntry<K, V> { 940 final K key; 941 942 StrongEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 943 this.key = key; 944 this.hash = hash; 945 this.next = next; 946 } 947 948 @Override 949 public K getKey() { 950 return this.key; 951 } 952 953 // The code below is exactly the same for each entry type. 954 955 final int hash; 956 final @Nullable ReferenceEntry<K, V> next; 957 volatile ValueReference<K, V> valueReference = unset(); 958 959 @Override 960 public ValueReference<K, V> getValueReference() { 961 return valueReference; 962 } 963 964 @Override 965 public void setValueReference(ValueReference<K, V> valueReference) { 966 this.valueReference = valueReference; 967 } 968 969 @Override 970 public int getHash() { 971 return hash; 972 } 973 974 @Override 975 public ReferenceEntry<K, V> getNext() { 976 return next; 977 } 978 } 979 980 static final class StrongAccessEntry<K, V> extends StrongEntry<K, V> { 981 StrongAccessEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 982 super(key, hash, next); 983 } 984 985 // The code below is exactly the same for each access entry type. 
986 987 volatile long accessTime = Long.MAX_VALUE; 988 989 @Override 990 public long getAccessTime() { 991 return accessTime; 992 } 993 994 @Override 995 public void setAccessTime(long time) { 996 this.accessTime = time; 997 } 998 999 // Guarded By Segment.this 1000 @Weak ReferenceEntry<K, V> nextAccess = nullEntry(); 1001 1002 @Override 1003 public ReferenceEntry<K, V> getNextInAccessQueue() { 1004 return nextAccess; 1005 } 1006 1007 @Override 1008 public void setNextInAccessQueue(ReferenceEntry<K, V> next) { 1009 this.nextAccess = next; 1010 } 1011 1012 // Guarded By Segment.this 1013 @Weak ReferenceEntry<K, V> previousAccess = nullEntry(); 1014 1015 @Override 1016 public ReferenceEntry<K, V> getPreviousInAccessQueue() { 1017 return previousAccess; 1018 } 1019 1020 @Override 1021 public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) { 1022 this.previousAccess = previous; 1023 } 1024 } 1025 1026 static final class StrongWriteEntry<K, V> extends StrongEntry<K, V> { 1027 StrongWriteEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1028 super(key, hash, next); 1029 } 1030 1031 // The code below is exactly the same for each write entry type. 1032 1033 volatile long writeTime = Long.MAX_VALUE; 1034 1035 @Override 1036 public long getWriteTime() { 1037 return writeTime; 1038 } 1039 1040 @Override 1041 public void setWriteTime(long time) { 1042 this.writeTime = time; 1043 } 1044 1045 // Guarded By Segment.this 1046 @Weak ReferenceEntry<K, V> nextWrite = nullEntry(); 1047 1048 @Override 1049 public ReferenceEntry<K, V> getNextInWriteQueue() { 1050 return nextWrite; 1051 } 1052 1053 @Override 1054 public void setNextInWriteQueue(ReferenceEntry<K, V> next) { 1055 this.nextWrite = next; 1056 } 1057 1058 // Guarded By Segment.this 1059 @Weak ReferenceEntry<K, V> previousWrite = nullEntry(); 1060 1061 @Override 1062 public ReferenceEntry<K, V> getPreviousInWriteQueue() { 1063 return previousWrite; 1064 } 1065 1066 @Override 1067 public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) { 1068 this.previousWrite = previous; 1069 } 1070 } 1071 1072 static final class StrongAccessWriteEntry<K, V> extends StrongEntry<K, V> { 1073 StrongAccessWriteEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1074 super(key, hash, next); 1075 } 1076 1077 // The code below is exactly the same for each access entry type. 1078 1079 volatile long accessTime = Long.MAX_VALUE; 1080 1081 @Override 1082 public long getAccessTime() { 1083 return accessTime; 1084 } 1085 1086 @Override 1087 public void setAccessTime(long time) { 1088 this.accessTime = time; 1089 } 1090 1091 // Guarded By Segment.this 1092 @Weak ReferenceEntry<K, V> nextAccess = nullEntry(); 1093 1094 @Override 1095 public ReferenceEntry<K, V> getNextInAccessQueue() { 1096 return nextAccess; 1097 } 1098 1099 @Override 1100 public void setNextInAccessQueue(ReferenceEntry<K, V> next) { 1101 this.nextAccess = next; 1102 } 1103 1104 // Guarded By Segment.this 1105 @Weak ReferenceEntry<K, V> previousAccess = nullEntry(); 1106 1107 @Override 1108 public ReferenceEntry<K, V> getPreviousInAccessQueue() { 1109 return previousAccess; 1110 } 1111 1112 @Override 1113 public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) { 1114 this.previousAccess = previous; 1115 } 1116 1117 // The code below is exactly the same for each write entry type. 
1118 1119 volatile long writeTime = Long.MAX_VALUE; 1120 1121 @Override 1122 public long getWriteTime() { 1123 return writeTime; 1124 } 1125 1126 @Override 1127 public void setWriteTime(long time) { 1128 this.writeTime = time; 1129 } 1130 1131 // Guarded By Segment.this 1132 @Weak ReferenceEntry<K, V> nextWrite = nullEntry(); 1133 1134 @Override 1135 public ReferenceEntry<K, V> getNextInWriteQueue() { 1136 return nextWrite; 1137 } 1138 1139 @Override 1140 public void setNextInWriteQueue(ReferenceEntry<K, V> next) { 1141 this.nextWrite = next; 1142 } 1143 1144 // Guarded By Segment.this 1145 @Weak ReferenceEntry<K, V> previousWrite = nullEntry(); 1146 1147 @Override 1148 public ReferenceEntry<K, V> getPreviousInWriteQueue() { 1149 return previousWrite; 1150 } 1151 1152 @Override 1153 public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) { 1154 this.previousWrite = previous; 1155 } 1156 } 1157 1158 /** Used for weakly-referenced keys. */ 1159 static class WeakEntry<K, V> extends WeakReference<K> implements ReferenceEntry<K, V> { 1160 WeakEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1161 super(key, queue); 1162 this.hash = hash; 1163 this.next = next; 1164 } 1165 1166 @Override 1167 public K getKey() { 1168 return get(); 1169 } 1170 1171 /* 1172 * It'd be nice to get these for free from AbstractReferenceEntry, but we're already extending 1173 * WeakReference<K>. 1174 */ 1175 1176 // null access 1177 1178 @Override 1179 public long getAccessTime() { 1180 throw new UnsupportedOperationException(); 1181 } 1182 1183 @Override 1184 public void setAccessTime(long time) { 1185 throw new UnsupportedOperationException(); 1186 } 1187 1188 @Override 1189 public ReferenceEntry<K, V> getNextInAccessQueue() { 1190 throw new UnsupportedOperationException(); 1191 } 1192 1193 @Override 1194 public void setNextInAccessQueue(ReferenceEntry<K, V> next) { 1195 throw new UnsupportedOperationException(); 1196 } 1197 1198 @Override 1199 public ReferenceEntry<K, V> getPreviousInAccessQueue() { 1200 throw new UnsupportedOperationException(); 1201 } 1202 1203 @Override 1204 public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) { 1205 throw new UnsupportedOperationException(); 1206 } 1207 1208 // null write 1209 1210 @Override 1211 public long getWriteTime() { 1212 throw new UnsupportedOperationException(); 1213 } 1214 1215 @Override 1216 public void setWriteTime(long time) { 1217 throw new UnsupportedOperationException(); 1218 } 1219 1220 @Override 1221 public ReferenceEntry<K, V> getNextInWriteQueue() { 1222 throw new UnsupportedOperationException(); 1223 } 1224 1225 @Override 1226 public void setNextInWriteQueue(ReferenceEntry<K, V> next) { 1227 throw new UnsupportedOperationException(); 1228 } 1229 1230 @Override 1231 public ReferenceEntry<K, V> getPreviousInWriteQueue() { 1232 throw new UnsupportedOperationException(); 1233 } 1234 1235 @Override 1236 public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) { 1237 throw new UnsupportedOperationException(); 1238 } 1239 1240 // The code below is exactly the same for each entry type. 
1241 1242 final int hash; 1243 final @Nullable ReferenceEntry<K, V> next; 1244 volatile ValueReference<K, V> valueReference = unset(); 1245 1246 @Override 1247 public ValueReference<K, V> getValueReference() { 1248 return valueReference; 1249 } 1250 1251 @Override 1252 public void setValueReference(ValueReference<K, V> valueReference) { 1253 this.valueReference = valueReference; 1254 } 1255 1256 @Override 1257 public int getHash() { 1258 return hash; 1259 } 1260 1261 @Override 1262 public ReferenceEntry<K, V> getNext() { 1263 return next; 1264 } 1265 } 1266 1267 static final class WeakAccessEntry<K, V> extends WeakEntry<K, V> { 1268 WeakAccessEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1269 super(queue, key, hash, next); 1270 } 1271 1272 // The code below is exactly the same for each access entry type. 1273 1274 volatile long accessTime = Long.MAX_VALUE; 1275 1276 @Override 1277 public long getAccessTime() { 1278 return accessTime; 1279 } 1280 1281 @Override 1282 public void setAccessTime(long time) { 1283 this.accessTime = time; 1284 } 1285 1286 // Guarded By Segment.this 1287 @Weak ReferenceEntry<K, V> nextAccess = nullEntry(); 1288 1289 @Override 1290 public ReferenceEntry<K, V> getNextInAccessQueue() { 1291 return nextAccess; 1292 } 1293 1294 @Override 1295 public void setNextInAccessQueue(ReferenceEntry<K, V> next) { 1296 this.nextAccess = next; 1297 } 1298 1299 // Guarded By Segment.this 1300 @Weak ReferenceEntry<K, V> previousAccess = nullEntry(); 1301 1302 @Override 1303 public ReferenceEntry<K, V> getPreviousInAccessQueue() { 1304 return previousAccess; 1305 } 1306 1307 @Override 1308 public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) { 1309 this.previousAccess = previous; 1310 } 1311 } 1312 1313 static final class WeakWriteEntry<K, V> extends WeakEntry<K, V> { 1314 WeakWriteEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1315 super(queue, key, hash, next); 1316 } 1317 1318 // The code below is exactly the same for each write entry type. 1319 1320 volatile long writeTime = Long.MAX_VALUE; 1321 1322 @Override 1323 public long getWriteTime() { 1324 return writeTime; 1325 } 1326 1327 @Override 1328 public void setWriteTime(long time) { 1329 this.writeTime = time; 1330 } 1331 1332 // Guarded By Segment.this 1333 @Weak ReferenceEntry<K, V> nextWrite = nullEntry(); 1334 1335 @Override 1336 public ReferenceEntry<K, V> getNextInWriteQueue() { 1337 return nextWrite; 1338 } 1339 1340 @Override 1341 public void setNextInWriteQueue(ReferenceEntry<K, V> next) { 1342 this.nextWrite = next; 1343 } 1344 1345 // Guarded By Segment.this 1346 @Weak ReferenceEntry<K, V> previousWrite = nullEntry(); 1347 1348 @Override 1349 public ReferenceEntry<K, V> getPreviousInWriteQueue() { 1350 return previousWrite; 1351 } 1352 1353 @Override 1354 public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) { 1355 this.previousWrite = previous; 1356 } 1357 } 1358 1359 static final class WeakAccessWriteEntry<K, V> extends WeakEntry<K, V> { 1360 WeakAccessWriteEntry( 1361 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1362 super(queue, key, hash, next); 1363 } 1364 1365 // The code below is exactly the same for each access entry type. 
1366 1367 volatile long accessTime = Long.MAX_VALUE; 1368 1369 @Override 1370 public long getAccessTime() { 1371 return accessTime; 1372 } 1373 1374 @Override 1375 public void setAccessTime(long time) { 1376 this.accessTime = time; 1377 } 1378 1379 // Guarded By Segment.this 1380 @Weak ReferenceEntry<K, V> nextAccess = nullEntry(); 1381 1382 @Override 1383 public ReferenceEntry<K, V> getNextInAccessQueue() { 1384 return nextAccess; 1385 } 1386 1387 @Override 1388 public void setNextInAccessQueue(ReferenceEntry<K, V> next) { 1389 this.nextAccess = next; 1390 } 1391 1392 // Guarded By Segment.this 1393 @Weak ReferenceEntry<K, V> previousAccess = nullEntry(); 1394 1395 @Override 1396 public ReferenceEntry<K, V> getPreviousInAccessQueue() { 1397 return previousAccess; 1398 } 1399 1400 @Override 1401 public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) { 1402 this.previousAccess = previous; 1403 } 1404 1405 // The code below is exactly the same for each write entry type. 1406 1407 volatile long writeTime = Long.MAX_VALUE; 1408 1409 @Override 1410 public long getWriteTime() { 1411 return writeTime; 1412 } 1413 1414 @Override 1415 public void setWriteTime(long time) { 1416 this.writeTime = time; 1417 } 1418 1419 // Guarded By Segment.this 1420 @Weak ReferenceEntry<K, V> nextWrite = nullEntry(); 1421 1422 @Override 1423 public ReferenceEntry<K, V> getNextInWriteQueue() { 1424 return nextWrite; 1425 } 1426 1427 @Override 1428 public void setNextInWriteQueue(ReferenceEntry<K, V> next) { 1429 this.nextWrite = next; 1430 } 1431 1432 // Guarded By Segment.this 1433 @Weak ReferenceEntry<K, V> previousWrite = nullEntry(); 1434 1435 @Override 1436 public ReferenceEntry<K, V> getPreviousInWriteQueue() { 1437 return previousWrite; 1438 } 1439 1440 @Override 1441 public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) { 1442 this.previousWrite = previous; 1443 } 1444 } 1445 1446 /** References a weak value. */ 1447 static class WeakValueReference<K, V> extends WeakReference<V> implements ValueReference<K, V> { 1448 final ReferenceEntry<K, V> entry; 1449 1450 WeakValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) { 1451 super(referent, queue); 1452 this.entry = entry; 1453 } 1454 1455 @Override 1456 public int getWeight() { 1457 return 1; 1458 } 1459 1460 @Override 1461 public ReferenceEntry<K, V> getEntry() { 1462 return entry; 1463 } 1464 1465 @Override 1466 public void notifyNewValue(V newValue) {} 1467 1468 @Override 1469 public ValueReference<K, V> copyFor( 1470 ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) { 1471 return new WeakValueReference<>(queue, value, entry); 1472 } 1473 1474 @Override 1475 public boolean isLoading() { 1476 return false; 1477 } 1478 1479 @Override 1480 public boolean isActive() { 1481 return true; 1482 } 1483 1484 @Override 1485 public V waitForValue() { 1486 return get(); 1487 } 1488 } 1489 1490 /** References a soft value. 
*/ 1491 static class SoftValueReference<K, V> extends SoftReference<V> implements ValueReference<K, V> { 1492 final ReferenceEntry<K, V> entry; 1493 1494 SoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) { 1495 super(referent, queue); 1496 this.entry = entry; 1497 } 1498 1499 @Override 1500 public int getWeight() { 1501 return 1; 1502 } 1503 1504 @Override 1505 public ReferenceEntry<K, V> getEntry() { 1506 return entry; 1507 } 1508 1509 @Override 1510 public void notifyNewValue(V newValue) {} 1511 1512 @Override 1513 public ValueReference<K, V> copyFor( 1514 ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) { 1515 return new SoftValueReference<>(queue, value, entry); 1516 } 1517 1518 @Override 1519 public boolean isLoading() { 1520 return false; 1521 } 1522 1523 @Override 1524 public boolean isActive() { 1525 return true; 1526 } 1527 1528 @Override 1529 public V waitForValue() { 1530 return get(); 1531 } 1532 } 1533 1534 /** References a strong value. */ 1535 static class StrongValueReference<K, V> implements ValueReference<K, V> { 1536 final V referent; 1537 1538 StrongValueReference(V referent) { 1539 this.referent = referent; 1540 } 1541 1542 @Override 1543 public V get() { 1544 return referent; 1545 } 1546 1547 @Override 1548 public int getWeight() { 1549 return 1; 1550 } 1551 1552 @Override 1553 public ReferenceEntry<K, V> getEntry() { 1554 return null; 1555 } 1556 1557 @Override 1558 public ValueReference<K, V> copyFor( 1559 ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) { 1560 return this; 1561 } 1562 1563 @Override 1564 public boolean isLoading() { 1565 return false; 1566 } 1567 1568 @Override 1569 public boolean isActive() { 1570 return true; 1571 } 1572 1573 @Override 1574 public V waitForValue() { 1575 return get(); 1576 } 1577 1578 @Override 1579 public void notifyNewValue(V newValue) {} 1580 } 1581 1582 /** References a weak value. */ 1583 static final class WeightedWeakValueReference<K, V> extends WeakValueReference<K, V> { 1584 final int weight; 1585 1586 WeightedWeakValueReference( 1587 ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry, int weight) { 1588 super(queue, referent, entry); 1589 this.weight = weight; 1590 } 1591 1592 @Override 1593 public int getWeight() { 1594 return weight; 1595 } 1596 1597 @Override 1598 public ValueReference<K, V> copyFor( 1599 ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) { 1600 return new WeightedWeakValueReference<>(queue, value, entry, weight); 1601 } 1602 } 1603 1604 /** References a soft value. */ 1605 static final class WeightedSoftValueReference<K, V> extends SoftValueReference<K, V> { 1606 final int weight; 1607 1608 WeightedSoftValueReference( 1609 ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry, int weight) { 1610 super(queue, referent, entry); 1611 this.weight = weight; 1612 } 1613 1614 @Override 1615 public int getWeight() { 1616 return weight; 1617 } 1618 1619 @Override 1620 public ValueReference<K, V> copyFor( 1621 ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) { 1622 return new WeightedSoftValueReference<>(queue, value, entry, weight); 1623 } 1624 } 1625 1626 /** References a strong value. 
*/ 1627 static final class WeightedStrongValueReference<K, V> extends StrongValueReference<K, V> { 1628 final int weight; 1629 1630 WeightedStrongValueReference(V referent, int weight) { 1631 super(referent); 1632 this.weight = weight; 1633 } 1634 1635 @Override 1636 public int getWeight() { 1637 return weight; 1638 } 1639 } 1640 1641 /** 1642 * Applies a supplemental hash function to a given hash code, which defends against poor quality 1643 * hash functions. This is critical when the concurrent hash map uses power-of-two length hash 1644 * tables, that otherwise encounter collisions for hash codes that do not differ in lower or upper 1645 * bits. 1646 * 1647 * @param h hash code 1648 */ 1649 static int rehash(int h) { 1650 // Spread bits to regularize both segment and index locations, 1651 // using variant of single-word Wang/Jenkins hash. 1652 // TODO(kevinb): use Hashing/move this to Hashing? 1653 h += (h << 15) ^ 0xffffcd7d; 1654 h ^= (h >>> 10); 1655 h += (h << 3); 1656 h ^= (h >>> 6); 1657 h += (h << 2) + (h << 14); 1658 return h ^ (h >>> 16); 1659 } 1660 1661 /** 1662 * This method is a convenience for testing. Code should call {@link Segment#newEntry} directly. 1663 */ 1664 @VisibleForTesting 1665 ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1666 Segment<K, V> segment = segmentFor(hash); 1667 segment.lock(); 1668 try { 1669 return segment.newEntry(key, hash, next); 1670 } finally { 1671 segment.unlock(); 1672 } 1673 } 1674 1675 /** 1676 * This method is a convenience for testing. Code should call {@link Segment#copyEntry} directly. 1677 */ 1678 // Guarded By Segment.this 1679 @SuppressWarnings("GuardedBy") 1680 @VisibleForTesting 1681 ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 1682 int hash = original.getHash(); 1683 return segmentFor(hash).copyEntry(original, newNext); 1684 } 1685 1686 /** 1687 * This method is a convenience for testing. Code should call {@link Segment#setValue} instead. 1688 */ 1689 // Guarded By Segment.this 1690 @VisibleForTesting 1691 ValueReference<K, V> newValueReference(ReferenceEntry<K, V> entry, V value, int weight) { 1692 int hash = entry.getHash(); 1693 return valueStrength.referenceValue(segmentFor(hash), entry, checkNotNull(value), weight); 1694 } 1695 1696 int hash(@Nullable Object key) { 1697 int h = keyEquivalence.hash(key); 1698 return rehash(h); 1699 } 1700 1701 void reclaimValue(ValueReference<K, V> valueReference) { 1702 ReferenceEntry<K, V> entry = valueReference.getEntry(); 1703 int hash = entry.getHash(); 1704 segmentFor(hash).reclaimValue(entry.getKey(), hash, valueReference); 1705 } 1706 1707 void reclaimKey(ReferenceEntry<K, V> entry) { 1708 int hash = entry.getHash(); 1709 segmentFor(hash).reclaimKey(entry, hash); 1710 } 1711 1712 /** 1713 * This method is a convenience for testing. Code should call {@link Segment#getLiveValue} 1714 * instead. 1715 */ 1716 @VisibleForTesting 1717 boolean isLive(ReferenceEntry<K, V> entry, long now) { 1718 return segmentFor(entry.getHash()).getLiveValue(entry, now) != null; 1719 } 1720 1721 /** 1722 * Returns the segment that should be used for a key with the given hash. 1723 * 1724 * @param hash the hash code for the key 1725 * @return the segment 1726 */ 1727 Segment<K, V> segmentFor(int hash) { 1728 // TODO(fry): Lazily create segments? 
1729 return segments[(hash >>> segmentShift) & segmentMask]; 1730 } 1731 1732 Segment<K, V> createSegment( 1733 int initialCapacity, long maxSegmentWeight, StatsCounter statsCounter) { 1734 return new Segment<>(this, initialCapacity, maxSegmentWeight, statsCounter); 1735 } 1736 1737 /** 1738 * Gets the value from an entry. Returns null if the entry is invalid, partially-collected, 1739 * loading, or expired. Unlike {@link Segment#getLiveValue} this method does not attempt to 1740 * cleanup stale entries. As such it should only be called outside of a segment context, such as 1741 * during iteration. 1742 */ 1743 @Nullable 1744 V getLiveValue(ReferenceEntry<K, V> entry, long now) { 1745 if (entry.getKey() == null) { 1746 return null; 1747 } 1748 V value = entry.getValueReference().get(); 1749 if (value == null) { 1750 return null; 1751 } 1752 1753 if (isExpired(entry, now)) { 1754 return null; 1755 } 1756 return value; 1757 } 1758 1759 // expiration 1760 1761 /** Returns true if the entry has expired. */ 1762 boolean isExpired(ReferenceEntry<K, V> entry, long now) { 1763 checkNotNull(entry); 1764 if (expiresAfterAccess() && (now - entry.getAccessTime() >= expireAfterAccessNanos)) { 1765 return true; 1766 } 1767 if (expiresAfterWrite() && (now - entry.getWriteTime() >= expireAfterWriteNanos)) { 1768 return true; 1769 } 1770 return false; 1771 } 1772 1773 // queues 1774 1775 // Guarded By Segment.this 1776 static <K, V> void connectAccessOrder(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) { 1777 previous.setNextInAccessQueue(next); 1778 next.setPreviousInAccessQueue(previous); 1779 } 1780 1781 // Guarded By Segment.this 1782 static <K, V> void nullifyAccessOrder(ReferenceEntry<K, V> nulled) { 1783 ReferenceEntry<K, V> nullEntry = nullEntry(); 1784 nulled.setNextInAccessQueue(nullEntry); 1785 nulled.setPreviousInAccessQueue(nullEntry); 1786 } 1787 1788 // Guarded By Segment.this 1789 static <K, V> void connectWriteOrder(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) { 1790 previous.setNextInWriteQueue(next); 1791 next.setPreviousInWriteQueue(previous); 1792 } 1793 1794 // Guarded By Segment.this 1795 static <K, V> void nullifyWriteOrder(ReferenceEntry<K, V> nulled) { 1796 ReferenceEntry<K, V> nullEntry = nullEntry(); 1797 nulled.setNextInWriteQueue(nullEntry); 1798 nulled.setPreviousInWriteQueue(nullEntry); 1799 } 1800 1801 /** 1802 * Notifies listeners that an entry has been automatically removed due to expiration, eviction, or 1803 * eligibility for garbage collection. This should be called every time expireEntries or 1804 * evictEntry is called (once the lock is released). 1805 */ 1806 void processPendingNotifications() { 1807 RemovalNotification<K, V> notification; 1808 while ((notification = removalNotificationQueue.poll()) != null) { 1809 try { 1810 removalListener.onRemoval(notification); 1811 } catch (Throwable e) { 1812 logger.log(Level.WARNING, "Exception thrown by removal listener", e); 1813 } 1814 } 1815 } 1816 1817 @SuppressWarnings("unchecked") 1818 final Segment<K, V>[] newSegmentArray(int ssize) { 1819 return new Segment[ssize]; 1820 } 1821 1822 // Inner Classes 1823 1824 /** 1825 * Segments are specialized versions of hash tables. This subclass inherits from ReentrantLock 1826 * opportunistically, just to simplify some locking and avoid separate construction. 1827 */ 1828 @SuppressWarnings("serial") // This class is never serialized. 
1829 static class Segment<K, V> extends ReentrantLock { 1830 1831 /* 1832 * TODO(fry): Consider copying variables (like evictsBySize) from outer class into this class. 1833 * It will require more memory but will reduce indirection. 1834 */ 1835 1836 /* 1837 * Segments maintain a table of entry lists that are ALWAYS kept in a consistent state, so can 1838 * be read without locking. Next fields of nodes are immutable (final). All list additions are 1839 * performed at the front of each bin. This makes it easy to check changes, and also fast to 1840 * traverse. When nodes would otherwise be changed, new nodes are created to replace them. This 1841 * works well for hash tables since the bin lists tend to be short. (The average length is less 1842 * than two.) 1843 * 1844 * Read operations can thus proceed without locking, but rely on selected uses of volatiles to 1845 * ensure that completed write operations performed by other threads are noticed. For most 1846 * purposes, the "count" field, tracking the number of elements, serves as that volatile 1847 * variable ensuring visibility. This is convenient because this field needs to be read in many 1848 * read operations anyway: 1849 * 1850 * - All (unsynchronized) read operations must first read the "count" field, and should not look 1851 * at table entries if it is 0. 1852 * 1853 * - All (synchronized) write operations should write to the "count" field after structurally 1854 * changing any bin. The operations must not take any action that could even momentarily cause a 1855 * concurrent read operation to see inconsistent data. This is made easier by the nature of the 1856 * read operations in Map. For example, no operation can reveal that the table has grown but the 1857 * threshold has not yet been updated, so there are no atomicity requirements for this with 1858 * respect to reads. 1859 * 1860 * As a guide, all critical volatile reads and writes to the count field are marked in code 1861 * comments. 1862 */ 1863 1864 @Weak final LocalCache<K, V> map; 1865 1866 /** The number of live elements in this segment's region. */ 1867 volatile int count; 1868 1869 /** The weight of the live elements in this segment's region. */ 1870 @GuardedBy("this") 1871 long totalWeight; 1872 1873 /** 1874 * Number of updates that alter the size of the table. This is used during bulk-read methods to 1875 * make sure they see a consistent snapshot: If modCounts change during a traversal of segments 1876 * loading size or checking containsValue, then we might have an inconsistent view of state so 1877 * (usually) must retry. 1878 */ 1879 int modCount; 1880 1881 /** 1882 * The table is expanded when its size exceeds this threshold. (The value of this field is 1883 * always {@code (int) (capacity * 0.75)}.) 1884 */ 1885 int threshold; 1886 1887 /** The per-segment table. */ 1888 volatile @Nullable AtomicReferenceArray<ReferenceEntry<K, V>> table; 1889 1890 /** The maximum weight of this segment. UNSET_INT if there is no maximum. */ 1891 final long maxSegmentWeight; 1892 1893 /** 1894 * The key reference queue contains entries whose keys have been garbage collected, and which 1895 * need to be cleaned up internally. 1896 */ 1897 final @Nullable ReferenceQueue<K> keyReferenceQueue; 1898 1899 /** 1900 * The value reference queue contains value references whose values have been garbage collected, 1901 * and which need to be cleaned up internally. 
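The comment block above is the heart of the segment design: writers make their structural change and then write the volatile count, readers read count before touching the table, and that pair of volatile accesses is what publishes new nodes. The sketch below strips the idea down to a hypothetical TinySegment with none of the real segment's reference, expiration, or eviction bookkeeping; every name in it is illustrative.

```java
import java.util.concurrent.atomic.AtomicReferenceArray;

// Stripped-down sketch of the "count as visibility guard" protocol described above.
class TinySegment<K, V> {
  static final class Node<K, V> {
    final K key;
    final V value;
    final Node<K, V> next; // next pointers are final, as in ReferenceEntry chains

    Node(K key, V value, Node<K, V> next) {
      this.key = key;
      this.value = value;
      this.next = next;
    }
  }

  final AtomicReferenceArray<Node<K, V>> table = new AtomicReferenceArray<>(16);
  volatile int count;

  synchronized void put(K key, V value) {
    int index = key.hashCode() & (table.length() - 1);
    table.set(index, new Node<>(key, value, table.get(index))); // add at the head of the bin
    count = count + 1; // write-volatile: publishes the structural change
  }

  V get(K key) {
    if (count != 0) { // read-volatile: don't look at the table when the segment is empty
      int index = key.hashCode() & (table.length() - 1);
      for (Node<K, V> e = table.get(index); e != null; e = e.next) {
        if (key.equals(e.key)) {
          return e.value;
        }
      }
    }
    return null;
  }
}
```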
1902 */ 1903 final @Nullable ReferenceQueue<V> valueReferenceQueue; 1904 1905 /** 1906 * The recency queue is used to record which entries were accessed for updating the access 1907 * list's ordering. It is drained as a batch operation when either the DRAIN_THRESHOLD is 1908 * crossed or a write occurs on the segment. 1909 */ 1910 final Queue<ReferenceEntry<K, V>> recencyQueue; 1911 1912 /** 1913 * A counter of the number of reads since the last write, used to drain queues on a small 1914 * fraction of read operations. 1915 */ 1916 final AtomicInteger readCount = new AtomicInteger(); 1917 1918 /** 1919 * A queue of elements currently in the map, ordered by write time. Elements are added to the 1920 * tail of the queue on write. 1921 */ 1922 @GuardedBy("this") 1923 final Queue<ReferenceEntry<K, V>> writeQueue; 1924 1925 /** 1926 * A queue of elements currently in the map, ordered by access time. Elements are added to the 1927 * tail of the queue on access (note that writes count as accesses). 1928 */ 1929 @GuardedBy("this") 1930 final Queue<ReferenceEntry<K, V>> accessQueue; 1931 1932 /** Accumulates cache statistics. */ 1933 final StatsCounter statsCounter; 1934 1935 Segment( 1936 LocalCache<K, V> map, 1937 int initialCapacity, 1938 long maxSegmentWeight, 1939 StatsCounter statsCounter) { 1940 this.map = map; 1941 this.maxSegmentWeight = maxSegmentWeight; 1942 this.statsCounter = checkNotNull(statsCounter); 1943 initTable(newEntryArray(initialCapacity)); 1944 1945 keyReferenceQueue = map.usesKeyReferences() ? new ReferenceQueue<K>() : null; 1946 1947 valueReferenceQueue = map.usesValueReferences() ? new ReferenceQueue<V>() : null; 1948 1949 recencyQueue = 1950 map.usesAccessQueue() 1951 ? new ConcurrentLinkedQueue<ReferenceEntry<K, V>>() 1952 : LocalCache.<ReferenceEntry<K, V>>discardingQueue(); 1953 1954 writeQueue = 1955 map.usesWriteQueue() 1956 ? new WriteQueue<K, V>() 1957 : LocalCache.<ReferenceEntry<K, V>>discardingQueue(); 1958 1959 accessQueue = 1960 map.usesAccessQueue() 1961 ? new AccessQueue<K, V>() 1962 : LocalCache.<ReferenceEntry<K, V>>discardingQueue(); 1963 } 1964 1965 AtomicReferenceArray<ReferenceEntry<K, V>> newEntryArray(int size) { 1966 return new AtomicReferenceArray<>(size); 1967 } 1968 1969 void initTable(AtomicReferenceArray<ReferenceEntry<K, V>> newTable) { 1970 this.threshold = newTable.length() * 3 / 4; // 0.75 1971 if (!map.customWeigher() && this.threshold == maxSegmentWeight) { 1972 // prevent spurious expansion before eviction 1973 this.threshold++; 1974 } 1975 this.table = newTable; 1976 } 1977 1978 @GuardedBy("this") 1979 ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1980 return map.entryFactory.newEntry(this, checkNotNull(key), hash, next); 1981 } 1982 1983 /** 1984 * Copies {@code original} into a new entry chained to {@code newNext}. Returns the new entry, 1985 * or {@code null} if {@code original} was already garbage collected. 
1986 */ 1987 @GuardedBy("this") 1988 ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 1989 if (original.getKey() == null) { 1990 // key collected 1991 return null; 1992 } 1993 1994 ValueReference<K, V> valueReference = original.getValueReference(); 1995 V value = valueReference.get(); 1996 if ((value == null) && valueReference.isActive()) { 1997 // value collected 1998 return null; 1999 } 2000 2001 ReferenceEntry<K, V> newEntry = map.entryFactory.copyEntry(this, original, newNext); 2002 newEntry.setValueReference(valueReference.copyFor(this.valueReferenceQueue, value, newEntry)); 2003 return newEntry; 2004 } 2005 2006 /** Sets a new value of an entry. Adds newly created entries at the end of the access queue. */ 2007 @GuardedBy("this") 2008 void setValue(ReferenceEntry<K, V> entry, K key, V value, long now) { 2009 ValueReference<K, V> previous = entry.getValueReference(); 2010 int weight = map.weigher.weigh(key, value); 2011 checkState(weight >= 0, "Weights must be non-negative"); 2012 2013 ValueReference<K, V> valueReference = 2014 map.valueStrength.referenceValue(this, entry, value, weight); 2015 entry.setValueReference(valueReference); 2016 recordWrite(entry, weight, now); 2017 previous.notifyNewValue(value); 2018 } 2019 2020 // loading 2021 2022 V get(K key, int hash, CacheLoader<? super K, V> loader) throws ExecutionException { 2023 checkNotNull(key); 2024 checkNotNull(loader); 2025 try { 2026 if (count != 0) { // read-volatile 2027 // don't call getLiveEntry, which would ignore loading values 2028 ReferenceEntry<K, V> e = getEntry(key, hash); 2029 if (e != null) { 2030 long now = map.ticker.read(); 2031 V value = getLiveValue(e, now); 2032 if (value != null) { 2033 recordRead(e, now); 2034 statsCounter.recordHits(1); 2035 return scheduleRefresh(e, key, hash, value, now, loader); 2036 } 2037 ValueReference<K, V> valueReference = e.getValueReference(); 2038 if (valueReference.isLoading()) { 2039 return waitForLoadingValue(e, key, valueReference); 2040 } 2041 } 2042 } 2043 2044 // at this point e is either null or expired; 2045 return lockedGetOrLoad(key, hash, loader); 2046 } catch (ExecutionException ee) { 2047 Throwable cause = ee.getCause(); 2048 if (cause instanceof Error) { 2049 throw new ExecutionError((Error) cause); 2050 } else if (cause instanceof RuntimeException) { 2051 throw new UncheckedExecutionException(cause); 2052 } 2053 throw ee; 2054 } finally { 2055 postReadCleanup(); 2056 } 2057 } 2058 2059 @Nullable 2060 V get(Object key, int hash) { 2061 try { 2062 if (count != 0) { // read-volatile 2063 long now = map.ticker.read(); 2064 ReferenceEntry<K, V> e = getLiveEntry(key, hash, now); 2065 if (e == null) { 2066 return null; 2067 } 2068 2069 V value = e.getValueReference().get(); 2070 if (value != null) { 2071 recordRead(e, now); 2072 return scheduleRefresh(e, e.getKey(), hash, value, now, map.defaultLoader); 2073 } 2074 tryDrainReferenceQueues(); 2075 } 2076 return null; 2077 } finally { 2078 postReadCleanup(); 2079 } 2080 } 2081 2082 V lockedGetOrLoad(K key, int hash, CacheLoader<? 
super K, V> loader) throws ExecutionException { 2083 ReferenceEntry<K, V> e; 2084 ValueReference<K, V> valueReference = null; 2085 LoadingValueReference<K, V> loadingValueReference = null; 2086 boolean createNewEntry = true; 2087 2088 lock(); 2089 try { 2090 // re-read ticker once inside the lock 2091 long now = map.ticker.read(); 2092 preWriteCleanup(now); 2093 2094 int newCount = this.count - 1; 2095 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2096 int index = hash & (table.length() - 1); 2097 ReferenceEntry<K, V> first = table.get(index); 2098 2099 for (e = first; e != null; e = e.getNext()) { 2100 K entryKey = e.getKey(); 2101 if (e.getHash() == hash 2102 && entryKey != null 2103 && map.keyEquivalence.equivalent(key, entryKey)) { 2104 valueReference = e.getValueReference(); 2105 if (valueReference.isLoading()) { 2106 createNewEntry = false; 2107 } else { 2108 V value = valueReference.get(); 2109 if (value == null) { 2110 enqueueNotification( 2111 entryKey, hash, value, valueReference.getWeight(), RemovalCause.COLLECTED); 2112 } else if (map.isExpired(e, now)) { 2113 // This is a duplicate check, as preWriteCleanup already purged expired 2114 // entries, but let's accommodate an incorrect expiration queue. 2115 enqueueNotification( 2116 entryKey, hash, value, valueReference.getWeight(), RemovalCause.EXPIRED); 2117 } else { 2118 recordLockedRead(e, now); 2119 statsCounter.recordHits(1); 2120 // we were concurrent with loading; don't consider refresh 2121 return value; 2122 } 2123 2124 // immediately reuse invalid entries 2125 writeQueue.remove(e); 2126 accessQueue.remove(e); 2127 this.count = newCount; // write-volatile 2128 } 2129 break; 2130 } 2131 } 2132 2133 if (createNewEntry) { 2134 loadingValueReference = new LoadingValueReference<>(); 2135 2136 if (e == null) { 2137 e = newEntry(key, hash, first); 2138 e.setValueReference(loadingValueReference); 2139 table.set(index, e); 2140 } else { 2141 e.setValueReference(loadingValueReference); 2142 } 2143 } 2144 } finally { 2145 unlock(); 2146 postWriteCleanup(); 2147 } 2148 2149 if (createNewEntry) { 2150 try { 2151 // Synchronizes on the entry to allow failing fast when a recursive load is 2152 // detected. This may be circumvented when an entry is copied, but will fail fast most 2153 // of the time. 2154 synchronized (e) { 2155 return loadSync(key, hash, loadingValueReference, loader); 2156 } 2157 } finally { 2158 statsCounter.recordMisses(1); 2159 } 2160 } else { 2161 // The entry already exists. Wait for loading. 2162 return waitForLoadingValue(e, key, valueReference); 2163 } 2164 } 2165 2166 V waitForLoadingValue(ReferenceEntry<K, V> e, K key, ValueReference<K, V> valueReference) 2167 throws ExecutionException { 2168 if (!valueReference.isLoading()) { 2169 throw new AssertionError(); 2170 } 2171 2172 checkState(!Thread.holdsLock(e), "Recursive load of: %s", key); 2173 // don't consider expiration as we're concurrent with loading 2174 try { 2175 V value = valueReference.waitForValue(); 2176 if (value == null) { 2177 throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + "."); 2178 } 2179 // re-read ticker now that loading has completed 2180 long now = map.ticker.read(); 2181 recordRead(e, now); 2182 return value; 2183 } finally { 2184 statsCounter.recordMisses(1); 2185 } 2186 } 2187 2188 V compute(K key, int hash, BiFunction<? super K, ? super V, ? 
extends V> function) {
2189 ReferenceEntry<K, V> e;
2190 ValueReference<K, V> valueReference = null;
2191 ComputingValueReference<K, V> computingValueReference = null;
2192 boolean createNewEntry = true;
2193 V newValue;
2194
2195 lock();
2196 try {
2197 // re-read ticker once inside the lock
2198 long now = map.ticker.read();
2199 preWriteCleanup(now);
2200
2201 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2202 int index = hash & (table.length() - 1);
2203 ReferenceEntry<K, V> first = table.get(index);
2204
2205 for (e = first; e != null; e = e.getNext()) {
2206 K entryKey = e.getKey();
2207 if (e.getHash() == hash
2208 && entryKey != null
2209 && map.keyEquivalence.equivalent(key, entryKey)) {
2210 valueReference = e.getValueReference();
2211 if (map.isExpired(e, now)) {
2212 // This is a duplicate check, as preWriteCleanup already purged expired
2213 // entries, but let's accommodate an incorrect expiration queue.
2214 enqueueNotification(
2215 entryKey,
2216 hash,
2217 valueReference.get(),
2218 valueReference.getWeight(),
2219 RemovalCause.EXPIRED);
2220 }
2221
2222 // immediately reuse invalid entries
2223 writeQueue.remove(e);
2224 accessQueue.remove(e);
2225 createNewEntry = false;
2226 break;
2227 }
2228 }
2229
2230 // note valueReference can be an existing value or even itself another loading value if
2231 // the value for the key is already being computed.
2232 computingValueReference = new ComputingValueReference<>(valueReference);
2233
2234 if (e == null) {
2235 createNewEntry = true;
2236 e = newEntry(key, hash, first);
2237 e.setValueReference(computingValueReference);
2238 table.set(index, e);
2239 } else {
2240 e.setValueReference(computingValueReference);
2241 }
2242
2243 newValue = computingValueReference.compute(key, function);
2244 if (newValue != null) {
2245 if (valueReference != null && newValue == valueReference.get()) {
2246 computingValueReference.set(newValue);
2247 e.setValueReference(valueReference);
2248 recordWrite(e, 0, now); // no change in weight
2249 return newValue;
2250 }
2251 try {
2252 return getAndRecordStats(
2253 key, hash, computingValueReference, Futures.immediateFuture(newValue));
2254 } catch (ExecutionException exception) {
2255 throw new AssertionError("impossible; Futures.immediateFuture can't throw");
2256 }
2257 } else if (createNewEntry || valueReference.isLoading()) {
2258 removeLoadingValue(key, hash, computingValueReference);
2259 return null;
2260 } else {
2261 removeEntry(e, hash, RemovalCause.EXPLICIT);
2262 return null;
2263 }
2264 } finally {
2265 unlock();
2266 postWriteCleanup();
2267 }
2268 }
2269
2270 // at most one of loadSync/loadAsync may be called for any given LoadingValueReference
2271
2272 V loadSync(
2273 K key,
2274 int hash,
2275 LoadingValueReference<K, V> loadingValueReference,
2276 CacheLoader<? super K, V> loader)
2277 throws ExecutionException {
2278 ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader);
2279 return getAndRecordStats(key, hash, loadingValueReference, loadingFuture);
2280 }
2281
2282 ListenableFuture<V> loadAsync(
2283 final K key,
2284 final int hash,
2285 final LoadingValueReference<K, V> loadingValueReference,
2286 CacheLoader<?
super K, V> loader) { 2287 final ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader); 2288 loadingFuture.addListener( 2289 new Runnable() { 2290 @Override 2291 public void run() { 2292 try { 2293 getAndRecordStats(key, hash, loadingValueReference, loadingFuture); 2294 } catch (Throwable t) { 2295 logger.log(Level.WARNING, "Exception thrown during refresh", t); 2296 loadingValueReference.setException(t); 2297 } 2298 } 2299 }, 2300 directExecutor()); 2301 return loadingFuture; 2302 } 2303 2304 /** Waits uninterruptibly for {@code newValue} to be loaded, and then records loading stats. */ 2305 V getAndRecordStats( 2306 K key, 2307 int hash, 2308 LoadingValueReference<K, V> loadingValueReference, 2309 ListenableFuture<V> newValue) 2310 throws ExecutionException { 2311 V value = null; 2312 try { 2313 value = getUninterruptibly(newValue); 2314 if (value == null) { 2315 throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + "."); 2316 } 2317 statsCounter.recordLoadSuccess(loadingValueReference.elapsedNanos()); 2318 storeLoadedValue(key, hash, loadingValueReference, value); 2319 return value; 2320 } finally { 2321 if (value == null) { 2322 statsCounter.recordLoadException(loadingValueReference.elapsedNanos()); 2323 removeLoadingValue(key, hash, loadingValueReference); 2324 } 2325 } 2326 } 2327 2328 V scheduleRefresh( 2329 ReferenceEntry<K, V> entry, 2330 K key, 2331 int hash, 2332 V oldValue, 2333 long now, 2334 CacheLoader<? super K, V> loader) { 2335 if (map.refreshes() 2336 && (now - entry.getWriteTime() > map.refreshNanos) 2337 && !entry.getValueReference().isLoading()) { 2338 V newValue = refresh(key, hash, loader, true); 2339 if (newValue != null) { 2340 return newValue; 2341 } 2342 } 2343 return oldValue; 2344 } 2345 2346 /** 2347 * Refreshes the value associated with {@code key}, unless another thread is already doing so. 2348 * Returns the newly refreshed value associated with {@code key} if it was refreshed inline, or 2349 * {@code null} if another thread is performing the refresh or if an error occurs during 2350 * refresh. 2351 */ 2352 @Nullable 2353 V refresh(K key, int hash, CacheLoader<? super K, V> loader, boolean checkTime) { 2354 final LoadingValueReference<K, V> loadingValueReference = 2355 insertLoadingValueReference(key, hash, checkTime); 2356 if (loadingValueReference == null) { 2357 return null; 2358 } 2359 2360 ListenableFuture<V> result = loadAsync(key, hash, loadingValueReference, loader); 2361 if (result.isDone()) { 2362 try { 2363 return Uninterruptibles.getUninterruptibly(result); 2364 } catch (Throwable t) { 2365 // don't let refresh exceptions propagate; error was already logged 2366 } 2367 } 2368 return null; 2369 } 2370 2371 /** 2372 * Returns a newly inserted {@code LoadingValueReference}, or null if the live value reference 2373 * is already loading. 2374 */ 2375 @Nullable 2376 LoadingValueReference<K, V> insertLoadingValueReference( 2377 final K key, final int hash, boolean checkTime) { 2378 ReferenceEntry<K, V> e = null; 2379 lock(); 2380 try { 2381 long now = map.ticker.read(); 2382 preWriteCleanup(now); 2383 2384 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2385 int index = hash & (table.length() - 1); 2386 ReferenceEntry<K, V> first = table.get(index); 2387 2388 // Look for an existing entry. 
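scheduleRefresh and refresh above implement the behavior behind CacheBuilder.refreshAfterWrite: once an entry's write time is older than refreshNanos, one caller starts an asynchronous reload (insertLoadingValueReference makes sure only one does), while other readers keep getting the old value until the reload completes. A usage-level sketch against the public API follows; the one-second interval, the key names, and the immediate reload are illustrative choices, not anything prescribed by LocalCache.

```java
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.TimeUnit;

// Usage-level sketch (not part of LocalCache): how the refresh path above is reached.
public class RefreshUsageSketch {
  public static void main(String[] args) throws Exception {
    LoadingCache<String, String> cache =
        CacheBuilder.newBuilder()
            .refreshAfterWrite(1, TimeUnit.SECONDS)
            .build(
                new CacheLoader<String, String>() {
                  @Override
                  public String load(String key) {
                    return "value-for-" + key; // initial load
                  }

                  @Override
                  public ListenableFuture<String> reload(String key, String oldValue) {
                    // Invoked by the refresh machinery; an immediate future keeps the sketch
                    // simple, a real reload would typically run asynchronously.
                    return Futures.immediateFuture(oldValue + "-refreshed");
                  }
                });

    System.out.println(cache.get("k")); // "value-for-k"
    Thread.sleep(1100);                 // entry is now older than refreshNanos
    System.out.println(cache.get("k")); // triggers a refresh; old or new value depending on timing
    System.out.println(cache.get("k")); // later reads see the refreshed value
  }
}
```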
2389 for (e = first; e != null; e = e.getNext()) { 2390 K entryKey = e.getKey(); 2391 if (e.getHash() == hash 2392 && entryKey != null 2393 && map.keyEquivalence.equivalent(key, entryKey)) { 2394 // We found an existing entry. 2395 2396 ValueReference<K, V> valueReference = e.getValueReference(); 2397 if (valueReference.isLoading() 2398 || (checkTime && (now - e.getWriteTime() < map.refreshNanos))) { 2399 // refresh is a no-op if loading is pending 2400 // if checkTime, we want to check *after* acquiring the lock if refresh still needs 2401 // to be scheduled 2402 return null; 2403 } 2404 2405 // continue returning old value while loading 2406 ++modCount; 2407 LoadingValueReference<K, V> loadingValueReference = 2408 new LoadingValueReference<>(valueReference); 2409 e.setValueReference(loadingValueReference); 2410 return loadingValueReference; 2411 } 2412 } 2413 2414 ++modCount; 2415 LoadingValueReference<K, V> loadingValueReference = new LoadingValueReference<>(); 2416 e = newEntry(key, hash, first); 2417 e.setValueReference(loadingValueReference); 2418 table.set(index, e); 2419 return loadingValueReference; 2420 } finally { 2421 unlock(); 2422 postWriteCleanup(); 2423 } 2424 } 2425 2426 // reference queues, for garbage collection cleanup 2427 2428 /** Cleanup collected entries when the lock is available. */ 2429 void tryDrainReferenceQueues() { 2430 if (tryLock()) { 2431 try { 2432 drainReferenceQueues(); 2433 } finally { 2434 unlock(); 2435 } 2436 } 2437 } 2438 2439 /** 2440 * Drain the key and value reference queues, cleaning up internal entries containing garbage 2441 * collected keys or values. 2442 */ 2443 @GuardedBy("this") 2444 void drainReferenceQueues() { 2445 if (map.usesKeyReferences()) { 2446 drainKeyReferenceQueue(); 2447 } 2448 if (map.usesValueReferences()) { 2449 drainValueReferenceQueue(); 2450 } 2451 } 2452 2453 @GuardedBy("this") 2454 void drainKeyReferenceQueue() { 2455 Reference<? extends K> ref; 2456 int i = 0; 2457 while ((ref = keyReferenceQueue.poll()) != null) { 2458 @SuppressWarnings("unchecked") 2459 ReferenceEntry<K, V> entry = (ReferenceEntry<K, V>) ref; 2460 map.reclaimKey(entry); 2461 if (++i == DRAIN_MAX) { 2462 break; 2463 } 2464 } 2465 } 2466 2467 @GuardedBy("this") 2468 void drainValueReferenceQueue() { 2469 Reference<? extends V> ref; 2470 int i = 0; 2471 while ((ref = valueReferenceQueue.poll()) != null) { 2472 @SuppressWarnings("unchecked") 2473 ValueReference<K, V> valueReference = (ValueReference<K, V>) ref; 2474 map.reclaimValue(valueReference); 2475 if (++i == DRAIN_MAX) { 2476 break; 2477 } 2478 } 2479 } 2480 2481 /** Clears all entries from the key and value reference queues. */ 2482 void clearReferenceQueues() { 2483 if (map.usesKeyReferences()) { 2484 clearKeyReferenceQueue(); 2485 } 2486 if (map.usesValueReferences()) { 2487 clearValueReferenceQueue(); 2488 } 2489 } 2490 2491 void clearKeyReferenceQueue() { 2492 while (keyReferenceQueue.poll() != null) {} 2493 } 2494 2495 void clearValueReferenceQueue() { 2496 while (valueReferenceQueue.poll() != null) {} 2497 } 2498 2499 // recency queue, shared by expiration and eviction 2500 2501 /** 2502 * Records the relative order in which this read was performed by adding {@code entry} to the 2503 * recency queue. At write-time, or when the queue is full past the threshold, the queue will be 2504 * drained and the entries therein processed. 2505 * 2506 * <p>Note: locked reads should use {@link #recordLockedRead}. 
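The key and value reference queues drained above are only created when the cache actually holds keys or values through non-strong references (see the Segment constructor), which from the public API means weakKeys(), weakValues(), or softValues() on CacheBuilder. A usage-level sketch, with illustrative key and value types:

```java
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

// Usage-level sketch (not part of LocalCache): with weak keys or soft values, entries whose
// referent has been garbage collected land on the segment's reference queues and are removed
// by the drain methods above during routine maintenance.
public class ReferenceCacheSketch {
  public static void main(String[] args) {
    Cache<Object, byte[]> cache =
        CacheBuilder.newBuilder()
            .weakKeys()   // keys held via WeakReference -> keyReferenceQueue is used
            .softValues() // values held via SoftReference -> valueReferenceQueue is used
            .build();

    Object key = new Object();
    cache.put(key, new byte[1024]);
    System.out.println(cache.getIfPresent(key) != null); // true while key is strongly reachable
  }
}
```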
2507 */ 2508 void recordRead(ReferenceEntry<K, V> entry, long now) { 2509 if (map.recordsAccess()) { 2510 entry.setAccessTime(now); 2511 } 2512 recencyQueue.add(entry); 2513 } 2514 2515 /** 2516 * Updates the eviction metadata that {@code entry} was just read. This currently amounts to 2517 * adding {@code entry} to relevant eviction lists. 2518 * 2519 * <p>Note: this method should only be called under lock, as it directly manipulates the 2520 * eviction queues. Unlocked reads should use {@link #recordRead}. 2521 */ 2522 @GuardedBy("this") 2523 void recordLockedRead(ReferenceEntry<K, V> entry, long now) { 2524 if (map.recordsAccess()) { 2525 entry.setAccessTime(now); 2526 } 2527 accessQueue.add(entry); 2528 } 2529 2530 /** 2531 * Updates eviction metadata that {@code entry} was just written. This currently amounts to 2532 * adding {@code entry} to relevant eviction lists. 2533 */ 2534 @GuardedBy("this") 2535 void recordWrite(ReferenceEntry<K, V> entry, int weight, long now) { 2536 // we are already under lock, so drain the recency queue immediately 2537 drainRecencyQueue(); 2538 totalWeight += weight; 2539 2540 if (map.recordsAccess()) { 2541 entry.setAccessTime(now); 2542 } 2543 if (map.recordsWrite()) { 2544 entry.setWriteTime(now); 2545 } 2546 accessQueue.add(entry); 2547 writeQueue.add(entry); 2548 } 2549 2550 /** 2551 * Drains the recency queue, updating eviction metadata that the entries therein were read in 2552 * the specified relative order. This currently amounts to adding them to relevant eviction 2553 * lists (accounting for the fact that they could have been removed from the map since being 2554 * added to the recency queue). 2555 */ 2556 @GuardedBy("this") 2557 void drainRecencyQueue() { 2558 ReferenceEntry<K, V> e; 2559 while ((e = recencyQueue.poll()) != null) { 2560 // An entry may be in the recency queue despite it being removed from 2561 // the map . This can occur when the entry was concurrently read while a 2562 // writer is removing it from the segment or after a clear has removed 2563 // all of the segment's entries. 2564 if (accessQueue.contains(e)) { 2565 accessQueue.add(e); 2566 } 2567 } 2568 } 2569 2570 // expiration 2571 2572 /** Cleanup expired entries when the lock is available. */ 2573 void tryExpireEntries(long now) { 2574 if (tryLock()) { 2575 try { 2576 expireEntries(now); 2577 } finally { 2578 unlock(); 2579 // don't call postWriteCleanup as we're in a read 2580 } 2581 } 2582 } 2583 2584 @GuardedBy("this") 2585 void expireEntries(long now) { 2586 drainRecencyQueue(); 2587 2588 ReferenceEntry<K, V> e; 2589 while ((e = writeQueue.peek()) != null && map.isExpired(e, now)) { 2590 if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) { 2591 throw new AssertionError(); 2592 } 2593 } 2594 while ((e = accessQueue.peek()) != null && map.isExpired(e, now)) { 2595 if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) { 2596 throw new AssertionError(); 2597 } 2598 } 2599 } 2600 2601 // eviction 2602 2603 @GuardedBy("this") 2604 void enqueueNotification( 2605 @Nullable K key, int hash, @Nullable V value, int weight, RemovalCause cause) { 2606 totalWeight -= weight; 2607 if (cause.wasEvicted()) { 2608 statsCounter.recordEviction(); 2609 } 2610 if (map.removalNotificationQueue != DISCARDING_QUEUE) { 2611 RemovalNotification<K, V> notification = RemovalNotification.create(key, value, cause); 2612 map.removalNotificationQueue.offer(notification); 2613 } 2614 } 2615 2616 /** 2617 * Performs eviction if the segment is over capacity. 
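enqueueNotification above only queues a RemovalNotification and adjusts the weight accounting; the listener itself runs later, outside the segment lock, in processPendingNotifications. Reached from the public API, that is the removalListener hook. The sketch below is a usage-level illustration; the listener body, maximumSize(1), and the sample keys are arbitrary.

```java
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;

// Usage-level sketch (not part of LocalCache): notifications queued by enqueueNotification(...)
// are delivered to this listener after the segment lock has been released.
public class RemovalListenerSketch {
  public static void main(String[] args) {
    RemovalListener<String, String> listener =
        new RemovalListener<String, String>() {
          @Override
          public void onRemoval(RemovalNotification<String, String> notification) {
            System.out.println(notification.getKey() + " removed, cause: " + notification.getCause());
          }
        };

    Cache<String, String> cache =
        CacheBuilder.newBuilder()
            .maximumSize(1) // force size-based eviction for the demo
            .removalListener(listener)
            .build();

    cache.put("a", "1");
    cache.put("b", "2"); // pushes the cache over capacity; typically evicts "a" with cause SIZE
  }
}
```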
Avoids flushing the entire cache if the 2618 * newest entry exceeds the maximum weight all on its own. 2619 * 2620 * @param newest the most recently added entry 2621 */ 2622 @GuardedBy("this") 2623 void evictEntries(ReferenceEntry<K, V> newest) { 2624 if (!map.evictsBySize()) { 2625 return; 2626 } 2627 2628 drainRecencyQueue(); 2629 2630 // If the newest entry by itself is too heavy for the segment, don't bother evicting 2631 // anything else, just that 2632 if (newest.getValueReference().getWeight() > maxSegmentWeight) { 2633 if (!removeEntry(newest, newest.getHash(), RemovalCause.SIZE)) { 2634 throw new AssertionError(); 2635 } 2636 } 2637 2638 while (totalWeight > maxSegmentWeight) { 2639 ReferenceEntry<K, V> e = getNextEvictable(); 2640 if (!removeEntry(e, e.getHash(), RemovalCause.SIZE)) { 2641 throw new AssertionError(); 2642 } 2643 } 2644 } 2645 2646 // TODO(fry): instead implement this with an eviction head 2647 @GuardedBy("this") 2648 ReferenceEntry<K, V> getNextEvictable() { 2649 for (ReferenceEntry<K, V> e : accessQueue) { 2650 int weight = e.getValueReference().getWeight(); 2651 if (weight > 0) { 2652 return e; 2653 } 2654 } 2655 throw new AssertionError(); 2656 } 2657 2658 /** Returns first entry of bin for given hash. */ 2659 ReferenceEntry<K, V> getFirst(int hash) { 2660 // read this volatile field only once 2661 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2662 return table.get(hash & (table.length() - 1)); 2663 } 2664 2665 // Specialized implementations of map methods 2666 2667 @Nullable 2668 ReferenceEntry<K, V> getEntry(Object key, int hash) { 2669 for (ReferenceEntry<K, V> e = getFirst(hash); e != null; e = e.getNext()) { 2670 if (e.getHash() != hash) { 2671 continue; 2672 } 2673 2674 K entryKey = e.getKey(); 2675 if (entryKey == null) { 2676 tryDrainReferenceQueues(); 2677 continue; 2678 } 2679 2680 if (map.keyEquivalence.equivalent(key, entryKey)) { 2681 return e; 2682 } 2683 } 2684 2685 return null; 2686 } 2687 2688 @Nullable 2689 ReferenceEntry<K, V> getLiveEntry(Object key, int hash, long now) { 2690 ReferenceEntry<K, V> e = getEntry(key, hash); 2691 if (e == null) { 2692 return null; 2693 } else if (map.isExpired(e, now)) { 2694 tryExpireEntries(now); 2695 return null; 2696 } 2697 return e; 2698 } 2699 2700 /** 2701 * Gets the value from an entry. Returns null if the entry is invalid, partially-collected, 2702 * loading, or expired. 2703 */ 2704 V getLiveValue(ReferenceEntry<K, V> entry, long now) { 2705 if (entry.getKey() == null) { 2706 tryDrainReferenceQueues(); 2707 return null; 2708 } 2709 V value = entry.getValueReference().get(); 2710 if (value == null) { 2711 tryDrainReferenceQueues(); 2712 return null; 2713 } 2714 2715 if (map.isExpired(entry, now)) { 2716 tryExpireEntries(now); 2717 return null; 2718 } 2719 return value; 2720 } 2721 2722 boolean containsKey(Object key, int hash) { 2723 try { 2724 if (count != 0) { // read-volatile 2725 long now = map.ticker.read(); 2726 ReferenceEntry<K, V> e = getLiveEntry(key, hash, now); 2727 if (e == null) { 2728 return false; 2729 } 2730 return e.getValueReference().get() != null; 2731 } 2732 2733 return false; 2734 } finally { 2735 postReadCleanup(); 2736 } 2737 } 2738 2739 /** 2740 * This method is a convenience for testing. Code should call {@link LocalCache#containsValue} 2741 * directly. 
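evictEntries and getNextEvictable above drive weight-based eviction: once totalWeight exceeds maxSegmentWeight, entries are removed in access order until the segment fits, and an entry heavier than the whole budget is dropped outright. From the public API this corresponds to maximumWeight plus a Weigher; the sketch below is illustrative, with an arbitrary budget of 100 and length-based weights.

```java
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.Weigher;

// Usage-level sketch (not part of LocalCache): maximumWeight + weigher feed the
// maxSegmentWeight / totalWeight accounting consumed by evictEntries(...) above.
public class WeightedCacheSketch {
  public static void main(String[] args) {
    Cache<String, String> cache =
        CacheBuilder.newBuilder()
            .maximumWeight(100) // overall budget, divided across the segments
            .weigher(
                new Weigher<String, String>() {
                  @Override
                  public int weigh(String key, String value) {
                    return value.length(); // weights must be non-negative
                  }
                })
            .build();

    cache.put("small", "xx");
    cache.put("large", new String(new char[90]).replace('\0', 'x'));
    // Writes that push the accumulated weight past the budget trigger RemovalCause.SIZE evictions.
  }
}
```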
2742 */ 2743 @VisibleForTesting 2744 boolean containsValue(Object value) { 2745 try { 2746 if (count != 0) { // read-volatile 2747 long now = map.ticker.read(); 2748 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2749 int length = table.length(); 2750 for (int i = 0; i < length; ++i) { 2751 for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) { 2752 V entryValue = getLiveValue(e, now); 2753 if (entryValue == null) { 2754 continue; 2755 } 2756 if (map.valueEquivalence.equivalent(value, entryValue)) { 2757 return true; 2758 } 2759 } 2760 } 2761 } 2762 2763 return false; 2764 } finally { 2765 postReadCleanup(); 2766 } 2767 } 2768 2769 @Nullable 2770 V put(K key, int hash, V value, boolean onlyIfAbsent) { 2771 lock(); 2772 try { 2773 long now = map.ticker.read(); 2774 preWriteCleanup(now); 2775 2776 int newCount = this.count + 1; 2777 if (newCount > this.threshold) { // ensure capacity 2778 expand(); 2779 newCount = this.count + 1; 2780 } 2781 2782 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2783 int index = hash & (table.length() - 1); 2784 ReferenceEntry<K, V> first = table.get(index); 2785 2786 // Look for an existing entry. 2787 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2788 K entryKey = e.getKey(); 2789 if (e.getHash() == hash 2790 && entryKey != null 2791 && map.keyEquivalence.equivalent(key, entryKey)) { 2792 // We found an existing entry. 2793 2794 ValueReference<K, V> valueReference = e.getValueReference(); 2795 V entryValue = valueReference.get(); 2796 2797 if (entryValue == null) { 2798 ++modCount; 2799 if (valueReference.isActive()) { 2800 enqueueNotification( 2801 key, hash, entryValue, valueReference.getWeight(), RemovalCause.COLLECTED); 2802 setValue(e, key, value, now); 2803 newCount = this.count; // count remains unchanged 2804 } else { 2805 setValue(e, key, value, now); 2806 newCount = this.count + 1; 2807 } 2808 this.count = newCount; // write-volatile 2809 evictEntries(e); 2810 return null; 2811 } else if (onlyIfAbsent) { 2812 // Mimic 2813 // "if (!map.containsKey(key)) ... 2814 // else return map.get(key); 2815 recordLockedRead(e, now); 2816 return entryValue; 2817 } else { 2818 // clobber existing entry, count remains unchanged 2819 ++modCount; 2820 enqueueNotification( 2821 key, hash, entryValue, valueReference.getWeight(), RemovalCause.REPLACED); 2822 setValue(e, key, value, now); 2823 evictEntries(e); 2824 return entryValue; 2825 } 2826 } 2827 } 2828 2829 // Create a new entry. 2830 ++modCount; 2831 ReferenceEntry<K, V> newEntry = newEntry(key, hash, first); 2832 setValue(newEntry, key, value, now); 2833 table.set(index, newEntry); 2834 newCount = this.count + 1; 2835 this.count = newCount; // write-volatile 2836 evictEntries(newEntry); 2837 return null; 2838 } finally { 2839 unlock(); 2840 postWriteCleanup(); 2841 } 2842 } 2843 2844 /** Expands the table if possible. */ 2845 @GuardedBy("this") 2846 void expand() { 2847 AtomicReferenceArray<ReferenceEntry<K, V>> oldTable = table; 2848 int oldCapacity = oldTable.length(); 2849 if (oldCapacity >= MAXIMUM_CAPACITY) { 2850 return; 2851 } 2852 2853 /* 2854 * Reclassify nodes in each list to new Map. Because we are using power-of-two expansion, the 2855 * elements from each bin must either stay at same index, or move with a power of two offset. 2856 * We eliminate unnecessary node creation by catching cases where old nodes can be reused 2857 * because their next fields won't change. 
Statistically, at the default threshold, only about 2858 * one-sixth of them need cloning when a table doubles. The nodes they replace will be garbage 2859 * collectable as soon as they are no longer referenced by any reader thread that may be in 2860 * the midst of traversing table right now. 2861 */ 2862 2863 int newCount = count; 2864 AtomicReferenceArray<ReferenceEntry<K, V>> newTable = newEntryArray(oldCapacity << 1); 2865 threshold = newTable.length() * 3 / 4; 2866 int newMask = newTable.length() - 1; 2867 for (int oldIndex = 0; oldIndex < oldCapacity; ++oldIndex) { 2868 // We need to guarantee that any existing reads of old Map can 2869 // proceed. So we cannot yet null out each bin. 2870 ReferenceEntry<K, V> head = oldTable.get(oldIndex); 2871 2872 if (head != null) { 2873 ReferenceEntry<K, V> next = head.getNext(); 2874 int headIndex = head.getHash() & newMask; 2875 2876 // Single node on list 2877 if (next == null) { 2878 newTable.set(headIndex, head); 2879 } else { 2880 // Reuse the consecutive sequence of nodes with the same target 2881 // index from the end of the list. tail points to the first 2882 // entry in the reusable list. 2883 ReferenceEntry<K, V> tail = head; 2884 int tailIndex = headIndex; 2885 for (ReferenceEntry<K, V> e = next; e != null; e = e.getNext()) { 2886 int newIndex = e.getHash() & newMask; 2887 if (newIndex != tailIndex) { 2888 // The index changed. We'll need to copy the previous entry. 2889 tailIndex = newIndex; 2890 tail = e; 2891 } 2892 } 2893 newTable.set(tailIndex, tail); 2894 2895 // Clone nodes leading up to the tail. 2896 for (ReferenceEntry<K, V> e = head; e != tail; e = e.getNext()) { 2897 int newIndex = e.getHash() & newMask; 2898 ReferenceEntry<K, V> newNext = newTable.get(newIndex); 2899 ReferenceEntry<K, V> newFirst = copyEntry(e, newNext); 2900 if (newFirst != null) { 2901 newTable.set(newIndex, newFirst); 2902 } else { 2903 removeCollectedEntry(e); 2904 newCount--; 2905 } 2906 } 2907 } 2908 } 2909 } 2910 table = newTable; 2911 this.count = newCount; 2912 } 2913 2914 boolean replace(K key, int hash, V oldValue, V newValue) { 2915 lock(); 2916 try { 2917 long now = map.ticker.read(); 2918 preWriteCleanup(now); 2919 2920 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2921 int index = hash & (table.length() - 1); 2922 ReferenceEntry<K, V> first = table.get(index); 2923 2924 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2925 K entryKey = e.getKey(); 2926 if (e.getHash() == hash 2927 && entryKey != null 2928 && map.keyEquivalence.equivalent(key, entryKey)) { 2929 ValueReference<K, V> valueReference = e.getValueReference(); 2930 V entryValue = valueReference.get(); 2931 if (entryValue == null) { 2932 if (valueReference.isActive()) { 2933 // If the value disappeared, this entry is partially collected. 
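The reuse trick described in the expansion comment above rests on a simple property of power-of-two tables: doubling the capacity adds exactly one bit to the index mask, so every entry either keeps its old bucket index or moves by exactly oldCapacity. A small self-contained check of that arithmetic, with arbitrary capacities and random hashes:

```java
import java.util.Random;

// Demonstrates the index invariant behind expand(): when a power-of-two table doubles,
// each hash maps either to its old index or to oldIndex + oldCapacity.
public class ExpandIndexDemo {
  public static void main(String[] args) {
    int oldCapacity = 16;
    int newCapacity = oldCapacity << 1;
    Random random = new Random(42);
    for (int i = 0; i < 1_000_000; i++) {
      int hash = random.nextInt();
      int oldIndex = hash & (oldCapacity - 1);
      int newIndex = hash & (newCapacity - 1);
      if (newIndex != oldIndex && newIndex != oldIndex + oldCapacity) {
        throw new AssertionError("unexpected index for hash " + hash);
      }
    }
    System.out.println("every hash stayed at oldIndex or moved to oldIndex + " + oldCapacity);
  }
}
```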
2934 int newCount = this.count - 1; 2935 ++modCount; 2936 ReferenceEntry<K, V> newFirst = 2937 removeValueFromChain( 2938 first, 2939 e, 2940 entryKey, 2941 hash, 2942 entryValue, 2943 valueReference, 2944 RemovalCause.COLLECTED); 2945 newCount = this.count - 1; 2946 table.set(index, newFirst); 2947 this.count = newCount; // write-volatile 2948 } 2949 return false; 2950 } 2951 2952 if (map.valueEquivalence.equivalent(oldValue, entryValue)) { 2953 ++modCount; 2954 enqueueNotification( 2955 key, hash, entryValue, valueReference.getWeight(), RemovalCause.REPLACED); 2956 setValue(e, key, newValue, now); 2957 evictEntries(e); 2958 return true; 2959 } else { 2960 // Mimic 2961 // "if (map.containsKey(key) && map.get(key).equals(oldValue))..." 2962 recordLockedRead(e, now); 2963 return false; 2964 } 2965 } 2966 } 2967 2968 return false; 2969 } finally { 2970 unlock(); 2971 postWriteCleanup(); 2972 } 2973 } 2974 2975 @Nullable 2976 V replace(K key, int hash, V newValue) { 2977 lock(); 2978 try { 2979 long now = map.ticker.read(); 2980 preWriteCleanup(now); 2981 2982 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2983 int index = hash & (table.length() - 1); 2984 ReferenceEntry<K, V> first = table.get(index); 2985 2986 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2987 K entryKey = e.getKey(); 2988 if (e.getHash() == hash 2989 && entryKey != null 2990 && map.keyEquivalence.equivalent(key, entryKey)) { 2991 ValueReference<K, V> valueReference = e.getValueReference(); 2992 V entryValue = valueReference.get(); 2993 if (entryValue == null) { 2994 if (valueReference.isActive()) { 2995 // If the value disappeared, this entry is partially collected. 2996 int newCount = this.count - 1; 2997 ++modCount; 2998 ReferenceEntry<K, V> newFirst = 2999 removeValueFromChain( 3000 first, 3001 e, 3002 entryKey, 3003 hash, 3004 entryValue, 3005 valueReference, 3006 RemovalCause.COLLECTED); 3007 newCount = this.count - 1; 3008 table.set(index, newFirst); 3009 this.count = newCount; // write-volatile 3010 } 3011 return null; 3012 } 3013 3014 ++modCount; 3015 enqueueNotification( 3016 key, hash, entryValue, valueReference.getWeight(), RemovalCause.REPLACED); 3017 setValue(e, key, newValue, now); 3018 evictEntries(e); 3019 return entryValue; 3020 } 3021 } 3022 3023 return null; 3024 } finally { 3025 unlock(); 3026 postWriteCleanup(); 3027 } 3028 } 3029 3030 @Nullable 3031 V remove(Object key, int hash) { 3032 lock(); 3033 try { 3034 long now = map.ticker.read(); 3035 preWriteCleanup(now); 3036 3037 int newCount = this.count - 1; 3038 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3039 int index = hash & (table.length() - 1); 3040 ReferenceEntry<K, V> first = table.get(index); 3041 3042 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3043 K entryKey = e.getKey(); 3044 if (e.getHash() == hash 3045 && entryKey != null 3046 && map.keyEquivalence.equivalent(key, entryKey)) { 3047 ValueReference<K, V> valueReference = e.getValueReference(); 3048 V entryValue = valueReference.get(); 3049 3050 RemovalCause cause; 3051 if (entryValue != null) { 3052 cause = RemovalCause.EXPLICIT; 3053 } else if (valueReference.isActive()) { 3054 cause = RemovalCause.COLLECTED; 3055 } else { 3056 // currently loading 3057 return null; 3058 } 3059 3060 ++modCount; 3061 ReferenceEntry<K, V> newFirst = 3062 removeValueFromChain(first, e, entryKey, hash, entryValue, valueReference, cause); 3063 newCount = this.count - 1; 3064 table.set(index, newFirst); 3065 this.count = newCount; 
// write-volatile 3066 return entryValue; 3067 } 3068 } 3069 3070 return null; 3071 } finally { 3072 unlock(); 3073 postWriteCleanup(); 3074 } 3075 } 3076 3077 boolean remove(Object key, int hash, Object value) { 3078 lock(); 3079 try { 3080 long now = map.ticker.read(); 3081 preWriteCleanup(now); 3082 3083 int newCount = this.count - 1; 3084 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3085 int index = hash & (table.length() - 1); 3086 ReferenceEntry<K, V> first = table.get(index); 3087 3088 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3089 K entryKey = e.getKey(); 3090 if (e.getHash() == hash 3091 && entryKey != null 3092 && map.keyEquivalence.equivalent(key, entryKey)) { 3093 ValueReference<K, V> valueReference = e.getValueReference(); 3094 V entryValue = valueReference.get(); 3095 3096 RemovalCause cause; 3097 if (map.valueEquivalence.equivalent(value, entryValue)) { 3098 cause = RemovalCause.EXPLICIT; 3099 } else if (entryValue == null && valueReference.isActive()) { 3100 cause = RemovalCause.COLLECTED; 3101 } else { 3102 // currently loading 3103 return false; 3104 } 3105 3106 ++modCount; 3107 ReferenceEntry<K, V> newFirst = 3108 removeValueFromChain(first, e, entryKey, hash, entryValue, valueReference, cause); 3109 newCount = this.count - 1; 3110 table.set(index, newFirst); 3111 this.count = newCount; // write-volatile 3112 return (cause == RemovalCause.EXPLICIT); 3113 } 3114 } 3115 3116 return false; 3117 } finally { 3118 unlock(); 3119 postWriteCleanup(); 3120 } 3121 } 3122 3123 boolean storeLoadedValue( 3124 K key, int hash, LoadingValueReference<K, V> oldValueReference, V newValue) { 3125 lock(); 3126 try { 3127 long now = map.ticker.read(); 3128 preWriteCleanup(now); 3129 3130 int newCount = this.count + 1; 3131 if (newCount > this.threshold) { // ensure capacity 3132 expand(); 3133 newCount = this.count + 1; 3134 } 3135 3136 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3137 int index = hash & (table.length() - 1); 3138 ReferenceEntry<K, V> first = table.get(index); 3139 3140 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3141 K entryKey = e.getKey(); 3142 if (e.getHash() == hash 3143 && entryKey != null 3144 && map.keyEquivalence.equivalent(key, entryKey)) { 3145 ValueReference<K, V> valueReference = e.getValueReference(); 3146 V entryValue = valueReference.get(); 3147 // replace the old LoadingValueReference if it's live, otherwise 3148 // perform a putIfAbsent 3149 if (oldValueReference == valueReference 3150 || (entryValue == null && valueReference != UNSET)) { 3151 ++modCount; 3152 if (oldValueReference.isActive()) { 3153 RemovalCause cause = 3154 (entryValue == null) ? 
RemovalCause.COLLECTED : RemovalCause.REPLACED; 3155 enqueueNotification(key, hash, entryValue, oldValueReference.getWeight(), cause); 3156 newCount--; 3157 } 3158 setValue(e, key, newValue, now); 3159 this.count = newCount; // write-volatile 3160 evictEntries(e); 3161 return true; 3162 } 3163 3164 // the loaded value was already clobbered 3165 enqueueNotification(key, hash, newValue, 0, RemovalCause.REPLACED); 3166 return false; 3167 } 3168 } 3169 3170 ++modCount; 3171 ReferenceEntry<K, V> newEntry = newEntry(key, hash, first); 3172 setValue(newEntry, key, newValue, now); 3173 table.set(index, newEntry); 3174 this.count = newCount; // write-volatile 3175 evictEntries(newEntry); 3176 return true; 3177 } finally { 3178 unlock(); 3179 postWriteCleanup(); 3180 } 3181 } 3182 3183 void clear() { 3184 if (count != 0) { // read-volatile 3185 lock(); 3186 try { 3187 long now = map.ticker.read(); 3188 preWriteCleanup(now); 3189 3190 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3191 for (int i = 0; i < table.length(); ++i) { 3192 for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) { 3193 // Loading references aren't actually in the map yet. 3194 if (e.getValueReference().isActive()) { 3195 K key = e.getKey(); 3196 V value = e.getValueReference().get(); 3197 RemovalCause cause = 3198 (key == null || value == null) ? RemovalCause.COLLECTED : RemovalCause.EXPLICIT; 3199 enqueueNotification( 3200 key, e.getHash(), value, e.getValueReference().getWeight(), cause); 3201 } 3202 } 3203 } 3204 for (int i = 0; i < table.length(); ++i) { 3205 table.set(i, null); 3206 } 3207 clearReferenceQueues(); 3208 writeQueue.clear(); 3209 accessQueue.clear(); 3210 readCount.set(0); 3211 3212 ++modCount; 3213 count = 0; // write-volatile 3214 } finally { 3215 unlock(); 3216 postWriteCleanup(); 3217 } 3218 } 3219 } 3220 3221 @GuardedBy("this") 3222 @Nullable 3223 ReferenceEntry<K, V> removeValueFromChain( 3224 ReferenceEntry<K, V> first, 3225 ReferenceEntry<K, V> entry, 3226 @Nullable K key, 3227 int hash, 3228 V value, 3229 ValueReference<K, V> valueReference, 3230 RemovalCause cause) { 3231 enqueueNotification(key, hash, value, valueReference.getWeight(), cause); 3232 writeQueue.remove(entry); 3233 accessQueue.remove(entry); 3234 3235 if (valueReference.isLoading()) { 3236 valueReference.notifyNewValue(null); 3237 return first; 3238 } else { 3239 return removeEntryFromChain(first, entry); 3240 } 3241 } 3242 3243 @GuardedBy("this") 3244 @Nullable 3245 ReferenceEntry<K, V> removeEntryFromChain( 3246 ReferenceEntry<K, V> first, ReferenceEntry<K, V> entry) { 3247 int newCount = count; 3248 ReferenceEntry<K, V> newFirst = entry.getNext(); 3249 for (ReferenceEntry<K, V> e = first; e != entry; e = e.getNext()) { 3250 ReferenceEntry<K, V> next = copyEntry(e, newFirst); 3251 if (next != null) { 3252 newFirst = next; 3253 } else { 3254 removeCollectedEntry(e); 3255 newCount--; 3256 } 3257 } 3258 this.count = newCount; 3259 return newFirst; 3260 } 3261 3262 @GuardedBy("this") 3263 void removeCollectedEntry(ReferenceEntry<K, V> entry) { 3264 enqueueNotification( 3265 entry.getKey(), 3266 entry.getHash(), 3267 entry.getValueReference().get(), 3268 entry.getValueReference().getWeight(), 3269 RemovalCause.COLLECTED); 3270 writeQueue.remove(entry); 3271 accessQueue.remove(entry); 3272 } 3273 3274 /** Removes an entry whose key has been garbage collected. 
*/ 3275 boolean reclaimKey(ReferenceEntry<K, V> entry, int hash) { 3276 lock(); 3277 try { 3278 int newCount = count - 1; 3279 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3280 int index = hash & (table.length() - 1); 3281 ReferenceEntry<K, V> first = table.get(index); 3282 3283 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3284 if (e == entry) { 3285 ++modCount; 3286 ReferenceEntry<K, V> newFirst = 3287 removeValueFromChain( 3288 first, 3289 e, 3290 e.getKey(), 3291 hash, 3292 e.getValueReference().get(), 3293 e.getValueReference(), 3294 RemovalCause.COLLECTED); 3295 newCount = this.count - 1; 3296 table.set(index, newFirst); 3297 this.count = newCount; // write-volatile 3298 return true; 3299 } 3300 } 3301 3302 return false; 3303 } finally { 3304 unlock(); 3305 postWriteCleanup(); 3306 } 3307 } 3308 3309 /** Removes an entry whose value has been garbage collected. */ 3310 boolean reclaimValue(K key, int hash, ValueReference<K, V> valueReference) { 3311 lock(); 3312 try { 3313 int newCount = this.count - 1; 3314 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3315 int index = hash & (table.length() - 1); 3316 ReferenceEntry<K, V> first = table.get(index); 3317 3318 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3319 K entryKey = e.getKey(); 3320 if (e.getHash() == hash 3321 && entryKey != null 3322 && map.keyEquivalence.equivalent(key, entryKey)) { 3323 ValueReference<K, V> v = e.getValueReference(); 3324 if (v == valueReference) { 3325 ++modCount; 3326 ReferenceEntry<K, V> newFirst = 3327 removeValueFromChain( 3328 first, 3329 e, 3330 entryKey, 3331 hash, 3332 valueReference.get(), 3333 valueReference, 3334 RemovalCause.COLLECTED); 3335 newCount = this.count - 1; 3336 table.set(index, newFirst); 3337 this.count = newCount; // write-volatile 3338 return true; 3339 } 3340 return false; 3341 } 3342 } 3343 3344 return false; 3345 } finally { 3346 unlock(); 3347 if (!isHeldByCurrentThread()) { // don't cleanup inside of put 3348 postWriteCleanup(); 3349 } 3350 } 3351 } 3352 3353 boolean removeLoadingValue(K key, int hash, LoadingValueReference<K, V> valueReference) { 3354 lock(); 3355 try { 3356 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3357 int index = hash & (table.length() - 1); 3358 ReferenceEntry<K, V> first = table.get(index); 3359 3360 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3361 K entryKey = e.getKey(); 3362 if (e.getHash() == hash 3363 && entryKey != null 3364 && map.keyEquivalence.equivalent(key, entryKey)) { 3365 ValueReference<K, V> v = e.getValueReference(); 3366 if (v == valueReference) { 3367 if (valueReference.isActive()) { 3368 e.setValueReference(valueReference.getOldValue()); 3369 } else { 3370 ReferenceEntry<K, V> newFirst = removeEntryFromChain(first, e); 3371 table.set(index, newFirst); 3372 } 3373 return true; 3374 } 3375 return false; 3376 } 3377 } 3378 3379 return false; 3380 } finally { 3381 unlock(); 3382 postWriteCleanup(); 3383 } 3384 } 3385 3386 @VisibleForTesting 3387 @GuardedBy("this") 3388 boolean removeEntry(ReferenceEntry<K, V> entry, int hash, RemovalCause cause) { 3389 int newCount = this.count - 1; 3390 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3391 int index = hash & (table.length() - 1); 3392 ReferenceEntry<K, V> first = table.get(index); 3393 3394 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3395 if (e == entry) { 3396 ++modCount; 3397 ReferenceEntry<K, V> newFirst = 3398 
removeValueFromChain( 3399 first, 3400 e, 3401 e.getKey(), 3402 hash, 3403 e.getValueReference().get(), 3404 e.getValueReference(), 3405 cause); 3406 newCount = this.count - 1; 3407 table.set(index, newFirst); 3408 this.count = newCount; // write-volatile 3409 return true; 3410 } 3411 } 3412 3413 return false; 3414 } 3415 3416 /** 3417 * Performs routine cleanup following a read. Normally cleanup happens during writes. If cleanup 3418 * is not observed after a sufficient number of reads, try cleaning up from the read thread. 3419 */ 3420 void postReadCleanup() { 3421 if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) { 3422 cleanUp(); 3423 } 3424 } 3425 3426 /** 3427 * Performs routine cleanup prior to executing a write. This should be called every time a write 3428 * thread acquires the segment lock, immediately after acquiring the lock. 3429 * 3430 * <p>Post-condition: expireEntries has been run. 3431 */ 3432 @GuardedBy("this") 3433 void preWriteCleanup(long now) { 3434 runLockedCleanup(now); 3435 } 3436 3437 /** Performs routine cleanup following a write. */ 3438 void postWriteCleanup() { 3439 runUnlockedCleanup(); 3440 } 3441 3442 void cleanUp() { 3443 long now = map.ticker.read(); 3444 runLockedCleanup(now); 3445 runUnlockedCleanup(); 3446 } 3447 3448 void runLockedCleanup(long now) { 3449 if (tryLock()) { 3450 try { 3451 drainReferenceQueues(); 3452 expireEntries(now); // calls drainRecencyQueue 3453 readCount.set(0); 3454 } finally { 3455 unlock(); 3456 } 3457 } 3458 } 3459 3460 void runUnlockedCleanup() { 3461 // locked cleanup may generate notifications we can send unlocked 3462 if (!isHeldByCurrentThread()) { 3463 map.processPendingNotifications(); 3464 } 3465 } 3466 } 3467 3468 static class LoadingValueReference<K, V> implements ValueReference<K, V> { 3469 volatile ValueReference<K, V> oldValue; 3470 3471 // TODO(fry): rename get, then extend AbstractFuture instead of containing SettableFuture 3472 final SettableFuture<V> futureValue = SettableFuture.create(); 3473 final Stopwatch stopwatch = Stopwatch.createUnstarted(); 3474 3475 public LoadingValueReference() { 3476 this(null); 3477 } 3478 3479 public LoadingValueReference(ValueReference<K, V> oldValue) { 3480 this.oldValue = (oldValue == null) ? LocalCache.<K, V>unset() : oldValue; 3481 } 3482 3483 @Override 3484 public boolean isLoading() { 3485 return true; 3486 } 3487 3488 @Override 3489 public boolean isActive() { 3490 return oldValue.isActive(); 3491 } 3492 3493 @Override 3494 public int getWeight() { 3495 return oldValue.getWeight(); 3496 } 3497 3498 public boolean set(@Nullable V newValue) { 3499 return futureValue.set(newValue); 3500 } 3501 3502 public boolean setException(Throwable t) { 3503 return futureValue.setException(t); 3504 } 3505 3506 private ListenableFuture<V> fullyFailedFuture(Throwable t) { 3507 return Futures.immediateFailedFuture(t); 3508 } 3509 3510 @Override 3511 public void notifyNewValue(@Nullable V newValue) { 3512 if (newValue != null) { 3513 // The pending load was clobbered by a manual write. 3514 // Unblock all pending gets, and have them return the new value. 3515 set(newValue); 3516 } else { 3517 // The pending load was removed. Delay notifications until loading completes. 3518 oldValue = unset(); 3519 } 3520 3521 // TODO(fry): could also cancel loading if we had a handle on its future 3522 } 3523 3524 public ListenableFuture<V> loadFuture(K key, CacheLoader<? 
super K, V> loader) { 3525 try { 3526 stopwatch.start(); 3527 V previousValue = oldValue.get(); 3528 if (previousValue == null) { 3529 V newValue = loader.load(key); 3530 return set(newValue) ? futureValue : Futures.immediateFuture(newValue); 3531 } 3532 ListenableFuture<V> newValue = loader.reload(key, previousValue); 3533 if (newValue == null) { 3534 return Futures.immediateFuture(null); 3535 } 3536 // To avoid a race, make sure the refreshed value is set into loadingValueReference 3537 // *before* returning newValue from the cache query. 3538 return transform( 3539 newValue, 3540 new com.google.common.base.Function<V, V>() { 3541 @Override 3542 public V apply(V newValue) { 3543 LoadingValueReference.this.set(newValue); 3544 return newValue; 3545 } 3546 }, 3547 directExecutor()); 3548 } catch (Throwable t) { 3549 ListenableFuture<V> result = setException(t) ? futureValue : fullyFailedFuture(t); 3550 if (t instanceof InterruptedException) { 3551 Thread.currentThread().interrupt(); 3552 } 3553 return result; 3554 } 3555 } 3556 3557 public V compute(K key, BiFunction<? super K, ? super V, ? extends V> function) { 3558 stopwatch.start(); 3559 V previousValue; 3560 try { 3561 previousValue = oldValue.waitForValue(); 3562 } catch (ExecutionException e) { 3563 previousValue = null; 3564 } 3565 V newValue; 3566 try { 3567 newValue = function.apply(key, previousValue); 3568 } catch (Throwable th) { 3569 this.setException(th); 3570 throw th; 3571 } 3572 this.set(newValue); 3573 return newValue; 3574 } 3575 3576 public long elapsedNanos() { 3577 return stopwatch.elapsed(NANOSECONDS); 3578 } 3579 3580 @Override 3581 public V waitForValue() throws ExecutionException { 3582 return getUninterruptibly(futureValue); 3583 } 3584 3585 @Override 3586 public V get() { 3587 return oldValue.get(); 3588 } 3589 3590 public ValueReference<K, V> getOldValue() { 3591 return oldValue; 3592 } 3593 3594 @Override 3595 public ReferenceEntry<K, V> getEntry() { 3596 return null; 3597 } 3598 3599 @Override 3600 public ValueReference<K, V> copyFor( 3601 ReferenceQueue<V> queue, @Nullable V value, ReferenceEntry<K, V> entry) { 3602 return this; 3603 } 3604 } 3605 3606 static class ComputingValueReference<K, V> extends LoadingValueReference<K, V> { 3607 ComputingValueReference(ValueReference<K, V> oldValue) { 3608 super(oldValue); 3609 } 3610 3611 @Override 3612 public boolean isLoading() { 3613 return false; 3614 } 3615 } 3616 3617 // Queues 3618 3619 /** 3620 * A custom queue for managing eviction order. Note that this is tightly integrated with {@code 3621 * ReferenceEntry}, upon which it relies to perform its linking. 3622 * 3623 * <p>Note that this entire implementation makes the assumption that all elements which are in the 3624 * map are also in this queue, and that all elements not in the queue are not in the map. 3625 * 3626 * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle of 3627 * the queue as part of copyWriteEntry, and (2) the contains method is highly optimized for the 3628 * current model. 
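LoadingValueReference above is what lets concurrent readers of an absent key share a single load: the first caller runs the loader while the others block on the SettableFuture inside waitForValue. Observed through the public API, simultaneous get() calls for the same key invoke the loader once. The sketch below is a usage-level illustration; the thread count, the sleep, and the counter are arbitrary test scaffolding.

```java
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Usage-level sketch (not part of LocalCache): concurrent gets of the same absent key are
// coalesced; one thread runs the loader, the rest wait on the loading reference's future.
public class CoalescedLoadSketch {
  public static void main(String[] args) throws Exception {
    AtomicInteger loads = new AtomicInteger();
    LoadingCache<String, String> cache =
        CacheBuilder.newBuilder()
            .build(
                new CacheLoader<String, String>() {
                  @Override
                  public String load(String key) throws Exception {
                    loads.incrementAndGet();
                    Thread.sleep(100); // simulate a slow load
                    return "value-for-" + key;
                  }
                });

    ExecutorService pool = Executors.newFixedThreadPool(4);
    for (int i = 0; i < 4; i++) {
      pool.submit(() -> cache.getUnchecked("k"));
    }
    pool.shutdown();
    pool.awaitTermination(5, TimeUnit.SECONDS);
    System.out.println("loader invocations: " + loads.get()); // expected: 1
  }
}
```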
3629 */ 3630 static final class WriteQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> { 3631 final ReferenceEntry<K, V> head = 3632 new AbstractReferenceEntry<K, V>() { 3633 3634 @Override 3635 public long getWriteTime() { 3636 return Long.MAX_VALUE; 3637 } 3638 3639 @Override 3640 public void setWriteTime(long time) {} 3641 3642 @Weak ReferenceEntry<K, V> nextWrite = this; 3643 3644 @Override 3645 public ReferenceEntry<K, V> getNextInWriteQueue() { 3646 return nextWrite; 3647 } 3648 3649 @Override 3650 public void setNextInWriteQueue(ReferenceEntry<K, V> next) { 3651 this.nextWrite = next; 3652 } 3653 3654 @Weak ReferenceEntry<K, V> previousWrite = this; 3655 3656 @Override 3657 public ReferenceEntry<K, V> getPreviousInWriteQueue() { 3658 return previousWrite; 3659 } 3660 3661 @Override 3662 public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) { 3663 this.previousWrite = previous; 3664 } 3665 }; 3666 3667 // implements Queue 3668 3669 @Override 3670 public boolean offer(ReferenceEntry<K, V> entry) { 3671 // unlink 3672 connectWriteOrder(entry.getPreviousInWriteQueue(), entry.getNextInWriteQueue()); 3673 3674 // add to tail 3675 connectWriteOrder(head.getPreviousInWriteQueue(), entry); 3676 connectWriteOrder(entry, head); 3677 3678 return true; 3679 } 3680 3681 @Override 3682 public ReferenceEntry<K, V> peek() { 3683 ReferenceEntry<K, V> next = head.getNextInWriteQueue(); 3684 return (next == head) ? null : next; 3685 } 3686 3687 @Override 3688 public ReferenceEntry<K, V> poll() { 3689 ReferenceEntry<K, V> next = head.getNextInWriteQueue(); 3690 if (next == head) { 3691 return null; 3692 } 3693 3694 remove(next); 3695 return next; 3696 } 3697 3698 @Override 3699 @SuppressWarnings("unchecked") 3700 public boolean remove(Object o) { 3701 ReferenceEntry<K, V> e = (ReferenceEntry<K, V>) o; 3702 ReferenceEntry<K, V> previous = e.getPreviousInWriteQueue(); 3703 ReferenceEntry<K, V> next = e.getNextInWriteQueue(); 3704 connectWriteOrder(previous, next); 3705 nullifyWriteOrder(e); 3706 3707 return next != NullEntry.INSTANCE; 3708 } 3709 3710 @Override 3711 @SuppressWarnings("unchecked") 3712 public boolean contains(Object o) { 3713 ReferenceEntry<K, V> e = (ReferenceEntry<K, V>) o; 3714 return e.getNextInWriteQueue() != NullEntry.INSTANCE; 3715 } 3716 3717 @Override 3718 public boolean isEmpty() { 3719 return head.getNextInWriteQueue() == head; 3720 } 3721 3722 @Override 3723 public int size() { 3724 int size = 0; 3725 for (ReferenceEntry<K, V> e = head.getNextInWriteQueue(); 3726 e != head; 3727 e = e.getNextInWriteQueue()) { 3728 size++; 3729 } 3730 return size; 3731 } 3732 3733 @Override 3734 public void clear() { 3735 ReferenceEntry<K, V> e = head.getNextInWriteQueue(); 3736 while (e != head) { 3737 ReferenceEntry<K, V> next = e.getNextInWriteQueue(); 3738 nullifyWriteOrder(e); 3739 e = next; 3740 } 3741 3742 head.setNextInWriteQueue(head); 3743 head.setPreviousInWriteQueue(head); 3744 } 3745 3746 @Override 3747 public Iterator<ReferenceEntry<K, V>> iterator() { 3748 return new AbstractSequentialIterator<ReferenceEntry<K, V>>(peek()) { 3749 @Override 3750 protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) { 3751 ReferenceEntry<K, V> next = previous.getNextInWriteQueue(); 3752 return (next == head) ? null : next; 3753 } 3754 }; 3755 } 3756 } 3757 3758 /** 3759 * A custom queue for managing access order. Note that this is tightly integrated with {@code 3760 * ReferenceEntry}, upon which it relies to perform its linking. 
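WriteQueue above (and AccessQueue just below) is an intrusive doubly linked list with a circular sentinel head: the links live on the entries themselves, so offer, remove, and contains are O(1), and re-offering an element simply moves it to the tail. The sketch below reproduces that shape with a plain, hypothetical Node type; the real queues keep their links on ReferenceEntry.

```java
// Minimal sketch of an intrusive FIFO queue with a circular sentinel head, mirroring the
// structure of WriteQueue/AccessQueue: links are fields of the node, so moving or removing
// a node never requires a search.
public class IntrusiveQueueSketch {
  static final class Node {
    final String name;
    Node prev = this; // a fresh or removed node is self-linked
    Node next = this;

    Node(String name) {
      this.name = name;
    }
  }

  final Node head = new Node("head"); // sentinel; head.next is the oldest element

  void offer(Node node) {
    // unlink from wherever it currently is (a no-op for a self-linked node)
    node.prev.next = node.next;
    node.next.prev = node.prev;
    // splice in just before the sentinel, i.e. at the tail
    node.prev = head.prev;
    node.next = head;
    head.prev.next = node;
    head.prev = node;
  }

  Node poll() {
    Node first = head.next;
    if (first == head) {
      return null; // empty
    }
    first.prev.next = first.next;
    first.next.prev = first.prev;
    first.prev = first; // leave the removed node self-linked
    first.next = first;
    return first;
  }

  public static void main(String[] args) {
    IntrusiveQueueSketch queue = new IntrusiveQueueSketch();
    Node a = new Node("a");
    Node b = new Node("b");
    queue.offer(a);
    queue.offer(b);
    queue.offer(a); // re-offering moves "a" to the tail in O(1)
    System.out.println(queue.poll().name); // b
    System.out.println(queue.poll().name); // a
    System.out.println(queue.poll());      // null
  }
}
```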
3761 * 3762 * <p>Note that this entire implementation makes the assumption that all elements which are in the 3763 * map are also in this queue, and that all elements not in the queue are not in the map. 3764 * 3765 * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle of 3766 * the queue as part of copyWriteEntry, and (2) the contains method is highly optimized for the 3767 * current model. 3768 */ 3769 static final class AccessQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> { 3770 final ReferenceEntry<K, V> head = 3771 new AbstractReferenceEntry<K, V>() { 3772 3773 @Override 3774 public long getAccessTime() { 3775 return Long.MAX_VALUE; 3776 } 3777 3778 @Override 3779 public void setAccessTime(long time) {} 3780 3781 @Weak ReferenceEntry<K, V> nextAccess = this; 3782 3783 @Override 3784 public ReferenceEntry<K, V> getNextInAccessQueue() { 3785 return nextAccess; 3786 } 3787 3788 @Override 3789 public void setNextInAccessQueue(ReferenceEntry<K, V> next) { 3790 this.nextAccess = next; 3791 } 3792 3793 @Weak ReferenceEntry<K, V> previousAccess = this; 3794 3795 @Override 3796 public ReferenceEntry<K, V> getPreviousInAccessQueue() { 3797 return previousAccess; 3798 } 3799 3800 @Override 3801 public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) { 3802 this.previousAccess = previous; 3803 } 3804 }; 3805 3806 // implements Queue 3807 3808 @Override 3809 public boolean offer(ReferenceEntry<K, V> entry) { 3810 // unlink 3811 connectAccessOrder(entry.getPreviousInAccessQueue(), entry.getNextInAccessQueue()); 3812 3813 // add to tail 3814 connectAccessOrder(head.getPreviousInAccessQueue(), entry); 3815 connectAccessOrder(entry, head); 3816 3817 return true; 3818 } 3819 3820 @Override 3821 public ReferenceEntry<K, V> peek() { 3822 ReferenceEntry<K, V> next = head.getNextInAccessQueue(); 3823 return (next == head) ? 
null : next; 3824 } 3825 3826 @Override 3827 public ReferenceEntry<K, V> poll() { 3828 ReferenceEntry<K, V> next = head.getNextInAccessQueue(); 3829 if (next == head) { 3830 return null; 3831 } 3832 3833 remove(next); 3834 return next; 3835 } 3836 3837 @Override 3838 @SuppressWarnings("unchecked") 3839 public boolean remove(Object o) { 3840 ReferenceEntry<K, V> e = (ReferenceEntry<K, V>) o; 3841 ReferenceEntry<K, V> previous = e.getPreviousInAccessQueue(); 3842 ReferenceEntry<K, V> next = e.getNextInAccessQueue(); 3843 connectAccessOrder(previous, next); 3844 nullifyAccessOrder(e); 3845 3846 return next != NullEntry.INSTANCE; 3847 } 3848 3849 @Override 3850 @SuppressWarnings("unchecked") 3851 public boolean contains(Object o) { 3852 ReferenceEntry<K, V> e = (ReferenceEntry<K, V>) o; 3853 return e.getNextInAccessQueue() != NullEntry.INSTANCE; 3854 } 3855 3856 @Override 3857 public boolean isEmpty() { 3858 return head.getNextInAccessQueue() == head; 3859 } 3860 3861 @Override 3862 public int size() { 3863 int size = 0; 3864 for (ReferenceEntry<K, V> e = head.getNextInAccessQueue(); 3865 e != head; 3866 e = e.getNextInAccessQueue()) { 3867 size++; 3868 } 3869 return size; 3870 } 3871 3872 @Override 3873 public void clear() { 3874 ReferenceEntry<K, V> e = head.getNextInAccessQueue(); 3875 while (e != head) { 3876 ReferenceEntry<K, V> next = e.getNextInAccessQueue(); 3877 nullifyAccessOrder(e); 3878 e = next; 3879 } 3880 3881 head.setNextInAccessQueue(head); 3882 head.setPreviousInAccessQueue(head); 3883 } 3884 3885 @Override 3886 public Iterator<ReferenceEntry<K, V>> iterator() { 3887 return new AbstractSequentialIterator<ReferenceEntry<K, V>>(peek()) { 3888 @Override 3889 protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) { 3890 ReferenceEntry<K, V> next = previous.getNextInAccessQueue(); 3891 return (next == head) ? null : next; 3892 } 3893 }; 3894 } 3895 } 3896 3897 // Cache support 3898 3899 public void cleanUp() { 3900 for (Segment<?, ?> segment : segments) { 3901 segment.cleanUp(); 3902 } 3903 } 3904 3905 // ConcurrentMap methods 3906 3907 @Override 3908 public boolean isEmpty() { 3909 /* 3910 * Sum per-segment modCounts to avoid mis-reporting when elements are concurrently added and 3911 * removed in one segment while checking another, in which case the table was never actually 3912 * empty at any point. (The sum ensures accuracy up through at least 1<<31 per-segment 3913 * modifications before recheck.) Method containsValue() uses similar constructions for 3914 * stability checks. 
3915 */ 3916 long sum = 0L; 3917 Segment<K, V>[] segments = this.segments; 3918 for (int i = 0; i < segments.length; ++i) { 3919 if (segments[i].count != 0) { 3920 return false; 3921 } 3922 sum += segments[i].modCount; 3923 } 3924 3925 if (sum != 0L) { // recheck unless no modifications 3926 for (int i = 0; i < segments.length; ++i) { 3927 if (segments[i].count != 0) { 3928 return false; 3929 } 3930 sum -= segments[i].modCount; 3931 } 3932 return sum == 0L; 3933 } 3934 return true; 3935 } 3936 3937 long longSize() { 3938 Segment<K, V>[] segments = this.segments; 3939 long sum = 0; 3940 for (int i = 0; i < segments.length; ++i) { 3941 sum += segments[i].count; 3942 } 3943 return sum; 3944 } 3945 3946 @Override 3947 public int size() { 3948 return Ints.saturatedCast(longSize()); 3949 } 3950 3951 @Override 3952 public @Nullable V get(@Nullable Object key) { 3953 if (key == null) { 3954 return null; 3955 } 3956 int hash = hash(key); 3957 return segmentFor(hash).get(key, hash); 3958 } 3959 3960 V get(K key, CacheLoader<? super K, V> loader) throws ExecutionException { 3961 int hash = hash(checkNotNull(key)); 3962 return segmentFor(hash).get(key, hash, loader); 3963 } 3964 3965 public @Nullable V getIfPresent(Object key) { 3966 int hash = hash(checkNotNull(key)); 3967 V value = segmentFor(hash).get(key, hash); 3968 if (value == null) { 3969 globalStatsCounter.recordMisses(1); 3970 } else { 3971 globalStatsCounter.recordHits(1); 3972 } 3973 return value; 3974 } 3975 3976 // Only becomes available in Java 8 when it's on the interface. 3977 // @Override 3978 @Override 3979 public @Nullable V getOrDefault(@Nullable Object key, @Nullable V defaultValue) { 3980 V result = get(key); 3981 return (result != null) ? result : defaultValue; 3982 } 3983 3984 V getOrLoad(K key) throws ExecutionException { 3985 return get(key, defaultLoader); 3986 } 3987 3988 ImmutableMap<K, V> getAllPresent(Iterable<?> keys) { 3989 int hits = 0; 3990 int misses = 0; 3991 3992 Map<K, V> result = Maps.newLinkedHashMap(); 3993 for (Object key : keys) { 3994 V value = get(key); 3995 if (value == null) { 3996 misses++; 3997 } else { 3998 // TODO(fry): store entry key instead of query key 3999 @SuppressWarnings("unchecked") 4000 K castKey = (K) key; 4001 result.put(castKey, value); 4002 hits++; 4003 } 4004 } 4005 globalStatsCounter.recordHits(hits); 4006 globalStatsCounter.recordMisses(misses); 4007 return ImmutableMap.copyOf(result); 4008 } 4009 4010 ImmutableMap<K, V> getAll(Iterable<? 
extends K> keys) throws ExecutionException { 4011 int hits = 0; 4012 int misses = 0; 4013 4014 Map<K, V> result = Maps.newLinkedHashMap(); 4015 Set<K> keysToLoad = Sets.newLinkedHashSet(); 4016 for (K key : keys) { 4017 V value = get(key); 4018 if (!result.containsKey(key)) { 4019 result.put(key, value); 4020 if (value == null) { 4021 misses++; 4022 keysToLoad.add(key); 4023 } else { 4024 hits++; 4025 } 4026 } 4027 } 4028 4029 try { 4030 if (!keysToLoad.isEmpty()) { 4031 try { 4032 Map<K, V> newEntries = loadAll(keysToLoad, defaultLoader); 4033 for (K key : keysToLoad) { 4034 V value = newEntries.get(key); 4035 if (value == null) { 4036 throw new InvalidCacheLoadException("loadAll failed to return a value for " + key); 4037 } 4038 result.put(key, value); 4039 } 4040 } catch (UnsupportedLoadingOperationException e) { 4041 // loadAll not implemented, fallback to load 4042 for (K key : keysToLoad) { 4043 misses--; // get will count this miss 4044 result.put(key, get(key, defaultLoader)); 4045 } 4046 } 4047 } 4048 return ImmutableMap.copyOf(result); 4049 } finally { 4050 globalStatsCounter.recordHits(hits); 4051 globalStatsCounter.recordMisses(misses); 4052 } 4053 } 4054 4055 /** 4056 * Returns the result of calling {@link CacheLoader#loadAll}, or null if {@code loader} doesn't 4057 * implement {@code loadAll}. 4058 */ 4059 @Nullable 4060 Map<K, V> loadAll(Set<? extends K> keys, CacheLoader<? super K, V> loader) 4061 throws ExecutionException { 4062 checkNotNull(loader); 4063 checkNotNull(keys); 4064 Stopwatch stopwatch = Stopwatch.createStarted(); 4065 Map<K, V> result; 4066 boolean success = false; 4067 try { 4068 @SuppressWarnings("unchecked") // safe since all keys extend K 4069 Map<K, V> map = (Map<K, V>) loader.loadAll(keys); 4070 result = map; 4071 success = true; 4072 } catch (UnsupportedLoadingOperationException e) { 4073 success = true; 4074 throw e; 4075 } catch (InterruptedException e) { 4076 Thread.currentThread().interrupt(); 4077 throw new ExecutionException(e); 4078 } catch (RuntimeException e) { 4079 throw new UncheckedExecutionException(e); 4080 } catch (Exception e) { 4081 throw new ExecutionException(e); 4082 } catch (Error e) { 4083 throw new ExecutionError(e); 4084 } finally { 4085 if (!success) { 4086 globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS)); 4087 } 4088 } 4089 4090 if (result == null) { 4091 globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS)); 4092 throw new InvalidCacheLoadException(loader + " returned null map from loadAll"); 4093 } 4094 4095 stopwatch.stop(); 4096 // TODO(fry): batch by segment 4097 boolean nullsPresent = false; 4098 for (Entry<K, V> entry : result.entrySet()) { 4099 K key = entry.getKey(); 4100 V value = entry.getValue(); 4101 if (key == null || value == null) { 4102 // delay failure until non-null entries are stored 4103 nullsPresent = true; 4104 } else { 4105 put(key, value); 4106 } 4107 } 4108 4109 if (nullsPresent) { 4110 globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS)); 4111 throw new InvalidCacheLoadException(loader + " returned null keys or values from loadAll"); 4112 } 4113 4114 // TODO(fry): record count of loaded entries 4115 globalStatsCounter.recordLoadSuccess(stopwatch.elapsed(NANOSECONDS)); 4116 return result; 4117 } 4118 4119 /** 4120 * Returns the internal entry for the specified key. The entry may be loading, expired, or 4121 * partially collected. 
4122 */ 4123 ReferenceEntry<K, V> getEntry(@Nullable Object key) { 4124 // does not impact recency ordering 4125 if (key == null) { 4126 return null; 4127 } 4128 int hash = hash(key); 4129 return segmentFor(hash).getEntry(key, hash); 4130 } 4131 4132 void refresh(K key) { 4133 int hash = hash(checkNotNull(key)); 4134 segmentFor(hash).refresh(key, hash, defaultLoader, false); 4135 } 4136 4137 @Override 4138 public boolean containsKey(@Nullable Object key) { 4139 // does not impact recency ordering 4140 if (key == null) { 4141 return false; 4142 } 4143 int hash = hash(key); 4144 return segmentFor(hash).containsKey(key, hash); 4145 } 4146 4147 @Override 4148 public boolean containsValue(@Nullable Object value) { 4149 // does not impact recency ordering 4150 if (value == null) { 4151 return false; 4152 } 4153 4154 // This implementation is patterned after ConcurrentHashMap, but without the locking. The only 4155 // way for it to return a false negative would be for the target value to jump around in the map 4156 // such that none of the subsequent iterations observed it, despite the fact that at every point 4157 // in time it was present somewhere in the map. This becomes increasingly unlikely as 4158 // CONTAINS_VALUE_RETRIES increases, though without locking it is theoretically possible. 4159 long now = ticker.read(); 4160 final Segment<K, V>[] segments = this.segments; 4161 long last = -1L; 4162 for (int i = 0; i < CONTAINS_VALUE_RETRIES; i++) { 4163 long sum = 0L; 4164 for (Segment<K, V> segment : segments) { 4165 // ensure visibility of most recent completed write 4166 int unused = segment.count; // read-volatile 4167 4168 AtomicReferenceArray<ReferenceEntry<K, V>> table = segment.table; 4169 for (int j = 0; j < table.length(); j++) { 4170 for (ReferenceEntry<K, V> e = table.get(j); e != null; e = e.getNext()) { 4171 V v = segment.getLiveValue(e, now); 4172 if (v != null && valueEquivalence.equivalent(value, v)) { 4173 return true; 4174 } 4175 } 4176 } 4177 sum += segment.modCount; 4178 } 4179 if (sum == last) { 4180 break; 4181 } 4182 last = sum; 4183 } 4184 return false; 4185 } 4186 4187 @Override 4188 public V put(K key, V value) { 4189 checkNotNull(key); 4190 checkNotNull(value); 4191 int hash = hash(key); 4192 return segmentFor(hash).put(key, hash, value, false); 4193 } 4194 4195 @Override 4196 public V putIfAbsent(K key, V value) { 4197 checkNotNull(key); 4198 checkNotNull(value); 4199 int hash = hash(key); 4200 return segmentFor(hash).put(key, hash, value, true); 4201 } 4202 4203 @Override 4204 public V compute(K key, BiFunction<? super K, ? super V, ? extends V> function) { 4205 checkNotNull(key); 4206 checkNotNull(function); 4207 int hash = hash(key); 4208 return segmentFor(hash).compute(key, hash, function); 4209 } 4210 4211 @Override 4212 public V computeIfAbsent(K key, Function<? super K, ? extends V> function) { 4213 checkNotNull(key); 4214 checkNotNull(function); 4215 return compute(key, (k, oldValue) -> (oldValue == null) ? function.apply(key) : oldValue); 4216 } 4217 4218 @Override 4219 public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> function) { 4220 checkNotNull(key); 4221 checkNotNull(function); 4222 return compute(key, (k, oldValue) -> (oldValue == null) ? null : function.apply(k, oldValue)); 4223 } 4224 4225 @Override 4226 public V merge(K key, V newValue, BiFunction<? super V, ? super V, ?
extends V> function) { 4227 checkNotNull(key); 4228 checkNotNull(newValue); 4229 checkNotNull(function); 4230 return compute( 4231 key, (k, oldValue) -> (oldValue == null) ? newValue : function.apply(oldValue, newValue)); 4232 } 4233 4234 @Override 4235 public void putAll(Map<? extends K, ? extends V> m) { 4236 for (Entry<? extends K, ? extends V> e : m.entrySet()) { 4237 put(e.getKey(), e.getValue()); 4238 } 4239 } 4240 4241 @Override 4242 public V remove(@Nullable Object key) { 4243 if (key == null) { 4244 return null; 4245 } 4246 int hash = hash(key); 4247 return segmentFor(hash).remove(key, hash); 4248 } 4249 4250 @Override 4251 public boolean remove(@Nullable Object key, @Nullable Object value) { 4252 if (key == null || value == null) { 4253 return false; 4254 } 4255 int hash = hash(key); 4256 return segmentFor(hash).remove(key, hash, value); 4257 } 4258 4259 @Override 4260 public boolean replace(K key, @Nullable V oldValue, V newValue) { 4261 checkNotNull(key); 4262 checkNotNull(newValue); 4263 if (oldValue == null) { 4264 return false; 4265 } 4266 int hash = hash(key); 4267 return segmentFor(hash).replace(key, hash, oldValue, newValue); 4268 } 4269 4270 @Override 4271 public V replace(K key, V value) { 4272 checkNotNull(key); 4273 checkNotNull(value); 4274 int hash = hash(key); 4275 return segmentFor(hash).replace(key, hash, value); 4276 } 4277 4278 @Override 4279 public void clear() { 4280 for (Segment<K, V> segment : segments) { 4281 segment.clear(); 4282 } 4283 } 4284 4285 void invalidateAll(Iterable<?> keys) { 4286 // TODO(fry): batch by segment 4287 for (Object key : keys) { 4288 remove(key); 4289 } 4290 } 4291 4292 @RetainedWith @Nullable Set<K> keySet; 4293 4294 @Override 4295 public Set<K> keySet() { 4296 // does not impact recency ordering 4297 Set<K> ks = keySet; 4298 return (ks != null) ? ks : (keySet = new KeySet()); 4299 } 4300 4301 @RetainedWith @Nullable Collection<V> values; 4302 4303 @Override 4304 public Collection<V> values() { 4305 // does not impact recency ordering 4306 Collection<V> vs = values; 4307 return (vs != null) ? vs : (values = new Values()); 4308 } 4309 4310 @RetainedWith @Nullable Set<Entry<K, V>> entrySet; 4311 4312 @Override 4313 @GwtIncompatible // Not supported. 4314 public Set<Entry<K, V>> entrySet() { 4315 // does not impact recency ordering 4316 Set<Entry<K, V>> es = entrySet; 4317 return (es != null) ? es : (entrySet = new EntrySet()); 4318 } 4319 4320 // Iterator Support 4321 4322 abstract class HashIterator<T> implements Iterator<T> { 4323 4324 int nextSegmentIndex; 4325 int nextTableIndex; 4326 @Nullable Segment<K, V> currentSegment; 4327 @Nullable AtomicReferenceArray<ReferenceEntry<K, V>> currentTable; 4328 @Nullable ReferenceEntry<K, V> nextEntry; 4329 @Nullable WriteThroughEntry nextExternal; 4330 @Nullable WriteThroughEntry lastReturned; 4331 4332 HashIterator() { 4333 nextSegmentIndex = segments.length - 1; 4334 nextTableIndex = -1; 4335 advance(); 4336 } 4337 4338 @Override 4339 public abstract T next(); 4340 4341 final void advance() { 4342 nextExternal = null; 4343 4344 if (nextInChain()) { 4345 return; 4346 } 4347 4348 if (nextInTable()) { 4349 return; 4350 } 4351 4352 while (nextSegmentIndex >= 0) { 4353 currentSegment = segments[nextSegmentIndex--]; 4354 if (currentSegment.count != 0) { 4355 currentTable = currentSegment.table; 4356 nextTableIndex = currentTable.length() - 1; 4357 if (nextInTable()) { 4358 return; 4359 } 4360 } 4361 } 4362 } 4363 4364 /** Finds the next entry in the current chain. 
Returns true if an entry was found. */ 4365 boolean nextInChain() { 4366 if (nextEntry != null) { 4367 for (nextEntry = nextEntry.getNext(); nextEntry != null; nextEntry = nextEntry.getNext()) { 4368 if (advanceTo(nextEntry)) { 4369 return true; 4370 } 4371 } 4372 } 4373 return false; 4374 } 4375 4376 /** Finds the next entry in the current table. Returns true if an entry was found. */ 4377 boolean nextInTable() { 4378 while (nextTableIndex >= 0) { 4379 if ((nextEntry = currentTable.get(nextTableIndex--)) != null) { 4380 if (advanceTo(nextEntry) || nextInChain()) { 4381 return true; 4382 } 4383 } 4384 } 4385 return false; 4386 } 4387 4388 /** 4389 * Advances to the given entry. Returns true if the entry was valid, false if it should be 4390 * skipped. 4391 */ 4392 boolean advanceTo(ReferenceEntry<K, V> entry) { 4393 try { 4394 long now = ticker.read(); 4395 K key = entry.getKey(); 4396 V value = getLiveValue(entry, now); 4397 if (value != null) { 4398 nextExternal = new WriteThroughEntry(key, value); 4399 return true; 4400 } else { 4401 // Skip stale entry. 4402 return false; 4403 } 4404 } finally { 4405 currentSegment.postReadCleanup(); 4406 } 4407 } 4408 4409 @Override 4410 public boolean hasNext() { 4411 return nextExternal != null; 4412 } 4413 4414 WriteThroughEntry nextEntry() { 4415 if (nextExternal == null) { 4416 throw new NoSuchElementException(); 4417 } 4418 lastReturned = nextExternal; 4419 advance(); 4420 return lastReturned; 4421 } 4422 4423 @Override 4424 public void remove() { 4425 checkState(lastReturned != null); 4426 LocalCache.this.remove(lastReturned.getKey()); 4427 lastReturned = null; 4428 } 4429 } 4430 4431 final class KeyIterator extends HashIterator<K> { 4432 4433 @Override 4434 public K next() { 4435 return nextEntry().getKey(); 4436 } 4437 } 4438 4439 final class ValueIterator extends HashIterator<V> { 4440 4441 @Override 4442 public V next() { 4443 return nextEntry().getValue(); 4444 } 4445 } 4446 4447 /** 4448 * Custom Entry class used by EntryIterator.next(), that relays setValue changes to the underlying 4449 * map. 
4450 */ 4451 final class WriteThroughEntry implements Entry<K, V> { 4452 final K key; // non-null 4453 V value; // non-null 4454 4455 WriteThroughEntry(K key, V value) { 4456 this.key = key; 4457 this.value = value; 4458 } 4459 4460 @Override 4461 public K getKey() { 4462 return key; 4463 } 4464 4465 @Override 4466 public V getValue() { 4467 return value; 4468 } 4469 4470 @Override 4471 public boolean equals(@Nullable Object object) { 4472 // Cannot use key and value equivalence 4473 if (object instanceof Entry) { 4474 Entry<?, ?> that = (Entry<?, ?>) object; 4475 return key.equals(that.getKey()) && value.equals(that.getValue()); 4476 } 4477 return false; 4478 } 4479 4480 @Override 4481 public int hashCode() { 4482 // Cannot use key and value equivalence 4483 return key.hashCode() ^ value.hashCode(); 4484 } 4485 4486 @Override 4487 public V setValue(V newValue) { 4488 V oldValue = put(key, newValue); 4489 value = newValue; // only if put succeeds 4490 return oldValue; 4491 } 4492 4493 @Override 4494 public String toString() { 4495 return getKey() + "=" + getValue(); 4496 } 4497 } 4498 4499 final class EntryIterator extends HashIterator<Entry<K, V>> { 4500 4501 @Override 4502 public Entry<K, V> next() { 4503 return nextEntry(); 4504 } 4505 } 4506 4507 abstract class AbstractCacheSet<T> extends AbstractSet<T> { 4508 @Override 4509 public int size() { 4510 return LocalCache.this.size(); 4511 } 4512 4513 @Override 4514 public boolean isEmpty() { 4515 return LocalCache.this.isEmpty(); 4516 } 4517 4518 @Override 4519 public void clear() { 4520 LocalCache.this.clear(); 4521 } 4522 4523 // super.toArray() may misbehave if size() is inaccurate, at least on old versions of Android. 4524 // https://code.google.com/p/android/issues/detail?id=36519 / http://r.android.com/47508 4525 4526 @Override 4527 public Object[] toArray() { 4528 return toArrayList(this).toArray(); 4529 } 4530 4531 @Override 4532 public <E> E[] toArray(E[] a) { 4533 return toArrayList(this).toArray(a); 4534 } 4535 } 4536 4537 private static <E> ArrayList<E> toArrayList(Collection<E> c) { 4538 // Avoid calling ArrayList(Collection), which may call back into toArray. 4539 ArrayList<E> result = new ArrayList<E>(c.size()); 4540 Iterators.addAll(result, c.iterator()); 4541 return result; 4542 } 4543 4544 boolean removeIf(BiPredicate<? super K, ? 
super V> filter) { 4545 checkNotNull(filter); 4546 boolean changed = false; 4547 for (K key : keySet()) { 4548 while (true) { 4549 V value = get(key); 4550 if (value == null || !filter.test(key, value)) { 4551 break; 4552 } else if (LocalCache.this.remove(key, value)) { 4553 changed = true; 4554 break; 4555 } 4556 } 4557 } 4558 return changed; 4559 } 4560 4561 final class KeySet extends AbstractCacheSet<K> { 4562 4563 @Override 4564 public Iterator<K> iterator() { 4565 return new KeyIterator(); 4566 } 4567 4568 @Override 4569 public boolean contains(Object o) { 4570 return LocalCache.this.containsKey(o); 4571 } 4572 4573 @Override 4574 public boolean remove(Object o) { 4575 return LocalCache.this.remove(o) != null; 4576 } 4577 } 4578 4579 final class Values extends AbstractCollection<V> { 4580 @Override 4581 public int size() { 4582 return LocalCache.this.size(); 4583 } 4584 4585 @Override 4586 public boolean isEmpty() { 4587 return LocalCache.this.isEmpty(); 4588 } 4589 4590 @Override 4591 public void clear() { 4592 LocalCache.this.clear(); 4593 } 4594 4595 @Override 4596 public Iterator<V> iterator() { 4597 return new ValueIterator(); 4598 } 4599 4600 @Override 4601 public boolean removeIf(Predicate<? super V> filter) { 4602 checkNotNull(filter); 4603 return LocalCache.this.removeIf((k, v) -> filter.test(v)); 4604 } 4605 4606 @Override 4607 public boolean contains(Object o) { 4608 return LocalCache.this.containsValue(o); 4609 } 4610 4611 // super.toArray() may misbehave if size() is inaccurate, at least on old versions of Android. 4612 // https://code.google.com/p/android/issues/detail?id=36519 / http://r.android.com/47508 4613 4614 @Override 4615 public Object[] toArray() { 4616 return toArrayList(this).toArray(); 4617 } 4618 4619 @Override 4620 public <E> E[] toArray(E[] a) { 4621 return toArrayList(this).toArray(a); 4622 } 4623 } 4624 4625 final class EntrySet extends AbstractCacheSet<Entry<K, V>> { 4626 4627 @Override 4628 public Iterator<Entry<K, V>> iterator() { 4629 return new EntryIterator(); 4630 } 4631 4632 @Override 4633 public boolean removeIf(Predicate<? super Entry<K, V>> filter) { 4634 checkNotNull(filter); 4635 return LocalCache.this.removeIf((k, v) -> filter.test(Maps.immutableEntry(k, v))); 4636 } 4637 4638 @Override 4639 public boolean contains(Object o) { 4640 if (!(o instanceof Entry)) { 4641 return false; 4642 } 4643 Entry<?, ?> e = (Entry<?, ?>) o; 4644 Object key = e.getKey(); 4645 if (key == null) { 4646 return false; 4647 } 4648 V v = LocalCache.this.get(key); 4649 4650 return v != null && valueEquivalence.equivalent(e.getValue(), v); 4651 } 4652 4653 @Override 4654 public boolean remove(Object o) { 4655 if (!(o instanceof Entry)) { 4656 return false; 4657 } 4658 Entry<?, ?> e = (Entry<?, ?>) o; 4659 Object key = e.getKey(); 4660 return key != null && LocalCache.this.remove(key, e.getValue()); 4661 } 4662 } 4663 4664 // Serialization Support 4665 4666 /** 4667 * Serializes the configuration of a LocalCache, reconstituting it as a Cache using CacheBuilder 4668 * upon deserialization. An instance of this class is fit for use by the writeReplace of 4669 * LocalManualCache. 4670 * 4671 * <p>Unfortunately, readResolve() doesn't get called when a circular dependency is present, so 4672 * the proxy must be able to behave as the cache itself. 
4673 */ 4674 static class ManualSerializationProxy<K, V> extends ForwardingCache<K, V> 4675 implements Serializable { 4676 private static final long serialVersionUID = 1; 4677 4678 final Strength keyStrength; 4679 final Strength valueStrength; 4680 final Equivalence<Object> keyEquivalence; 4681 final Equivalence<Object> valueEquivalence; 4682 final long expireAfterWriteNanos; 4683 final long expireAfterAccessNanos; 4684 final long maxWeight; 4685 final Weigher<K, V> weigher; 4686 final int concurrencyLevel; 4687 final RemovalListener<? super K, ? super V> removalListener; 4688 final @Nullable Ticker ticker; 4689 final CacheLoader<? super K, V> loader; 4690 4691 transient @Nullable Cache<K, V> delegate; 4692 4693 ManualSerializationProxy(LocalCache<K, V> cache) { 4694 this( 4695 cache.keyStrength, 4696 cache.valueStrength, 4697 cache.keyEquivalence, 4698 cache.valueEquivalence, 4699 cache.expireAfterWriteNanos, 4700 cache.expireAfterAccessNanos, 4701 cache.maxWeight, 4702 cache.weigher, 4703 cache.concurrencyLevel, 4704 cache.removalListener, 4705 cache.ticker, 4706 cache.defaultLoader); 4707 } 4708 4709 private ManualSerializationProxy( 4710 Strength keyStrength, 4711 Strength valueStrength, 4712 Equivalence<Object> keyEquivalence, 4713 Equivalence<Object> valueEquivalence, 4714 long expireAfterWriteNanos, 4715 long expireAfterAccessNanos, 4716 long maxWeight, 4717 Weigher<K, V> weigher, 4718 int concurrencyLevel, 4719 RemovalListener<? super K, ? super V> removalListener, 4720 Ticker ticker, 4721 CacheLoader<? super K, V> loader) { 4722 this.keyStrength = keyStrength; 4723 this.valueStrength = valueStrength; 4724 this.keyEquivalence = keyEquivalence; 4725 this.valueEquivalence = valueEquivalence; 4726 this.expireAfterWriteNanos = expireAfterWriteNanos; 4727 this.expireAfterAccessNanos = expireAfterAccessNanos; 4728 this.maxWeight = maxWeight; 4729 this.weigher = weigher; 4730 this.concurrencyLevel = concurrencyLevel; 4731 this.removalListener = removalListener; 4732 this.ticker = (ticker == Ticker.systemTicker() || ticker == NULL_TICKER) ? 
null : ticker; 4733 this.loader = loader; 4734 } 4735 4736 CacheBuilder<K, V> recreateCacheBuilder() { 4737 CacheBuilder<K, V> builder = 4738 CacheBuilder.newBuilder() 4739 .setKeyStrength(keyStrength) 4740 .setValueStrength(valueStrength) 4741 .keyEquivalence(keyEquivalence) 4742 .valueEquivalence(valueEquivalence) 4743 .concurrencyLevel(concurrencyLevel) 4744 .removalListener(removalListener); 4745 builder.strictParsing = false; 4746 if (expireAfterWriteNanos > 0) { 4747 builder.expireAfterWrite(expireAfterWriteNanos, TimeUnit.NANOSECONDS); 4748 } 4749 if (expireAfterAccessNanos > 0) { 4750 builder.expireAfterAccess(expireAfterAccessNanos, TimeUnit.NANOSECONDS); 4751 } 4752 if (weigher != OneWeigher.INSTANCE) { 4753 builder.weigher(weigher); 4754 if (maxWeight != UNSET_INT) { 4755 builder.maximumWeight(maxWeight); 4756 } 4757 } else { 4758 if (maxWeight != UNSET_INT) { 4759 builder.maximumSize(maxWeight); 4760 } 4761 } 4762 if (ticker != null) { 4763 builder.ticker(ticker); 4764 } 4765 return builder; 4766 } 4767 4768 private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { 4769 in.defaultReadObject(); 4770 CacheBuilder<K, V> builder = recreateCacheBuilder(); 4771 this.delegate = builder.build(); 4772 } 4773 4774 private Object readResolve() { 4775 return delegate; 4776 } 4777 4778 @Override 4779 protected Cache<K, V> delegate() { 4780 return delegate; 4781 } 4782 } 4783 4784 /** 4785 * Serializes the configuration of a LocalCache, reconstituting it as a LoadingCache using 4786 * CacheBuilder upon deserialization. An instance of this class is fit for use by the writeReplace 4787 * of LocalLoadingCache. 4788 * 4789 * <p>Unfortunately, readResolve() doesn't get called when a circular dependency is present, so 4790 * the proxy must be able to behave as the cache itself. 4791 */ 4792 static final class LoadingSerializationProxy<K, V> extends ManualSerializationProxy<K, V> 4793 implements LoadingCache<K, V>, Serializable { 4794 private static final long serialVersionUID = 1; 4795 4796 transient @Nullable LoadingCache<K, V> autoDelegate; 4797 4798 LoadingSerializationProxy(LocalCache<K, V> cache) { 4799 super(cache); 4800 } 4801 4802 private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { 4803 in.defaultReadObject(); 4804 CacheBuilder<K, V> builder = recreateCacheBuilder(); 4805 this.autoDelegate = builder.build(loader); 4806 } 4807 4808 @Override 4809 public V get(K key) throws ExecutionException { 4810 return autoDelegate.get(key); 4811 } 4812 4813 @Override 4814 public V getUnchecked(K key) { 4815 return autoDelegate.getUnchecked(key); 4816 } 4817 4818 @Override 4819 public ImmutableMap<K, V> getAll(Iterable<? extends K> keys) throws ExecutionException { 4820 return autoDelegate.getAll(keys); 4821 } 4822 4823 @Override 4824 public final V apply(K key) { 4825 return autoDelegate.apply(key); 4826 } 4827 4828 @Override 4829 public void refresh(K key) { 4830 autoDelegate.refresh(key); 4831 } 4832 4833 private Object readResolve() { 4834 return autoDelegate; 4835 } 4836 } 4837 4838 static class LocalManualCache<K, V> implements Cache<K, V>, Serializable { 4839 final LocalCache<K, V> localCache; 4840 4841 LocalManualCache(CacheBuilder<? super K, ?
super V> builder) { 4842 this(new LocalCache<K, V>(builder, null)); 4843 } 4844 4845 private LocalManualCache(LocalCache<K, V> localCache) { 4846 this.localCache = localCache; 4847 } 4848 4849 // Cache methods 4850 4851 @Override 4852 public @Nullable V getIfPresent(Object key) { 4853 return localCache.getIfPresent(key); 4854 } 4855 4856 @Override 4857 public V get(K key, final Callable<? extends V> valueLoader) throws ExecutionException { 4858 checkNotNull(valueLoader); 4859 return localCache.get( 4860 key, 4861 new CacheLoader<Object, V>() { 4862 @Override 4863 public V load(Object key) throws Exception { 4864 return valueLoader.call(); 4865 } 4866 }); 4867 } 4868 4869 @Override 4870 public ImmutableMap<K, V> getAllPresent(Iterable<?> keys) { 4871 return localCache.getAllPresent(keys); 4872 } 4873 4874 @Override 4875 public void put(K key, V value) { 4876 localCache.put(key, value); 4877 } 4878 4879 @Override 4880 public void putAll(Map<? extends K, ? extends V> m) { 4881 localCache.putAll(m); 4882 } 4883 4884 @Override 4885 public void invalidate(Object key) { 4886 checkNotNull(key); 4887 localCache.remove(key); 4888 } 4889 4890 @Override 4891 public void invalidateAll(Iterable<?> keys) { 4892 localCache.invalidateAll(keys); 4893 } 4894 4895 @Override 4896 public void invalidateAll() { 4897 localCache.clear(); 4898 } 4899 4900 @Override 4901 public long size() { 4902 return localCache.longSize(); 4903 } 4904 4905 @Override 4906 public ConcurrentMap<K, V> asMap() { 4907 return localCache; 4908 } 4909 4910 @Override 4911 public CacheStats stats() { 4912 SimpleStatsCounter aggregator = new SimpleStatsCounter(); 4913 aggregator.incrementBy(localCache.globalStatsCounter); 4914 for (Segment<K, V> segment : localCache.segments) { 4915 aggregator.incrementBy(segment.statsCounter); 4916 } 4917 return aggregator.snapshot(); 4918 } 4919 4920 @Override 4921 public void cleanUp() { 4922 localCache.cleanUp(); 4923 } 4924 4925 // Serialization Support 4926 4927 private static final long serialVersionUID = 1; 4928 4929 Object writeReplace() { 4930 return new ManualSerializationProxy<>(localCache); 4931 } 4932 } 4933 4934 static class LocalLoadingCache<K, V> extends LocalManualCache<K, V> 4935 implements LoadingCache<K, V> { 4936 4937 LocalLoadingCache( 4938 CacheBuilder<? super K, ? super V> builder, CacheLoader<? super K, V> loader) { 4939 super(new LocalCache<K, V>(builder, checkNotNull(loader))); 4940 } 4941 4942 // LoadingCache methods 4943 4944 @Override 4945 public V get(K key) throws ExecutionException { 4946 return localCache.getOrLoad(key); 4947 } 4948 4949 @Override 4950 public V getUnchecked(K key) { 4951 try { 4952 return get(key); 4953 } catch (ExecutionException e) { 4954 throw new UncheckedExecutionException(e.getCause()); 4955 } 4956 } 4957 4958 @Override 4959 public ImmutableMap<K, V> getAll(Iterable<? extends K> keys) throws ExecutionException { 4960 return localCache.getAll(keys); 4961 } 4962 4963 @Override 4964 public void refresh(K key) { 4965 localCache.refresh(key); 4966 } 4967 4968 @Override 4969 public final V apply(K key) { 4970 return getUnchecked(key); 4971 } 4972 4973 // Serialization Support 4974 4975 private static final long serialVersionUID = 1; 4976 4977 @Override 4978 Object writeReplace() { 4979 return new LoadingSerializationProxy<>(localCache); 4980 } 4981 } 4982 }
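
Editorial note on the eviction queues (source lines 3630 and 3769): WriteQueue and AccessQueue both implement the pattern their javadoc describes, an intrusive doubly-linked list with a sentinel head whose links live on the entries themselves. That is what makes mid-queue replacement during copyWriteEntry and the "highly optimized" contains check possible in O(1), with no traversal. The stand-alone sketch below is not part of LocalCache; the names SentinelQueue and Node are hypothetical, and it assumes single-threaded (or externally locked) access, which matches how the cache uses these queues under the segment lock.

```java
// Editorial sketch, NOT part of LocalCache: a minimal version of the sentinel-headed,
// intrusive doubly-linked queue pattern that WriteQueue and AccessQueue build on top of
// ReferenceEntry. Names (SentinelQueue, Node) are hypothetical.
final class SentinelQueue<T> {

  /** Intrusive node: the links live on the element itself, as they do on ReferenceEntry. */
  static final class Node<T> {
    final T value;
    Node<T> next;
    Node<T> prev;

    Node(T value) {
      this.value = value;
      // A node starts self-linked; LocalCache uses NullEntry.INSTANCE for the same purpose.
      this.next = this;
      this.prev = this;
    }
  }

  // The sentinel head links to itself while the queue is empty, so no null checks are needed.
  private final Node<T> head = new Node<>(null);

  private static <E> void connect(Node<E> a, Node<E> b) {
    a.next = b;
    b.prev = a;
  }

  /** Adds (or moves) the node to the tail; re-offering records a fresh write/access. */
  boolean offer(Node<T> node) {
    connect(node.prev, node.next); // unlink from wherever it currently is (no-op if new)
    connect(head.prev, node);      // splice in just before the sentinel, i.e. at the tail
    connect(node, head);
    return true;
  }

  /** The eviction candidate: the least recently offered node, or null if empty. */
  Node<T> peek() {
    Node<T> next = head.next;
    return (next == head) ? null : next;
  }

  Node<T> poll() {
    Node<T> next = peek();
    if (next == null) {
      return null;
    }
    remove(next);
    return next;
  }

  /** O(1) removal from anywhere in the queue; no scanning, unlike a plain LinkedList. */
  void remove(Node<T> node) {
    connect(node.prev, node.next);
    node.next = node; // self-link marks "not queued"
    node.prev = node;
  }

  /** O(1) membership test, mirroring the optimized contains() in the real queues. */
  boolean contains(Node<T> node) {
    return node.next != node;
  }

  boolean isEmpty() {
    return head.next == head;
  }
}
```

Because the links are stored on the entry rather than in separate list nodes, offering an entry that is already queued simply moves it to the tail, which is exactly how the cache records recency of writes and accesses without allocating anything per operation.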