Coverage Summary for Class: CompactLinkedHashSet (com.google.common.collect)
| Class | Class, % | Method, % | Line, % |
|---|---|---|---|
| CompactLinkedHashSet | 0% (0/1) | 0% (0/23) | 0% (0/57) |
```java
/*
 * Copyright (C) 2012 The Guava Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.common.collect;

import com.google.common.annotations.GwtIncompatible;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
import java.util.Spliterator;
import java.util.Spliterators;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * CompactLinkedHashSet is an implementation of a Set with a predictable iteration order that
 * matches the insertion order. All optional operations (adding and removing) are supported. All
 * elements, including {@code null}, are permitted.
 *
 * <p>{@code contains(x)}, {@code add(x)} and {@code remove(x)} are all (expected and amortized)
 * constant-time operations. Expected in the hashtable sense (depends on the hash function doing a
 * good job of distributing the elements to the buckets to a distribution not far from uniform),
 * and amortized since some operations can trigger a hash table resize.
 *
 * <p>This implementation consumes significantly less memory than {@code java.util.LinkedHashSet}
 * or even {@code java.util.HashSet}, and places considerably less load on the garbage collector.
 * Like {@code java.util.LinkedHashSet}, it offers insertion-order iteration, with identical
 * behavior.
 *
 * <p>This class should not be assumed to be universally superior to {@code
 * java.util.LinkedHashSet}. Generally speaking, this class reduces object allocation and memory
 * consumption at the price of moderately increased constant factors of CPU. Only use this class
 * when there is a specific reason to prioritize memory over CPU.
 *
 * @author Louis Wasserman
 */
@GwtIncompatible // not worth using in GWT for now
class CompactLinkedHashSet<E> extends CompactHashSet<E> {

  /** Creates an empty {@code CompactLinkedHashSet} instance. */
  public static <E> CompactLinkedHashSet<E> create() {
    return new CompactLinkedHashSet<>();
  }

  /**
   * Creates a <i>mutable</i> {@code CompactLinkedHashSet} instance containing the elements of the
   * given collection in the order returned by the collection's iterator.
   *
   * @param collection the elements that the set should contain
   * @return a new {@code CompactLinkedHashSet} containing those elements (minus duplicates)
   */
  public static <E> CompactLinkedHashSet<E> create(Collection<? extends E> collection) {
    CompactLinkedHashSet<E> set = createWithExpectedSize(collection.size());
    set.addAll(collection);
    return set;
  }

  /**
   * Creates a {@code CompactLinkedHashSet} instance containing the given elements in unspecified
   * order.
   *
   * @param elements the elements that the set should contain
   * @return a new {@code CompactLinkedHashSet} containing those elements (minus duplicates)
   */
  @SafeVarargs
  public static <E> CompactLinkedHashSet<E> create(E... elements) {
    CompactLinkedHashSet<E> set = createWithExpectedSize(elements.length);
    Collections.addAll(set, elements);
    return set;
  }

  /**
   * Creates a {@code CompactLinkedHashSet} instance, with a high enough "initial capacity" that it
   * <i>should</i> hold {@code expectedSize} elements without rebuilding internal data structures.
   *
   * @param expectedSize the number of elements you expect to add to the returned set
   * @return a new, empty {@code CompactLinkedHashSet} with enough capacity to hold {@code
   *     expectedSize} elements without resizing
   * @throws IllegalArgumentException if {@code expectedSize} is negative
   */
  public static <E> CompactLinkedHashSet<E> createWithExpectedSize(int expectedSize) {
    return new CompactLinkedHashSet<>(expectedSize);
  }

  private static final int ENDPOINT = -2;

  // TODO(user): predecessors and successors should be collocated (reducing cache misses).
  // Might also explore collocating all of [hash, next, predecessor, successor] fields of an
  // entry in a *single* long[], though that reduces the maximum size of the set by a factor of 2

  /**
   * Pointer to the predecessor of an entry in insertion order. ENDPOINT indicates a node is the
   * first node in insertion order; all values at indices ≥ {@link #size()} are UNSET.
   */
  private transient int @Nullable [] predecessor;

  /**
   * Pointer to the successor of an entry in insertion order. ENDPOINT indicates a node is the last
   * node in insertion order; all values at indices ≥ {@link #size()} are UNSET.
   */
  private transient int @Nullable [] successor;

  /** Pointer to the first node in the linked list, or {@code ENDPOINT} if there are no entries. */
  private transient int firstEntry;

  /**
   * Pointer to the last node in the linked list, or {@code ENDPOINT} if there are no entries.
   */
  private transient int lastEntry;

  CompactLinkedHashSet() {
    super();
  }

  CompactLinkedHashSet(int expectedSize) {
    super(expectedSize);
  }

  @Override
  void init(int expectedSize) {
    super.init(expectedSize);
    this.firstEntry = ENDPOINT;
    this.lastEntry = ENDPOINT;
  }

  @Override
  int allocArrays() {
    int expectedSize = super.allocArrays();
    this.predecessor = new int[expectedSize];
    this.successor = new int[expectedSize];
    return expectedSize;
  }

  @Override
  @CanIgnoreReturnValue
  Set<E> convertToHashFloodingResistantImplementation() {
    Set<E> result = super.convertToHashFloodingResistantImplementation();
    this.predecessor = null;
    this.successor = null;
    return result;
  }

  // The predecessor/successor arrays store (entry index + 1), so the array default of 0 means
  // "unset"; the getters subtract 1 to recover the real entry index.
  private int getPredecessor(int entry) {
    return predecessor[entry] - 1;
  }

  @Override
  int getSuccessor(int entry) {
    return successor[entry] - 1;
  }

  private void setSuccessor(int entry, int succ) {
    successor[entry] = succ + 1;
  }

  private void setPredecessor(int entry, int pred) {
    predecessor[entry] = pred + 1;
  }

  /** Links {@code succ} directly after {@code pred} in insertion order. */
  private void setSucceeds(int pred, int succ) {
    if (pred == ENDPOINT) {
      firstEntry = succ;
    } else {
      setSuccessor(pred, succ);
    }

    if (succ == ENDPOINT) {
      lastEntry = pred;
    } else {
      setPredecessor(succ, pred);
    }
  }

  @Override
  void insertEntry(int entryIndex, @Nullable E object, int hash, int mask) {
    super.insertEntry(entryIndex, object, hash, mask);
    setSucceeds(lastEntry, entryIndex);
    setSucceeds(entryIndex, ENDPOINT);
  }

  @Override
  void moveLastEntry(int dstIndex, int mask) {
    int srcIndex = size() - 1;
    super.moveLastEntry(dstIndex, mask);

    setSucceeds(getPredecessor(dstIndex), getSuccessor(dstIndex));
    if (dstIndex < srcIndex) {
      setSucceeds(getPredecessor(srcIndex), dstIndex);
      setSucceeds(dstIndex, getSuccessor(srcIndex));
    }
    predecessor[srcIndex] = 0;
    successor[srcIndex] = 0;
  }

  @Override
  void resizeEntries(int newCapacity) {
    super.resizeEntries(newCapacity);
    predecessor = Arrays.copyOf(predecessor, newCapacity);
    successor = Arrays.copyOf(successor, newCapacity);
  }

  @Override
  int firstEntryIndex() {
    return firstEntry;
  }

  @Override
  int adjustAfterRemove(int indexBeforeRemove, int indexRemoved) {
    return (indexBeforeRemove >= size()) ? indexRemoved : indexBeforeRemove;
  }

  @Override
  public Object[] toArray() {
    return ObjectArrays.toArrayImpl(this);
  }

  @Override
  public <T> T[] toArray(T[] a) {
    return ObjectArrays.toArrayImpl(this, a);
  }

  @Override
  public Spliterator<E> spliterator() {
    return Spliterators.spliterator(this, Spliterator.ORDERED | Spliterator.DISTINCT);
  }

  @Override
  public void clear() {
    if (needsAllocArrays()) {
      return;
    }
    this.firstEntry = ENDPOINT;
    this.lastEntry = ENDPOINT;
    if (predecessor != null) {
      Arrays.fill(predecessor, 0, size(), 0);
      Arrays.fill(successor, 0, size(), 0);
    }
    super.clear();
  }
}
```
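All 23 methods and 57 lines above are currently uncovered. As an illustrative starting point, the sketch below shows how the class's documented behavior could be exercised: the varargs factory, insertion-order iteration, removal, `null` handling, and `clear()`. The example class name is invented, and it assumes compilation inside the `com.google.common.collect` package (since `CompactLinkedHashSet` is package-private) with Guava on the classpath.

```java
package com.google.common.collect;

import java.util.Iterator;

// Hypothetical example class; not part of Guava. It must live in
// com.google.common.collect because CompactLinkedHashSet is package-private.
public class CompactLinkedHashSetExample {
  public static void main(String[] args) {
    // create(E...) adds the elements left to right; the duplicate "a" is dropped.
    CompactLinkedHashSet<String> set = CompactLinkedHashSet.create("b", "a", "c", "a");
    System.out.println(set);        // [b, a, c] -- insertion-order iteration
    System.out.println(set.size()); // 3

    // Removal unlinks the entry while preserving the order of the remaining elements.
    set.remove("a");
    Iterator<String> it = set.iterator();
    System.out.println(it.next() + " " + it.next()); // b c

    // null elements are permitted, and clear() resets the linked structure.
    set.add(null);
    System.out.println(set.contains(null)); // true
    set.clear();
    System.out.println(set.isEmpty());      // true
  }
}
```

The `create(Collection)` and `createWithExpectedSize(int)` factories follow the same pattern; the latter only pre-sizes the backing arrays so that adding up to the expected number of elements should not trigger a resize.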