Coverage Summary for Class: CompactHashSet (com.google.common.collect)

Class Method, % Line, %
CompactHashSet 0% (0/39) 0% (0/236)
CompactHashSet$1 0% (0/6) 0% (0/21)
Total 0% (0/45) 0% (0/257)


/*
 * Copyright (C) 2012 The Guava Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.common.collect;

import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.CollectPreconditions.checkRemove;
import static com.google.common.collect.CompactHashing.UNSET;
import static com.google.common.collect.Hashing.smearedHash;

import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Ints;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.AbstractSet;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.Consumer;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * CompactHashSet is an implementation of a Set. All optional operations (adding and removing) are
 * supported. The elements can be any objects.
 *
 * <p>{@code contains(x)}, {@code add(x)} and {@code remove(x)}, are all (expected and amortized)
 * constant time operations. Expected in the hashtable sense (depends on the hash function doing a
 * good job of distributing the elements to the buckets to a distribution not far from uniform), and
 * amortized since some operations can trigger a hash table resize.
 *
 * <p>Unlike {@code java.util.HashSet}, iteration is only proportional to the actual {@code size()},
 * which is optimal, and <i>not</i> the size of the internal hashtable, which could be much larger
 * than {@code size()}. Furthermore, this structure only depends on a fixed number of arrays; {@code
 * add(x)} operations <i>do not</i> create objects for the garbage collector to deal with, and for
 * every element added, the garbage collector will have to traverse {@code 1.5} references on
 * average, in the marking phase, not {@code 5.0} as in {@code java.util.HashSet}.
 *
 * <p>If there are no removals, then {@link #iterator iteration} order is the same as insertion
 * order. Any removal invalidates any ordering guarantees.
 *
 * <p>This class should not be assumed to be universally superior to {@code java.util.HashSet}.
 * Generally speaking, this class reduces object allocation and memory consumption at the price of
 * moderately increased constant factors of CPU. Only use this class when there is a specific reason
 * to prioritize memory over CPU.
 *
 * @author Dimitris Andreou
 * @author Jon Noack
 */
@GwtIncompatible // not worth using in GWT for now
class CompactHashSet<E> extends AbstractSet<E> implements Serializable {
  // TODO(user): cache all field accesses in local vars

  /** Creates an empty {@code CompactHashSet} instance. */
  public static <E> CompactHashSet<E> create() {
    return new CompactHashSet<>();
  }

  /**
   * Creates a <i>mutable</i> {@code CompactHashSet} instance containing the elements of the given
   * collection in unspecified order.
   *
   * @param collection the elements that the set should contain
   * @return a new {@code CompactHashSet} containing those elements (minus duplicates)
   */
  public static <E> CompactHashSet<E> create(Collection<? extends E> collection) {
    CompactHashSet<E> set = createWithExpectedSize(collection.size());
    set.addAll(collection);
    return set;
  }

  /**
   * Creates a <i>mutable</i> {@code CompactHashSet} instance containing the given elements in
   * unspecified order.
   *
   * @param elements the elements that the set should contain
   * @return a new {@code CompactHashSet} containing those elements (minus duplicates)
   */
  @SafeVarargs
  public static <E> CompactHashSet<E> create(E... elements) {
    CompactHashSet<E> set = createWithExpectedSize(elements.length);
    Collections.addAll(set, elements);
    return set;
  }

  /**
   * Creates a {@code CompactHashSet} instance, with a high enough "initial capacity" that it
   * <i>should</i> hold {@code expectedSize} elements without growth.
   *
   * @param expectedSize the number of elements you expect to add to the returned set
   * @return a new, empty {@code CompactHashSet} with enough capacity to hold {@code expectedSize}
   *     elements without resizing
   * @throws IllegalArgumentException if {@code expectedSize} is negative
   */
  public static <E> CompactHashSet<E> createWithExpectedSize(int expectedSize) {
    return new CompactHashSet<>(expectedSize);
  }

  /**
   * Maximum allowed false positive probability of detecting a hash flooding attack given random
   * input.
   */
  @VisibleForTesting() static final double HASH_FLOODING_FPP = 0.001;

  /**
   * Maximum allowed length of a hash table bucket before falling back to a j.u.LinkedHashSet based
   * implementation. Experimentally determined.
   */
  private static final int MAX_HASH_BUCKET_LENGTH = 9;

  /**
   * The hashtable object. This can be either:
   *
   * <ul>
   *   <li>a byte[], short[], or int[], with size a power of two, created by
   *       CompactHashing.createTable, whose values are either
   *       <ul>
   *         <li>UNSET, meaning "null pointer"
   *         <li>one plus an index into the entries and elements array
   *       </ul>
   *   <li>another java.util.Set delegate implementation. In most modern JDKs, normal java.util hash
   *       collections intelligently fall back to a binary search tree if hash table collisions are
   *       detected. Rather than going to all the trouble of reimplementing this ourselves, we
   *       simply switch over to use the JDK implementation wholesale if probable hash flooding is
   *       detected, sacrificing the compactness guarantee in very rare cases in exchange for much
   *       more reliable worst-case behavior.
   *   <li>null, if no entries have yet been added to the map
   * </ul>
   */
  @Nullable private transient Object table;

  /**
   * Contains the logical entries, in the range of [0, size()). The high bits of each int are the
   * part of the smeared hash of the element not covered by the hashtable mask, whereas the low bits
   * are the "next" pointer (pointing to the next entry in the bucket chain), which will always be
   * less than or equal to the hashtable mask.
   *
   * <pre>
   * hash  = aaaaaaaa
   * mask  = 0000ffff
   * next  = 0000bbbb
   * entry = aaaabbbb
   * </pre>
   *
   * <p>The pointers in [size(), entries.length) are all "null" (UNSET).
   */
  private transient int @Nullable [] entries;

  /**
   * The elements contained in the set, in the range of [0, size()). The elements in [size(),
   * elements.length) are all {@code null}.
   */
  @VisibleForTesting transient Object @Nullable [] elements;

  /**
   * Keeps track of metadata like the number of hash table bits and modifications of this data
   * structure (to make it possible to throw ConcurrentModificationException in the iterator). Note
   * that we choose not to make this volatile, so we do less of a "best effort" to track such
   * errors, for better performance.
   *
   * <p>Before the backing arrays are allocated, this field temporarily holds the requested
   * expected size (see {@link #init} and {@link #allocArrays}); afterwards, its low bits hold the
   * number of hash table bits (see {@link #setHashTableMask}) and the remaining bits act as the
   * modification counter.
   */
  private transient int metadata;

  /** The number of elements contained in the set. */
  private transient int size;

  /** Constructs a new empty instance of {@code CompactHashSet}. */
  CompactHashSet() {
    init(CompactHashing.DEFAULT_SIZE);
  }

  /**
   * Constructs a new instance of {@code CompactHashSet} with the specified capacity.
   *
   * @param expectedSize the initial capacity of this {@code CompactHashSet}.
   */
  CompactHashSet(int expectedSize) {
    init(expectedSize);
  }

  /** Pseudoconstructor for serialization support. */
  void init(int expectedSize) {
    Preconditions.checkArgument(expectedSize >= 0, "Expected size must be >= 0");

    // Save expectedSize for use in allocArrays()
    this.metadata = Ints.constrainToRange(expectedSize, 1, CompactHashing.MAX_SIZE);
  }

  /** Returns whether arrays need to be allocated. True only before the first insertion. */
  @VisibleForTesting
  boolean needsAllocArrays() {
    return table == null;
  }

  /**
   * Handle lazy allocation of arrays.
   *
   * @return the expected size that was stashed in {@link #metadata} by {@link #init}
   */
  @CanIgnoreReturnValue
  int allocArrays() {
    Preconditions.checkState(needsAllocArrays(), "Arrays already allocated");

    // metadata still holds the expected size at this point; after setHashTableMask it switches to
    // its "hash table bits + mod count" encoding.
    int expectedSize = metadata;
    int buckets = CompactHashing.tableSize(expectedSize);
    this.table = CompactHashing.createTable(buckets);
    setHashTableMask(buckets - 1);

    this.entries = new int[expectedSize];
    this.elements = new Object[expectedSize];

    return expectedSize;
  }

  /**
   * Returns the hash-flooding-resistant delegate set if we have switched over to one, or null if
   * we are still in compact mode.
   */
  @SuppressWarnings("unchecked")
  @VisibleForTesting
  @Nullable
  Set<E> delegateOrNull() {
    if (table instanceof Set) {
      return (Set<E>) table;
    }
    return null;
  }

  private Set<E> createHashFloodingResistantDelegate(int tableSize) {
    // Load factor 1.0f: the delegate is sized up front, so avoid over-allocating buckets.
    return new LinkedHashSet<>(tableSize, 1.0f);
  }

  /**
   * Abandons the compact representation and copies all elements into a LinkedHashSet delegate,
   * which is stored in {@link #table}. Called when a bucket chain grows suspiciously long.
   */
  @SuppressWarnings("unchecked")
  @VisibleForTesting
  @CanIgnoreReturnValue
  Set<E> convertToHashFloodingResistantImplementation() {
    Set<E> newDelegate = createHashFloodingResistantDelegate(hashTableMask() + 1);
    for (int i = firstEntryIndex(); i >= 0; i = getSuccessor(i)) {
      newDelegate.add((E) elements[i]);
    }
    this.table = newDelegate;
    // The compact arrays are no longer needed; free them for the GC.
    this.entries = null;
    this.elements = null;
    incrementModCount();
    return newDelegate;
  }

  @VisibleForTesting
  boolean isUsingHashFloodingResistance() {
    return delegateOrNull() != null;
  }

  /** Stores the hash table mask as the number of bits needed to represent an index. */
  private void setHashTableMask(int mask) {
    int hashTableBits = Integer.SIZE - Integer.numberOfLeadingZeros(mask);
    metadata =
        CompactHashing.maskCombine(metadata, hashTableBits, CompactHashing.HASH_TABLE_BITS_MASK);
  }

  /** Gets the hash table mask using the stored number of hash table bits. */
  private int hashTableMask() {
    // Rebuild the mask (2^bits - 1) from the bit count packed into the low bits of metadata.
    return (1 << (metadata & CompactHashing.HASH_TABLE_BITS_MASK)) - 1;
  }

  /** Bumps the modification counter stored in the high bits of {@link #metadata}. */
  void incrementModCount() {
    metadata += CompactHashing.MODIFICATION_COUNT_INCREMENT;
  }

  @CanIgnoreReturnValue
  @Override
  public boolean add(@Nullable E object) {
    if (needsAllocArrays()) {
      allocArrays();
    }
    @Nullable Set<E> delegate = delegateOrNull();
    if (delegate != null) {
      return delegate.add(object);
    }
    int[] entries = this.entries;
    Object[] elements = this.elements;

    int newEntryIndex = this.size; // current size, and pointer to the entry to be appended
    int newSize = newEntryIndex + 1;
    int hash = smearedHash(object);
    int mask = hashTableMask();
    int tableIndex = hash & mask;
    int next = CompactHashing.tableGet(table, tableIndex);
    if (next == UNSET) { // uninitialized bucket
      if (newSize > mask) {
        // Resize and add new entry
        mask = resizeTable(mask, CompactHashing.newCapacity(mask), hash, newEntryIndex);
      } else {
        CompactHashing.tableSet(table, tableIndex, newEntryIndex + 1);
      }
    } else {
      int entryIndex;
      int entry;
      int hashPrefix = CompactHashing.getHashPrefix(hash, mask);
      int bucketLength = 0;
      // Walk the bucket chain looking for a duplicate; track chain length for flooding detection.
      do {
        entryIndex = next - 1;
        entry = entries[entryIndex];
        if (CompactHashing.getHashPrefix(entry, mask) == hashPrefix
            && Objects.equal(object, elements[entryIndex])) {
          return false;
        }
        next = CompactHashing.getNext(entry, mask);
        bucketLength++;
      } while (next != UNSET);

      if (bucketLength >= MAX_HASH_BUCKET_LENGTH) {
        // Probable hash flooding: switch to the JDK-backed delegate and add there.
        return convertToHashFloodingResistantImplementation().add(object);
      }

      if (newSize > mask) {
        // Resize and add new entry
        mask = resizeTable(mask, CompactHashing.newCapacity(mask), hash, newEntryIndex);
      } else {
        // Append the new entry to the end of this bucket's chain.
        entries[entryIndex] = CompactHashing.maskCombine(entry, newEntryIndex + 1, mask);
      }
    }
    resizeMeMaybe(newSize);
    insertEntry(newEntryIndex, object, hash, mask);
    this.size = newSize;
    incrementModCount();
    return true;
  }

  /**
   * Creates a fresh entry with the specified object at the specified position in the entry arrays.
   */
  void insertEntry(int entryIndex, @Nullable E object, int hash, int mask) {
    // New entries are always chain tails, so their "next" pointer is UNSET.
    this.entries[entryIndex] = CompactHashing.maskCombine(hash, UNSET, mask);
    this.elements[entryIndex] = object;
  }

  /** Resizes the entries storage if necessary. */
  private void resizeMeMaybe(int newSize) {
    int entriesSize = entries.length;
    if (newSize > entriesSize) {
      // 1.5x but round up to nearest odd (this is optimal for memory consumption on Android)
      int newCapacity =
          Math.min(CompactHashing.MAX_SIZE, (entriesSize + Math.max(1, entriesSize >>> 1)) | 1);
      if (newCapacity != entriesSize) {
        resizeEntries(newCapacity);
      }
    }
  }

  /**
   * Resizes the internal entries array to the specified capacity, which may be greater or less than
   * the current capacity.
   */
  void resizeEntries(int newCapacity) {
    this.entries = Arrays.copyOf(entries, newCapacity);
    this.elements = Arrays.copyOf(elements, newCapacity);
  }

  /**
   * Rebuilds the hash table with {@code newCapacity} buckets, rehashing every existing entry, and
   * (when {@code targetEntryIndex != UNSET}) pre-links a not-yet-created entry for the element
   * currently being added.
   *
   * @return the new hash table mask ({@code newCapacity - 1})
   */
  @CanIgnoreReturnValue
  private int resizeTable(int mask, int newCapacity, int targetHash, int targetEntryIndex) {
    Object newTable = CompactHashing.createTable(newCapacity);
    int newMask = newCapacity - 1;

    if (targetEntryIndex != UNSET) {
      // Add target first; it must be last in the chain because its entry hasn't yet been created
      CompactHashing.tableSet(newTable, targetHash & newMask, targetEntryIndex + 1);
    }

    Object table = this.table;
    int[] entries = this.entries;

    // Loop over current hashtable
    for (int tableIndex = 0; tableIndex <= mask; tableIndex++) {
      int next = CompactHashing.tableGet(table, tableIndex);
      while (next != UNSET) {
        int entryIndex = next - 1;
        int entry = entries[entryIndex];

        // Rebuild hash using entry hashPrefix and tableIndex ("hashSuffix")
        int hash = CompactHashing.getHashPrefix(entry, mask) | tableIndex;

        int newTableIndex = hash & newMask;
        int newNext = CompactHashing.tableGet(newTable, newTableIndex);
        CompactHashing.tableSet(newTable, newTableIndex, next);
        entries[entryIndex] = CompactHashing.maskCombine(hash, newNext, newMask);

        next = CompactHashing.getNext(entry, mask);
      }
    }

    this.table = newTable;
    setHashTableMask(newMask);
    return newMask;
  }

  @Override
  public boolean contains(@Nullable Object object) {
    if (needsAllocArrays()) {
      return false;
    }
    @Nullable Set<E> delegate = delegateOrNull();
    if (delegate != null) {
      return delegate.contains(object);
    }
    int hash = smearedHash(object);
    int mask = hashTableMask();
    int next = CompactHashing.tableGet(table, hash & mask);
    if (next == UNSET) {
      return false;
    }
    // Compare hash prefixes first to avoid equals() calls on definite non-matches.
    int hashPrefix = CompactHashing.getHashPrefix(hash, mask);
    do {
      int entryIndex = next - 1;
      int entry = entries[entryIndex];
      if (CompactHashing.getHashPrefix(entry, mask) == hashPrefix
          && Objects.equal(object, elements[entryIndex])) {
        return true;
      }
      next = CompactHashing.getNext(entry, mask);
    } while (next != UNSET);
    return false;
  }

  @CanIgnoreReturnValue
  @Override
  public boolean remove(@Nullable Object object) {
    if (needsAllocArrays()) {
      return false;
    }
    @Nullable Set<E> delegate = delegateOrNull();
    if (delegate != null) {
      return delegate.remove(object);
    }
    int mask = hashTableMask();
    int index =
        CompactHashing.remove(
            object, /* value= */ null, mask, table, entries, elements, /* values= */ null);
    if (index == -1) {
      return false;
    }

    // Keep entries dense: move the last entry into the freed slot.
    moveLastEntry(index, mask);
    size--;
    incrementModCount();

    return true;
  }

  /**
   * Moves the last entry in the entry array into {@code dstIndex}, and nulls out its old position.
   */
  void moveLastEntry(int dstIndex, int mask) {
    int srcIndex = size() - 1;
    if (dstIndex < srcIndex) {
      // move last entry to deleted spot
      @Nullable Object object = elements[srcIndex];
      elements[dstIndex] = object;
      elements[srcIndex] = null;

      // move the last entry to the removed spot, just like we moved the element
      entries[dstIndex] = entries[srcIndex];
      entries[srcIndex] = 0;

      // also need to update whoever's "next" pointer was pointing to the last entry place
      int tableIndex = smearedHash(object) & mask;
      int next = CompactHashing.tableGet(table, tableIndex);
      int srcNext = srcIndex + 1;
      if (next == srcNext) {
        // we need to update the root pointer
        CompactHashing.tableSet(table, tableIndex, dstIndex + 1);
      } else {
        // we need to update a pointer in an entry
        int entryIndex;
        int entry;
        do {
          entryIndex = next - 1;
          entry = entries[entryIndex];
          next = CompactHashing.getNext(entry, mask);
        } while (next != srcNext);
        // here, entries[entryIndex] points to the old entry location; update it
        entries[entryIndex] = CompactHashing.maskCombine(entry, dstIndex + 1, mask);
      }
    } else {
      // Removed entry was already the last one; just clear it.
      elements[dstIndex] = null;
      entries[dstIndex] = 0;
    }
  }

  /** Returns the index of the first entry for iteration, or -1 if the set is empty. */
  int firstEntryIndex() {
    return isEmpty() ? -1 : 0;
  }

  /** Returns the index of the entry after {@code entryIndex}, or -1 if it was the last one. */
  int getSuccessor(int entryIndex) {
    return (entryIndex + 1 < size) ? entryIndex + 1 : -1;
  }

  /**
   * Updates the index an iterator is pointing to after a call to remove: returns the index of the
   * entry that should be looked at after a removal on indexRemoved, with indexBeforeRemove as the
   * index that *was* the next entry that would be looked at.
   */
  int adjustAfterRemove(int indexBeforeRemove, @SuppressWarnings("unused") int indexRemoved) {
    // Removal compacts by moving the last entry into the freed slot, so step back by one.
    return indexBeforeRemove - 1;
  }

  @Override
  public Iterator<E> iterator() {
    @Nullable Set<E> delegate = delegateOrNull();
    if (delegate != null) {
      return delegate.iterator();
    }
    return new Iterator<E>() {
      // Snapshot of metadata, used to detect concurrent modification (fail-fast, best effort).
      int expectedMetadata = metadata;
      int currentIndex = firstEntryIndex();
      int indexToRemove = -1;

      @Override
      public boolean hasNext() {
        return currentIndex >= 0;
      }

      @SuppressWarnings("unchecked") // known to be Es
      @Override
      public E next() {
        checkForConcurrentModification();
        if (!hasNext()) {
          throw new NoSuchElementException();
        }
        indexToRemove = currentIndex;
        E result = (E) elements[currentIndex];
        currentIndex = getSuccessor(currentIndex);
        return result;
      }

      @Override
      public void remove() {
        checkForConcurrentModification();
        checkRemove(indexToRemove >= 0);
        // Our own removal is expected; bump the expected count before delegating.
        incrementExpectedModCount();
        CompactHashSet.this.remove(elements[indexToRemove]);
        currentIndex = adjustAfterRemove(currentIndex, indexToRemove);
        indexToRemove = -1;
      }

      void incrementExpectedModCount() {
        expectedMetadata += CompactHashing.MODIFICATION_COUNT_INCREMENT;
      }

      private void checkForConcurrentModification() {
        if (metadata != expectedMetadata) {
          throw new ConcurrentModificationException();
        }
      }
    };
  }

  @Override
  public Spliterator<E> spliterator() {
    if (needsAllocArrays()) {
      return Spliterators.spliterator(new Object[0], Spliterator.DISTINCT | Spliterator.ORDERED);
    }
    @Nullable Set<E> delegate = delegateOrNull();
    return (delegate != null)
        ? delegate.spliterator()
        : Spliterators.spliterator(elements, 0, size, Spliterator.DISTINCT | Spliterator.ORDERED);
  }

  @SuppressWarnings("unchecked") // known to be Es
  @Override
  public void forEach(Consumer<? super E> action) {
    checkNotNull(action);
    @Nullable Set<E> delegate = delegateOrNull();
    if (delegate != null) {
      delegate.forEach(action);
    } else {
      for (int i = firstEntryIndex(); i >= 0; i = getSuccessor(i)) {
        action.accept((E) elements[i]);
      }
    }
  }

  @Override
  public int size() {
    @Nullable Set<E> delegate = delegateOrNull();
    return (delegate != null) ? delegate.size() : size;
  }

  @Override
  public boolean isEmpty() {
    return size() == 0;
  }

  @Override
  public Object[] toArray() {
    if (needsAllocArrays()) {
      return new Object[0];
    }
    @Nullable Set<E> delegate = delegateOrNull();
    return (delegate != null) ? delegate.toArray() : Arrays.copyOf(elements, size);
  }

  @CanIgnoreReturnValue
  @Override
  public <T> T[] toArray(T[] a) {
    if (needsAllocArrays()) {
      if (a.length > 0) {
        // Per the Collection.toArray(T[]) contract, the element after the last set element is
        // nulled out.
        a[0] = null;
      }
      return a;
    }
    @Nullable Set<E> delegate = delegateOrNull();
    return (delegate != null)
        ? delegate.toArray(a)
        : ObjectArrays.toArrayImpl(elements, 0, size, a);
  }

  /**
   * Ensures that this {@code CompactHashSet} has the smallest representation in memory, given its
   * current size.
   */
  public void trimToSize() {
    if (needsAllocArrays()) {
      return;
    }
    @Nullable Set<E> delegate = delegateOrNull();
    if (delegate != null) {
      // Rebuild the delegate at exactly the current size to shed excess buckets.
      Set<E> newDelegate = createHashFloodingResistantDelegate(size());
      newDelegate.addAll(delegate);
      this.table = newDelegate;
      return;
    }
    int size = this.size;
    if (size < entries.length) {
      resizeEntries(size);
    }
    int minimumTableSize = CompactHashing.tableSize(size);
    int mask = hashTableMask();
    if (minimumTableSize < mask) { // smaller table size will always be less than current mask
      resizeTable(mask, minimumTableSize, UNSET, UNSET);
    }
  }

  @Override
  public void clear() {
    if (needsAllocArrays()) {
      return;
    }
    incrementModCount();
    @Nullable Set<E> delegate = delegateOrNull();
    if (delegate != null) {
      // Drop the delegate and return to the (unallocated) compact representation, remembering a
      // reasonable expected size for the next allocation.
      metadata =
          Ints.constrainToRange(size(), CompactHashing.DEFAULT_SIZE, CompactHashing.MAX_SIZE);
      delegate.clear(); // invalidate any iterators left over!
      table = null;
      size = 0;
    } else {
      Arrays.fill(elements, 0, size, null);
      CompactHashing.tableClear(table);
      Arrays.fill(entries, 0, size, 0);
      this.size = 0;
    }
  }

  /**
   * Serialized form: the default fields (all transient, so effectively nothing), then the element
   * count, then each element in iteration order.
   */
  private void writeObject(ObjectOutputStream stream) throws IOException {
    stream.defaultWriteObject();
    stream.writeInt(size());
    for (E e : this) {
      stream.writeObject(e);
    }
  }

  @SuppressWarnings("unchecked")
  private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
    stream.defaultReadObject();
    int elementCount = stream.readInt();
    if (elementCount < 0) {
      throw new InvalidObjectException("Invalid size: " + elementCount);
    }
    init(elementCount);
    for (int i = 0; i < elementCount; i++) {
      E element = (E) stream.readObject();
      add(element);
    }
  }
}