From b85493f6095d4585cca32a4a8e74139830bcb496 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Thu, 26 Jul 2018 20:15:49 +1000 Subject: [PATCH] Bug 1477626 - Move js::Hash{Set,Map} into MFBT. r=Waldo The main change is that the patch copies js/public/HashTable.h to mfbt/HashTable.h, and then changes it as follows. - Changes `js` namespaces to `mozilla` (and removes some now-unnecessary `mozilla::` qualifiers). - Changes the default AllocPolicy from the SpiderMonkey-specific `TempAllocPolicy` to the generic `MallocAllocPolicy`. - Adds `#include "AllocPolicy.h"` (like mfbt/Vector.h). - Changes `JS_DEBUG` use to `DEBUG`. - Minor comment updates, as necessary. js/public/HashTable.h is now tiny, holding just a few renamings of things from the `mozilla` namespace into the `js` namespace to minimize churn elsewhere. (Those renamings keep `TempAllocPolicy` as the default AllocPolicy for js::Hash{Set,Map}.) Also, various template specializations had to be moved from the `js` namespace to the `mozilla` namespace to avoid compile errors. MozReview-Commit-ID: GS9Qn9YeYDA --HG-- rename : js/public/HashTable.h => mfbt/HashTable.h --- js/public/HashTable.h | 1959 +-------------------------- js/public/RootingAPI.h | 12 +- js/public/UbiNode.h | 4 +- js/src/ds/OrderedHashTable.h | 2 + js/src/gc/Barrier.h | 24 +- js/src/jsapi-tests/testUbiNode.cpp | 4 +- js/src/vm/ObjectGroup.cpp | 4 +- js/src/vm/SavedFrame.h | 14 +- js/src/vm/Shape.h | 10 +- js/src/vm/Stack.h | 14 +- js/src/vm/UbiNodeCensus.cpp | 9 +- js/src/wasm/WasmValidate.cpp | 2 +- mfbt/HashTable.h | 1960 ++++++++++++++++++++++++++++ mfbt/moz.build | 1 + 14 files changed, 2042 insertions(+), 1977 deletions(-) create mode 100644 mfbt/HashTable.h diff --git a/js/public/HashTable.h b/js/public/HashTable.h index 9f97479eefd7c..981a701bc5a9e 100644 --- a/js/public/HashTable.h +++ b/js/public/HashTable.h @@ -7,21 +7,7 @@ #ifndef js_HashTable_h #define js_HashTable_h -#include "mozilla/Assertions.h" -#include "mozilla/Attributes.h" -#include "mozilla/Casting.h" -#include "mozilla/HashFunctions.h" -#include "mozilla/MathAlgorithms.h" -#include "mozilla/MemoryChecking.h" -#include "mozilla/MemoryReporting.h" -#include "mozilla/Move.h" -#include "mozilla/Opaque.h" -#include "mozilla/PodOperations.h" -#include "mozilla/ReentrancyGuard.h" -#include "mozilla/TypeTraits.h" -#include "mozilla/UniquePtr.h" - -#include "js/Utility.h" +#include "mozilla/HashTable.h" namespace js { @@ -29,1945 +15,24 @@ using HashNumber = mozilla::HashNumber; static const uint32_t kHashNumberBits = mozilla::kHashNumberBits; class TempAllocPolicy; -template struct DefaultHasher; -template class HashMapEntry; -namespace detail { - template class HashTableEntry; - template class HashTable; -} // namespace detail -/*****************************************************************************/ +template +using DefaultHasher = mozilla::DefaultHasher; -// The "generation" of a hash table is an opaque value indicating the state of -// modification of the hash table through its lifetime. If the generation of -// a hash table compares equal at times T1 and T2, then lookups in the hash -// table, pointers to (or into) hash table entries, etc. at time T1 are valid -// at time T2. If the generation compares unequal, these computations are all -// invalid and must be performed again to be used. -// -// Generations are meaningfully comparable only with respect to a single hash -// table. It's always nonsensical to compare the generation of distinct hash -// tables H1 and H2. 
-using Generation = mozilla::Opaque; +template +using PointerHasher = mozilla::PointerHasher; -// A JS-friendly, STL-like container providing a hash-based map from keys to -// values. In particular, HashMap calls constructors and destructors of all -// objects added so non-PODs may be used safely. -// -// Key/Value requirements: -// - movable, destructible, assignable -// HashPolicy requirements: -// - see Hash Policy section below -// AllocPolicy: -// - see AllocPolicy.h -// -// Note: -// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members -// called by HashMap must not call back into the same HashMap object. -// - Due to the lack of exception handling, the user must call |init()|. -template , +template , class AllocPolicy = TempAllocPolicy> -class HashMap -{ - typedef HashMapEntry TableEntry; - - struct MapHashPolicy : HashPolicy - { - using Base = HashPolicy; - typedef Key KeyType; - static const Key& getKey(TableEntry& e) { return e.key(); } - static void setKey(TableEntry& e, Key& k) { HashPolicy::rekey(e.mutableKey(), k); } - }; - - typedef detail::HashTable Impl; - Impl impl; - - public: - typedef typename HashPolicy::Lookup Lookup; - typedef TableEntry Entry; - - // HashMap construction is fallible (due to OOM); thus the user must call - // init after constructing a HashMap and check the return value. - explicit HashMap(AllocPolicy a = AllocPolicy()) : impl(a) {} - MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); } - bool initialized() const { return impl.initialized(); } - - // Return whether the given lookup value is present in the map. E.g.: - // - // typedef HashMap HM; - // HM h; - // if (HM::Ptr p = h.lookup(3)) { - // const HM::Entry& e = *p; // p acts like a pointer to Entry - // assert(p->key == 3); // Entry contains the key - // char val = p->value; // and value - // } - // - // Also see the definition of Ptr in HashTable above (with T = Entry). - typedef typename Impl::Ptr Ptr; - MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); } - - // Like lookup, but does not assert if two threads call lookup at the same - // time. Only use this method when none of the threads will modify the map. - MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const { - return impl.readonlyThreadsafeLookup(l); - } - - // Assuming |p.found()|, remove |*p|. - void remove(Ptr p) { impl.remove(p); } - - // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient - // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using - // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.: - // - // typedef HashMap HM; - // HM h; - // HM::AddPtr p = h.lookupForAdd(3); - // if (!p) { - // if (!h.add(p, 3, 'a')) - // return false; - // } - // const HM::Entry& e = *p; // p acts like a pointer to Entry - // assert(p->key == 3); // Entry contains the key - // char val = p->value; // and value - // - // Also see the definition of AddPtr in HashTable above (with T = Entry). - // - // N.B. The caller must ensure that no mutating hash table operations - // occur between a pair of |lookupForAdd| and |add| calls. To avoid - // looking up the key a second time, the caller may use the more efficient - // relookupOrAdd method. This method reuses part of the hashing computation - // to more efficiently insert the key if it has not been added. 
For - // example, a mutation-handling version of the previous example: - // - // HM::AddPtr p = h.lookupForAdd(3); - // if (!p) { - // call_that_may_mutate_h(); - // if (!h.relookupOrAdd(p, 3, 'a')) - // return false; - // } - // const HM::Entry& e = *p; - // assert(p->key == 3); - // char val = p->value; - typedef typename Impl::AddPtr AddPtr; - MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const { - return impl.lookupForAdd(l); - } - - template - MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k, ValueInput&& v) { - return impl.add(p, - std::forward(k), - std::forward(v)); - } - - template - MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k) { - return impl.add(p, std::forward(k), Value()); - } - - template - MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v) { - return impl.relookupOrAdd(p, k, - std::forward(k), - std::forward(v)); - } - - // |all()| returns a Range containing |count()| elements. E.g.: - // - // typedef HashMap HM; - // HM h; - // for (HM::Range r = h.all(); !r.empty(); r.popFront()) - // char c = r.front().value(); - // - // Also see the definition of Range in HashTable above (with T = Entry). - typedef typename Impl::Range Range; - Range all() const { return impl.all(); } - - // Typedef for the enumeration class. An Enum may be used to examine and - // remove table entries: - // - // typedef HashMap HM; - // HM s; - // for (HM::Enum e(s); !e.empty(); e.popFront()) - // if (e.front().value() == 'l') - // e.removeFront(); - // - // Table resize may occur in Enum's destructor. Also see the definition of - // Enum in HashTable above (with T = Entry). - typedef typename Impl::Enum Enum; - - // Remove all entries. This does not shrink the table. For that consider - // using the finish() method. - void clear() { impl.clear(); } - - // Remove all entries. Unlike clear() this method tries to shrink the table. - // Unlike finish() it does not require the map to be initialized again. - void clearAndShrink() { impl.clearAndShrink(); } - - // Remove all the entries and release all internal buffers. The map must - // be initialized again before any use. - void finish() { impl.finish(); } - - // Does the table contain any entries? - bool empty() const { return impl.empty(); } - - // Number of live elements in the map. - uint32_t count() const { return impl.count(); } - - // Total number of allocation in the dynamic table. Note: resize will - // happen well before count() == capacity(). - size_t capacity() const { return impl.capacity(); } - - // Don't just call |impl.sizeOfExcludingThis()| because there's no - // guarantee that |impl| is the first field in HashMap. - size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const { - return impl.sizeOfExcludingThis(mallocSizeOf); - } - size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { - return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf); - } - - Generation generation() const { - return impl.generation(); - } - - /************************************************** Shorthand operations */ - - bool has(const Lookup& l) const { - return impl.lookup(l).found(); - } - - // Overwrite existing value with v. Return false on oom. - template - MOZ_MUST_USE bool put(KeyInput&& k, ValueInput&& v) { - AddPtr p = lookupForAdd(k); - if (p) { - p->value() = std::forward(v); - return true; - } - return add(p, std::forward(k), std::forward(v)); - } - - // Like put, but assert that the given key is not already present. 
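[Editor's sketch, not part of the patch: a compilable restatement of the lookup-then-add idiom documented above, written against the moved mozilla::HashMap, whose default AllocPolicy is now MallocAllocPolicy. recordChar and its types are placeholders, and |h| is assumed to have been successfully init()ed.]

#include "mozilla/HashTable.h"

static bool recordChar(mozilla::HashMap<uint32_t, char>& h, uint32_t key, char val)
{
    auto p = h.lookupForAdd(key);      // hash computed once here...
    if (!p)
        return h.add(p, key, val);     // ...and reused by add()
    p->value() = val;                  // key already present: overwrite in place
    return true;
}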
- template - MOZ_MUST_USE bool putNew(KeyInput&& k, ValueInput&& v) { - return impl.putNew(k, std::forward(k), std::forward(v)); - } - - // Only call this to populate an empty map after reserving space with init(). - template - void putNewInfallible(KeyInput&& k, ValueInput&& v) { - impl.putNewInfallible(k, std::forward(k), std::forward(v)); - } - - // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom. - Ptr lookupWithDefault(const Key& k, const Value& defaultValue) { - AddPtr p = lookupForAdd(k); - if (p) - return p; - bool ok = add(p, k, defaultValue); - MOZ_ASSERT_IF(!ok, !p); // p is left false-y on oom. - (void)ok; - return p; - } - - // Remove if present. - void remove(const Lookup& l) { - if (Ptr p = lookup(l)) - remove(p); - } - - // Infallibly rekey one entry, if necessary. - // Requires template parameters Key and HashPolicy::Lookup to be the same type. - void rekeyIfMoved(const Key& old_key, const Key& new_key) { - if (old_key != new_key) - rekeyAs(old_key, new_key, new_key); - } - - // Infallibly rekey one entry if present, and return whether that happened. - bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const Key& new_key) { - if (Ptr p = lookup(old_lookup)) { - impl.rekeyAndMaybeRehash(p, new_lookup, new_key); - return true; - } - return false; - } - - // HashMap is movable - HashMap(HashMap&& rhs) : impl(std::move(rhs.impl)) {} - void operator=(HashMap&& rhs) { - MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited"); - impl = std::move(rhs.impl); - } - - private: - // HashMap is not copyable or assignable - HashMap(const HashMap& hm) = delete; - HashMap& operator=(const HashMap& hm) = delete; +using HashSet = mozilla::HashSet; - friend class Impl::Enum; -}; - -/*****************************************************************************/ - -// A JS-friendly, STL-like container providing a hash-based set of values. In -// particular, HashSet calls constructors and destructors of all objects added -// so non-PODs may be used safely. -// -// T requirements: -// - movable, destructible, assignable -// HashPolicy requirements: -// - see Hash Policy section below -// AllocPolicy: -// - see AllocPolicy.h -// -// Note: -// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by -// HashSet must not call back into the same HashSet object. -// - Due to the lack of exception handling, the user must call |init()|. -template , +template , class AllocPolicy = TempAllocPolicy> -class HashSet -{ - struct SetOps : HashPolicy - { - using Base = HashPolicy; - typedef T KeyType; - static const KeyType& getKey(const T& t) { return t; } - static void setKey(T& t, KeyType& k) { HashPolicy::rekey(t, k); } - }; - - typedef detail::HashTable Impl; - Impl impl; - - public: - typedef typename HashPolicy::Lookup Lookup; - typedef T Entry; - - // HashSet construction is fallible (due to OOM); thus the user must call - // init after constructing a HashSet and check the return value. - explicit HashSet(AllocPolicy a = AllocPolicy()) : impl(a) {} - MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); } - bool initialized() const { return impl.initialized(); } - - // Return whether the given lookup value is present in the map. E.g.: - // - // typedef HashSet HS; - // HS h; - // if (HS::Ptr p = h.lookup(3)) { - // assert(*p == 3); // p acts like a pointer to int - // } - // - // Also see the definition of Ptr in HashTable above. 
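[Editor's sketch, not part of the patch: the same idiom for HashSet, documented for HashMap above and repeated for HashSet below. noteSeen is a placeholder, and |seen| is assumed init()ed.]

static bool noteSeen(mozilla::HashSet<uint32_t>& seen, uint32_t id)
{
    auto p = seen.lookupForAdd(id);
    return p || seen.add(p, id);       // true if already present or newly added
}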
- typedef typename Impl::Ptr Ptr; - MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); } - - // Like lookup, but does not assert if two threads call lookup at the same - // time. Only use this method when none of the threads will modify the map. - MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const { - return impl.readonlyThreadsafeLookup(l); - } - - // Assuming |p.found()|, remove |*p|. - void remove(Ptr p) { impl.remove(p); } - - // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient - // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using - // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.: - // - // typedef HashSet HS; - // HS h; - // HS::AddPtr p = h.lookupForAdd(3); - // if (!p) { - // if (!h.add(p, 3)) - // return false; - // } - // assert(*p == 3); // p acts like a pointer to int - // - // Also see the definition of AddPtr in HashTable above. - // - // N.B. The caller must ensure that no mutating hash table operations - // occur between a pair of |lookupForAdd| and |add| calls. To avoid - // looking up the key a second time, the caller may use the more efficient - // relookupOrAdd method. This method reuses part of the hashing computation - // to more efficiently insert the key if it has not been added. For - // example, a mutation-handling version of the previous example: - // - // HS::AddPtr p = h.lookupForAdd(3); - // if (!p) { - // call_that_may_mutate_h(); - // if (!h.relookupOrAdd(p, 3, 3)) - // return false; - // } - // assert(*p == 3); - // - // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the - // entry |t|, where the caller ensures match(l,t). - typedef typename Impl::AddPtr AddPtr; - MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const { - return impl.lookupForAdd(l); - } - - template - MOZ_MUST_USE bool add(AddPtr& p, U&& u) { - return impl.add(p, std::forward(u)); - } - - template - MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, U&& u) { - return impl.relookupOrAdd(p, l, std::forward(u)); - } - - // |all()| returns a Range containing |count()| elements: - // - // typedef HashSet HS; - // HS h; - // for (HS::Range r = h.all(); !r.empty(); r.popFront()) - // int i = r.front(); - // - // Also see the definition of Range in HashTable above. - typedef typename Impl::Range Range; - Range all() const { return impl.all(); } - - // Typedef for the enumeration class. An Enum may be used to examine and - // remove table entries: - // - // typedef HashSet HS; - // HS s; - // for (HS::Enum e(s); !e.empty(); e.popFront()) - // if (e.front() == 42) - // e.removeFront(); - // - // Table resize may occur in Enum's destructor. Also see the definition of - // Enum in HashTable above. - typedef typename Impl::Enum Enum; - - // Remove all entries. This does not shrink the table. For that consider - // using the finish() method. - void clear() { impl.clear(); } - - // Remove all entries. Unlike clear() this method tries to shrink the table. - // Unlike finish() it does not require the set to be initialized again. - void clearAndShrink() { impl.clearAndShrink(); } - - // Remove all the entries and release all internal buffers. The set must - // be initialized again before any use. - void finish() { impl.finish(); } - - // Does the table contain any entries? - bool empty() const { return impl.empty(); } - - // Number of live elements in the map. - uint32_t count() const { return impl.count(); } - - // Total number of allocation in the dynamic table. 
Note: resize will - // happen well before count() == capacity(). - size_t capacity() const { return impl.capacity(); } - - // Don't just call |impl.sizeOfExcludingThis()| because there's no - // guarantee that |impl| is the first field in HashSet. - size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const { - return impl.sizeOfExcludingThis(mallocSizeOf); - } - size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { - return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf); - } - - Generation generation() const { - return impl.generation(); - } - - /************************************************** Shorthand operations */ - - bool has(const Lookup& l) const { - return impl.lookup(l).found(); - } - - // Add |u| if it is not present already. Return false on oom. - template - MOZ_MUST_USE bool put(U&& u) { - AddPtr p = lookupForAdd(u); - return p ? true : add(p, std::forward(u)); - } - - // Like put, but assert that the given key is not already present. - template - MOZ_MUST_USE bool putNew(U&& u) { - return impl.putNew(u, std::forward(u)); - } - - template - MOZ_MUST_USE bool putNew(const Lookup& l, U&& u) { - return impl.putNew(l, std::forward(u)); - } - - // Only call this to populate an empty set after reserving space with init(). - template - void putNewInfallible(const Lookup& l, U&& u) { - impl.putNewInfallible(l, std::forward(u)); - } - - void remove(const Lookup& l) { - if (Ptr p = lookup(l)) - remove(p); - } - - // Infallibly rekey one entry, if present. - // Requires template parameters T and HashPolicy::Lookup to be the same type. - void rekeyIfMoved(const Lookup& old_value, const T& new_value) { - if (old_value != new_value) - rekeyAs(old_value, new_value, new_value); - } - - // Infallibly rekey one entry if present, and return whether that happened. - bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const T& new_value) { - if (Ptr p = lookup(old_lookup)) { - impl.rekeyAndMaybeRehash(p, new_lookup, new_value); - return true; - } - return false; - } - - // Infallibly replace the current key at |p| with an equivalent key. - // Specifically, both HashPolicy::hash and HashPolicy::match must return - // identical results for the new and old key when applied against all - // possible matching values. - void replaceKey(Ptr p, const T& new_value) { - MOZ_ASSERT(p.found()); - MOZ_ASSERT(*p != new_value); - MOZ_ASSERT(HashPolicy::hash(*p) == HashPolicy::hash(new_value)); - MOZ_ASSERT(HashPolicy::match(*p, new_value)); - const_cast(*p) = new_value; - } - - // HashSet is movable - HashSet(HashSet&& rhs) : impl(std::move(rhs.impl)) {} - void operator=(HashSet&& rhs) { - MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited"); - impl = std::move(rhs.impl); - } - - private: - // HashSet is not copyable or assignable - HashSet(const HashSet& hs) = delete; - HashSet& operator=(const HashSet& hs) = delete; - - friend class Impl::Enum; -}; - -/*****************************************************************************/ +using HashMap = mozilla::HashMap; -// Hash Policy -// -// A hash policy P for a hash table with key-type Key must provide: -// - a type |P::Lookup| to use to lookup table entries; -// - a static member function |P::hash| with signature -// -// static js::HashNumber hash(Lookup) -// -// to use to hash the lookup type; and -// - a static member function |P::match| with signature -// -// static bool match(Key, Lookup) -// -// to use to test equality of key and lookup values. -// -// Normally, Lookup = Key. 
In general, though, different values and types of -// values can be used to lookup and store. If a Lookup value |l| is != to the -// added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.: -// -// js::HashSet::AddPtr p = h.lookup(l); -// if (!p) { -// assert(P::match(k, l)); // must hold -// h.add(p, k); -// } - -// Pointer hashing policy that uses HashGeneric() to create good hashes for -// pointers. Note that we don't shift out the lowest k bits to generate a -// good distribution for arena allocated pointers. -template -struct PointerHasher -{ - typedef Key Lookup; - static HashNumber hash(const Lookup& l) { - size_t word = reinterpret_cast(l); - return mozilla::HashGeneric(word); - } - static bool match(const Key& k, const Lookup& l) { - return k == l; - } - static void rekey(Key& k, const Key& newKey) { - k = newKey; - } -}; - -// Default hash policy: just use the 'lookup' value. This of course only -// works if the lookup value is integral. HashTable applies ScrambleHashCode to -// the result of the 'hash' which means that it is 'ok' if the lookup value is -// not well distributed over the HashNumber domain. -template -struct DefaultHasher -{ - typedef Key Lookup; - static HashNumber hash(const Lookup& l) { - // Hash if can implicitly cast to hash number type. - return l; - } - static bool match(const Key& k, const Lookup& l) { - // Use builtin or overloaded operator==. - return k == l; - } - static void rekey(Key& k, const Key& newKey) { - k = newKey; - } -}; - -// Specialize hashing policy for pointer types. It assumes that the type is -// at least word-aligned. For types with smaller size use PointerHasher. -template -struct DefaultHasher : PointerHasher -{}; - -// Specialize hashing policy for mozilla::UniquePtr to proxy the UniquePtr's -// raw pointer to PointerHasher. -template -struct DefaultHasher> -{ - using Lookup = mozilla::UniquePtr; - using PtrHasher = PointerHasher; - - static HashNumber hash(const Lookup& l) { - return PtrHasher::hash(l.get()); - } - static bool match(const mozilla::UniquePtr& k, const Lookup& l) { - return PtrHasher::match(k.get(), l.get()); - } - static void rekey(mozilla::UniquePtr& k, mozilla::UniquePtr&& newKey) { - k = std::move(newKey); - } -}; - -// For doubles, we can xor the two uint32s. -template <> -struct DefaultHasher -{ - typedef double Lookup; - static HashNumber hash(double d) { - static_assert(sizeof(HashNumber) == 4, - "subsequent code assumes a four-byte hash"); - uint64_t u = mozilla::BitwiseCast(d); - return HashNumber(u ^ (u >> 32)); - } - static bool match(double lhs, double rhs) { - return mozilla::BitwiseCast(lhs) == mozilla::BitwiseCast(rhs); - } -}; - -template <> -struct DefaultHasher -{ - typedef float Lookup; - static HashNumber hash(float f) { - static_assert(sizeof(HashNumber) == 4, - "subsequent code assumes a four-byte hash"); - return HashNumber(mozilla::BitwiseCast(f)); - } - static bool match(float lhs, float rhs) { - return mozilla::BitwiseCast(lhs) == mozilla::BitwiseCast(rhs); - } -}; - -// A hash policy that compares C strings. -struct CStringHasher -{ - typedef const char* Lookup; - static js::HashNumber hash(Lookup l) { - return mozilla::HashString(l); - } - static bool match(const char* key, Lookup lookup) { - return strcmp(key, lookup) == 0; - } -}; - -// Fallible hashing interface. -// -// Most of the time generating a hash code is infallible so this class provides -// default methods that always succeed. Specialize this class for your own hash -// policy to provide fallible hashing. 
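[Editor's sketch, not part of the patch: a minimal policy exercising the Lookup != Key latitude described in the Hash Policy section above. OwnedStringPolicy and its choice of std::string are illustrative assumptions: entries own their string, while lookups borrow a raw pointer, so searching never allocates.]

#include <string>
#include "mozilla/HashFunctions.h"

struct OwnedStringPolicy
{
    using Lookup = const char*;       // borrowed; differs from the stored key type
    static mozilla::HashNumber hash(Lookup l) { return mozilla::HashString(l); }
    static bool match(const std::string& k, Lookup l) { return k == l; }
    static void rekey(std::string& k, const std::string& newKey) { k = newKey; }
};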
-// -// This is used by MovableCellHasher to handle the fact that generating a unique -// ID for cell pointer may fail due to OOM. -template -struct FallibleHashMethods -{ - // Return true if a hashcode is already available for its argument. Once - // this returns true for a specific argument it must continue to do so. - template static bool hasHash(Lookup&& l) { return true; } - - // Fallible method to ensure a hashcode exists for its argument and create - // one if not. Returns false on error, e.g. out of memory. - template static bool ensureHash(Lookup&& l) { return true; } -}; - -template -static bool -HasHash(Lookup&& l) { - return FallibleHashMethods::hasHash(std::forward(l)); } -template -static bool -EnsureHash(Lookup&& l) { - return FallibleHashMethods::ensureHash(std::forward(l)); -} - -/*****************************************************************************/ - -// Both HashMap and HashSet are implemented by a single HashTable that is even -// more heavily parameterized than the other two. This leaves HashTable gnarly -// and extremely coupled to HashMap and HashSet; thus code should not use -// HashTable directly. - -template -class HashMapEntry -{ - Key key_; - Value value_; - - template friend class detail::HashTable; - template friend class detail::HashTableEntry; - template friend class HashMap; - - public: - template - HashMapEntry(KeyInput&& k, ValueInput&& v) - : key_(std::forward(k)), - value_(std::forward(v)) - {} - - HashMapEntry(HashMapEntry&& rhs) - : key_(std::move(rhs.key_)), - value_(std::move(rhs.value_)) - {} - - void operator=(HashMapEntry&& rhs) { - key_ = std::move(rhs.key_); - value_ = std::move(rhs.value_); - } - - typedef Key KeyType; - typedef Value ValueType; - - const Key& key() const { return key_; } - Key& mutableKey() { return key_; } - const Value& value() const { return value_; } - Value& value() { return value_; } - - private: - HashMapEntry(const HashMapEntry&) = delete; - void operator=(const HashMapEntry&) = delete; -}; - -} // namespace js - -namespace mozilla { - -template -struct IsPod > - : IntegralConstant::value && IsPod::value> -{}; - -} // namespace mozilla - -namespace js { - -namespace detail { - -template -class HashTable; - -template -class HashTableEntry -{ - private: - using NonConstT = typename mozilla::RemoveConst::Type; - - static const HashNumber sFreeKey = 0; - static const HashNumber sRemovedKey = 1; - static const HashNumber sCollisionBit = 1; - - HashNumber keyHash = sFreeKey; - alignas(NonConstT) unsigned char valueData_[sizeof(NonConstT)]; - - private: - template friend class HashTable; - - // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a - // -Werror compile error) to reinterpret_cast<> |valueData_| to |T*|, even - // through |void*|. Placing the latter cast in these separate functions - // breaks the chain such that affected GCC versions no longer warn/error. 
- void* rawValuePtr() { return valueData_; } - - static bool isLiveHash(HashNumber hash) - { - return hash > sRemovedKey; - } - - HashTableEntry(const HashTableEntry&) = delete; - void operator=(const HashTableEntry&) = delete; - - NonConstT* valuePtr() { return reinterpret_cast(rawValuePtr()); } - - void destroyStoredT() { - NonConstT* ptr = valuePtr(); - ptr->~T(); - MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr)); - } - - public: - HashTableEntry() = default; - - ~HashTableEntry() { - if (isLive()) - destroyStoredT(); - - MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this)); - } - - void destroy() { - MOZ_ASSERT(isLive()); - destroyStoredT(); - } - - void swap(HashTableEntry* other) { - if (this == other) - return; - MOZ_ASSERT(isLive()); - if (other->isLive()) { - mozilla::Swap(*valuePtr(), *other->valuePtr()); - } else { - *other->valuePtr() = std::move(*valuePtr()); - destroy(); - } - mozilla::Swap(keyHash, other->keyHash); - } - - T& get() { - MOZ_ASSERT(isLive()); - return *valuePtr(); - } - - NonConstT& getMutable() { - MOZ_ASSERT(isLive()); - return *valuePtr(); - } - - bool isFree() const { - return keyHash == sFreeKey; - } - - void clearLive() { - MOZ_ASSERT(isLive()); - keyHash = sFreeKey; - destroyStoredT(); - } - - void clear() { - if (isLive()) - destroyStoredT(); - - MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this)); - keyHash = sFreeKey; - } - - bool isRemoved() const { - return keyHash == sRemovedKey; - } - - void removeLive() { - MOZ_ASSERT(isLive()); - keyHash = sRemovedKey; - destroyStoredT(); - } - - bool isLive() const { - return isLiveHash(keyHash); - } - - void setCollision() { - MOZ_ASSERT(isLive()); - keyHash |= sCollisionBit; - } - - void unsetCollision() { - keyHash &= ~sCollisionBit; - } - - bool hasCollision() const { - return keyHash & sCollisionBit; - } - - bool matchHash(HashNumber hn) { - return (keyHash & ~sCollisionBit) == hn; - } - - HashNumber getKeyHash() const { - return keyHash & ~sCollisionBit; - } - - template - void setLive(HashNumber hn, Args&&... args) - { - MOZ_ASSERT(!isLive()); - keyHash = hn; - new (valuePtr()) T(std::forward(args)...); - MOZ_ASSERT(isLive()); - } -}; - -template -class HashTable : private AllocPolicy -{ - friend class mozilla::ReentrancyGuard; - - typedef typename mozilla::RemoveConst::Type NonConstT; - typedef typename HashPolicy::KeyType Key; - typedef typename HashPolicy::Lookup Lookup; - - public: - using Entry = HashTableEntry; - - // A nullable pointer to a hash table element. A Ptr |p| can be tested - // either explicitly |if (p.found()) p->...| or using boolean conversion - // |if (p) p->...|. Ptr objects must not be used after any mutating hash - // table operations unless |generation()| is tested. 
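[Editor's sketch, not part of the patch: refreshIfStale is a hypothetical helper illustrating the invalidation rule just stated. A cached Ptr may be dereferenced only while generation() compares equal to the value observed when the Ptr was obtained.]

template <typename Map, typename Gen>
static typename Map::Entry* refreshIfStale(Map& h, typename Map::Ptr& p, Gen& gen,
                                           const typename Map::Lookup& l)
{
    if (gen != h.generation()) {   // some mutating operation intervened
        p = h.lookup(l);           // the old Ptr must not be used; look up again
        gen = h.generation();
    }
    return p ? &*p : nullptr;
}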
- class Ptr - { - friend class HashTable; - - Entry* entry_; -#ifdef JS_DEBUG - const HashTable* table_; - Generation generation; -#endif - - protected: - Ptr(Entry& entry, const HashTable& tableArg) - : entry_(&entry) -#ifdef JS_DEBUG - , table_(&tableArg) - , generation(tableArg.generation()) -#endif - {} - - public: - Ptr() - : entry_(nullptr) -#ifdef JS_DEBUG - , table_(nullptr) - , generation(0) -#endif - {} - - bool isValid() const { - return !!entry_; - } - - bool found() const { - if (!isValid()) - return false; -#ifdef JS_DEBUG - MOZ_ASSERT(generation == table_->generation()); -#endif - return entry_->isLive(); - } - - explicit operator bool() const { - return found(); - } - - bool operator==(const Ptr& rhs) const { - MOZ_ASSERT(found() && rhs.found()); - return entry_ == rhs.entry_; - } - - bool operator!=(const Ptr& rhs) const { -#ifdef JS_DEBUG - MOZ_ASSERT(generation == table_->generation()); -#endif - return !(*this == rhs); - } - - T& operator*() const { -#ifdef JS_DEBUG - MOZ_ASSERT(found()); - MOZ_ASSERT(generation == table_->generation()); -#endif - return entry_->get(); - } - - T* operator->() const { -#ifdef JS_DEBUG - MOZ_ASSERT(found()); - MOZ_ASSERT(generation == table_->generation()); -#endif - return &entry_->get(); - } - }; - - // A Ptr that can be used to add a key after a failed lookup. - class AddPtr : public Ptr - { - friend class HashTable; - HashNumber keyHash; -#ifdef JS_DEBUG - uint64_t mutationCount; -#endif - - AddPtr(Entry& entry, const HashTable& tableArg, HashNumber hn) - : Ptr(entry, tableArg) - , keyHash(hn) -#ifdef JS_DEBUG - , mutationCount(tableArg.mutationCount) -#endif - {} - - public: - AddPtr() : keyHash(0) {} - }; - - // A collection of hash table entries. The collection is enumerated by - // calling |front()| followed by |popFront()| as long as |!empty()|. As - // with Ptr/AddPtr, Range objects must not be used after any mutating hash - // table operation unless the |generation()| is tested. - class Range - { - protected: - friend class HashTable; - - Range(const HashTable& tableArg, Entry* c, Entry* e) - : cur(c) - , end(e) -#ifdef JS_DEBUG - , table_(&tableArg) - , mutationCount(tableArg.mutationCount) - , generation(tableArg.generation()) - , validEntry(true) -#endif - { - while (cur < end && !cur->isLive()) - ++cur; - } - - Entry* cur; - Entry* end; -#ifdef JS_DEBUG - const HashTable* table_; - uint64_t mutationCount; - Generation generation; - bool validEntry; -#endif - - public: - Range() - : cur(nullptr) - , end(nullptr) -#ifdef JS_DEBUG - , table_(nullptr) - , mutationCount(0) - , generation(0) - , validEntry(false) -#endif - {} - - bool empty() const { -#ifdef JS_DEBUG - MOZ_ASSERT(generation == table_->generation()); - MOZ_ASSERT(mutationCount == table_->mutationCount); -#endif - return cur == end; - } - - T& front() const { - MOZ_ASSERT(!empty()); -#ifdef JS_DEBUG - MOZ_ASSERT(validEntry); - MOZ_ASSERT(generation == table_->generation()); - MOZ_ASSERT(mutationCount == table_->mutationCount); -#endif - return cur->get(); - } - - void popFront() { - MOZ_ASSERT(!empty()); -#ifdef JS_DEBUG - MOZ_ASSERT(generation == table_->generation()); - MOZ_ASSERT(mutationCount == table_->mutationCount); -#endif - while (++cur < end && !cur->isLive()) - continue; -#ifdef JS_DEBUG - validEntry = true; -#endif - } - }; - - // A Range whose lifetime delimits a mutating enumeration of a hash table. - // Since rehashing when elements were removed during enumeration would be - // bad, it is postponed until the Enum is destructed. 
Since the Enum's - // destructor touches the hash table, the user must ensure that the hash - // table is still alive when the destructor runs. - class Enum : public Range - { - friend class HashTable; - - HashTable& table_; - bool rekeyed; - bool removed; - - // Enum is movable but not copyable. - Enum(const Enum&) = delete; - void operator=(const Enum&) = delete; - - public: - template - explicit Enum(Map& map) - : Range(map.all()), table_(map.impl), rekeyed(false), removed(false) {} - - MOZ_IMPLICIT Enum(Enum&& other) - : Range(other), table_(other.table_), rekeyed(other.rekeyed), removed(other.removed) - { - other.rekeyed = false; - other.removed = false; - } - - // Removes the |front()| element from the table, leaving |front()| - // invalid until the next call to |popFront()|. For example: - // - // HashSet s; - // for (HashSet::Enum e(s); !e.empty(); e.popFront()) - // if (e.front() == 42) - // e.removeFront(); - void removeFront() { - table_.remove(*this->cur); - removed = true; -#ifdef JS_DEBUG - this->validEntry = false; - this->mutationCount = table_.mutationCount; -#endif - } - - NonConstT& mutableFront() { - MOZ_ASSERT(!this->empty()); -#ifdef JS_DEBUG - MOZ_ASSERT(this->validEntry); - MOZ_ASSERT(this->generation == this->Range::table_->generation()); - MOZ_ASSERT(this->mutationCount == this->Range::table_->mutationCount); -#endif - return this->cur->getMutable(); - } - - // Removes the |front()| element and re-inserts it into the table with - // a new key at the new Lookup position. |front()| is invalid after - // this operation until the next call to |popFront()|. - void rekeyFront(const Lookup& l, const Key& k) { - MOZ_ASSERT(&k != &HashPolicy::getKey(this->cur->get())); - Ptr p(*this->cur, table_); - table_.rekeyWithoutRehash(p, l, k); - rekeyed = true; -#ifdef JS_DEBUG - this->validEntry = false; - this->mutationCount = table_.mutationCount; -#endif - } - - void rekeyFront(const Key& k) { - rekeyFront(k, k); - } - - // Potentially rehashes the table. - ~Enum() { - if (rekeyed) { - table_.gen++; - table_.checkOverRemoved(); - } - - if (removed) - table_.compactIfUnderloaded(); - } - }; - - // HashTable is movable - HashTable(HashTable&& rhs) - : AllocPolicy(rhs) - { - mozilla::PodAssign(this, &rhs); - rhs.table = nullptr; - } - void operator=(HashTable&& rhs) { - MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited"); - if (table) - destroyTable(*this, table, capacity()); - mozilla::PodAssign(this, &rhs); - rhs.table = nullptr; - } - - private: - // HashTable is not copyable or assignable - HashTable(const HashTable&) = delete; - void operator=(const HashTable&) = delete; - - private: - static const size_t CAP_BITS = 30; - - public: - uint64_t gen:56; // entry storage generation number - uint64_t hashShift:8; // multiplicative hash shift - Entry* table; // entry storage - uint32_t entryCount; // number of entries in table - uint32_t removedCount; // removed entry sentinels in table - -#ifdef JS_DEBUG - uint64_t mutationCount; - mutable bool mEntered; - // Note that some updates to these stats are not thread-safe. See the - // comment on the three-argument overloading of HashTable::lookup(). 
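[Editor's sketch, not part of the patch: a compilable version of the Enum-based mutation documented above. dropEven is a placeholder and |s| is assumed init()ed; note that any shrink or rehash triggered by the removals is deferred to Enum's destructor, per the comment above.]

static void dropEven(mozilla::HashSet<uint32_t>& s)
{
    for (mozilla::HashSet<uint32_t>::Enum e(s); !e.empty(); e.popFront()) {
        if (e.front() % 2 == 0)
            e.removeFront();       // front() is invalid until the next popFront()
    }
}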
- mutable struct Stats - { - uint32_t searches; // total number of table searches - uint32_t steps; // hash chain links traversed - uint32_t hits; // searches that found key - uint32_t misses; // searches that didn't find key - uint32_t addOverRemoved; // adds that recycled a removed entry - uint32_t removes; // calls to remove - uint32_t removeFrees; // calls to remove that freed the entry - uint32_t grows; // table expansions - uint32_t shrinks; // table contractions - uint32_t compresses; // table compressions - uint32_t rehashes; // tombstone decontaminations - } stats; -# define METER(x) x -#else -# define METER(x) -#endif - - // The default initial capacity is 32 (enough to hold 16 elements), but it - // can be as low as 4. - static const uint32_t sMinCapacity = 4; - static const uint32_t sMaxInit = 1u << (CAP_BITS - 1); - static const uint32_t sMaxCapacity = 1u << CAP_BITS; - - // Hash-table alpha is conceptually a fraction, but to avoid floating-point - // math we implement it as a ratio of integers. - static const uint8_t sAlphaDenominator = 4; - static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4 - static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4 - - static const HashNumber sFreeKey = Entry::sFreeKey; - static const HashNumber sRemovedKey = Entry::sRemovedKey; - static const HashNumber sCollisionBit = Entry::sCollisionBit; - - void setTableSizeLog2(uint32_t sizeLog2) - { - hashShift = js::kHashNumberBits - sizeLog2; - } - - static bool isLiveHash(HashNumber hash) - { - return Entry::isLiveHash(hash); - } - - static HashNumber prepareHash(const Lookup& l) - { - HashNumber keyHash = mozilla::ScrambleHashCode(HashPolicy::hash(l)); - - // Avoid reserved hash codes. - if (!isLiveHash(keyHash)) - keyHash -= (sRemovedKey + 1); - return keyHash & ~sCollisionBit; - } - - enum FailureBehavior { DontReportFailure = false, ReportFailure = true }; - - static Entry* createTable(AllocPolicy& alloc, uint32_t capacity, - FailureBehavior reportFailure = ReportFailure) - { - Entry* table = reportFailure - ? alloc.template pod_malloc(capacity) - : alloc.template maybe_pod_malloc(capacity); - if (table) { - for (uint32_t i = 0; i < capacity; i++) - new (&table[i]) Entry(); - } - return table; - } - - static Entry* maybeCreateTable(AllocPolicy& alloc, uint32_t capacity) - { - Entry* table = alloc.template maybe_pod_malloc(capacity); - if (table) { - for (uint32_t i = 0; i < capacity; i++) - new (&table[i]) Entry(); - } - return table; - } - - static void destroyTable(AllocPolicy& alloc, Entry* oldTable, uint32_t capacity) - { - Entry* end = oldTable + capacity; - for (Entry* e = oldTable; e < end; ++e) - e->~Entry(); - alloc.free_(oldTable, capacity); - } - - public: - explicit HashTable(AllocPolicy ap) - : AllocPolicy(ap) - , gen(0) - , hashShift(js::kHashNumberBits) - , table(nullptr) - , entryCount(0) - , removedCount(0) -#ifdef JS_DEBUG - , mutationCount(0) - , mEntered(false) -#endif - {} - - MOZ_MUST_USE bool init(uint32_t length) - { - MOZ_ASSERT(!initialized()); - - // Reject all lengths whose initial computed capacity would exceed - // sMaxCapacity. Round that maximum length down to the nearest power - // of two for speedier code. 
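[Editor's sketch, not part of the patch: a worked restatement of the capacity computation in init() below, mirroring the code rather than replacing it. With max alpha 3/4 and sMinCapacity 4, init(16) yields ceil(16 * 4 / 3) = 22, rounded up to the next power of two, 32 -- matching the "default initial capacity is 32 (enough to hold 16 elements)" note above.]

#include <stdint.h>

constexpr uint32_t exampleCapacity(uint32_t length)
{
    uint32_t raw = (length * 4 + 3 - 1) / 3;   // ceil(length / (3/4))
    uint32_t cap = raw < 4 ? 4 : raw;          // clamp to sMinCapacity
    uint32_t pow2 = 1;
    while (pow2 < cap)                         // round up to a power of two
        pow2 <<= 1;
    return pow2;
}
static_assert(exampleCapacity(16) == 32, "matches the documented default");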
- if (MOZ_UNLIKELY(length > sMaxInit)) { - this->reportAllocOverflow(); - return false; - } - - static_assert((sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit, - "multiplication in numerator below could overflow"); - static_assert(sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator, - "numerator calculation below could potentially overflow"); - - // Compute the smallest capacity allowing |length| elements to be - // inserted without rehashing: ceil(length / max-alpha). (Ceiling - // integral division: .) - uint32_t newCapacity = - (length * sAlphaDenominator + sMaxAlphaNumerator - 1) / sMaxAlphaNumerator; - if (newCapacity < sMinCapacity) - newCapacity = sMinCapacity; - - // Round up capacity to next power-of-two. - uint32_t log2 = mozilla::CeilingLog2(newCapacity); - newCapacity = 1u << log2; - - MOZ_ASSERT(newCapacity >= length); - MOZ_ASSERT(newCapacity <= sMaxCapacity); - - table = createTable(*this, newCapacity); - if (!table) - return false; - - setTableSizeLog2(log2); - METER(memset(&stats, 0, sizeof(stats))); - return true; - } - - bool initialized() const - { - return !!table; - } - - ~HashTable() - { - if (table) - destroyTable(*this, table, capacity()); - } - - private: - HashNumber hash1(HashNumber hash0) const - { - return hash0 >> hashShift; - } - - struct DoubleHash - { - HashNumber h2; - HashNumber sizeMask; - }; - - DoubleHash hash2(HashNumber curKeyHash) const - { - uint32_t sizeLog2 = js::kHashNumberBits - hashShift; - DoubleHash dh = { - ((curKeyHash << sizeLog2) >> hashShift) | 1, - (HashNumber(1) << sizeLog2) - 1 - }; - return dh; - } - - static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash& dh) - { - return (h1 - dh.h2) & dh.sizeMask; - } - - bool overloaded() - { - static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator, - "multiplication below could overflow"); - return entryCount + removedCount >= - capacity() * sMaxAlphaNumerator / sAlphaDenominator; - } - - // Would the table be underloaded if it had the given capacity and entryCount? - static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount) - { - static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator, - "multiplication below could overflow"); - return capacity > sMinCapacity && - entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator; - } - - bool underloaded() - { - return wouldBeUnderloaded(capacity(), entryCount); - } - - static MOZ_ALWAYS_INLINE bool match(Entry& e, const Lookup& l) - { - return HashPolicy::match(HashPolicy::getKey(e.get()), l); - } - - // Warning: in order for readonlyThreadsafeLookup() to be safe this - // function must not modify the table in any way when |collisionBit| is 0. - // (The use of the METER() macro to increment stats violates this - // restriction but we will live with that for now because it's enabled so - // rarely.) - MOZ_ALWAYS_INLINE Entry& - lookup(const Lookup& l, HashNumber keyHash, uint32_t collisionBit) const - { - MOZ_ASSERT(isLiveHash(keyHash)); - MOZ_ASSERT(!(keyHash & sCollisionBit)); - MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit); - MOZ_ASSERT(table); - METER(stats.searches++); - - // Compute the primary hash address. - HashNumber h1 = hash1(keyHash); - Entry* entry = &table[h1]; - - // Miss: return space for a new entry. - if (entry->isFree()) { - METER(stats.misses++); - return *entry; - } - - // Hit: return entry. - if (entry->matchHash(keyHash) && match(*entry, l)) { - METER(stats.hits++); - return *entry; - } - - // Collision: double hash. 
- DoubleHash dh = hash2(keyHash); - - // Save the first removed entry pointer so we can recycle later. - Entry* firstRemoved = nullptr; - - while (true) { - if (MOZ_UNLIKELY(entry->isRemoved())) { - if (!firstRemoved) - firstRemoved = entry; - } else { - if (collisionBit == sCollisionBit) - entry->setCollision(); - } - - METER(stats.steps++); - h1 = applyDoubleHash(h1, dh); - - entry = &table[h1]; - if (entry->isFree()) { - METER(stats.misses++); - return firstRemoved ? *firstRemoved : *entry; - } - - if (entry->matchHash(keyHash) && match(*entry, l)) { - METER(stats.hits++); - return *entry; - } - } - } - - // This is a copy of lookup hardcoded to the assumptions: - // 1. the lookup is a lookupForAdd - // 2. the key, whose |keyHash| has been passed is not in the table, - // 3. no entries have been removed from the table. - // This specialized search avoids the need for recovering lookup values - // from entries, which allows more flexible Lookup/Key types. - Entry& findFreeEntry(HashNumber keyHash) - { - MOZ_ASSERT(!(keyHash & sCollisionBit)); - MOZ_ASSERT(table); - METER(stats.searches++); - - // We assume 'keyHash' has already been distributed. - - // Compute the primary hash address. - HashNumber h1 = hash1(keyHash); - Entry* entry = &table[h1]; - - // Miss: return space for a new entry. - if (!entry->isLive()) { - METER(stats.misses++); - return *entry; - } - - // Collision: double hash. - DoubleHash dh = hash2(keyHash); - - while (true) { - MOZ_ASSERT(!entry->isRemoved()); - entry->setCollision(); - - METER(stats.steps++); - h1 = applyDoubleHash(h1, dh); - - entry = &table[h1]; - if (!entry->isLive()) { - METER(stats.misses++); - return *entry; - } - } - } - - enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed }; - - RebuildStatus changeTableSize(int deltaLog2, FailureBehavior reportFailure = ReportFailure) - { - // Look, but don't touch, until we succeed in getting new entry store. - Entry* oldTable = table; - uint32_t oldCap = capacity(); - uint32_t newLog2 = js::kHashNumberBits - hashShift + deltaLog2; - uint32_t newCapacity = 1u << newLog2; - if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) { - if (reportFailure) - this->reportAllocOverflow(); - return RehashFailed; - } - - Entry* newTable = createTable(*this, newCapacity, reportFailure); - if (!newTable) - return RehashFailed; - - // We can't fail from here on, so update table parameters. - setTableSizeLog2(newLog2); - removedCount = 0; - gen++; - table = newTable; - - // Copy only live entries, leaving removed ones behind. - Entry* end = oldTable + oldCap; - for (Entry* src = oldTable; src < end; ++src) { - if (src->isLive()) { - HashNumber hn = src->getKeyHash(); - findFreeEntry(hn).setLive( - hn, std::move(const_cast(src->get()))); - } - - src->~Entry(); - } - - // All entries have been destroyed, no need to destroyTable. - this->free_(oldTable, oldCap); - return Rehashed; - } - - bool shouldCompressTable() - { - // Compress if a quarter or more of all entries are removed. - return removedCount >= (capacity() >> 2); - } - - RebuildStatus checkOverloaded(FailureBehavior reportFailure = ReportFailure) - { - if (!overloaded()) - return NotOverloaded; - - int deltaLog2; - if (shouldCompressTable()) { - METER(stats.compresses++); - deltaLog2 = 0; - } else { - METER(stats.grows++); - deltaLog2 = 1; - } - - return changeTableSize(deltaLog2, reportFailure); - } - - // Infallibly rehash the table if we are overloaded with removals. 
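[Editor's sketch, not part of the patch: a standalone restatement of the double-hash probe used by lookup() and findFreeEntry() above. hash2() forces the step odd, hence coprime with the power-of-two table size, so the probe sequence visits every slot once before repeating.]

static uint32_t nthProbe(uint32_t h1, uint32_t h2, uint32_t sizeMask, unsigned n)
{
    h2 |= 1;                           // hash2() guarantees an odd step
    while (n--)
        h1 = (h1 - h2) & sizeMask;     // applyDoubleHash(), applied n times
    return h1;
}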
- void checkOverRemoved() - { - if (overloaded()) { - if (checkOverloaded(DontReportFailure) == RehashFailed) - rehashTableInPlace(); - } - } - - void remove(Entry& e) - { - MOZ_ASSERT(table); - METER(stats.removes++); - - if (e.hasCollision()) { - e.removeLive(); - removedCount++; - } else { - METER(stats.removeFrees++); - e.clearLive(); - } - entryCount--; -#ifdef JS_DEBUG - mutationCount++; -#endif - } - - void checkUnderloaded() - { - if (underloaded()) { - METER(stats.shrinks++); - (void) changeTableSize(-1, DontReportFailure); - } - } - - // Resize the table down to the largest capacity which doesn't underload the - // table. Since we call checkUnderloaded() on every remove, you only need - // to call this after a bulk removal of items done without calling remove(). - void compactIfUnderloaded() - { - int32_t resizeLog2 = 0; - uint32_t newCapacity = capacity(); - while (wouldBeUnderloaded(newCapacity, entryCount)) { - newCapacity = newCapacity >> 1; - resizeLog2--; - } - - if (resizeLog2 != 0) - (void) changeTableSize(resizeLog2, DontReportFailure); - } - - // This is identical to changeTableSize(currentSize), but without requiring - // a second table. We do this by recycling the collision bits to tell us if - // the element is already inserted or still waiting to be inserted. Since - // already-inserted elements win any conflicts, we get the same table as we - // would have gotten through random insertion order. - void rehashTableInPlace() - { - METER(stats.rehashes++); - removedCount = 0; - gen++; - for (size_t i = 0; i < capacity(); ++i) - table[i].unsetCollision(); - - for (size_t i = 0; i < capacity();) { - Entry* src = &table[i]; - - if (!src->isLive() || src->hasCollision()) { - ++i; - continue; - } - - HashNumber keyHash = src->getKeyHash(); - HashNumber h1 = hash1(keyHash); - DoubleHash dh = hash2(keyHash); - Entry* tgt = &table[h1]; - while (true) { - if (!tgt->hasCollision()) { - src->swap(tgt); - tgt->setCollision(); - break; - } - - h1 = applyDoubleHash(h1, dh); - tgt = &table[h1]; - } - } - - // TODO: this algorithm leaves collision bits on *all* elements, even if - // they are on no collision path. We have the option of setting the - // collision bits correctly on a subsequent pass or skipping the rehash - // unless we are totally filled with tombstones: benchmark to find out - // which approach is best. - } - - // Note: |l| may be a reference to a piece of |u|, so this function - // must take care not to use |l| after moving |u|. - // - // Prefer to use putNewInfallible; this function does not check - // invariants. - template - void putNewInfallibleInternal(const Lookup& l, Args&&... 
args) - { - MOZ_ASSERT(table); - - HashNumber keyHash = prepareHash(l); - Entry* entry = &findFreeEntry(keyHash); - MOZ_ASSERT(entry); - - if (entry->isRemoved()) { - METER(stats.addOverRemoved++); - removedCount--; - keyHash |= sCollisionBit; - } - - entry->setLive(keyHash, std::forward(args)...); - entryCount++; -#ifdef JS_DEBUG - mutationCount++; -#endif - } - - public: - void clear() - { - Entry* end = table + capacity(); - for (Entry* e = table; e < end; ++e) - e->clear(); - - removedCount = 0; - entryCount = 0; -#ifdef JS_DEBUG - mutationCount++; -#endif - } - - void clearAndShrink() - { - clear(); - compactIfUnderloaded(); - } - - void finish() - { -#ifdef JS_DEBUG - MOZ_ASSERT(!mEntered); -#endif - if (!table) - return; - - destroyTable(*this, table, capacity()); - table = nullptr; - gen++; - entryCount = 0; - removedCount = 0; -#ifdef JS_DEBUG - mutationCount++; -#endif - } - - Range all() const - { - MOZ_ASSERT(table); - return Range(*this, table, table + capacity()); - } - - bool empty() const - { - MOZ_ASSERT(table); - return !entryCount; - } - - uint32_t count() const - { - MOZ_ASSERT(table); - return entryCount; - } - - uint32_t capacity() const - { - MOZ_ASSERT(table); - return 1u << (js::kHashNumberBits - hashShift); - } - - Generation generation() const - { - MOZ_ASSERT(table); - return Generation(gen); - } - - size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const - { - return mallocSizeOf(table); - } - - size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const - { - return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf); - } - - MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const - { - mozilla::ReentrancyGuard g(*this); - if (!HasHash(l)) - return Ptr(); - HashNumber keyHash = prepareHash(l); - return Ptr(lookup(l, keyHash, 0), *this); - } - - MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const - { - if (!HasHash(l)) - return Ptr(); - HashNumber keyHash = prepareHash(l); - return Ptr(lookup(l, keyHash, 0), *this); - } - - MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const - { - mozilla::ReentrancyGuard g(*this); - if (!EnsureHash(l)) - return AddPtr(); - HashNumber keyHash = prepareHash(l); - // Directly call the constructor in the return statement to avoid - // excess copying when building with Visual Studio 2017. - // See bug 1385181. - return AddPtr(lookup(l, keyHash, sCollisionBit), *this, keyHash); - } - - template - MOZ_MUST_USE bool add(AddPtr& p, Args&&... args) - { - mozilla::ReentrancyGuard g(*this); - MOZ_ASSERT(table); - MOZ_ASSERT_IF(p.isValid(), p.table_ == this); - MOZ_ASSERT(!p.found()); - MOZ_ASSERT(!(p.keyHash & sCollisionBit)); - - // Check for error from ensureHash() here. - if (!p.isValid()) - return false; - - MOZ_ASSERT(p.generation == generation()); -#ifdef JS_DEBUG - MOZ_ASSERT(p.mutationCount == mutationCount); -#endif - - // Changing an entry from removed to live does not affect whether we - // are overloaded and can be handled separately. - if (p.entry_->isRemoved()) { - if (!this->checkSimulatedOOM()) - return false; - METER(stats.addOverRemoved++); - removedCount--; - p.keyHash |= sCollisionBit; - } else { - // Preserve the validity of |p.entry_|. 
- RebuildStatus status = checkOverloaded(); - if (status == RehashFailed) - return false; - if (status == NotOverloaded && !this->checkSimulatedOOM()) - return false; - if (status == Rehashed) - p.entry_ = &findFreeEntry(p.keyHash); - } - - p.entry_->setLive(p.keyHash, std::forward(args)...); - entryCount++; -#ifdef JS_DEBUG - mutationCount++; - p.generation = generation(); - p.mutationCount = mutationCount; -#endif - return true; - } - - // Note: |l| may be a reference to a piece of |u|, so this function - // must take care not to use |l| after moving |u|. - template - void putNewInfallible(const Lookup& l, Args&&... args) - { - MOZ_ASSERT(!lookup(l).found()); - mozilla::ReentrancyGuard g(*this); - putNewInfallibleInternal(l, std::forward(args)...); - } - - // Note: |l| may be alias arguments in |args|, so this function must take - // care not to use |l| after moving |args|. - template - MOZ_MUST_USE bool putNew(const Lookup& l, Args&&... args) - { - if (!this->checkSimulatedOOM()) - return false; - - if (!EnsureHash(l)) - return false; - - if (checkOverloaded() == RehashFailed) - return false; - - putNewInfallible(l, std::forward(args)...); - return true; - } - - // Note: |l| may be a reference to a piece of |u|, so this function - // must take care not to use |l| after moving |u|. - template - MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, Args&&... args) - { - // Check for error from ensureHash() here. - if (!p.isValid()) - return false; - -#ifdef JS_DEBUG - p.generation = generation(); - p.mutationCount = mutationCount; -#endif - { - mozilla::ReentrancyGuard g(*this); - MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed - p.entry_ = &lookup(l, p.keyHash, sCollisionBit); - } - return p.found() || add(p, std::forward(args)...); - } - - void remove(Ptr p) - { - MOZ_ASSERT(table); - mozilla::ReentrancyGuard g(*this); - MOZ_ASSERT(p.found()); - MOZ_ASSERT(p.generation == generation()); - remove(*p.entry_); - checkUnderloaded(); - } - - void rekeyWithoutRehash(Ptr p, const Lookup& l, const Key& k) - { - MOZ_ASSERT(table); - mozilla::ReentrancyGuard g(*this); - MOZ_ASSERT(p.found()); - MOZ_ASSERT(p.generation == generation()); - typename HashTableEntry::NonConstT t(std::move(*p)); - HashPolicy::setKey(t, const_cast(k)); - remove(*p.entry_); - putNewInfallibleInternal(l, std::move(t)); - } - - void rekeyAndMaybeRehash(Ptr p, const Lookup& l, const Key& k) - { - rekeyWithoutRehash(p, l, k); - checkOverRemoved(); - } - -#undef METER -}; - -} // namespace detail -} // namespace js - #endif /* js_HashTable_h */ diff --git a/js/public/RootingAPI.h b/js/public/RootingAPI.h index b1414e000f4c3..c39c757b95087 100644 --- a/js/public/RootingAPI.h +++ b/js/public/RootingAPI.h @@ -766,18 +766,22 @@ struct JS_PUBLIC_API(MovableCellHasher>) static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); } }; +} // namespace js + +namespace mozilla { + template -struct FallibleHashMethods> +struct FallibleHashMethods> { template static bool hasHash(Lookup&& l) { - return MovableCellHasher::hasHash(std::forward(l)); + return js::MovableCellHasher::hasHash(std::forward(l)); } template static bool ensureHash(Lookup&& l) { - return MovableCellHasher::ensureHash(std::forward(l)); + return js::MovableCellHasher::ensureHash(std::forward(l)); } }; -} /* namespace js */ +} // namespace mozilla namespace js { diff --git a/js/public/UbiNode.h b/js/public/UbiNode.h index 37a0af1ed0553..72cab375dedcc 100644 --- a/js/public/UbiNode.h +++ b/js/public/UbiNode.h @@ -1185,12 +1185,12 @@ 
void SetConstructUbiNodeForDOMObjectCallback(JSContext* cx, void (*callback)(voi } // namespace ubi } // namespace JS -namespace js { +namespace mozilla { // Make ubi::Node::HashPolicy the default hash policy for ubi::Node. template<> struct DefaultHasher : JS::ubi::Node::HashPolicy { }; template<> struct DefaultHasher : JS::ubi::StackFrame::HashPolicy { }; -} // namespace js +} // namespace mozilla #endif // js_UbiNode_h diff --git a/js/src/ds/OrderedHashTable.h b/js/src/ds/OrderedHashTable.h index cc1d274317706..7823ad467494a 100644 --- a/js/src/ds/OrderedHashTable.h +++ b/js/src/ds/OrderedHashTable.h @@ -40,6 +40,8 @@ #include "mozilla/HashFunctions.h" #include "mozilla/Move.h" +#include "js/HashTable.h" + namespace js { namespace detail { diff --git a/js/src/gc/Barrier.h b/js/src/gc/Barrier.h index ae279ce9981b6..d77597e0d0f23 100644 --- a/js/src/gc/Barrier.h +++ b/js/src/gc/Barrier.h @@ -871,10 +871,6 @@ struct GCPtrHasher static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); } }; -/* Specialized hashing policy for GCPtrs. */ -template -struct DefaultHasher> : GCPtrHasher {}; - template struct PreBarrieredHasher { @@ -886,9 +882,6 @@ struct PreBarrieredHasher static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); } }; -template -struct DefaultHasher> : PreBarrieredHasher { }; - /* Useful for hashtables with a ReadBarriered as key. */ template struct ReadBarrieredHasher @@ -901,9 +894,24 @@ struct ReadBarrieredHasher static void rekey(Key& k, const Key& newKey) { k.set(newKey.unbarrieredGet()); } }; +} // namespace js + +namespace mozilla { + +/* Specialized hashing policy for GCPtrs. */ +template +struct DefaultHasher> : js::GCPtrHasher {}; + +template +struct DefaultHasher> : js::PreBarrieredHasher { }; + /* Specialized hashing policy for ReadBarriereds. 
 /* Specialized hashing policy for ReadBarriereds. */
 template <class T>
-struct DefaultHasher<ReadBarriered<T>> : ReadBarrieredHasher<T> { };
+struct DefaultHasher<js::ReadBarriered<T>> : js::ReadBarrieredHasher<T> { };
+
+} // namespace mozilla
+
+namespace js {
 
 class ArrayObject;
 class ArrayBufferObject;
diff --git a/js/src/jsapi-tests/testUbiNode.cpp b/js/src/jsapi-tests/testUbiNode.cpp
index 5b704b2dc7d77..4835f5bf2fe30 100644
--- a/js/src/jsapi-tests/testUbiNode.cpp
+++ b/js/src/jsapi-tests/testUbiNode.cpp
@@ -311,7 +311,7 @@ struct ExpectedEdge
     }
 };
 
-namespace js {
+namespace mozilla {
 
 template <>
 struct DefaultHasher<ExpectedEdge>
@@ -327,7 +327,7 @@ struct DefaultHasher<ExpectedEdge>
     }
 };
 
-} // namespace js
+} // namespace mozilla
 
 BEGIN_TEST(test_ubiPostOrder)
 {
diff --git a/js/src/vm/ObjectGroup.cpp b/js/src/vm/ObjectGroup.cpp
index 12c85bda2e3ba..240966a8ac7d1 100644
--- a/js/src/vm/ObjectGroup.cpp
+++ b/js/src/vm/ObjectGroup.cpp
@@ -431,7 +431,7 @@ struct ObjectGroupRealm::NewEntry
     }
 };
 
-namespace js {
+namespace mozilla {
 template <>
 struct FallibleHashMethods<ObjectGroupRealm::NewEntry>
 {
@@ -442,7 +442,7 @@ struct FallibleHashMethods<ObjectGroupRealm::NewEntry>
         return ObjectGroupRealm::NewEntry::ensureHash(std::forward<Lookup>(l));
     }
 };
-} // namespace js
+} // namespace mozilla
 
 class ObjectGroupRealm::NewTable : public JS::WeakCache<js::GCHashSet<ObjectGroupRealm::NewEntry,
                                                                       ObjectGroupRealm::NewEntry,
                                                                       SystemAllocPolicy>>
diff --git a/js/src/vm/SavedFrame.h b/js/src/vm/SavedFrame.h
index 777e3b8e6cfbb..5158466036989 100644
--- a/js/src/vm/SavedFrame.h
+++ b/js/src/vm/SavedFrame.h
@@ -172,17 +172,25 @@ struct SavedFrame::HashPolicy
     static void rekey(Key& key, const Key& newKey);
 };
 
+} // namespace js
+
+namespace mozilla {
+
 template <>
-struct FallibleHashMethods<SavedFrame::HashPolicy>
+struct FallibleHashMethods<js::SavedFrame::HashPolicy>
 {
     template <typename Lookup> static bool hasHash(Lookup&& l) {
-        return SavedFrame::HashPolicy::hasHash(std::forward<Lookup>(l));
+        return js::SavedFrame::HashPolicy::hasHash(std::forward<Lookup>(l));
     }
     template <typename Lookup> static bool ensureHash(Lookup&& l) {
-        return SavedFrame::HashPolicy::ensureHash(std::forward<Lookup>(l));
+        return js::SavedFrame::HashPolicy::ensureHash(std::forward<Lookup>(l));
    }
 };
 
+} // namespace mozilla
+
+namespace js {
+
 // Assert that if the given object is not null, that it must be either a
 // SavedFrame object or wrapper (Xray or CCW) around a SavedFrame object.
 inline void
 AssertObjectIsSavedFrameOrWrapper(JSContext* cx, HandleObject stack);
diff --git a/js/src/vm/Shape.h b/js/src/vm/Shape.h
index 7d5419b8be639..dba7c3dcd8d01 100644
--- a/js/src/vm/Shape.h
+++ b/js/src/vm/Shape.h
@@ -668,18 +668,26 @@ HashId(jsid id)
     return mozilla::HashGeneric(JSID_BITS(id));
 }
 
+} // namespace js
+
+namespace mozilla {
+
 template <>
 struct DefaultHasher<jsid>
 {
     typedef jsid Lookup;
     static HashNumber hash(jsid id) {
-        return HashId(id);
+        return js::HashId(id);
    }
     static bool match(jsid id1, jsid id2) {
         return id1 == id2;
     }
 };
 
+} // namespace mozilla
+
+namespace js {
+
 using BaseShapeSet = JS::WeakCache<JS::GCHashSet<ReadBarriered<UnownedBaseShape*>,
                                                  StackBaseShape,
                                                  SystemAllocPolicy>>;
diff --git a/js/src/vm/Stack.h b/js/src/vm/Stack.h
index c608f44dbcc6e..65c5307acae43 100644
--- a/js/src/vm/Stack.h
+++ b/js/src/vm/Stack.h
@@ -1053,19 +1053,27 @@ FillArgumentsFromArraylike(JSContext* cx, Args& args, const Arraylike& arraylike
     return true;
 }
 
+} // namespace js
+
+namespace mozilla {
+
 template <>
-struct DefaultHasher<AbstractFramePtr> {
-    typedef AbstractFramePtr Lookup;
+struct DefaultHasher<js::AbstractFramePtr> {
+    typedef js::AbstractFramePtr Lookup;
 
     static js::HashNumber hash(const Lookup& key) {
         return mozilla::HashGeneric(key.raw());
     }
 
-    static bool match(const AbstractFramePtr& k, const Lookup& l) {
+    static bool match(const js::AbstractFramePtr& k, const Lookup& l) {
         return k == l;
     }
 };
 
+} // namespace mozilla
+
+namespace js {
+
 /*****************************************************************************/
 
 // SavedFrame caching to minimize stack walking.
diff --git a/js/src/vm/UbiNodeCensus.cpp b/js/src/vm/UbiNodeCensus.cpp
index 852c2ff742491..5fcf7dd75adf5 100644
--- a/js/src/vm/UbiNodeCensus.cpp
+++ b/js/src/vm/UbiNodeCensus.cpp
@@ -346,7 +346,8 @@ static int compareEntries(const void* lhsVoid, const void* rhsVoid) {
 }
 
 // A hash map mapping from C strings to counts.
-using CStringCountMap = HashMap<const char*, CountBasePtr, CStringHasher, SystemAllocPolicy>;
+using CStringCountMap =
+    HashMap<const char*, CountBasePtr, mozilla::CStringHasher, SystemAllocPolicy>;
 
 // Convert a HashMap into an object with each key one of the entries from the
 // map and each value the associated count's report. For use during census
@@ -897,7 +898,7 @@ ByAllocationStack::report(JSContext* cx, CountBase& countBase, MutableHandleValu
 
 #ifdef DEBUG
     // Check that nothing rehashes our table while we hold pointers into it.
-    Generation generation = count.table.generation();
+    mozilla::Generation generation = count.table.generation();
 #endif
 
     // Build a vector of pointers to entries; sort by total; and then use
@@ -959,11 +960,11 @@ class ByFilename : public CountType {
     using Lookup = UniqueCString;
 
     static js::HashNumber hash(const Lookup& lookup) {
-        return CStringHasher::hash(lookup.get());
+        return mozilla::CStringHasher::hash(lookup.get());
     }
 
     static bool match(const UniqueCString& key, const Lookup& lookup) {
-        return CStringHasher::match(key.get(), lookup.get());
+        return mozilla::CStringHasher::match(key.get(), lookup.get());
     }
 };
 
diff --git a/js/src/wasm/WasmValidate.cpp b/js/src/wasm/WasmValidate.cpp
index 0aad0149fee12..0ac987645df7e 100644
--- a/js/src/wasm/WasmValidate.cpp
+++ b/js/src/wasm/WasmValidate.cpp
@@ -1763,7 +1763,7 @@ DecodeGlobalSection(Decoder& d, ModuleEnvironment* env)
     return d.finishSection(*range, "global");
 }
 
-typedef HashSet<const char*, CStringHasher, SystemAllocPolicy> CStringSet;
+typedef HashSet<const char*, mozilla::CStringHasher, SystemAllocPolicy> CStringSet;
 
 static UniqueChars
 DecodeExportName(Decoder& d, CStringSet* dupSet)
diff --git a/mfbt/HashTable.h b/mfbt/HashTable.h
new file mode 100644
index 0000000000000..dadcfcea4eb31
--- /dev/null
+++ b/mfbt/HashTable.h
@@ -0,0 +1,1960 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_HashTable_h
+#define mozilla_HashTable_h
+
+#include "mozilla/AllocPolicy.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Casting.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/MemoryChecking.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Move.h"
+#include "mozilla/Opaque.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/ReentrancyGuard.h"
+#include "mozilla/TypeTraits.h"
+#include "mozilla/UniquePtr.h"
+
+namespace mozilla {
+
+template <class> struct DefaultHasher;
+template <class, class> class HashMapEntry;
+namespace detail {
+    template <typename T> class HashTableEntry;
+    template <class T, class HashPolicy, class AllocPolicy> class HashTable;
+} // namespace detail
+
+/*****************************************************************************/
+
+// The "generation" of a hash table is an opaque value indicating the state of
+// modification of the hash table through its lifetime. If the generation of
+// a hash table compares equal at times T1 and T2, then lookups in the hash
+// table, pointers to (or into) hash table entries, etc. at time T1 are valid
+// at time T2. If the generation compares unequal, these computations are all
+// invalid and must be performed again to be used.
+//
+// Generations are meaningfully comparable only with respect to a single hash
+// table. It's always nonsensical to compare the generation of distinct hash
+// tables H1 and H2.
+using Generation = Opaque<uint64_t>;
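+
+// For example (an illustrative sketch; |table| stands for any HashMap or
+// HashSet instance):
+//
+//   Generation g = table.generation();
+//   /* ... only non-mutating operations on |table| ... */
+//   MOZ_ASSERT(g == table.generation()); // pointers into |table| still valid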
+
+// A performant, STL-like container providing a hash-based map from keys to
+// values. In particular, HashMap calls constructors and destructors of all
+// objects added so non-PODs may be used safely.
+//
+// Key/Value requirements:
+//  - movable, destructible, assignable
+// HashPolicy requirements:
+//  - see Hash Policy section below
+// AllocPolicy:
+//  - see AllocPolicy.h
+//
+// Note:
+// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
+//   called by HashMap must not call back into the same HashMap object.
+// - Due to the lack of exception handling, the user must call |init()|.
+template <class Key,
+          class Value,
+          class HashPolicy = DefaultHasher<Key>,
+          class AllocPolicy = MallocAllocPolicy>
+class HashMap
+{
+    typedef HashMapEntry<Key, Value> TableEntry;
+
+    struct MapHashPolicy : HashPolicy
+    {
+        using Base = HashPolicy;
+        typedef Key KeyType;
+        static const Key& getKey(TableEntry& e) { return e.key(); }
+        static void setKey(TableEntry& e, Key& k) { HashPolicy::rekey(e.mutableKey(), k); }
+    };
+
+    typedef detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy> Impl;
+    Impl impl;
+
+  public:
+    typedef typename HashPolicy::Lookup Lookup;
+    typedef TableEntry Entry;
+
+    // HashMap construction is fallible (due to OOM); thus the user must call
+    // init after constructing a HashMap and check the return value.
+    explicit HashMap(AllocPolicy a = AllocPolicy()) : impl(a) {}
+    MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
+    bool initialized() const { return impl.initialized(); }
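+
+    // For example, construction and initialization are typically paired as
+    // follows (an illustrative sketch):
+    //
+    //   HashMap<int, char> m;
+    //   if (!m.init())
+    //     return false; // OOM; |m| must not be used until init() succeeds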
+
+    // Return whether the given lookup value is present in the map. E.g.:
+    //
+    //   typedef HashMap<int,char> HM;
+    //   HM h;
+    //   if (HM::Ptr p = h.lookup(3)) {
+    //     const HM::Entry& e = *p; // p acts like a pointer to Entry
+    //     assert(p->key == 3);     // Entry contains the key
+    //     char val = p->value;     // and value
+    //   }
+    //
+    // Also see the definition of Ptr in HashTable above (with T = Entry).
+    typedef typename Impl::Ptr Ptr;
+    MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
+
+    // Like lookup, but does not assert if two threads call lookup at the same
+    // time. Only use this method when none of the threads will modify the map.
+    MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const {
+        return impl.readonlyThreadsafeLookup(l);
+    }
+
+    // Assuming |p.found()|, remove |*p|.
+    void remove(Ptr p) { impl.remove(p); }
+
+    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+    // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
+    // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
+    //
+    //   typedef HashMap<int,char> HM;
+    //   HM h;
+    //   HM::AddPtr p = h.lookupForAdd(3);
+    //   if (!p) {
+    //     if (!h.add(p, 3, 'a'))
+    //       return false;
+    //   }
+    //   const HM::Entry& e = *p; // p acts like a pointer to Entry
+    //   assert(p->key == 3);     // Entry contains the key
+    //   char val = p->value;     // and value
+    //
+    // Also see the definition of AddPtr in HashTable above (with T = Entry).
+    //
+    // N.B. The caller must ensure that no mutating hash table operations
+    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
+    // looking up the key a second time, the caller may use the more efficient
+    // relookupOrAdd method. This method reuses part of the hashing computation
+    // to more efficiently insert the key if it has not been added. For
+    // example, a mutation-handling version of the previous example:
+    //
+    //   HM::AddPtr p = h.lookupForAdd(3);
+    //   if (!p) {
+    //     call_that_may_mutate_h();
+    //     if (!h.relookupOrAdd(p, 3, 'a'))
+    //       return false;
+    //   }
+    //   const HM::Entry& e = *p;
+    //   assert(p->key == 3);
+    //   char val = p->value;
+    typedef typename Impl::AddPtr AddPtr;
+    MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const {
+        return impl.lookupForAdd(l);
+    }
+
+    template<typename KeyInput, typename ValueInput>
+    MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+        return impl.add(p,
+                        std::forward<KeyInput>(k),
+                        std::forward<ValueInput>(v));
+    }
+
+    template<typename KeyInput>
+    MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k) {
+        return impl.add(p, std::forward<KeyInput>(k), Value());
+    }
+
+    template<typename KeyInput, typename ValueInput>
+    MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+        return impl.relookupOrAdd(p, k,
+                                  std::forward<KeyInput>(k),
+                                  std::forward<ValueInput>(v));
+    }
+
+    // |all()| returns a Range containing |count()| elements. E.g.:
+    //
+    //   typedef HashMap<int,char> HM;
+    //   HM h;
+    //   for (HM::Range r = h.all(); !r.empty(); r.popFront())
+    //     char c = r.front().value();
+    //
+    // Also see the definition of Range in HashTable above (with T = Entry).
+    typedef typename Impl::Range Range;
+    Range all() const { return impl.all(); }
+
+    // Typedef for the enumeration class. An Enum may be used to examine and
+    // remove table entries:
+    //
+    //   typedef HashMap<int,char> HM;
+    //   HM s;
+    //   for (HM::Enum e(s); !e.empty(); e.popFront())
+    //     if (e.front().value() == 'l')
+    //       e.removeFront();
+    //
+    // Table resize may occur in Enum's destructor. Also see the definition of
+    // Enum in HashTable above (with T = Entry).
+    typedef typename Impl::Enum Enum;
+
+    // Remove all entries. This does not shrink the table. For that consider
+    // using the finish() method.
+    void clear() { impl.clear(); }
+
+    // Remove all entries. Unlike clear() this method tries to shrink the table.
+    // Unlike finish() it does not require the map to be initialized again.
+    void clearAndShrink() { impl.clearAndShrink(); }
+
+    // Remove all the entries and release all internal buffers. The map must
+    // be initialized again before any use.
+    void finish() { impl.finish(); }
+
+    // Does the table contain any entries?
+    bool empty() const { return impl.empty(); }
+
+    // Number of live elements in the map.
+    uint32_t count() const { return impl.count(); }
+
+    // Total number of allocations in the dynamic table. Note: resize will
+    // happen well before count() == capacity().
+    size_t capacity() const { return impl.capacity(); }
+
+    // Don't just call |impl.sizeOfExcludingThis()| because there's no
+    // guarantee that |impl| is the first field in HashMap.
+    size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+        return impl.sizeOfExcludingThis(mallocSizeOf);
+    }
+    size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const {
+        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+    }
+
+    Generation generation() const {
+        return impl.generation();
+    }
+
+    /************************************************** Shorthand operations */
+
+    bool has(const Lookup& l) const {
+        return impl.lookup(l).found();
+    }
+
+    // Overwrite existing value with v. Return false on oom.
+    template<typename KeyInput, typename ValueInput>
+    MOZ_MUST_USE bool put(KeyInput&& k, ValueInput&& v) {
+        AddPtr p = lookupForAdd(k);
+        if (p) {
+            p->value() = std::forward<ValueInput>(v);
+            return true;
+        }
+        return add(p, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+    }
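+
+    // For example, a put/lookup round trip might look as follows (an
+    // illustrative sketch):
+    //
+    //   HashMap<int, char> m;
+    //   if (!m.init() || !m.put(3, 'a'))
+    //     return false;
+    //   MOZ_ASSERT(m.lookup(3)->value() == 'a');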
+
+    // Like put, but assert that the given key is not already present.
+    template<typename KeyInput, typename ValueInput>
+    MOZ_MUST_USE bool putNew(KeyInput&& k, ValueInput&& v) {
+        return impl.putNew(k, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+    }
+
+    // Only call this to populate an empty map after reserving space with init().
+    template<typename KeyInput, typename ValueInput>
+    void putNewInfallible(KeyInput&& k, ValueInput&& v) {
+        impl.putNewInfallible(k, std::forward<KeyInput>(k), std::forward<ValueInput>(v));
+    }
+
+    // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom.
+    Ptr lookupWithDefault(const Key& k, const Value& defaultValue) {
+        AddPtr p = lookupForAdd(k);
+        if (p)
+            return p;
+        bool ok = add(p, k, defaultValue);
+        MOZ_ASSERT_IF(!ok, !p); // p is left false-y on oom.
+        (void)ok;
+        return p;
+    }
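+
+    // For example, a counting map can be maintained with lookupWithDefault
+    // (an illustrative sketch; |counts| and |c| are hypothetical):
+    //
+    //   HashMap<char, uint32_t>::Ptr p = counts.lookupWithDefault(c, 0);
+    //   if (!p)
+    //     return false; // OOM
+    //   p->value()++;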
+
+    // Remove if present.
+    void remove(const Lookup& l) {
+        if (Ptr p = lookup(l))
+            remove(p);
+    }
+
+    // Infallibly rekey one entry, if necessary.
+    // Requires template parameters Key and HashPolicy::Lookup to be the same type.
+    void rekeyIfMoved(const Key& old_key, const Key& new_key) {
+        if (old_key != new_key)
+            rekeyAs(old_key, new_key, new_key);
+    }
+
+    // Infallibly rekey one entry if present, and return whether that happened.
+    bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const Key& new_key) {
+        if (Ptr p = lookup(old_lookup)) {
+            impl.rekeyAndMaybeRehash(p, new_lookup, new_key);
+            return true;
+        }
+        return false;
+    }
+
+    // HashMap is movable
+    HashMap(HashMap&& rhs) : impl(std::move(rhs.impl)) {}
+    void operator=(HashMap&& rhs) {
+        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+        impl = std::move(rhs.impl);
+    }
+
+  private:
+    // HashMap is not copyable or assignable
+    HashMap(const HashMap& hm) = delete;
+    HashMap& operator=(const HashMap& hm) = delete;
+
+    friend class Impl::Enum;
+};
+
+/*****************************************************************************/
+
+// A performant, STL-like container providing a hash-based set of values. In
+// particular, HashSet calls constructors and destructors of all objects added
+// so non-PODs may be used safely.
+//
+// T requirements:
+//  - movable, destructible, assignable
+// HashPolicy requirements:
+//  - see Hash Policy section below
+// AllocPolicy:
+//  - see AllocPolicy.h
+//
+// Note:
+// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
+//   HashSet must not call back into the same HashSet object.
+// - Due to the lack of exception handling, the user must call |init()|.
+template <class T,
+          class HashPolicy = DefaultHasher<T>,
+          class AllocPolicy = MallocAllocPolicy>
+class HashSet
+{
+    struct SetOps : HashPolicy
+    {
+        using Base = HashPolicy;
+        typedef T KeyType;
+        static const KeyType& getKey(const T& t) { return t; }
+        static void setKey(T& t, KeyType& k) { HashPolicy::rekey(t, k); }
+    };
+
+    typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
+    Impl impl;
+
+  public:
+    typedef typename HashPolicy::Lookup Lookup;
+    typedef T Entry;
+
+    // HashSet construction is fallible (due to OOM); thus the user must call
+    // init after constructing a HashSet and check the return value.
+    explicit HashSet(AllocPolicy a = AllocPolicy()) : impl(a) {}
+    MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
+    bool initialized() const { return impl.initialized(); }
+
+    // Return whether the given lookup value is present in the map. E.g.:
+    //
+    //   typedef HashSet<int> HS;
+    //   HS h;
+    //   if (HS::Ptr p = h.lookup(3)) {
+    //     assert(*p == 3); // p acts like a pointer to int
+    //   }
+    //
+    // Also see the definition of Ptr in HashTable above.
+    typedef typename Impl::Ptr Ptr;
+    MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
+
+    // Like lookup, but does not assert if two threads call lookup at the same
+    // time. Only use this method when none of the threads will modify the map.
+    MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const {
+        return impl.readonlyThreadsafeLookup(l);
+    }
+
+    // Assuming |p.found()|, remove |*p|.
+    void remove(Ptr p) { impl.remove(p); }
+
+    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+    // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
+    // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
+    //
+    //   typedef HashSet<int> HS;
+    //   HS h;
+    //   HS::AddPtr p = h.lookupForAdd(3);
+    //   if (!p) {
+    //     if (!h.add(p, 3))
+    //       return false;
+    //   }
+    //   assert(*p == 3); // p acts like a pointer to int
+    //
+    // Also see the definition of AddPtr in HashTable above.
+    //
+    // N.B. The caller must ensure that no mutating hash table operations
+    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
+    // looking up the key a second time, the caller may use the more efficient
+    // relookupOrAdd method. This method reuses part of the hashing computation
+    // to more efficiently insert the key if it has not been added. For
+    // example, a mutation-handling version of the previous example:
+    //
+    //   HS::AddPtr p = h.lookupForAdd(3);
+    //   if (!p) {
+    //     call_that_may_mutate_h();
+    //     if (!h.relookupOrAdd(p, 3, 3))
+    //       return false;
+    //   }
+    //   assert(*p == 3);
+    //
+    // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
+    // entry |t|, where the caller ensures match(l,t).
+    typedef typename Impl::AddPtr AddPtr;
+    MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const {
+        return impl.lookupForAdd(l);
+    }
+
+    template <typename U>
+    MOZ_MUST_USE bool add(AddPtr& p, U&& u) {
+        return impl.add(p, std::forward<U>(u));
+    }
+
+    template <typename U>
+    MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, U&& u) {
+        return impl.relookupOrAdd(p, l, std::forward<U>(u));
+    }
+
+    // |all()| returns a Range containing |count()| elements:
+    //
+    //   typedef HashSet<int> HS;
+    //   HS h;
+    //   for (HS::Range r = h.all(); !r.empty(); r.popFront())
+    //     int i = r.front();
+    //
+    // Also see the definition of Range in HashTable above.
+    typedef typename Impl::Range Range;
+    Range all() const { return impl.all(); }
+
+    // Typedef for the enumeration class. An Enum may be used to examine and
+    // remove table entries:
+    //
+    //   typedef HashSet<int> HS;
+    //   HS s;
+    //   for (HS::Enum e(s); !e.empty(); e.popFront())
+    //     if (e.front() == 42)
+    //       e.removeFront();
+    //
+    // Table resize may occur in Enum's destructor. Also see the definition of
+    // Enum in HashTable above.
+    typedef typename Impl::Enum Enum;
+
+    // Remove all entries. This does not shrink the table. For that consider
+    // using the finish() method.
+    void clear() { impl.clear(); }
+
+    // Remove all entries. Unlike clear() this method tries to shrink the table.
+    // Unlike finish() it does not require the set to be initialized again.
+    void clearAndShrink() { impl.clearAndShrink(); }
+
+    // Remove all the entries and release all internal buffers. The set must
+    // be initialized again before any use.
+    void finish() { impl.finish(); }
+
+    // Does the table contain any entries?
+    bool empty() const { return impl.empty(); }
+
+    // Number of live elements in the map.
+    uint32_t count() const { return impl.count(); }
+
+    // Total number of allocations in the dynamic table. Note: resize will
+    // happen well before count() == capacity().
+    size_t capacity() const { return impl.capacity(); }
+
+    // Don't just call |impl.sizeOfExcludingThis()| because there's no
+    // guarantee that |impl| is the first field in HashSet.
+    size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+        return impl.sizeOfExcludingThis(mallocSizeOf);
+    }
+    size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const {
+        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+    }
+
+    Generation generation() const {
+        return impl.generation();
+    }
+
+    /************************************************** Shorthand operations */
+
+    bool has(const Lookup& l) const {
+        return impl.lookup(l).found();
+    }
+
+    // Add |u| if it is not present already. Return false on oom.
+    template <typename U>
+    MOZ_MUST_USE bool put(U&& u) {
+        AddPtr p = lookupForAdd(u);
+        return p ? true : add(p, std::forward<U>(u));
+    }
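+
+    // For example (an illustrative sketch):
+    //
+    //   HashSet<int> s;
+    //   if (!s.init() || !s.put(42))
+    //     return false;
+    //   MOZ_ASSERT(s.has(42));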
+
+    // Like put, but assert that the given key is not already present.
+    template <typename U>
+    MOZ_MUST_USE bool putNew(U&& u) {
+        return impl.putNew(u, std::forward<U>(u));
+    }
+
+    template <typename U>
+    MOZ_MUST_USE bool putNew(const Lookup& l, U&& u) {
+        return impl.putNew(l, std::forward<U>(u));
+    }
+
+    // Only call this to populate an empty set after reserving space with init().
+    template <typename U>
+    void putNewInfallible(const Lookup& l, U&& u) {
+        impl.putNewInfallible(l, std::forward<U>(u));
+    }
+
+    void remove(const Lookup& l) {
+        if (Ptr p = lookup(l))
+            remove(p);
+    }
+
+    // Infallibly rekey one entry, if present.
+    // Requires template parameters T and HashPolicy::Lookup to be the same type.
+    void rekeyIfMoved(const Lookup& old_value, const T& new_value) {
+        if (old_value != new_value)
+            rekeyAs(old_value, new_value, new_value);
+    }
+
+    // Infallibly rekey one entry if present, and return whether that happened.
+    bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const T& new_value) {
+        if (Ptr p = lookup(old_lookup)) {
+            impl.rekeyAndMaybeRehash(p, new_lookup, new_value);
+            return true;
+        }
+        return false;
+    }
+
+    // Infallibly replace the current key at |p| with an equivalent key.
+    // Specifically, both HashPolicy::hash and HashPolicy::match must return
+    // identical results for the new and old key when applied against all
+    // possible matching values.
+    void replaceKey(Ptr p, const T& new_value) {
+        MOZ_ASSERT(p.found());
+        MOZ_ASSERT(*p != new_value);
+        MOZ_ASSERT(HashPolicy::hash(*p) == HashPolicy::hash(new_value));
+        MOZ_ASSERT(HashPolicy::match(*p, new_value));
+        const_cast<T&>(*p) = new_value;
+    }
+
+    // HashSet is movable
+    HashSet(HashSet&& rhs) : impl(std::move(rhs.impl)) {}
+    void operator=(HashSet&& rhs) {
+        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+        impl = std::move(rhs.impl);
+    }
+
+  private:
+    // HashSet is not copyable or assignable
+    HashSet(const HashSet& hs) = delete;
+    HashSet& operator=(const HashSet& hs) = delete;
+
+    friend class Impl::Enum;
+};
+
+/*****************************************************************************/
+
+// Hash Policy
+//
+// A hash policy P for a hash table with key-type Key must provide:
+//  - a type |P::Lookup| to use to lookup table entries;
+//  - a static member function |P::hash| with signature
+//
+//      static mozilla::HashNumber hash(Lookup)
+//
+//    to use to hash the lookup type; and
+//  - a static member function |P::match| with signature
+//
+//      static bool match(Key, Lookup)
+//
+//    to use to test equality of key and lookup values.
+//
+// Normally, Lookup = Key. In general, though, different values and types of
+// values can be used to lookup and store. If a Lookup value |l| is != to the
+// added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.:
+//
+//   mozilla::HashSet<Key, P>::AddPtr p = h.lookup(l);
+//   if (!p) {
+//     assert(P::match(k, l)); // must hold
+//     h.add(p, k);
+//   }
+
+// Pointer hashing policy that uses HashGeneric() to create good hashes for
+// pointers. Note that we don't shift out the lowest k bits to generate a
+// good distribution for arena allocated pointers.
+template <typename Key>
+struct PointerHasher
+{
+    typedef Key Lookup;
+    static HashNumber hash(const Lookup& l) {
+        size_t word = reinterpret_cast<size_t>(l);
+        return HashGeneric(word);
+    }
+    static bool match(const Key& k, const Lookup& l) {
+        return k == l;
+    }
+    static void rekey(Key& k, const Key& newKey) {
+        k = newKey;
+    }
+};
+
+// Default hash policy: just use the 'lookup' value. This of course only
+// works if the lookup value is integral. HashTable applies ScrambleHashCode to
+// the result of the 'hash' which means that it is 'ok' if the lookup value is
+// not well distributed over the HashNumber domain.
+template <class Key>
+struct DefaultHasher
+{
+    typedef Key Lookup;
+    static HashNumber hash(const Lookup& l) {
+        // Hash if can implicitly cast to hash number type.
+        return l;
+    }
+    static bool match(const Key& k, const Lookup& l) {
+        // Use builtin or overloaded operator==.
+        return k == l;
+    }
+    static void rekey(Key& k, const Key& newKey) {
+        k = newKey;
+    }
+};
+
+// Specialize hashing policy for pointer types. It assumes that the type is
+// at least word-aligned. For types with smaller size use PointerHasher.
+template <class T>
+struct DefaultHasher<T*> : PointerHasher<T*>
+{};
+
+// Specialize hashing policy for mozilla::UniquePtr to proxy the UniquePtr's
+// raw pointer to PointerHasher.
+template <class T, class D>
+struct DefaultHasher<UniquePtr<T, D>>
+{
+    using Lookup = UniquePtr<T, D>;
+    using PtrHasher = PointerHasher<T*>;
+
+    static HashNumber hash(const Lookup& l) {
+        return PtrHasher::hash(l.get());
+    }
+    static bool match(const UniquePtr<T, D>& k, const Lookup& l) {
+        return PtrHasher::match(k.get(), l.get());
+    }
+    static void rekey(UniquePtr<T, D>& k, UniquePtr<T, D>&& newKey) {
+        k = std::move(newKey);
+    }
+};
+
+// For doubles, we can xor the two uint32s.
+template <>
+struct DefaultHasher<double>
+{
+    typedef double Lookup;
+    static HashNumber hash(double d) {
+        static_assert(sizeof(HashNumber) == 4,
+                      "subsequent code assumes a four-byte hash");
+        uint64_t u = BitwiseCast<uint64_t>(d);
+        return HashNumber(u ^ (u >> 32));
+    }
+    static bool match(double lhs, double rhs) {
+        return BitwiseCast<uint64_t>(lhs) == BitwiseCast<uint64_t>(rhs);
+    }
+};
+
+template <>
+struct DefaultHasher<float>
+{
+    typedef float Lookup;
+    static HashNumber hash(float f) {
+        static_assert(sizeof(HashNumber) == 4,
+                      "subsequent code assumes a four-byte hash");
+        return HashNumber(BitwiseCast<uint32_t>(f));
+    }
+    static bool match(float lhs, float rhs) {
+        return BitwiseCast<uint32_t>(lhs) == BitwiseCast<uint32_t>(rhs);
+    }
+};
+
+// A hash policy that compares C strings.
+struct CStringHasher
+{
+    typedef const char* Lookup;
+    static HashNumber hash(Lookup l) {
+        return HashString(l);
+    }
+    static bool match(const char* key, Lookup lookup) {
+        return strcmp(key, lookup) == 0;
+    }
+};
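+
+// For example, CStringHasher lets C strings be compared by contents rather
+// than by pointer identity (an illustrative sketch):
+//
+//   HashSet<const char*, CStringHasher> names;
+//   if (!names.init() || !names.putNew("example"))
+//     return false;
+//   MOZ_ASSERT(names.has("example")); // matches by strcmp, not by address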
+
+// Fallible hashing interface.
+//
+// Most of the time generating a hash code is infallible so this class provides
+// default methods that always succeed. Specialize this class for your own hash
+// policy to provide fallible hashing.
+//
+// This is used by MovableCellHasher to handle the fact that generating a unique
+// ID for cell pointer may fail due to OOM.
+template <typename HashPolicy>
+struct FallibleHashMethods
+{
+    // Return true if a hashcode is already available for its argument. Once
+    // this returns true for a specific argument it must continue to do so.
+    template <typename Lookup> static bool hasHash(Lookup&& l) { return true; }
+
+    // Fallible method to ensure a hashcode exists for its argument and create
+    // one if not. Returns false on error, e.g. out of memory.
+    template <typename Lookup> static bool ensureHash(Lookup&& l) { return true; }
+};
+
+template <typename HashPolicy, typename Lookup>
+static bool
+HasHash(Lookup&& l) {
+    return FallibleHashMethods<typename HashPolicy::Base>::hasHash(std::forward<Lookup>(l));
+}
+
+template <typename HashPolicy, typename Lookup>
+static bool
+EnsureHash(Lookup&& l) {
+    return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(std::forward<Lookup>(l));
+}
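+
+// For example, a policy whose hash codes must be allocated on demand could
+// specialize this interface along the following lines (an illustrative
+// sketch; MyPolicy is hypothetical):
+//
+//   template <> struct FallibleHashMethods<MyPolicy>
+//   {
+//     template <typename Lookup> static bool hasHash(Lookup&& l);
+//     template <typename Lookup> static bool ensureHash(Lookup&& l);
+//   };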
+
+/*****************************************************************************/
+
+// Both HashMap and HashSet are implemented by a single HashTable that is even
+// more heavily parameterized than the other two. This leaves HashTable gnarly
+// and extremely coupled to HashMap and HashSet; thus code should not use
+// HashTable directly.
+
+template <class Key, class Value>
+class HashMapEntry
+{
+    Key key_;
+    Value value_;
+
+    template <class, class, class> friend class detail::HashTable;
+    template <class> friend class detail::HashTableEntry;
+    template <class, class, class, class> friend class HashMap;
+
+  public:
+    template<typename KeyInput, typename ValueInput>
+    HashMapEntry(KeyInput&& k, ValueInput&& v)
+      : key_(std::forward<KeyInput>(k)),
+        value_(std::forward<ValueInput>(v))
+    {}
+
+    HashMapEntry(HashMapEntry&& rhs)
+      : key_(std::move(rhs.key_)),
+        value_(std::move(rhs.value_))
+    {}
+
+    void operator=(HashMapEntry&& rhs) {
+        key_ = std::move(rhs.key_);
+        value_ = std::move(rhs.value_);
+    }
+
+    typedef Key KeyType;
+    typedef Value ValueType;
+
+    const Key& key() const { return key_; }
+    Key& mutableKey() { return key_; }
+    const Value& value() const { return value_; }
+    Value& value() { return value_; }
+
+  private:
+    HashMapEntry(const HashMapEntry&) = delete;
+    void operator=(const HashMapEntry&) = delete;
+};
+
+template <typename K, typename V>
+struct IsPod<HashMapEntry<K, V> >
+  : IntegralConstant<bool, IsPod<K>::value && IsPod<V>::value>
+{};
+
+namespace detail {
+
+template <class T, class HashPolicy, class AllocPolicy>
+class HashTable;
+
+template <typename T>
+class HashTableEntry
+{
+  private:
+    using NonConstT = typename RemoveConst<T>::Type;
+
+    static const HashNumber sFreeKey = 0;
+    static const HashNumber sRemovedKey = 1;
+    static const HashNumber sCollisionBit = 1;
+
+    HashNumber keyHash = sFreeKey;
+    alignas(NonConstT) unsigned char valueData_[sizeof(NonConstT)];
+
+  private:
+    template <class, class, class> friend class HashTable;
+
+    // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a
+    // -Werror compile error) to reinterpret_cast<> |valueData_| to |T*|, even
+    // through |void*|. Placing the latter cast in these separate functions
+    // breaks the chain such that affected GCC versions no longer warn/error.
+    void* rawValuePtr() { return valueData_; }
+
+    static bool isLiveHash(HashNumber hash)
+    {
+        return hash > sRemovedKey;
+    }
+
+    HashTableEntry(const HashTableEntry&) = delete;
+    void operator=(const HashTableEntry&) = delete;
+
+    NonConstT* valuePtr() { return reinterpret_cast<NonConstT*>(rawValuePtr()); }
+
+    void destroyStoredT() {
+        NonConstT* ptr = valuePtr();
+        ptr->~T();
+        MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr));
+    }
+
+  public:
+    HashTableEntry() = default;
+
+    ~HashTableEntry() {
+        if (isLive())
+            destroyStoredT();
+
+        MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this));
+    }
+
+    void destroy() {
+        MOZ_ASSERT(isLive());
+        destroyStoredT();
+    }
+
+    void swap(HashTableEntry* other) {
+        if (this == other)
+            return;
+        MOZ_ASSERT(isLive());
+        if (other->isLive()) {
+            Swap(*valuePtr(), *other->valuePtr());
+        } else {
+            *other->valuePtr() = std::move(*valuePtr());
+            destroy();
+        }
+        Swap(keyHash, other->keyHash);
+    }
+
+    T& get() {
+        MOZ_ASSERT(isLive());
+        return *valuePtr();
+    }
+
+    NonConstT& getMutable() {
+        MOZ_ASSERT(isLive());
+        return *valuePtr();
+    }
+
+    bool isFree() const {
+        return keyHash == sFreeKey;
+    }
+
+    void clearLive() {
+        MOZ_ASSERT(isLive());
+        keyHash = sFreeKey;
+        destroyStoredT();
+    }
+
+    void clear() {
+        if (isLive())
+            destroyStoredT();
+
+        MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this));
+        keyHash = sFreeKey;
+    }
+
+    bool isRemoved() const {
+        return keyHash == sRemovedKey;
+    }
+
+    void removeLive() {
+        MOZ_ASSERT(isLive());
+        keyHash = sRemovedKey;
+        destroyStoredT();
+    }
+
+    bool isLive() const {
+        return isLiveHash(keyHash);
+    }
+
+    void setCollision() {
+        MOZ_ASSERT(isLive());
+        keyHash |= sCollisionBit;
+    }
+
+    void unsetCollision() {
+        keyHash &= ~sCollisionBit;
+    }
+
+    bool hasCollision() const {
+        return keyHash & sCollisionBit;
+    }
+
+    bool matchHash(HashNumber hn) {
+        return (keyHash & ~sCollisionBit) == hn;
+    }
+
+    HashNumber getKeyHash() const {
+        return keyHash & ~sCollisionBit;
+    }
+
+    template <typename... Args>
+    void setLive(HashNumber hn, Args&&... args)
+    {
+        MOZ_ASSERT(!isLive());
+        keyHash = hn;
+        new (valuePtr()) T(std::forward<Args>(args)...);
+        MOZ_ASSERT(isLive());
+    }
+};
+
+template <class T, class HashPolicy, class AllocPolicy>
+class HashTable : private AllocPolicy
+{
+    friend class mozilla::ReentrancyGuard;
+
+    typedef typename RemoveConst<T>::Type NonConstT;
+    typedef typename HashPolicy::KeyType Key;
+    typedef typename HashPolicy::Lookup Lookup;
+
+  public:
+    using Entry = HashTableEntry<T>;
+
+    // A nullable pointer to a hash table element. A Ptr |p| can be tested
+    // either explicitly |if (p.found()) p->...| or using boolean conversion
+    // |if (p) p->...|. Ptr objects must not be used after any mutating hash
+    // table operations unless |generation()| is tested.
+    class Ptr
+    {
+        friend class HashTable;
+
+        Entry* entry_;
+#ifdef DEBUG
+        const HashTable* table_;
+        Generation generation;
+#endif
+
+      protected:
+        Ptr(Entry& entry, const HashTable& tableArg)
+          : entry_(&entry)
+#ifdef DEBUG
+          , table_(&tableArg)
+          , generation(tableArg.generation())
+#endif
+        {}
+
+      public:
+        Ptr()
+          : entry_(nullptr)
+#ifdef DEBUG
+          , table_(nullptr)
+          , generation(0)
+#endif
+        {}
+
+        bool isValid() const {
+            return !!entry_;
+        }
+
+        bool found() const {
+            if (!isValid())
+                return false;
+#ifdef DEBUG
+            MOZ_ASSERT(generation == table_->generation());
+#endif
+            return entry_->isLive();
+        }
+
+        explicit operator bool() const {
+            return found();
+        }
+
+        bool operator==(const Ptr& rhs) const {
+            MOZ_ASSERT(found() && rhs.found());
+            return entry_ == rhs.entry_;
+        }
+
+        bool operator!=(const Ptr& rhs) const {
+#ifdef DEBUG
+            MOZ_ASSERT(generation == table_->generation());
+#endif
+            return !(*this == rhs);
+        }
+
+        T& operator*() const {
+#ifdef DEBUG
+            MOZ_ASSERT(found());
+            MOZ_ASSERT(generation == table_->generation());
+#endif
+            return entry_->get();
+        }
+
+        T* operator->() const {
+#ifdef DEBUG
+            MOZ_ASSERT(found());
+            MOZ_ASSERT(generation == table_->generation());
+#endif
+            return &entry_->get();
+        }
+    };
+
+    // A Ptr that can be used to add a key after a failed lookup.
+    class AddPtr : public Ptr
+    {
+        friend class HashTable;
+        HashNumber keyHash;
+#ifdef DEBUG
+        uint64_t mutationCount;
+#endif
+
+        AddPtr(Entry& entry, const HashTable& tableArg, HashNumber hn)
+          : Ptr(entry, tableArg)
+          , keyHash(hn)
+#ifdef DEBUG
+          , mutationCount(tableArg.mutationCount)
+#endif
+        {}
+
+      public:
+        AddPtr() : keyHash(0) {}
+    };
+
+    // A collection of hash table entries. The collection is enumerated by
+    // calling |front()| followed by |popFront()| as long as |!empty()|. As
+    // with Ptr/AddPtr, Range objects must not be used after any mutating hash
+    // table operation unless the |generation()| is tested.
+    class Range
+    {
+      protected:
+        friend class HashTable;
+
+        Range(const HashTable& tableArg, Entry* c, Entry* e)
+          : cur(c)
+          , end(e)
+#ifdef DEBUG
+          , table_(&tableArg)
+          , mutationCount(tableArg.mutationCount)
+          , generation(tableArg.generation())
+          , validEntry(true)
+#endif
+        {
+            while (cur < end && !cur->isLive())
+                ++cur;
+        }
+
+        Entry* cur;
+        Entry* end;
+#ifdef DEBUG
+        const HashTable* table_;
+        uint64_t mutationCount;
+        Generation generation;
+        bool validEntry;
+#endif
+
+      public:
+        Range()
+          : cur(nullptr)
+          , end(nullptr)
+#ifdef DEBUG
+          , table_(nullptr)
+          , mutationCount(0)
+          , generation(0)
+          , validEntry(false)
+#endif
+        {}
+
+        bool empty() const {
+#ifdef DEBUG
+            MOZ_ASSERT(generation == table_->generation());
+            MOZ_ASSERT(mutationCount == table_->mutationCount);
+#endif
+            return cur == end;
+        }
+
+        T& front() const {
+            MOZ_ASSERT(!empty());
+#ifdef DEBUG
+            MOZ_ASSERT(validEntry);
+            MOZ_ASSERT(generation == table_->generation());
+            MOZ_ASSERT(mutationCount == table_->mutationCount);
+#endif
+            return cur->get();
+        }
+
+        void popFront() {
+            MOZ_ASSERT(!empty());
+#ifdef DEBUG
+            MOZ_ASSERT(generation == table_->generation());
+            MOZ_ASSERT(mutationCount == table_->mutationCount);
+#endif
+            while (++cur < end && !cur->isLive())
+                continue;
+#ifdef DEBUG
+            validEntry = true;
+#endif
+        }
+    };
+
+    // A Range whose lifetime delimits a mutating enumeration of a hash table.
+    // Since rehashing when elements were removed during enumeration would be
+    // bad, it is postponed until the Enum is destructed. Since the Enum's
+    // destructor touches the hash table, the user must ensure that the hash
+    // table is still alive when the destructor runs.
+    class Enum : public Range
+    {
+        friend class HashTable;
+
+        HashTable& table_;
+        bool rekeyed;
+        bool removed;
+
+        // Enum is movable but not copyable.
+        Enum(const Enum&) = delete;
+        void operator=(const Enum&) = delete;
+
+      public:
+        template<class Map>
+        explicit Enum(Map& map)
+          : Range(map.all()), table_(map.impl), rekeyed(false), removed(false) {}
+
+        MOZ_IMPLICIT Enum(Enum&& other)
+          : Range(other), table_(other.table_), rekeyed(other.rekeyed), removed(other.removed)
+        {
+            other.rekeyed = false;
+            other.removed = false;
+        }
+
+        // Removes the |front()| element from the table, leaving |front()|
+        // invalid until the next call to |popFront()|. For example:
+        //
+        //   HashSet<int> s;
+        //   for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
+        //     if (e.front() == 42)
+        //       e.removeFront();
+        void removeFront() {
+            table_.remove(*this->cur);
+            removed = true;
+#ifdef DEBUG
+            this->validEntry = false;
+            this->mutationCount = table_.mutationCount;
+#endif
+        }
+
+        NonConstT& mutableFront() {
+            MOZ_ASSERT(!this->empty());
+#ifdef DEBUG
+            MOZ_ASSERT(this->validEntry);
+            MOZ_ASSERT(this->generation == this->Range::table_->generation());
+            MOZ_ASSERT(this->mutationCount == this->Range::table_->mutationCount);
+#endif
+            return this->cur->getMutable();
+        }
+
+        // Removes the |front()| element and re-inserts it into the table with
+        // a new key at the new Lookup position. |front()| is invalid after
+        // this operation until the next call to |popFront()|.
+        void rekeyFront(const Lookup& l, const Key& k) {
+            MOZ_ASSERT(&k != &HashPolicy::getKey(this->cur->get()));
+            Ptr p(*this->cur, table_);
+            table_.rekeyWithoutRehash(p, l, k);
+            rekeyed = true;
+#ifdef DEBUG
+            this->validEntry = false;
+            this->mutationCount = table_.mutationCount;
+#endif
+        }
+
+        void rekeyFront(const Key& k) {
+            rekeyFront(k, k);
+        }
+
+        // Potentially rehashes the table.
+        ~Enum() {
+            if (rekeyed) {
+                table_.gen++;
+                table_.checkOverRemoved();
+            }
+
+            if (removed)
+                table_.compactIfUnderloaded();
+        }
+    };
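+
+    // For example, infallible rekeying during enumeration might look as
+    // follows (an illustrative sketch; needsNewKey() and newKeyFor() are
+    // hypothetical helpers):
+    //
+    //   for (HashSet<int>::Enum e(s); !e.empty(); e.popFront()) {
+    //     if (needsNewKey(e.front()))
+    //       e.rekeyFront(newKeyFor(e.front()));
+    //   }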
+
+    // HashTable is movable
+    HashTable(HashTable&& rhs)
+      : AllocPolicy(rhs)
+    {
+        PodAssign(this, &rhs);
+        rhs.table = nullptr;
+    }
+    void operator=(HashTable&& rhs) {
+        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+        if (table)
+            destroyTable(*this, table, capacity());
+        PodAssign(this, &rhs);
+        rhs.table = nullptr;
+    }
+
+  private:
+    // HashTable is not copyable or assignable
+    HashTable(const HashTable&) = delete;
+    void operator=(const HashTable&) = delete;
+
+  private:
+    static const size_t CAP_BITS = 30;
+
+  public:
+    uint64_t    gen:56;       // entry storage generation number
+    uint64_t    hashShift:8;  // multiplicative hash shift
+    Entry*      table;        // entry storage
+    uint32_t    entryCount;   // number of entries in table
+    uint32_t    removedCount; // removed entry sentinels in table
+
+#ifdef DEBUG
+    uint64_t     mutationCount;
+    mutable bool mEntered;
+    // Note that some updates to these stats are not thread-safe. See the
+    // comment on the three-argument overloading of HashTable::lookup().
+    mutable struct Stats
+    {
+        uint32_t searches;       // total number of table searches
+        uint32_t steps;          // hash chain links traversed
+        uint32_t hits;           // searches that found key
+        uint32_t misses;         // searches that didn't find key
+        uint32_t addOverRemoved; // adds that recycled a removed entry
+        uint32_t removes;        // calls to remove
+        uint32_t removeFrees;    // calls to remove that freed the entry
+        uint32_t grows;          // table expansions
+        uint32_t shrinks;        // table contractions
+        uint32_t compresses;     // table compressions
+        uint32_t rehashes;       // tombstone decontaminations
+    } stats;
+#   define METER(x) x
+#else
+#   define METER(x)
+#endif
+
+    // The default initial capacity is 32 (enough to hold 16 elements), but it
+    // can be as low as 4.
+    static const uint32_t sMinCapacity = 4;
+    static const uint32_t sMaxInit     = 1u << (CAP_BITS - 1);
+    static const uint32_t sMaxCapacity = 1u << CAP_BITS;
+
+    // Hash-table alpha is conceptually a fraction, but to avoid floating-point
+    // math we implement it as a ratio of integers.
+    static const uint8_t sAlphaDenominator = 4;
+    static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
+    static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
+
+    static const HashNumber sFreeKey = Entry::sFreeKey;
+    static const HashNumber sRemovedKey = Entry::sRemovedKey;
+    static const HashNumber sCollisionBit = Entry::sCollisionBit;
+
+    void setTableSizeLog2(uint32_t sizeLog2)
+    {
+        hashShift = kHashNumberBits - sizeLog2;
+    }
+
+    static bool isLiveHash(HashNumber hash)
+    {
+        return Entry::isLiveHash(hash);
+    }
+
+    static HashNumber prepareHash(const Lookup& l)
+    {
+        HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));
+
+        // Avoid reserved hash codes.
+        if (!isLiveHash(keyHash))
+            keyHash -= (sRemovedKey + 1);
+        return keyHash & ~sCollisionBit;
+    }
+
+    enum FailureBehavior { DontReportFailure = false, ReportFailure = true };
+
+    static Entry* createTable(AllocPolicy& alloc, uint32_t capacity,
+                              FailureBehavior reportFailure = ReportFailure)
+    {
+        Entry* table = reportFailure
+                       ? alloc.template pod_malloc<Entry>(capacity)
+                       : alloc.template maybe_pod_malloc<Entry>(capacity);
+        if (table) {
+            for (uint32_t i = 0; i < capacity; i++)
+                new (&table[i]) Entry();
+        }
+        return table;
+    }
+
+    static Entry* maybeCreateTable(AllocPolicy& alloc, uint32_t capacity)
+    {
+        Entry* table = alloc.template maybe_pod_malloc<Entry>(capacity);
+        if (table) {
+            for (uint32_t i = 0; i < capacity; i++)
+                new (&table[i]) Entry();
+        }
+        return table;
+    }
+
+    static void destroyTable(AllocPolicy& alloc, Entry* oldTable, uint32_t capacity)
+    {
+        Entry* end = oldTable + capacity;
+        for (Entry* e = oldTable; e < end; ++e)
+            e->~Entry();
+        alloc.free_(oldTable, capacity);
+    }
+
+  public:
+    explicit HashTable(AllocPolicy ap)
+      : AllocPolicy(ap)
+      , gen(0)
+      , hashShift(kHashNumberBits)
+      , table(nullptr)
+      , entryCount(0)
+      , removedCount(0)
+#ifdef DEBUG
+      , mutationCount(0)
+      , mEntered(false)
+#endif
+    {}
+
+    MOZ_MUST_USE bool init(uint32_t length)
+    {
+        MOZ_ASSERT(!initialized());
+
+        // Reject all lengths whose initial computed capacity would exceed
+        // sMaxCapacity. Round that maximum length down to the nearest power
+        // of two for speedier code.
+        if (MOZ_UNLIKELY(length > sMaxInit)) {
+            this->reportAllocOverflow();
+            return false;
+        }
+
+        static_assert((sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
+                      "multiplication in numerator below could overflow");
+        static_assert(sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
+                      "numerator calculation below could potentially overflow");
+
+        // Compute the smallest capacity allowing |length| elements to be
+        // inserted without rehashing: ceil(length / max-alpha).  (Ceiling
+        // integral division: <http://stackoverflow.com/a/2745086>.)
+        uint32_t newCapacity =
+            (length * sAlphaDenominator + sMaxAlphaNumerator - 1) / sMaxAlphaNumerator;
+        if (newCapacity < sMinCapacity)
+            newCapacity = sMinCapacity;
+
+        // Round up capacity to next power-of-two.
+        uint32_t log2 = mozilla::CeilingLog2(newCapacity);
+        newCapacity = 1u << log2;
+
+        MOZ_ASSERT(newCapacity >= length);
+        MOZ_ASSERT(newCapacity <= sMaxCapacity);
+
+        table = createTable(*this, newCapacity);
+        if (!table)
+            return false;
+
+        setTableSizeLog2(log2);
+        METER(memset(&stats, 0, sizeof(stats)));
+        return true;
+    }
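+
+    // For example, with the default |length| of 16, the smallest capacity
+    // honoring the max alpha of 3/4 is ceil(16 * 4 / 3) = 22, which rounds up
+    // to the power of two 32 -- the documented default initial capacity.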
+
+    bool initialized() const
+    {
+        return !!table;
+    }
+
+    ~HashTable()
+    {
+        if (table)
+            destroyTable(*this, table, capacity());
+    }
+
+  private:
+    HashNumber hash1(HashNumber hash0) const
+    {
+        return hash0 >> hashShift;
+    }
+
+    struct DoubleHash
+    {
+        HashNumber h2;
+        HashNumber sizeMask;
+    };
+
+    DoubleHash hash2(HashNumber curKeyHash) const
+    {
+        uint32_t sizeLog2 = kHashNumberBits - hashShift;
+        DoubleHash dh = {
+            ((curKeyHash << sizeLog2) >> hashShift) | 1,
+            (HashNumber(1) << sizeLog2) - 1
+        };
+        return dh;
+    }
+
+    static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash& dh)
+    {
+        return (h1 - dh.h2) & dh.sizeMask;
+    }
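+
+    // Note on the double hashing above: with capacity 2^n, hash1() takes the
+    // top n bits of the scrambled hash as the starting index, and hash2()
+    // produces an odd step. An odd step is coprime with the power-of-two
+    // capacity, so the probe sequence visits every slot before repeating.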
+
+    bool overloaded()
+    {
+        static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
+                      "multiplication below could overflow");
+        return entryCount + removedCount >=
+               capacity() * sMaxAlphaNumerator / sAlphaDenominator;
+    }
+
+    // Would the table be underloaded if it had the given capacity and entryCount?
+    static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
+    {
+        static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
+                      "multiplication below could overflow");
+        return capacity > sMinCapacity &&
+               entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator;
+    }
+
+    bool underloaded()
+    {
+        return wouldBeUnderloaded(capacity(), entryCount);
+    }
+
+    static MOZ_ALWAYS_INLINE bool match(Entry& e, const Lookup& l)
+    {
+        return HashPolicy::match(HashPolicy::getKey(e.get()), l);
+    }
+
+    // Warning: in order for readonlyThreadsafeLookup() to be safe this
+    // function must not modify the table in any way when |collisionBit| is 0.
+    // (The use of the METER() macro to increment stats violates this
+    // restriction but we will live with that for now because it's enabled so
+    // rarely.)
+    MOZ_ALWAYS_INLINE Entry&
+    lookup(const Lookup& l, HashNumber keyHash, uint32_t collisionBit) const
+    {
+        MOZ_ASSERT(isLiveHash(keyHash));
+        MOZ_ASSERT(!(keyHash & sCollisionBit));
+        MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
+        MOZ_ASSERT(table);
+        METER(stats.searches++);
+
+        // Compute the primary hash address.
+        HashNumber h1 = hash1(keyHash);
+        Entry* entry = &table[h1];
+
+        // Miss: return space for a new entry.
+        if (entry->isFree()) {
+            METER(stats.misses++);
+            return *entry;
+        }
+
+        // Hit: return entry.
+        if (entry->matchHash(keyHash) && match(*entry, l)) {
+            METER(stats.hits++);
+            return *entry;
+        }
+
+        // Collision: double hash.
+        DoubleHash dh = hash2(keyHash);
+
+        // Save the first removed entry pointer so we can recycle later.
+        Entry* firstRemoved = nullptr;
+
+        while (true) {
+            if (MOZ_UNLIKELY(entry->isRemoved())) {
+                if (!firstRemoved)
+                    firstRemoved = entry;
+            } else {
+                if (collisionBit == sCollisionBit)
+                    entry->setCollision();
+            }
+
+            METER(stats.steps++);
+            h1 = applyDoubleHash(h1, dh);
+
+            entry = &table[h1];
+            if (entry->isFree()) {
+                METER(stats.misses++);
+                return firstRemoved ? *firstRemoved : *entry;
+            }
+
+            if (entry->matchHash(keyHash) && match(*entry, l)) {
+                METER(stats.hits++);
+                return *entry;
+            }
+        }
+    }
+
+    // This is a copy of lookup hardcoded to the assumptions:
+    //   1. the lookup is a lookupForAdd
+    //   2. the key, whose |keyHash| has been passed is not in the table,
+    //   3. no entries have been removed from the table.
+    // This specialized search avoids the need for recovering lookup values
+    // from entries, which allows more flexible Lookup/Key types.
+    Entry& findFreeEntry(HashNumber keyHash)
+    {
+        MOZ_ASSERT(!(keyHash & sCollisionBit));
+        MOZ_ASSERT(table);
+        METER(stats.searches++);
+
+        // We assume 'keyHash' has already been distributed.
+
+        // Compute the primary hash address.
+        HashNumber h1 = hash1(keyHash);
+        Entry* entry = &table[h1];
+
+        // Miss: return space for a new entry.
+        if (!entry->isLive()) {
+            METER(stats.misses++);
+            return *entry;
+        }
+
+        // Collision: double hash.
+        DoubleHash dh = hash2(keyHash);
+
+        while (true) {
+            MOZ_ASSERT(!entry->isRemoved());
+            entry->setCollision();
+
+            METER(stats.steps++);
+            h1 = applyDoubleHash(h1, dh);
+
+            entry = &table[h1];
+            if (!entry->isLive()) {
+                METER(stats.misses++);
+                return *entry;
+            }
+        }
+    }
+
+    enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
+
+    RebuildStatus changeTableSize(int deltaLog2, FailureBehavior reportFailure = ReportFailure)
+    {
+        // Look, but don't touch, until we succeed in getting new entry store.
+        Entry* oldTable = table;
+        uint32_t oldCap = capacity();
+        uint32_t newLog2 = kHashNumberBits - hashShift + deltaLog2;
+        uint32_t newCapacity = 1u << newLog2;
+        if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
+            if (reportFailure)
+                this->reportAllocOverflow();
+            return RehashFailed;
+        }
+
+        Entry* newTable = createTable(*this, newCapacity, reportFailure);
+        if (!newTable)
+            return RehashFailed;
+
+        // We can't fail from here on, so update table parameters.
+        setTableSizeLog2(newLog2);
+        removedCount = 0;
+        gen++;
+        table = newTable;
+
+        // Copy only live entries, leaving removed ones behind.
+        Entry* end = oldTable + oldCap;
+        for (Entry* src = oldTable; src < end; ++src) {
+            if (src->isLive()) {
+                HashNumber hn = src->getKeyHash();
+                findFreeEntry(hn).setLive(
+                    hn, std::move(const_cast<NonConstT&>(src->get())));
+            }
+
+            src->~Entry();
+        }
+
+        // All entries have been destroyed, no need to destroyTable.
+        this->free_(oldTable, oldCap);
+        return Rehashed;
+    }
+
+    bool shouldCompressTable()
+    {
+        // Compress if a quarter or more of all entries are removed.
+        return removedCount >= (capacity() >> 2);
+    }
+
+    RebuildStatus checkOverloaded(FailureBehavior reportFailure = ReportFailure)
+    {
+        if (!overloaded())
+            return NotOverloaded;
+
+        int deltaLog2;
+        if (shouldCompressTable()) {
+            METER(stats.compresses++);
+            deltaLog2 = 0;
+        } else {
+            METER(stats.grows++);
+            deltaLog2 = 1;
+        }
+
+        return changeTableSize(deltaLog2, reportFailure);
+    }
+
+    // Infallibly rehash the table if we are overloaded with removals.
+    void checkOverRemoved()
+    {
+        if (overloaded()) {
+            if (checkOverloaded(DontReportFailure) == RehashFailed)
+                rehashTableInPlace();
+        }
+    }
+
+    void remove(Entry& e)
+    {
+        MOZ_ASSERT(table);
+        METER(stats.removes++);
+
+        if (e.hasCollision()) {
+            e.removeLive();
+            removedCount++;
+        } else {
+            METER(stats.removeFrees++);
+            e.clearLive();
+        }
+        entryCount--;
+#ifdef DEBUG
+        mutationCount++;
+#endif
+    }
+
+    void checkUnderloaded()
+    {
+        if (underloaded()) {
+            METER(stats.shrinks++);
+            (void) changeTableSize(-1, DontReportFailure);
+        }
+    }
+
+    // Resize the table down to the largest capacity which doesn't underload the
+    // table. Since we call checkUnderloaded() on every remove, you only need
+    // to call this after a bulk removal of items done without calling remove().
+    void compactIfUnderloaded()
+    {
+        int32_t resizeLog2 = 0;
+        uint32_t newCapacity = capacity();
+        while (wouldBeUnderloaded(newCapacity, entryCount)) {
+            newCapacity = newCapacity >> 1;
+            resizeLog2--;
+        }
+
+        if (resizeLog2 != 0)
+            (void) changeTableSize(resizeLog2, DontReportFailure);
+    }
+
+    // This is identical to changeTableSize(currentSize), but without requiring
+    // a second table. We do this by recycling the collision bits to tell us if
+    // the element is already inserted or still waiting to be inserted. Since
+    // already-inserted elements win any conflicts, we get the same table as we
+    // would have gotten through random insertion order.
+    void rehashTableInPlace()
+    {
+        METER(stats.rehashes++);
+        removedCount = 0;
+        gen++;
+        for (size_t i = 0; i < capacity(); ++i)
+            table[i].unsetCollision();
+
+        for (size_t i = 0; i < capacity();) {
+            Entry* src = &table[i];
+
+            if (!src->isLive() || src->hasCollision()) {
+                ++i;
+                continue;
+            }
+
+            HashNumber keyHash = src->getKeyHash();
+            HashNumber h1 = hash1(keyHash);
+            DoubleHash dh = hash2(keyHash);
+            Entry* tgt = &table[h1];
+            while (true) {
+                if (!tgt->hasCollision()) {
+                    src->swap(tgt);
+                    tgt->setCollision();
+                    break;
+                }
+
+                h1 = applyDoubleHash(h1, dh);
+                tgt = &table[h1];
+            }
+        }
+
+        // TODO: this algorithm leaves collision bits on *all* elements, even if
+        // they are on no collision path. We have the option of setting the
+        // collision bits correctly on a subsequent pass or skipping the rehash
+        // unless we are totally filled with tombstones: benchmark to find out
+        // which approach is best.
+    }
+
+    // Note: |l| may be a reference to a piece of |u|, so this function
+    // must take care not to use |l| after moving |u|.
+    //
+    // Prefer to use putNewInfallible; this function does not check
+    // invariants.
+    template <typename... Args>
+    void putNewInfallibleInternal(const Lookup& l, Args&&... args)
+    {
+        MOZ_ASSERT(table);
+
+        HashNumber keyHash = prepareHash(l);
+        Entry* entry = &findFreeEntry(keyHash);
+        MOZ_ASSERT(entry);
+
+        if (entry->isRemoved()) {
+            METER(stats.addOverRemoved++);
+            removedCount--;
+            keyHash |= sCollisionBit;
+        }
+
+        entry->setLive(keyHash, std::forward<Args>(args)...);
+        entryCount++;
+#ifdef DEBUG
+        mutationCount++;
+#endif
+    }
+
+  public:
+    void clear()
+    {
+        Entry* end = table + capacity();
+        for (Entry* e = table; e < end; ++e)
+            e->clear();
+
+        removedCount = 0;
+        entryCount = 0;
+#ifdef DEBUG
+        mutationCount++;
+#endif
+    }
+
+    void clearAndShrink()
+    {
+        clear();
+        compactIfUnderloaded();
+    }
+
+    void finish()
+    {
+#ifdef DEBUG
+        MOZ_ASSERT(!mEntered);
+#endif
+        if (!table)
+            return;
+
+        destroyTable(*this, table, capacity());
+        table = nullptr;
+        gen++;
+        entryCount = 0;
+        removedCount = 0;
+#ifdef DEBUG
+        mutationCount++;
+#endif
+    }
+
+    Range all() const
+    {
+        MOZ_ASSERT(table);
+        return Range(*this, table, table + capacity());
+    }
+
+    bool empty() const
+    {
+        MOZ_ASSERT(table);
+        return !entryCount;
+    }
+
+    uint32_t count() const
+    {
+        MOZ_ASSERT(table);
+        return entryCount;
+    }
+
+    uint32_t capacity() const
+    {
+        MOZ_ASSERT(table);
+        return 1u << (kHashNumberBits - hashShift);
+    }
+
+    Generation generation() const
+    {
+        MOZ_ASSERT(table);
+        return Generation(gen);
+    }
+
+    size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+    {
+        return mallocSizeOf(table);
+    }
+
+    size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const
+    {
+        return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+    }
+
+    MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const
+    {
+        ReentrancyGuard g(*this);
+        if (!HasHash<HashPolicy>(l))
+            return Ptr();
+        HashNumber keyHash = prepareHash(l);
+        return Ptr(lookup(l, keyHash, 0), *this);
+    }
+
+    MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const
+    {
+        if (!HasHash<HashPolicy>(l))
+            return Ptr();
+        HashNumber keyHash = prepareHash(l);
+        return Ptr(lookup(l, keyHash, 0), *this);
+    }
+
+    MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const
+    {
+        ReentrancyGuard g(*this);
+        if (!EnsureHash<HashPolicy>(l))
+            return AddPtr();
+        HashNumber keyHash = prepareHash(l);
+        // Directly call the constructor in the return statement to avoid
+        // excess copying when building with Visual Studio 2017.
+        // See bug 1385181.
+        return AddPtr(lookup(l, keyHash, sCollisionBit), *this, keyHash);
+    }
+
+    template <typename... Args>
+    MOZ_MUST_USE bool add(AddPtr& p, Args&&... args)
+    {
+        ReentrancyGuard g(*this);
+        MOZ_ASSERT(table);
+        MOZ_ASSERT_IF(p.isValid(), p.table_ == this);
+        MOZ_ASSERT(!p.found());
+        MOZ_ASSERT(!(p.keyHash & sCollisionBit));
+
+        // Check for error from ensureHash() here.
+        if (!p.isValid())
+            return false;
+
+        MOZ_ASSERT(p.generation == generation());
+#ifdef DEBUG
+        MOZ_ASSERT(p.mutationCount == mutationCount);
+#endif
+
+        // Changing an entry from removed to live does not affect whether we
+        // are overloaded and can be handled separately.
+        if (p.entry_->isRemoved()) {
+            if (!this->checkSimulatedOOM())
+                return false;
+            METER(stats.addOverRemoved++);
+            removedCount--;
+            p.keyHash |= sCollisionBit;
+        } else {
+            // Preserve the validity of |p.entry_|.
+            RebuildStatus status = checkOverloaded();
+            if (status == RehashFailed)
+                return false;
+            if (status == NotOverloaded && !this->checkSimulatedOOM())
+                return false;
+            if (status == Rehashed)
+                p.entry_ = &findFreeEntry(p.keyHash);
+        }
+
+        p.entry_->setLive(p.keyHash, std::forward<Args>(args)...);
+        entryCount++;
+#ifdef DEBUG
+        mutationCount++;
+        p.generation = generation();
+        p.mutationCount = mutationCount;
+#endif
+        return true;
+    }
+
+    // Note: |l| may be a reference to a piece of |u|, so this function
+    // must take care not to use |l| after moving |u|.
+    template <typename... Args>
+    void putNewInfallible(const Lookup& l, Args&&... args)
+    {
+        MOZ_ASSERT(!lookup(l).found());
+        ReentrancyGuard g(*this);
+        putNewInfallibleInternal(l, std::forward<Args>(args)...);
+    }
+
+    // Note: |l| may alias arguments in |args|, so this function must take
+    // care not to use |l| after moving |args|.
+    template <typename... Args>
+    MOZ_MUST_USE bool putNew(const Lookup& l, Args&&... args)
+    {
+        if (!this->checkSimulatedOOM())
+            return false;
+
+        if (!EnsureHash<HashPolicy>(l))
+            return false;
+
+        if (checkOverloaded() == RehashFailed)
+            return false;
+
+        putNewInfallible(l, std::forward<Args>(args)...);
+        return true;
+    }
+
+    // Note: |l| may be a reference to a piece of |u|, so this function
+    // must take care not to use |l| after moving |u|.
+    template <typename... Args>
+    MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, Args&&... args)
+    {
+        // Check for error from ensureHash() here.
+        if (!p.isValid())
+            return false;
+
+#ifdef DEBUG
+        p.generation = generation();
+        p.mutationCount = mutationCount;
+#endif
+        {
+            ReentrancyGuard g(*this);
+            MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed
+            p.entry_ = &lookup(l, p.keyHash, sCollisionBit);
+        }
+        return p.found() || add(p, std::forward<Args>(args)...);
+    }
+
+    void remove(Ptr p)
+    {
+        MOZ_ASSERT(table);
+        ReentrancyGuard g(*this);
+        MOZ_ASSERT(p.found());
+        MOZ_ASSERT(p.generation == generation());
+        remove(*p.entry_);
+        checkUnderloaded();
+    }
+
+    void rekeyWithoutRehash(Ptr p, const Lookup& l, const Key& k)
+    {
+        MOZ_ASSERT(table);
+        ReentrancyGuard g(*this);
+        MOZ_ASSERT(p.found());
+        MOZ_ASSERT(p.generation == generation());
+        typename HashTableEntry<T>::NonConstT t(std::move(*p));
+        HashPolicy::setKey(t, const_cast<Key&>(k));
+        remove(*p.entry_);
+        putNewInfallibleInternal(l, std::move(t));
+    }
+
+    void rekeyAndMaybeRehash(Ptr p, const Lookup& l, const Key& k)
+    {
+        rekeyWithoutRehash(p, l, k);
+        checkOverRemoved();
+    }
+
+#undef METER
+};
+
+} // namespace detail
+} // namespace mozilla
+
+#endif /* mozilla_HashTable_h */
diff --git a/mfbt/moz.build b/mfbt/moz.build
index 3cf490a896c5f..108cc13483633 100644
--- a/mfbt/moz.build
+++ b/mfbt/moz.build
@@ -44,6 +44,7 @@ EXPORTS.mozilla = [
     'FStream.h',
     'GuardObjects.h',
     'HashFunctions.h',
+    'HashTable.h',
    'IntegerPrintfMacros.h',
    'IntegerRange.h',
    'IntegerTypeTraits.h',