File: | var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h |
Warning: | line 1191, column 38 Dereference of null pointer (loaded from field 'mKeyHash') |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ | |||
2 | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ | |||
3 | /* This Source Code Form is subject to the terms of the Mozilla Public | |||
4 | * License, v. 2.0. If a copy of the MPL was not distributed with this | |||
5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ | |||
6 | ||||
7 | #include "memory_hooks.h" | |||
8 | ||||
9 | #include "nscore.h" | |||
10 | ||||
11 | #include "mozilla/Assertions.h" | |||
12 | #include "mozilla/Atomics.h" | |||
13 | #include "mozilla/FastBernoulliTrial.h" | |||
14 | #include "mozilla/IntegerPrintfMacros.h" | |||
15 | #include "mozilla/JSONWriter.h" | |||
16 | #include "mozilla/MemoryReporting.h" | |||
17 | #include "mozilla/PlatformMutex.h" | |||
18 | #include "mozilla/ProfilerCounts.h" | |||
19 | #include "mozilla/ThreadLocal.h" | |||
20 | #include "mozilla/ThreadSafety.h" | |||
21 | ||||
22 | #include "GeckoProfiler.h" | |||
23 | #include "prenv.h" | |||
24 | #include "replace_malloc.h" | |||
25 | ||||
26 | #include <ctype.h> | |||
27 | #include <errno(*__errno_location ()).h> | |||
28 | #include <limits.h> | |||
29 | #include <stdarg.h> | |||
30 | #include <stdio.h> | |||
31 | #include <stdlib.h> | |||
32 | #include <string.h> | |||
33 | ||||
34 | #ifdef XP_WIN | |||
35 | # include <windows.h> | |||
36 | # include <process.h> | |||
37 | #else | |||
38 | # include <pthread.h> | |||
39 | # include <sys/types.h> | |||
40 | # include <unistd.h> | |||
41 | #endif | |||
42 | ||||
43 | #ifdef ANDROID | |||
44 | # include <android/log.h> | |||
45 | #endif | |||
46 | ||||
47 | // The counters start out as a nullptr, and then get initialized only once. They | |||
48 | // are never destroyed, as it would cause race conditions for the memory hooks | |||
49 | // that use the counters. This helps guard against potentially expensive | |||
50 | // operations like using a mutex. | |||
51 | // | |||
52 | // In addition, this is a raw pointer and not a UniquePtr, as the counter | |||
53 | // machinery will try and de-register itself from the profiler. This could | |||
54 | // happen after the profiler and its PSMutex was already destroyed, resulting in | |||
55 | // a crash. | |||
56 | static ProfilerCounterTotal* sCounter; | |||
57 | ||||
58 | // The gBernoulli value starts out as a nullptr, and only gets initialized once. | |||
59 | // It then lives for the entire lifetime of the process. It cannot be deleted | |||
60 | // without additional multi-threaded protections, since if we deleted it during | |||
61 | // profiler_stop then there could be a race between threads already in a | |||
62 | // memory hook that might try to access the value after or during deletion. | |||
63 | static mozilla::FastBernoulliTrial* gBernoulli; | |||
64 | ||||
65 | namespace mozilla::profiler { | |||
66 | ||||
67 | //--------------------------------------------------------------------------- | |||
68 | // Utilities | |||
69 | //--------------------------------------------------------------------------- | |||
70 | ||||
71 | // Returns true or or false depending on whether the marker was actually added | |||
72 | // or not. | |||
73 | static bool profiler_add_native_allocation_marker(int64_t aSize, | |||
74 | uintptr_t aMemoryAddress) { | |||
75 | if (!profiler_thread_is_being_profiled_for_markers( | |||
76 | profiler_main_thread_id())) { | |||
77 | return false; | |||
78 | } | |||
79 | ||||
80 | // Because native allocations may be intercepted anywhere, blocking while | |||
81 | // locking the profiler mutex here could end up causing a deadlock if another | |||
82 | // mutex is taken, which the profiler may indirectly need elsewhere. | |||
83 | // See bug 1642726 for such a scenario. | |||
84 | // So instead we bail out if the mutex is already locked. Native allocations | |||
85 | // are statistically sampled anyway, so missing a few because of this is | |||
86 | // acceptable. | |||
87 | if (profiler_is_locked_on_current_thread()) { | |||
88 | return false; | |||
89 | } | |||
90 | ||||
91 | struct NativeAllocationMarker { | |||
92 | static constexpr mozilla::Span<const char> MarkerTypeName() { | |||
93 | return mozilla::MakeStringSpan("Native allocation"); | |||
94 | } | |||
95 | static void StreamJSONMarkerData( | |||
96 | mozilla::baseprofiler::SpliceableJSONWriter& aWriter, int64_t aSize, | |||
97 | uintptr_t aMemoryAddress, ProfilerThreadId aThreadId) { | |||
98 | aWriter.IntProperty("size", aSize); | |||
99 | aWriter.IntProperty("memoryAddress", | |||
100 | static_cast<int64_t>(aMemoryAddress)); | |||
101 | // Tech note: If `ToNumber()` returns a uint64_t, the conversion to | |||
102 | // int64_t is "implementation-defined" before C++20. This is acceptable | |||
103 | // here, because this is a one-way conversion to a unique identifier | |||
104 | // that's used to visually separate data by thread on the front-end. | |||
105 | aWriter.IntProperty("threadId", | |||
106 | static_cast<int64_t>(aThreadId.ToNumber())); | |||
107 | } | |||
108 | static mozilla::MarkerSchema MarkerTypeDisplay() { | |||
109 | return mozilla::MarkerSchema::SpecialFrontendLocation{}; | |||
110 | } | |||
111 | }; | |||
112 | ||||
113 | profiler_add_marker("Native allocation", geckoprofiler::category::OTHER,do { if (profiler_is_collecting_markers()) { ::profiler_add_marker_impl ("Native allocation", geckoprofiler::category::OTHER, {MarkerThreadId ::MainThread(), MarkerStack::Capture()}, NativeAllocationMarker {}, aSize, aMemoryAddress, profiler_current_thread_id()); } } while (false) | |||
114 | {MarkerThreadId::MainThread(), MarkerStack::Capture()},do { if (profiler_is_collecting_markers()) { ::profiler_add_marker_impl ("Native allocation", geckoprofiler::category::OTHER, {MarkerThreadId ::MainThread(), MarkerStack::Capture()}, NativeAllocationMarker {}, aSize, aMemoryAddress, profiler_current_thread_id()); } } while (false) | |||
115 | NativeAllocationMarker{}, aSize, aMemoryAddress,do { if (profiler_is_collecting_markers()) { ::profiler_add_marker_impl ("Native allocation", geckoprofiler::category::OTHER, {MarkerThreadId ::MainThread(), MarkerStack::Capture()}, NativeAllocationMarker {}, aSize, aMemoryAddress, profiler_current_thread_id()); } } while (false) | |||
116 | profiler_current_thread_id())do { if (profiler_is_collecting_markers()) { ::profiler_add_marker_impl ("Native allocation", geckoprofiler::category::OTHER, {MarkerThreadId ::MainThread(), MarkerStack::Capture()}, NativeAllocationMarker {}, aSize, aMemoryAddress, profiler_current_thread_id()); } } while (false); | |||
117 | return true; | |||
118 | } | |||
119 | ||||
120 | static malloc_table_t gMallocTable; | |||
121 | ||||
122 | // This is only needed because of the |const void*| vs |void*| arg mismatch. | |||
123 | static size_t MallocSizeOf(const void* aPtr) { | |||
124 | return gMallocTable.malloc_usable_size(const_cast<void*>(aPtr)); | |||
125 | } | |||
126 | ||||
127 | // The values for the Bernoulli trial are taken from DMD. According to DMD: | |||
128 | // | |||
129 | // In testing, a probability of 0.003 resulted in ~25% of heap blocks getting | |||
130 | // a stack trace and ~80% of heap bytes getting a stack trace. (This is | |||
131 | // possible because big heap blocks are more likely to get a stack trace.) | |||
132 | // | |||
133 | // The random number seeds are arbitrary and were obtained from random.org. | |||
134 | // | |||
135 | // However this value resulted in a lot of slowdown since the profiler stacks | |||
136 | // are pretty heavy to collect. The value was lowered to 10% of the original to | |||
137 | // 0.0003. | |||
138 | static void EnsureBernoulliIsInstalled() { | |||
139 | if (!gBernoulli) { | |||
140 | // This is only installed once. See the gBernoulli definition for more | |||
141 | // information. | |||
142 | gBernoulli = | |||
143 | new FastBernoulliTrial(0.0003, 0x8e26eeee166bc8ca, 0x56820f304a9c9ae0); | |||
144 | } | |||
145 | } | |||
146 | ||||
147 | // This class provides infallible allocations (they abort on OOM) like | |||
148 | // mozalloc's InfallibleAllocPolicy, except that memory hooks are bypassed. This | |||
149 | // policy is used by the HashSet. | |||
150 | class InfallibleAllocWithoutHooksPolicy { | |||
151 | static void ExitOnFailure(const void* aP) { | |||
152 | if (!aP) { | |||
153 | MOZ_CRASH("Profiler memory hooks out of memory; aborting")do { do { } while (false); MOZ_ReportCrash("" "Profiler memory hooks out of memory; aborting" , "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 153); AnnotateMozCrashReason("MOZ_CRASH(" "Profiler memory hooks out of memory; aborting" ")"); do { *((volatile int*)__null) = 153; __attribute__((nomerge )) ::abort(); } while (false); } while (false); | |||
154 | } | |||
155 | } | |||
156 | ||||
157 | public: | |||
158 | template <typename T> | |||
159 | static T* maybe_pod_malloc(size_t aNumElems) { | |||
160 | if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) { | |||
161 | return nullptr; | |||
162 | } | |||
163 | return (T*)gMallocTable.malloc(aNumElems * sizeof(T)); | |||
164 | } | |||
165 | ||||
166 | template <typename T> | |||
167 | static T* maybe_pod_calloc(size_t aNumElems) { | |||
168 | return (T*)gMallocTable.calloc(aNumElems, sizeof(T)); | |||
169 | } | |||
170 | ||||
171 | template <typename T> | |||
172 | static T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) { | |||
173 | if (aNewSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value) { | |||
174 | return nullptr; | |||
175 | } | |||
176 | return (T*)gMallocTable.realloc(aPtr, aNewSize * sizeof(T)); | |||
177 | } | |||
178 | ||||
179 | template <typename T> | |||
180 | static T* pod_malloc(size_t aNumElems) { | |||
181 | T* p = maybe_pod_malloc<T>(aNumElems); | |||
182 | ExitOnFailure(p); | |||
183 | return p; | |||
184 | } | |||
185 | ||||
186 | template <typename T> | |||
187 | static T* pod_calloc(size_t aNumElems) { | |||
188 | T* p = maybe_pod_calloc<T>(aNumElems); | |||
189 | ExitOnFailure(p); | |||
190 | return p; | |||
191 | } | |||
192 | ||||
193 | template <typename T> | |||
194 | static T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) { | |||
195 | T* p = maybe_pod_realloc(aPtr, aOldSize, aNewSize); | |||
196 | ExitOnFailure(p); | |||
197 | return p; | |||
198 | } | |||
199 | ||||
200 | template <typename T> | |||
201 | static void free_(T* aPtr, size_t aSize = 0) { | |||
202 | gMallocTable.free(aPtr); | |||
203 | } | |||
204 | ||||
205 | static void reportAllocOverflow() { ExitOnFailure(nullptr); } | |||
206 | bool checkSimulatedOOM() const { return true; } | |||
207 | }; | |||
208 | ||||
209 | // We can't use mozilla::Mutex because it causes re-entry into the memory hooks. | |||
210 | // Define a custom implementation here. | |||
211 | class MOZ_CAPABILITY("mutex")__attribute__((capability("mutex"))) Mutex : private ::mozilla::detail::MutexImpl { | |||
212 | public: | |||
213 | Mutex() = default; | |||
214 | ||||
215 | void Lock() MOZ_CAPABILITY_ACQUIRE()__attribute__((exclusive_lock_function())) { ::mozilla::detail::MutexImpl::lock(); } | |||
216 | void Unlock() MOZ_CAPABILITY_RELEASE()__attribute__((unlock_function())) { | |||
217 | ::mozilla::detail::MutexImpl::unlock(); | |||
218 | } | |||
219 | }; | |||
220 | ||||
221 | class MOZ_SCOPED_CAPABILITY__attribute__((scoped_lockable)) MutexAutoLock { | |||
222 | MutexAutoLock(const MutexAutoLock&) = delete; | |||
223 | void operator=(const MutexAutoLock&) = delete; | |||
224 | ||||
225 | Mutex& mMutex; | |||
226 | ||||
227 | public: | |||
228 | explicit MutexAutoLock(Mutex& aMutex) MOZ_CAPABILITY_ACQUIRE(aMutex)__attribute__((exclusive_lock_function(aMutex))) | |||
229 | : mMutex(aMutex) { | |||
230 | mMutex.Lock(); | |||
231 | } | |||
232 | ~MutexAutoLock() MOZ_CAPABILITY_RELEASE()__attribute__((unlock_function())) { mMutex.Unlock(); } | |||
233 | }; | |||
234 | ||||
235 | //--------------------------------------------------------------------------- | |||
236 | // Tracked allocations | |||
237 | //--------------------------------------------------------------------------- | |||
238 | ||||
239 | // The allocation tracker is shared between multiple threads, and is the | |||
240 | // coordinator for knowing when allocations have been tracked. The mutable | |||
241 | // internal state is protected by a mutex, and managed by the methods. | |||
242 | // | |||
243 | // The tracker knows about all the allocations that we have added to the | |||
244 | // profiler. This way, whenever any given piece of memory is freed, we can see | |||
245 | // if it was previously tracked, and we can track its deallocation. | |||
246 | ||||
247 | class AllocationTracker { | |||
248 | // This type tracks all of the allocations that we have captured. This way, we | |||
249 | // can see if a deallocation is inside of this set. We want to provide a | |||
250 | // balanced view into the allocations and deallocations. | |||
251 | typedef mozilla::HashSet<const void*, mozilla::DefaultHasher<const void*>, | |||
252 | InfallibleAllocWithoutHooksPolicy> | |||
253 | AllocationSet; | |||
254 | ||||
255 | public: | |||
256 | AllocationTracker() = default; | |||
257 | ||||
258 | void AddMemoryAddress(const void* memoryAddress) { | |||
259 | MutexAutoLock lock(mMutex); | |||
260 | if (!mAllocations.put(memoryAddress)) { | |||
261 | MOZ_CRASH("Out of memory while tracking native allocations.")do { do { } while (false); MOZ_ReportCrash("" "Out of memory while tracking native allocations." , "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 261); AnnotateMozCrashReason("MOZ_CRASH(" "Out of memory while tracking native allocations." ")"); do { *((volatile int*)__null) = 261; __attribute__((nomerge )) ::abort(); } while (false); } while (false); | |||
262 | }; | |||
263 | } | |||
264 | ||||
265 | void Reset() { | |||
266 | MutexAutoLock lock(mMutex); | |||
267 | mAllocations.clearAndCompact(); | |||
268 | } | |||
269 | ||||
270 | // Returns true when the memory address is found and removed, otherwise that | |||
271 | // memory address is not being tracked and it returns false. | |||
272 | bool RemoveMemoryAddressIfFound(const void* memoryAddress) { | |||
273 | MutexAutoLock lock(mMutex); | |||
274 | ||||
275 | auto ptr = mAllocations.lookup(memoryAddress); | |||
276 | if (ptr) { | |||
277 | // The memory was present. It no longer needs to be tracked. | |||
278 | mAllocations.remove(ptr); | |||
279 | return true; | |||
280 | } | |||
281 | ||||
282 | return false; | |||
283 | } | |||
284 | ||||
285 | private: | |||
286 | AllocationSet mAllocations; | |||
287 | Mutex mMutex MOZ_UNANNOTATED; | |||
288 | }; | |||
289 | ||||
290 | static AllocationTracker* gAllocationTracker; | |||
291 | ||||
292 | static void EnsureAllocationTrackerIsInstalled() { | |||
293 | if (!gAllocationTracker) { | |||
294 | // This is only installed once. | |||
295 | gAllocationTracker = new AllocationTracker(); | |||
296 | } | |||
297 | } | |||
298 | ||||
299 | //--------------------------------------------------------------------------- | |||
300 | // Per-thread blocking of intercepts | |||
301 | //--------------------------------------------------------------------------- | |||
302 | ||||
303 | // On MacOS, and Linux the first __thread/thread_local access calls malloc, | |||
304 | // which leads to an infinite loop. So we use pthread-based TLS instead, which | |||
305 | // doesn't have this problem as long as the TLS key is registered early. | |||
306 | // | |||
307 | // This is a little different from the TLS storage used with mozjemalloc which | |||
308 | // uses native TLS on Linux possibly because it is not only initialised but | |||
309 | // **used** early. | |||
310 | #if !defined(XP_DARWIN) && !defined(XP_LINUX1) | |||
311 | # define PROFILER_THREAD_LOCAL(T)::mozilla::detail::ThreadLocal<T, ::mozilla::detail::ThreadLocalKeyStorage > MOZ_THREAD_LOCAL(T)__thread ::mozilla::detail::ThreadLocal< T, ::mozilla::detail ::ThreadLocalNativeStorage> | |||
312 | #else | |||
313 | # define PROFILER_THREAD_LOCAL(T)::mozilla::detail::ThreadLocal<T, ::mozilla::detail::ThreadLocalKeyStorage > \ | |||
314 | ::mozilla::detail::ThreadLocal<T, ::mozilla::detail::ThreadLocalKeyStorage> | |||
315 | #endif | |||
316 | ||||
317 | // This class is used to determine if allocations on this thread should be | |||
318 | // intercepted or not. | |||
319 | // Creating a ThreadIntercept object on the stack will implicitly block nested | |||
320 | // ones. There are other reasons to block: The feature is off, or we're inside a | |||
321 | // profiler function that is locking a mutex. | |||
322 | class MOZ_RAII ThreadIntercept { | |||
323 | // When set to true, malloc does not intercept additional allocations. This is | |||
324 | // needed because collecting stacks creates new allocations. When blocked, | |||
325 | // these allocations are then ignored by the memory hook. | |||
326 | static PROFILER_THREAD_LOCAL(bool)::mozilla::detail::ThreadLocal<bool, ::mozilla::detail::ThreadLocalKeyStorage > tlsIsBlocked; | |||
327 | ||||
328 | // This is a quick flag to check and see if the allocations feature is enabled | |||
329 | // or disabled. | |||
330 | static mozilla::Atomic<bool, mozilla::Relaxed> sAllocationsFeatureEnabled; | |||
331 | ||||
332 | // True if this ThreadIntercept has set tlsIsBlocked. | |||
333 | bool mIsBlockingTLS; | |||
334 | ||||
335 | // True if interception is blocked for any reason. | |||
336 | bool mIsBlocked; | |||
337 | ||||
338 | public: | |||
339 | static void Init() { | |||
340 | tlsIsBlocked.infallibleInit(); | |||
341 | // infallibleInit should zero-initialize, which corresponds to `false`. | |||
342 | MOZ_ASSERT(!tlsIsBlocked.get())do { static_assert( mozilla::detail::AssertionConditionType< decltype(!tlsIsBlocked.get())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!tlsIsBlocked.get()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("!tlsIsBlocked.get()" , "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 342); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!tlsIsBlocked.get()" ")"); do { *((volatile int*)__null) = 342; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
343 | } | |||
344 | ||||
345 | ThreadIntercept() { | |||
346 | // If the allocation interception feature is enabled, and the TLS is not | |||
347 | // blocked yet, we will block the TLS now, and unblock on destruction. | |||
348 | mIsBlockingTLS = sAllocationsFeatureEnabled && !tlsIsBlocked.get(); | |||
349 | if (mIsBlockingTLS) { | |||
350 | MOZ_ASSERT(!tlsIsBlocked.get())do { static_assert( mozilla::detail::AssertionConditionType< decltype(!tlsIsBlocked.get())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!tlsIsBlocked.get()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("!tlsIsBlocked.get()" , "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 350); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!tlsIsBlocked.get()" ")"); do { *((volatile int*)__null) = 350; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
351 | tlsIsBlocked.set(true); | |||
352 | // Since this is the top-level ThreadIntercept, interceptions are not | |||
353 | // blocked unless the profiler itself holds a locked mutex, in which case | |||
354 | // we don't want to intercept allocations that originate from such a | |||
355 | // profiler call. | |||
356 | mIsBlocked = profiler_is_locked_on_current_thread(); | |||
357 | } else { | |||
358 | // The feature is off, or the TLS was already blocked, then we block this | |||
359 | // interception. | |||
360 | mIsBlocked = true; | |||
361 | } | |||
362 | } | |||
363 | ||||
364 | ~ThreadIntercept() { | |||
365 | if (mIsBlockingTLS) { | |||
366 | MOZ_ASSERT(tlsIsBlocked.get())do { static_assert( mozilla::detail::AssertionConditionType< decltype(tlsIsBlocked.get())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(tlsIsBlocked.get()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("tlsIsBlocked.get()" , "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 366); AnnotateMozCrashReason("MOZ_ASSERT" "(" "tlsIsBlocked.get()" ")"); do { *((volatile int*)__null) = 366; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
367 | tlsIsBlocked.set(false); | |||
368 | } | |||
369 | } | |||
370 | ||||
371 | // Is this ThreadIntercept effectively blocked? (Feature is off, or this | |||
372 | // ThreadIntercept is nested, or we're inside a locked-Profiler function.) | |||
373 | bool IsBlocked() const { return mIsBlocked; } | |||
374 | ||||
375 | static void EnableAllocationFeature() { sAllocationsFeatureEnabled = true; } | |||
376 | ||||
377 | static void DisableAllocationFeature() { sAllocationsFeatureEnabled = false; } | |||
378 | }; | |||
379 | ||||
380 | PROFILER_THREAD_LOCAL(bool)::mozilla::detail::ThreadLocal<bool, ::mozilla::detail::ThreadLocalKeyStorage > ThreadIntercept::tlsIsBlocked; | |||
381 | ||||
382 | mozilla::Atomic<bool, mozilla::Relaxed> | |||
383 | ThreadIntercept::sAllocationsFeatureEnabled(false); | |||
384 | ||||
385 | //--------------------------------------------------------------------------- | |||
386 | // malloc/free callbacks | |||
387 | //--------------------------------------------------------------------------- | |||
388 | ||||
389 | static void AllocCallback(void* aPtr, size_t aReqSize) { | |||
390 | if (!aPtr) { | |||
391 | return; | |||
392 | } | |||
393 | ||||
394 | // The first part of this function does not allocate. | |||
395 | size_t actualSize = gMallocTable.malloc_usable_size(aPtr); | |||
396 | if (actualSize > 0) { | |||
397 | sCounter->Add(actualSize); | |||
398 | } | |||
399 | ||||
400 | ThreadIntercept threadIntercept; | |||
401 | if (threadIntercept.IsBlocked()) { | |||
402 | // Either the native allocations feature is not turned on, or we may be | |||
403 | // recursing into a memory hook, return. We'll still collect counter | |||
404 | // information about this allocation, but no stack. | |||
405 | return; | |||
406 | } | |||
407 | ||||
408 | AUTO_PROFILER_LABEL("AllocCallback", PROFILER)mozilla::AutoProfilerLabel raiiObject408( "AllocCallback", nullptr , JS::ProfilingCategoryPair::PROFILER); | |||
409 | ||||
410 | // Perform a bernoulli trial, which will return true or false based on its | |||
411 | // configured probability. It takes into account the byte size so that | |||
412 | // larger allocations are weighted heavier than smaller allocations. | |||
413 | MOZ_ASSERT(gBernoulli,do { static_assert( mozilla::detail::AssertionConditionType< decltype(gBernoulli)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(gBernoulli))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("gBernoulli" " (" "gBernoulli must be properly installed for the memory hooks." ")", "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 414); AnnotateMozCrashReason("MOZ_ASSERT" "(" "gBernoulli" ") (" "gBernoulli must be properly installed for the memory hooks." ")"); do { *((volatile int*)__null) = 414; __attribute__((nomerge )) ::abort(); } while (false); } } while (false) | |||
414 | "gBernoulli must be properly installed for the memory hooks.")do { static_assert( mozilla::detail::AssertionConditionType< decltype(gBernoulli)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(gBernoulli))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("gBernoulli" " (" "gBernoulli must be properly installed for the memory hooks." ")", "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 414); AnnotateMozCrashReason("MOZ_ASSERT" "(" "gBernoulli" ") (" "gBernoulli must be properly installed for the memory hooks." ")"); do { *((volatile int*)__null) = 414; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
415 | if ( | |||
416 | // First perform the Bernoulli trial. | |||
417 | gBernoulli->trial(actualSize) && | |||
418 | // Second, attempt to add a marker if the Bernoulli trial passed. | |||
419 | profiler_add_native_allocation_marker( | |||
420 | static_cast<int64_t>(actualSize), | |||
421 | reinterpret_cast<uintptr_t>(aPtr))) { | |||
422 | MOZ_ASSERT(gAllocationTracker,do { static_assert( mozilla::detail::AssertionConditionType< decltype(gAllocationTracker)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(gAllocationTracker))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("gAllocationTracker" " (" "gAllocationTracker must be properly installed for the memory " "hooks." ")", "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 424); AnnotateMozCrashReason("MOZ_ASSERT" "(" "gAllocationTracker" ") (" "gAllocationTracker must be properly installed for the memory " "hooks." ")"); do { *((volatile int*)__null) = 424; __attribute__ ((nomerge)) ::abort(); } while (false); } } while (false) | |||
423 | "gAllocationTracker must be properly installed for the memory "do { static_assert( mozilla::detail::AssertionConditionType< decltype(gAllocationTracker)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(gAllocationTracker))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("gAllocationTracker" " (" "gAllocationTracker must be properly installed for the memory " "hooks." ")", "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 424); AnnotateMozCrashReason("MOZ_ASSERT" "(" "gAllocationTracker" ") (" "gAllocationTracker must be properly installed for the memory " "hooks." ")"); do { *((volatile int*)__null) = 424; __attribute__ ((nomerge)) ::abort(); } while (false); } } while (false) | |||
424 | "hooks.")do { static_assert( mozilla::detail::AssertionConditionType< decltype(gAllocationTracker)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(gAllocationTracker))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("gAllocationTracker" " (" "gAllocationTracker must be properly installed for the memory " "hooks." ")", "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 424); AnnotateMozCrashReason("MOZ_ASSERT" "(" "gAllocationTracker" ") (" "gAllocationTracker must be properly installed for the memory " "hooks." ")"); do { *((volatile int*)__null) = 424; __attribute__ ((nomerge)) ::abort(); } while (false); } } while (false); | |||
425 | // Only track the memory if the allocation marker was actually added to the | |||
426 | // profiler. | |||
427 | gAllocationTracker->AddMemoryAddress(aPtr); | |||
428 | } | |||
429 | ||||
430 | // We're ignoring aReqSize here | |||
431 | } | |||
432 | ||||
433 | static void FreeCallback(void* aPtr) { | |||
434 | if (!aPtr) { | |||
435 | return; | |||
436 | } | |||
437 | ||||
438 | // The first part of this function does not allocate. | |||
439 | size_t unsignedSize = MallocSizeOf(aPtr); | |||
440 | int64_t signedSize = -(static_cast<int64_t>(unsignedSize)); | |||
441 | sCounter->Add(signedSize); | |||
442 | ||||
443 | ThreadIntercept threadIntercept; | |||
444 | if (threadIntercept.IsBlocked()) { | |||
445 | // Either the native allocations feature is not turned on, or we may be | |||
446 | // recursing into a memory hook, return. We'll still collect counter | |||
447 | // information about this allocation, but no stack. | |||
448 | return; | |||
449 | } | |||
450 | ||||
451 | AUTO_PROFILER_LABEL("FreeCallback", PROFILER)mozilla::AutoProfilerLabel raiiObject451( "FreeCallback", nullptr , JS::ProfilingCategoryPair::PROFILER); | |||
452 | ||||
453 | // Perform a bernoulli trial, which will return true or false based on its | |||
454 | // configured probability. It takes into account the byte size so that | |||
455 | // larger allocations are weighted heavier than smaller allocations. | |||
456 | MOZ_ASSERT(do { static_assert( mozilla::detail::AssertionConditionType< decltype(gAllocationTracker)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(gAllocationTracker))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("gAllocationTracker" " (" "gAllocationTracker must be properly installed for the memory hooks." ")", "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 458); AnnotateMozCrashReason("MOZ_ASSERT" "(" "gAllocationTracker" ") (" "gAllocationTracker must be properly installed for the memory hooks." ")"); do { *((volatile int*)__null) = 458; __attribute__((nomerge )) ::abort(); } while (false); } } while (false) | |||
457 | gAllocationTracker,do { static_assert( mozilla::detail::AssertionConditionType< decltype(gAllocationTracker)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(gAllocationTracker))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("gAllocationTracker" " (" "gAllocationTracker must be properly installed for the memory hooks." ")", "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 458); AnnotateMozCrashReason("MOZ_ASSERT" "(" "gAllocationTracker" ") (" "gAllocationTracker must be properly installed for the memory hooks." ")"); do { *((volatile int*)__null) = 458; __attribute__((nomerge )) ::abort(); } while (false); } } while (false) | |||
458 | "gAllocationTracker must be properly installed for the memory hooks.")do { static_assert( mozilla::detail::AssertionConditionType< decltype(gAllocationTracker)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(gAllocationTracker))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("gAllocationTracker" " (" "gAllocationTracker must be properly installed for the memory hooks." ")", "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 458); AnnotateMozCrashReason("MOZ_ASSERT" "(" "gAllocationTracker" ") (" "gAllocationTracker must be properly installed for the memory hooks." ")"); do { *((volatile int*)__null) = 458; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
459 | if (gAllocationTracker->RemoveMemoryAddressIfFound(aPtr)) { | |||
460 | // This size here is negative, indicating a deallocation. | |||
461 | profiler_add_native_allocation_marker(signedSize, | |||
462 | reinterpret_cast<uintptr_t>(aPtr)); | |||
463 | } | |||
464 | } | |||
465 | ||||
466 | } // namespace mozilla::profiler | |||
467 | ||||
468 | //--------------------------------------------------------------------------- | |||
469 | // malloc/free interception | |||
470 | //--------------------------------------------------------------------------- | |||
471 | ||||
472 | using namespace mozilla::profiler; | |||
473 | ||||
474 | static void* replace_malloc(size_t aSize) { | |||
475 | // This must be a call to malloc from outside. Intercept it. | |||
476 | void* ptr = gMallocTable.malloc(aSize); | |||
477 | AllocCallback(ptr, aSize); | |||
478 | return ptr; | |||
479 | } | |||
480 | ||||
481 | static void* replace_calloc(size_t aCount, size_t aSize) { | |||
482 | void* ptr = gMallocTable.calloc(aCount, aSize); | |||
483 | AllocCallback(ptr, aCount * aSize); | |||
484 | return ptr; | |||
485 | } | |||
486 | ||||
487 | static void* replace_realloc(void* aOldPtr, size_t aSize) { | |||
488 | // If |aOldPtr| is nullptr, the call is equivalent to |malloc(aSize)|. | |||
489 | if (!aOldPtr) { | |||
490 | return replace_malloc(aSize); | |||
491 | } | |||
492 | ||||
493 | FreeCallback(aOldPtr); | |||
494 | void* ptr = gMallocTable.realloc(aOldPtr, aSize); | |||
495 | if (ptr) { | |||
496 | AllocCallback(ptr, aSize); | |||
497 | } else { | |||
498 | // If realloc fails, we undo the prior operations by re-inserting the old | |||
499 | // pointer into the live block table. We don't have to do anything with the | |||
500 | // dead block list because the dead block hasn't yet been inserted. The | |||
501 | // block will end up looking like it was allocated for the first time here, | |||
502 | // which is untrue, and the slop bytes will be zero, which may be untrue. | |||
503 | // But this case is rare and doing better isn't worth the effort. | |||
504 | AllocCallback(aOldPtr, gMallocTable.malloc_usable_size(aOldPtr)); | |||
505 | } | |||
506 | return ptr; | |||
507 | } | |||
508 | ||||
509 | static void* replace_memalign(size_t aAlignment, size_t aSize) { | |||
510 | void* ptr = gMallocTable.memalign(aAlignment, aSize); | |||
511 | AllocCallback(ptr, aSize); | |||
512 | return ptr; | |||
513 | } | |||
514 | ||||
515 | static void replace_free(void* aPtr) { | |||
516 | FreeCallback(aPtr); | |||
517 | gMallocTable.free(aPtr); | |||
518 | } | |||
519 | ||||
520 | static void* replace_moz_arena_malloc(arena_id_t aArena, size_t aSize) { | |||
521 | void* ptr = gMallocTable.moz_arena_malloc(aArena, aSize); | |||
522 | AllocCallback(ptr, aSize); | |||
523 | return ptr; | |||
524 | } | |||
525 | ||||
526 | static void* replace_moz_arena_calloc(arena_id_t aArena, size_t aCount, | |||
527 | size_t aSize) { | |||
528 | void* ptr = gMallocTable.moz_arena_calloc(aArena, aCount, aSize); | |||
529 | AllocCallback(ptr, aCount * aSize); | |||
530 | return ptr; | |||
531 | } | |||
532 | ||||
533 | static void* replace_moz_arena_realloc(arena_id_t aArena, void* aPtr, | |||
534 | size_t aSize) { | |||
535 | void* ptr = gMallocTable.moz_arena_realloc(aArena, aPtr, aSize); | |||
536 | AllocCallback(ptr, aSize); | |||
537 | return ptr; | |||
538 | } | |||
539 | ||||
540 | static void replace_moz_arena_free(arena_id_t aArena, void* aPtr) { | |||
541 | FreeCallback(aPtr); | |||
| ||||
542 | gMallocTable.moz_arena_free(aArena, aPtr); | |||
543 | } | |||
544 | ||||
545 | static void* replace_moz_arena_memalign(arena_id_t aArena, size_t aAlignment, | |||
546 | size_t aSize) { | |||
547 | void* ptr = gMallocTable.moz_arena_memalign(aArena, aAlignment, aSize); | |||
548 | AllocCallback(ptr, aSize); | |||
549 | return ptr; | |||
550 | } | |||
551 | ||||
552 | // we have to replace these or jemalloc will assume we don't implement any | |||
553 | // of the arena replacements! | |||
554 | static arena_id_t replace_moz_create_arena_with_params( | |||
555 | arena_params_t* aParams) { | |||
556 | return gMallocTable.moz_create_arena_with_params(aParams); | |||
557 | } | |||
558 | ||||
559 | static void replace_moz_dispose_arena(arena_id_t aArenaId) { | |||
560 | return gMallocTable.moz_dispose_arena(aArenaId); | |||
561 | } | |||
562 | ||||
563 | static void replace_moz_set_max_dirty_page_modifier(int32_t aModifier) { | |||
564 | return gMallocTable.moz_set_max_dirty_page_modifier(aModifier); | |||
565 | } | |||
566 | ||||
567 | // Must come after all the replace_* funcs | |||
568 | void replace_initprofiler_init(malloc_table_t* aMallocTable, ReplaceMallocBridge** aBridge) { | |||
569 | gMallocTable = *aMallocTable; | |||
570 | #define MALLOC_FUNCS (MALLOC_FUNCS_MALLOC_BASE1 | MALLOC_FUNCS_ARENA(8 | 16)) | |||
571 | #define MALLOC_DECL(name, ...) aMallocTable->name = replace_##name; | |||
572 | #include "malloc_decls.h" | |||
573 | } | |||
574 | ||||
575 | void profiler_replace_remove() {} | |||
576 | ||||
577 | namespace mozilla::profiler { | |||
578 | //--------------------------------------------------------------------------- | |||
579 | // Initialization | |||
580 | //--------------------------------------------------------------------------- | |||
581 | ||||
582 | BaseProfilerCount* install_memory_hooks() { | |||
583 | if (!sCounter) { | |||
584 | sCounter = new ProfilerCounterTotal("malloc", "Memory", | |||
585 | "Amount of allocated memory"); | |||
586 | } else { | |||
587 | sCounter->Clear(); | |||
588 | sCounter->Register(); | |||
589 | } | |||
590 | jemalloc_replace_dynamic(replace_initprofiler_init); | |||
591 | return sCounter; | |||
592 | } | |||
593 | ||||
594 | // Remove the hooks, but leave the sCounter machinery. Deleting the counter | |||
595 | // would race with any existing memory hooks that are currently running. Rather | |||
596 | // than adding overhead here of mutexes it's cheaper for the performance to just | |||
597 | // leak these values. | |||
598 | void remove_memory_hooks() { jemalloc_replace_dynamic(nullptr); } | |||
599 | ||||
600 | void enable_native_allocations() { | |||
601 | // The bloat log tracks allocations and deallocations. This can conflict | |||
602 | // with the memory hook machinery, as the bloat log creates its own | |||
603 | // allocations. This means we can re-enter inside the bloat log machinery. At | |||
604 | // this time, the bloat log does not know about cannot handle the native | |||
605 | // allocation feature. | |||
606 | // | |||
607 | // At the time of this writing, we hit this assertion: | |||
608 | // IsIdle(oldState) || IsRead(oldState) in Checker::StartReadOp() | |||
609 | // | |||
610 | // #01: GetBloatEntry(char const*, unsigned int) | |||
611 | // #02: NS_LogCtor | |||
612 | // #03: profiler_get_backtrace() | |||
613 | // #04: profiler_add_native_allocation_marker(long long) | |||
614 | // #05: mozilla::profiler::AllocCallback(void*, unsigned long) | |||
615 | // #06: replace_calloc(unsigned long, unsigned long) | |||
616 | // #07: PLDHashTable::ChangeTable(int) | |||
617 | // #08: PLDHashTable::Add(void const*, std::nothrow_t const&) | |||
618 | // #09: nsBaseHashtable<nsDepCharHashKey, nsAutoPtr<BloatEntry>, ... | |||
619 | // #10: GetBloatEntry(char const*, unsigned int) | |||
620 | // #11: NS_LogCtor | |||
621 | // #12: profiler_get_backtrace() | |||
622 | // ... | |||
623 | MOZ_ASSERT(!PR_GetEnv("XPCOM_MEM_BLOAT_LOG"),do { static_assert( mozilla::detail::AssertionConditionType< decltype(!PR_GetEnv("XPCOM_MEM_BLOAT_LOG"))>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!PR_GetEnv("XPCOM_MEM_BLOAT_LOG" )))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("!PR_GetEnv(\"XPCOM_MEM_BLOAT_LOG\")" " (" "The bloat log feature is not compatible with the native " "allocations instrumentation." ")", "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 625); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!PR_GetEnv(\"XPCOM_MEM_BLOAT_LOG\")" ") (" "The bloat log feature is not compatible with the native " "allocations instrumentation." ")"); do { *((volatile int*)__null ) = 625; __attribute__((nomerge)) ::abort(); } while (false); } } while (false) | |||
624 | "The bloat log feature is not compatible with the native "do { static_assert( mozilla::detail::AssertionConditionType< decltype(!PR_GetEnv("XPCOM_MEM_BLOAT_LOG"))>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!PR_GetEnv("XPCOM_MEM_BLOAT_LOG" )))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("!PR_GetEnv(\"XPCOM_MEM_BLOAT_LOG\")" " (" "The bloat log feature is not compatible with the native " "allocations instrumentation." ")", "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 625); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!PR_GetEnv(\"XPCOM_MEM_BLOAT_LOG\")" ") (" "The bloat log feature is not compatible with the native " "allocations instrumentation." ")"); do { *((volatile int*)__null ) = 625; __attribute__((nomerge)) ::abort(); } while (false); } } while (false) | |||
625 | "allocations instrumentation.")do { static_assert( mozilla::detail::AssertionConditionType< decltype(!PR_GetEnv("XPCOM_MEM_BLOAT_LOG"))>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!PR_GetEnv("XPCOM_MEM_BLOAT_LOG" )))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("!PR_GetEnv(\"XPCOM_MEM_BLOAT_LOG\")" " (" "The bloat log feature is not compatible with the native " "allocations instrumentation." ")", "/var/lib/jenkins/workspace/firefox-scan-build/tools/profiler/core/memory_hooks.cpp" , 625); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!PR_GetEnv(\"XPCOM_MEM_BLOAT_LOG\")" ") (" "The bloat log feature is not compatible with the native " "allocations instrumentation." ")"); do { *((volatile int*)__null ) = 625; __attribute__((nomerge)) ::abort(); } while (false); } } while (false); | |||
626 | ||||
627 | EnsureBernoulliIsInstalled(); | |||
628 | EnsureAllocationTrackerIsInstalled(); | |||
629 | ThreadIntercept::EnableAllocationFeature(); | |||
630 | } | |||
631 | ||||
632 | // This is safe to call even if native allocations hasn't been enabled. | |||
633 | void disable_native_allocations() { | |||
634 | ThreadIntercept::DisableAllocationFeature(); | |||
635 | if (gAllocationTracker) { | |||
636 | gAllocationTracker->Reset(); | |||
637 | } | |||
638 | } | |||
639 | ||||
640 | void unregister_memory_counter() { | |||
641 | if (sCounter) { | |||
642 | sCounter->Unregister(); | |||
643 | } | |||
644 | } | |||
645 | ||||
646 | void memory_hooks_tls_init() { | |||
647 | // Initialise the TLS early so that it is allocated with a lower key and on an | |||
648 | // earlier page in order to avoid allocation when setting the variable. | |||
649 | ThreadIntercept::Init(); | |||
650 | } | |||
651 | ||||
652 | } // namespace mozilla::profiler |
1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ | |||
2 | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ | |||
3 | /* This Source Code Form is subject to the terms of the Mozilla Public | |||
4 | * License, v. 2.0. If a copy of the MPL was not distributed with this | |||
5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ | |||
6 | ||||
7 | //--------------------------------------------------------------------------- | |||
8 | // Overview | |||
9 | //--------------------------------------------------------------------------- | |||
10 | // | |||
11 | // This file defines HashMap<Key, Value> and HashSet<T>, hash tables that are | |||
12 | // fast and have a nice API. | |||
13 | // | |||
14 | // Both hash tables have two optional template parameters. | |||
15 | // | |||
16 | // - HashPolicy. This defines the operations for hashing and matching keys. The | |||
17 | // default HashPolicy is appropriate when both of the following two | |||
18 | // conditions are true. | |||
19 | // | |||
20 | // - The key type stored in the table (|Key| for |HashMap<Key, Value>|, |T| | |||
21 | // for |HashSet<T>|) is an integer, pointer, UniquePtr, float, or double. | |||
22 | // | |||
23 | // - The type used for lookups (|Lookup|) is the same as the key type. This | |||
24 | // is usually the case, but not always. | |||
25 | // | |||
26 | // There is also a |CStringHasher| policy for |char*| keys. If your keys | |||
27 | // don't match any of the above cases, you must provide your own hash policy; | |||
28 | // see the "Hash Policy" section below. | |||
29 | // | |||
30 | // - AllocPolicy. This defines how allocations are done by the table. | |||
31 | // | |||
32 | // - |MallocAllocPolicy| is the default and is usually appropriate; note that | |||
33 | // operations (such as insertions) that might cause allocations are | |||
34 | // fallible and must be checked for OOM. These checks are enforced by the | |||
35 | // use of [[nodiscard]]. | |||
36 | // | |||
37 | // - |InfallibleAllocPolicy| is another possibility; it allows the | |||
38 | // abovementioned OOM checks to be done with MOZ_ALWAYS_TRUE(). | |||
39 | // | |||
40 | // Note that entry storage allocation is lazy, and not done until the first | |||
41 | // lookupForAdd(), put(), or putNew() is performed. | |||
42 | // | |||
43 | // See AllocPolicy.h for more details. | |||
44 | // | |||
45 | // Documentation on how to use HashMap and HashSet, including examples, is | |||
46 | // present within those classes. Search for "class HashMap" and "class | |||
47 | // HashSet". | |||
48 | // | |||
49 | // Both HashMap and HashSet are implemented on top of a third class, HashTable. | |||
50 | // You only need to look at HashTable if you want to understand the | |||
51 | // implementation. | |||
52 | // | |||
53 | // How does mozilla::HashTable (this file) compare with PLDHashTable (and its | |||
54 | // subclasses, such as nsTHashtable)? | |||
55 | // | |||
56 | // - mozilla::HashTable is a lot faster, largely because it uses templates | |||
57 | // throughout *and* inlines everything. PLDHashTable inlines operations much | |||
58 | // less aggressively, and also uses "virtual ops" for operations like hashing | |||
59 | // and matching entries that require function calls. | |||
60 | // | |||
61 | // - Correspondingly, mozilla::HashTable use is likely to increase executable | |||
62 | // size much more than PLDHashTable. | |||
63 | // | |||
64 | // - mozilla::HashTable has a nicer API, with a proper HashSet vs. HashMap | |||
65 | // distinction. | |||
66 | // | |||
67 | // - mozilla::HashTable requires more explicit OOM checking. As mentioned | |||
68 | // above, the use of |InfallibleAllocPolicy| can simplify things. | |||
69 | // | |||
70 | // - mozilla::HashTable has a default capacity on creation of 32 and a minimum | |||
71 | // capacity of 4. PLDHashTable has a default capacity on creation of 8 and a | |||
72 | // minimum capacity of 8. | |||
73 | ||||
74 | #ifndef mozilla_HashTable_h | |||
75 | #define mozilla_HashTable_h | |||
76 | ||||
77 | #include <utility> | |||
78 | #include <type_traits> | |||
79 | ||||
80 | #include "mozilla/AllocPolicy.h" | |||
81 | #include "mozilla/Assertions.h" | |||
82 | #include "mozilla/Attributes.h" | |||
83 | #include "mozilla/Casting.h" | |||
84 | #include "mozilla/HashFunctions.h" | |||
85 | #include "mozilla/MathAlgorithms.h" | |||
86 | #include "mozilla/Maybe.h" | |||
87 | #include "mozilla/MemoryChecking.h" | |||
88 | #include "mozilla/MemoryReporting.h" | |||
89 | #include "mozilla/Opaque.h" | |||
90 | #include "mozilla/OperatorNewExtensions.h" | |||
91 | #include "mozilla/ReentrancyGuard.h" | |||
92 | #include "mozilla/UniquePtr.h" | |||
93 | #include "mozilla/WrappingOperations.h" | |||
94 | ||||
95 | namespace mozilla { | |||
96 | ||||
97 | template <class, class = void> | |||
98 | struct DefaultHasher; | |||
99 | ||||
100 | template <class, class> | |||
101 | class HashMapEntry; | |||
102 | ||||
103 | namespace detail { | |||
104 | ||||
105 | template <typename T> | |||
106 | class HashTableEntry; | |||
107 | ||||
108 | template <class T, class HashPolicy, class AllocPolicy> | |||
109 | class HashTable; | |||
110 | ||||
111 | } // namespace detail | |||
112 | ||||
113 | // The "generation" of a hash table is an opaque value indicating the state of | |||
114 | // modification of the hash table through its lifetime. If the generation of | |||
115 | // a hash table compares equal at times T1 and T2, then lookups in the hash | |||
116 | // table, pointers to (or into) hash table entries, etc. at time T1 are valid | |||
117 | // at time T2. If the generation compares unequal, these computations are all | |||
118 | // invalid and must be performed again to be used. | |||
119 | // | |||
120 | // Generations are meaningfully comparable only with respect to a single hash | |||
121 | // table. It's always nonsensical to compare the generation of distinct hash | |||
122 | // tables H1 and H2. | |||
123 | using Generation = Opaque<uint64_t>; | |||
124 | ||||
125 | //--------------------------------------------------------------------------- | |||
126 | // HashMap | |||
127 | //--------------------------------------------------------------------------- | |||
128 | ||||
129 | // HashMap is a fast hash-based map from keys to values. | |||
130 | // | |||
131 | // Template parameter requirements: | |||
132 | // - Key/Value: movable, destructible, assignable. | |||
133 | // - HashPolicy: see the "Hash Policy" section below. | |||
134 | // - AllocPolicy: see AllocPolicy.h. | |||
135 | // | |||
136 | // Note: | |||
137 | // - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members | |||
138 | // called by HashMap must not call back into the same HashMap object. | |||
139 | // | |||
140 | template <class Key, class Value, class HashPolicy = DefaultHasher<Key>, | |||
141 | class AllocPolicy = MallocAllocPolicy> | |||
142 | class HashMap { | |||
143 | // -- Implementation details ----------------------------------------------- | |||
144 | ||||
145 | // HashMap is not copyable or assignable. | |||
146 | HashMap(const HashMap& hm) = delete; | |||
147 | HashMap& operator=(const HashMap& hm) = delete; | |||
148 | ||||
149 | using TableEntry = HashMapEntry<Key, Value>; | |||
150 | ||||
151 | struct MapHashPolicy : HashPolicy { | |||
152 | using Base = HashPolicy; | |||
153 | using KeyType = Key; | |||
154 | ||||
155 | static const Key& getKey(TableEntry& aEntry) { return aEntry.key(); } | |||
156 | ||||
157 | static void setKey(TableEntry& aEntry, Key& aKey) { | |||
158 | HashPolicy::rekey(aEntry.mutableKey(), aKey); | |||
159 | } | |||
160 | }; | |||
161 | ||||
162 | using Impl = detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy>; | |||
163 | Impl mImpl; | |||
164 | ||||
165 | friend class Impl::Enum; | |||
166 | ||||
167 | public: | |||
168 | using Lookup = typename HashPolicy::Lookup; | |||
169 | using Entry = TableEntry; | |||
170 | ||||
171 | // -- Initialization ------------------------------------------------------- | |||
172 | ||||
173 | explicit HashMap(AllocPolicy aAllocPolicy = AllocPolicy(), | |||
174 | uint32_t aLen = Impl::sDefaultLen) | |||
175 | : mImpl(std::move(aAllocPolicy), aLen) {} | |||
176 | ||||
177 | explicit HashMap(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {} | |||
178 | ||||
179 | // HashMap is movable. | |||
180 | HashMap(HashMap&& aRhs) = default; | |||
181 | HashMap& operator=(HashMap&& aRhs) = default; | |||
182 | ||||
183 | // -- Status and sizing ---------------------------------------------------- | |||
184 | ||||
185 | // The map's current generation. | |||
186 | Generation generation() const { return mImpl.generation(); } | |||
187 | ||||
188 | // Is the map empty? | |||
189 | bool empty() const { return mImpl.empty(); } | |||
190 | ||||
191 | // Number of keys/values in the map. | |||
192 | uint32_t count() const { return mImpl.count(); } | |||
193 | ||||
194 | // Number of key/value slots in the map. Note: resize will happen well before | |||
195 | // count() == capacity(). | |||
196 | uint32_t capacity() const { return mImpl.capacity(); } | |||
197 | ||||
198 | // The size of the map's entry storage, in bytes. If the keys/values contain | |||
199 | // pointers to other heap blocks, you must iterate over the map and measure | |||
200 | // them separately; hence the "shallow" prefix. | |||
201 | size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { | |||
202 | return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf); | |||
203 | } | |||
204 | size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { | |||
205 | return aMallocSizeOf(this) + | |||
206 | mImpl.shallowSizeOfExcludingThis(aMallocSizeOf); | |||
207 | } | |||
208 | ||||
209 | // Attempt to minimize the capacity(). If the table is empty, this will free | |||
210 | // the empty storage and upon regrowth it will be given the minimum capacity. | |||
211 | void compact() { mImpl.compact(); } | |||
212 | ||||
213 | // Attempt to reserve enough space to fit at least |aLen| elements. This is | |||
214 | // total capacity, including elements already present. Does nothing if the | |||
215 | // map already has sufficient capacity. | |||
216 | [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); } | |||
217 | ||||
218 | // -- Lookups -------------------------------------------------------------- | |||
219 | ||||
220 | // Does the map contain a key/value matching |aLookup|? | |||
221 | bool has(const Lookup& aLookup) const { | |||
222 | return mImpl.lookup(aLookup).found(); | |||
223 | } | |||
224 | ||||
225 | // Return a Ptr indicating whether a key/value matching |aLookup| is | |||
226 | // present in the map. E.g.: | |||
227 | // | |||
228 | // using HM = HashMap<int,char>; | |||
229 | // HM h; | |||
230 | // if (HM::Ptr p = h.lookup(3)) { | |||
231 | // assert(p->key() == 3); | |||
232 | // char val = p->value(); | |||
233 | // } | |||
234 | // | |||
235 | using Ptr = typename Impl::Ptr; | |||
236 | MOZ_ALWAYS_INLINEinline Ptr lookup(const Lookup& aLookup) const { | |||
237 | return mImpl.lookup(aLookup); | |||
238 | } | |||
239 | ||||
240 | // Like lookup(), but does not assert if two threads call it at the same | |||
241 | // time. Only use this method when none of the threads will modify the map. | |||
242 | MOZ_ALWAYS_INLINEinline Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const { | |||
243 | return mImpl.readonlyThreadsafeLookup(aLookup); | |||
244 | } | |||
245 | ||||
246 | // -- Insertions ----------------------------------------------------------- | |||
247 | ||||
248 | // Overwrite existing value with |aValue|, or add it if not present. Returns | |||
249 | // false on OOM. | |||
250 | template <typename KeyInput, typename ValueInput> | |||
251 | [[nodiscard]] bool put(KeyInput&& aKey, ValueInput&& aValue) { | |||
252 | return put(aKey, std::forward<KeyInput>(aKey), | |||
253 | std::forward<ValueInput>(aValue)); | |||
254 | } | |||
255 | ||||
256 | template <typename KeyInput, typename ValueInput> | |||
257 | [[nodiscard]] bool put(const Lookup& aLookup, KeyInput&& aKey, | |||
258 | ValueInput&& aValue) { | |||
259 | AddPtr p = lookupForAdd(aLookup); | |||
260 | if (p) { | |||
261 | p->value() = std::forward<ValueInput>(aValue); | |||
262 | return true; | |||
263 | } | |||
264 | return add(p, std::forward<KeyInput>(aKey), | |||
265 | std::forward<ValueInput>(aValue)); | |||
266 | } | |||
267 | ||||
268 | // Like put(), but slightly faster. Must only be used when the given key is | |||
269 | // not already present. (In debug builds, assertions check this.) | |||
270 | template <typename KeyInput, typename ValueInput> | |||
271 | [[nodiscard]] bool putNew(KeyInput&& aKey, ValueInput&& aValue) { | |||
272 | return mImpl.putNew(aKey, std::forward<KeyInput>(aKey), | |||
273 | std::forward<ValueInput>(aValue)); | |||
274 | } | |||
275 | ||||
276 | template <typename KeyInput, typename ValueInput> | |||
277 | [[nodiscard]] bool putNew(const Lookup& aLookup, KeyInput&& aKey, | |||
278 | ValueInput&& aValue) { | |||
279 | return mImpl.putNew(aLookup, std::forward<KeyInput>(aKey), | |||
280 | std::forward<ValueInput>(aValue)); | |||
281 | } | |||
282 | ||||
283 | // Like putNew(), but should be only used when the table is known to be big | |||
284 | // enough for the insertion, and hashing cannot fail. Typically this is used | |||
285 | // to populate an empty map with known-unique keys after reserving space with | |||
286 | // reserve(), e.g. | |||
287 | // | |||
288 | // using HM = HashMap<int,char>; | |||
289 | // HM h; | |||
290 | // if (!h.reserve(3)) { | |||
291 | // MOZ_CRASH("OOM"); | |||
292 | // } | |||
293 | // h.putNewInfallible(1, 'a'); // unique key | |||
294 | // h.putNewInfallible(2, 'b'); // unique key | |||
295 | // h.putNewInfallible(3, 'c'); // unique key | |||
296 | // | |||
297 | template <typename KeyInput, typename ValueInput> | |||
298 | void putNewInfallible(KeyInput&& aKey, ValueInput&& aValue) { | |||
299 | mImpl.putNewInfallible(aKey, std::forward<KeyInput>(aKey), | |||
300 | std::forward<ValueInput>(aValue)); | |||
301 | } | |||
302 | ||||
303 | // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient | |||
304 | // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using | |||
305 | // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new key/value. E.g.: | |||
306 | // | |||
307 | // using HM = HashMap<int,char>; | |||
308 | // HM h; | |||
309 | // HM::AddPtr p = h.lookupForAdd(3); | |||
310 | // if (!p) { | |||
311 | // if (!h.add(p, 3, 'a')) { | |||
312 | // return false; | |||
313 | // } | |||
314 | // } | |||
315 | // assert(p->key() == 3); | |||
316 | // char val = p->value(); | |||
317 | // | |||
318 | // N.B. The caller must ensure that no mutating hash table operations occur | |||
319 | // between a pair of lookupForAdd() and add() calls. To avoid looking up the | |||
320 | // key a second time, the caller may use the more efficient relookupOrAdd() | |||
321 | // method. This method reuses part of the hashing computation to more | |||
322 | // efficiently insert the key if it has not been added. For example, a | |||
323 | // mutation-handling version of the previous example: | |||
324 | // | |||
325 | // HM::AddPtr p = h.lookupForAdd(3); | |||
326 | // if (!p) { | |||
327 | // call_that_may_mutate_h(); | |||
328 | // if (!h.relookupOrAdd(p, 3, 'a')) { | |||
329 | // return false; | |||
330 | // } | |||
331 | // } | |||
332 | // assert(p->key() == 3); | |||
333 | // char val = p->value(); | |||
334 | // | |||
335 | using AddPtr = typename Impl::AddPtr; | |||
336 | MOZ_ALWAYS_INLINEinline AddPtr lookupForAdd(const Lookup& aLookup) { | |||
337 | return mImpl.lookupForAdd(aLookup); | |||
338 | } | |||
339 | ||||
340 | // Add a key/value. Returns false on OOM. | |||
341 | template <typename KeyInput, typename ValueInput> | |||
342 | [[nodiscard]] bool add(AddPtr& aPtr, KeyInput&& aKey, ValueInput&& aValue) { | |||
343 | return mImpl.add(aPtr, std::forward<KeyInput>(aKey), | |||
344 | std::forward<ValueInput>(aValue)); | |||
345 | } | |||
346 | ||||
347 | // See the comment above lookupForAdd() for details. | |||
348 | template <typename KeyInput, typename ValueInput> | |||
349 | [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, KeyInput&& aKey, | |||
350 | ValueInput&& aValue) { | |||
351 | return mImpl.relookupOrAdd(aPtr, aKey, std::forward<KeyInput>(aKey), | |||
352 | std::forward<ValueInput>(aValue)); | |||
353 | } | |||
354 | ||||
355 | // -- Removal -------------------------------------------------------------- | |||
356 | ||||
357 | // Lookup and remove the key/value matching |aLookup|, if present. | |||
358 | void remove(const Lookup& aLookup) { | |||
359 | if (Ptr p = lookup(aLookup)) { | |||
360 | remove(p); | |||
361 | } | |||
362 | } | |||
363 | ||||
364 | // Remove a previously found key/value (assuming aPtr.found()). The map must | |||
365 | // not have been mutated in the interim. | |||
366 | void remove(Ptr aPtr) { mImpl.remove(aPtr); } | |||
367 | ||||
368 | // Remove all keys/values without changing the capacity. | |||
369 | void clear() { mImpl.clear(); } | |||
370 | ||||
371 | // Like clear() followed by compact(). | |||
372 | void clearAndCompact() { mImpl.clearAndCompact(); } | |||
373 | ||||
374 | // -- Rekeying ------------------------------------------------------------- | |||
375 | ||||
376 | // Infallibly rekey one entry, if necessary. Requires that template | |||
377 | // parameters Key and HashPolicy::Lookup are the same type. | |||
378 | void rekeyIfMoved(const Key& aOldKey, const Key& aNewKey) { | |||
379 | if (aOldKey != aNewKey) { | |||
380 | rekeyAs(aOldKey, aNewKey, aNewKey); | |||
381 | } | |||
382 | } | |||
383 | ||||
384 | // Infallibly rekey one entry if present, and return whether that happened. | |||
385 | bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup, | |||
386 | const Key& aNewKey) { | |||
387 | if (Ptr p = lookup(aOldLookup)) { | |||
388 | mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewKey); | |||
389 | return true; | |||
390 | } | |||
391 | return false; | |||
392 | } | |||
393 | ||||
394 | // -- Iteration ------------------------------------------------------------ | |||
395 | ||||
396 | // |iter()| returns an Iterator: | |||
397 | // | |||
398 | // HashMap<int, char> h; | |||
399 | // for (auto iter = h.iter(); !iter.done(); iter.next()) { | |||
400 | // char c = iter.get().value(); | |||
401 | // } | |||
402 | // | |||
403 | using Iterator = typename Impl::Iterator; | |||
404 | Iterator iter() const { return mImpl.iter(); } | |||
405 | ||||
406 | // |modIter()| returns a ModIterator: | |||
407 | // | |||
408 | // HashMap<int, char> h; | |||
409 | // for (auto iter = h.modIter(); !iter.done(); iter.next()) { | |||
410 | // if (iter.get().value() == 'l') { | |||
411 | // iter.remove(); | |||
412 | // } | |||
413 | // } | |||
414 | // | |||
415 | // Table resize may occur in ModIterator's destructor. | |||
416 | using ModIterator = typename Impl::ModIterator; | |||
417 | ModIterator modIter() { return mImpl.modIter(); } | |||
418 | ||||
419 | // These are similar to Iterator/ModIterator/iter(), but use different | |||
420 | // terminology. | |||
421 | using Range = typename Impl::Range; | |||
422 | using Enum = typename Impl::Enum; | |||
423 | Range all() const { return mImpl.all(); } | |||
424 | }; | |||
425 | ||||
426 | //--------------------------------------------------------------------------- | |||
427 | // HashSet | |||
428 | //--------------------------------------------------------------------------- | |||
429 | ||||
430 | // HashSet is a fast hash-based set of values. | |||
431 | // | |||
432 | // Template parameter requirements: | |||
433 | // - T: movable, destructible, assignable. | |||
434 | // - HashPolicy: see the "Hash Policy" section below. | |||
435 | // - AllocPolicy: see AllocPolicy.h | |||
436 | // | |||
437 | // Note: | |||
438 | // - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by | |||
439 | // HashSet must not call back into the same HashSet object. | |||
440 | // | |||
441 | template <class T, class HashPolicy = DefaultHasher<T>, | |||
442 | class AllocPolicy = MallocAllocPolicy> | |||
443 | class HashSet { | |||
444 | // -- Implementation details ----------------------------------------------- | |||
445 | ||||
446 | // HashSet is not copyable or assignable. | |||
447 | HashSet(const HashSet& hs) = delete; | |||
448 | HashSet& operator=(const HashSet& hs) = delete; | |||
449 | ||||
450 | struct SetHashPolicy : HashPolicy { | |||
451 | using Base = HashPolicy; | |||
452 | using KeyType = T; | |||
453 | ||||
454 | static const KeyType& getKey(const T& aT) { return aT; } | |||
455 | ||||
456 | static void setKey(T& aT, KeyType& aKey) { HashPolicy::rekey(aT, aKey); } | |||
457 | }; | |||
458 | ||||
459 | using Impl = detail::HashTable<const T, SetHashPolicy, AllocPolicy>; | |||
460 | Impl mImpl; | |||
461 | ||||
462 | friend class Impl::Enum; | |||
463 | ||||
464 | public: | |||
465 | using Lookup = typename HashPolicy::Lookup; | |||
466 | using Entry = T; | |||
467 | ||||
468 | // -- Initialization ------------------------------------------------------- | |||
469 | ||||
470 | explicit HashSet(AllocPolicy aAllocPolicy = AllocPolicy(), | |||
471 | uint32_t aLen = Impl::sDefaultLen) | |||
472 | : mImpl(std::move(aAllocPolicy), aLen) {} | |||
473 | ||||
474 | explicit HashSet(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {} | |||
475 | ||||
476 | // HashSet is movable. | |||
477 | HashSet(HashSet&& aRhs) = default; | |||
478 | HashSet& operator=(HashSet&& aRhs) = default; | |||
479 | ||||
480 | // -- Status and sizing ---------------------------------------------------- | |||
481 | ||||
482 | // The set's current generation. | |||
483 | Generation generation() const { return mImpl.generation(); } | |||
484 | ||||
485 | // Is the set empty? | |||
486 | bool empty() const { return mImpl.empty(); } | |||
487 | ||||
488 | // Number of elements in the set. | |||
489 | uint32_t count() const { return mImpl.count(); } | |||
490 | ||||
491 | // Number of element slots in the set. Note: resize will happen well before | |||
492 | // count() == capacity(). | |||
493 | uint32_t capacity() const { return mImpl.capacity(); } | |||
494 | ||||
495 | // The size of the set's entry storage, in bytes. If the elements contain | |||
496 | // pointers to other heap blocks, you must iterate over the set and measure | |||
497 | // them separately; hence the "shallow" prefix. | |||
498 | size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { | |||
499 | return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf); | |||
500 | } | |||
501 | size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { | |||
502 | return aMallocSizeOf(this) + | |||
503 | mImpl.shallowSizeOfExcludingThis(aMallocSizeOf); | |||
504 | } | |||
505 | ||||
506 | // Attempt to minimize the capacity(). If the table is empty, this will free | |||
507 | // the empty storage and upon regrowth it will be given the minimum capacity. | |||
508 | void compact() { mImpl.compact(); } | |||
509 | ||||
510 | // Attempt to reserve enough space to fit at least |aLen| elements. This is | |||
511 | // total capacity, including elements already present. Does nothing if the | |||
512 | // map already has sufficient capacity. | |||
513 | [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); } | |||
514 | ||||
515 | // -- Lookups -------------------------------------------------------------- | |||
516 | ||||
517 | // Does the set contain an element matching |aLookup|? | |||
518 | bool has(const Lookup& aLookup) const { | |||
519 | return mImpl.lookup(aLookup).found(); | |||
520 | } | |||
521 | ||||
522 | // Return a Ptr indicating whether an element matching |aLookup| is present | |||
523 | // in the set. E.g.: | |||
524 | // | |||
525 | // using HS = HashSet<int>; | |||
526 | // HS h; | |||
527 | // if (HS::Ptr p = h.lookup(3)) { | |||
528 | // assert(*p == 3); // p acts like a pointer to int | |||
529 | // } | |||
530 | // | |||
531 | using Ptr = typename Impl::Ptr; | |||
532 | MOZ_ALWAYS_INLINEinline Ptr lookup(const Lookup& aLookup) const { | |||
533 | return mImpl.lookup(aLookup); | |||
534 | } | |||
535 | ||||
536 | // Like lookup(), but does not assert if two threads call it at the same | |||
537 | // time. Only use this method when none of the threads will modify the set. | |||
538 | MOZ_ALWAYS_INLINEinline Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const { | |||
539 | return mImpl.readonlyThreadsafeLookup(aLookup); | |||
540 | } | |||
541 | ||||
542 | // -- Insertions ----------------------------------------------------------- | |||
543 | ||||
544 | // Add |aU| if it is not present already. Returns false on OOM. | |||
545 | template <typename U> | |||
546 | [[nodiscard]] bool put(U&& aU) { | |||
547 | AddPtr p = lookupForAdd(aU); | |||
548 | return p ? true : add(p, std::forward<U>(aU)); | |||
549 | } | |||
550 | ||||
551 | // Like put(), but slightly faster. Must only be used when the given element | |||
552 | // is not already present. (In debug builds, assertions check this.) | |||
553 | template <typename U> | |||
554 | [[nodiscard]] bool putNew(U&& aU) { | |||
555 | return mImpl.putNew(aU, std::forward<U>(aU)); | |||
556 | } | |||
557 | ||||
558 | // Like the other putNew(), but for when |Lookup| is different to |T|. | |||
559 | template <typename U> | |||
560 | [[nodiscard]] bool putNew(const Lookup& aLookup, U&& aU) { | |||
561 | return mImpl.putNew(aLookup, std::forward<U>(aU)); | |||
562 | } | |||
563 | ||||
564 | // Like putNew(), but should be only used when the table is known to be big | |||
565 | // enough for the insertion, and hashing cannot fail. Typically this is used | |||
566 | // to populate an empty set with known-unique elements after reserving space | |||
567 | // with reserve(), e.g. | |||
568 | // | |||
569 | // using HS = HashMap<int>; | |||
570 | // HS h; | |||
571 | // if (!h.reserve(3)) { | |||
572 | // MOZ_CRASH("OOM"); | |||
573 | // } | |||
574 | // h.putNewInfallible(1); // unique element | |||
575 | // h.putNewInfallible(2); // unique element | |||
576 | // h.putNewInfallible(3); // unique element | |||
577 | // | |||
578 | template <typename U> | |||
579 | void putNewInfallible(const Lookup& aLookup, U&& aU) { | |||
580 | mImpl.putNewInfallible(aLookup, std::forward<U>(aU)); | |||
581 | } | |||
582 | ||||
583 | // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient | |||
584 | // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using | |||
585 | // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.: | |||
586 | // | |||
587 | // using HS = HashSet<int>; | |||
588 | // HS h; | |||
589 | // HS::AddPtr p = h.lookupForAdd(3); | |||
590 | // if (!p) { | |||
591 | // if (!h.add(p, 3)) { | |||
592 | // return false; | |||
593 | // } | |||
594 | // } | |||
595 | // assert(*p == 3); // p acts like a pointer to int | |||
596 | // | |||
597 | // N.B. The caller must ensure that no mutating hash table operations occur | |||
598 | // between a pair of lookupForAdd() and add() calls. To avoid looking up the | |||
599 | // key a second time, the caller may use the more efficient relookupOrAdd() | |||
600 | // method. This method reuses part of the hashing computation to more | |||
601 | // efficiently insert the key if it has not been added. For example, a | |||
602 | // mutation-handling version of the previous example: | |||
603 | // | |||
604 | // HS::AddPtr p = h.lookupForAdd(3); | |||
605 | // if (!p) { | |||
606 | // call_that_may_mutate_h(); | |||
607 | // if (!h.relookupOrAdd(p, 3, 3)) { | |||
608 | // return false; | |||
609 | // } | |||
610 | // } | |||
611 | // assert(*p == 3); | |||
612 | // | |||
613 | // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the | |||
614 | // entry |t|, where the caller ensures match(l,t). | |||
615 | using AddPtr = typename Impl::AddPtr; | |||
616 | MOZ_ALWAYS_INLINEinline AddPtr lookupForAdd(const Lookup& aLookup) { | |||
617 | return mImpl.lookupForAdd(aLookup); | |||
618 | } | |||
619 | ||||
620 | // Add an element. Returns false on OOM. | |||
621 | template <typename U> | |||
622 | [[nodiscard]] bool add(AddPtr& aPtr, U&& aU) { | |||
623 | return mImpl.add(aPtr, std::forward<U>(aU)); | |||
624 | } | |||
625 | ||||
626 | // See the comment above lookupForAdd() for details. | |||
627 | template <typename U> | |||
628 | [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup, | |||
629 | U&& aU) { | |||
630 | return mImpl.relookupOrAdd(aPtr, aLookup, std::forward<U>(aU)); | |||
631 | } | |||
632 | ||||
633 | // -- Removal -------------------------------------------------------------- | |||
634 | ||||
635 | // Lookup and remove the element matching |aLookup|, if present. | |||
636 | void remove(const Lookup& aLookup) { | |||
637 | if (Ptr p = lookup(aLookup)) { | |||
638 | remove(p); | |||
639 | } | |||
640 | } | |||
641 | ||||
642 | // Remove a previously found element (assuming aPtr.found()). The set must | |||
643 | // not have been mutated in the interim. | |||
644 | void remove(Ptr aPtr) { mImpl.remove(aPtr); } | |||
645 | ||||
646 | // Remove all keys/values without changing the capacity. | |||
647 | void clear() { mImpl.clear(); } | |||
648 | ||||
649 | // Like clear() followed by compact(). | |||
650 | void clearAndCompact() { mImpl.clearAndCompact(); } | |||
651 | ||||
652 | // -- Rekeying ------------------------------------------------------------- | |||
653 | ||||
654 | // Infallibly rekey one entry, if present. Requires that template parameters | |||
655 | // T and HashPolicy::Lookup are the same type. | |||
656 | void rekeyIfMoved(const Lookup& aOldValue, const T& aNewValue) { | |||
657 | if (aOldValue != aNewValue) { | |||
658 | rekeyAs(aOldValue, aNewValue, aNewValue); | |||
659 | } | |||
660 | } | |||
661 | ||||
662 | // Infallibly rekey one entry if present, and return whether that happened. | |||
663 | bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup, | |||
664 | const T& aNewValue) { | |||
665 | if (Ptr p = lookup(aOldLookup)) { | |||
666 | mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewValue); | |||
667 | return true; | |||
668 | } | |||
669 | return false; | |||
670 | } | |||
671 | ||||
672 | // Infallibly replace the current key at |aPtr| with an equivalent key. | |||
673 | // Specifically, both HashPolicy::hash and HashPolicy::match must return | |||
674 | // identical results for the new and old key when applied against all | |||
675 | // possible matching values. | |||
676 | void replaceKey(Ptr aPtr, const Lookup& aLookup, const T& aNewValue) { | |||
677 | MOZ_ASSERT(aPtr.found())do { static_assert( mozilla::detail::AssertionConditionType< decltype(aPtr.found())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(aPtr.found()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("aPtr.found()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 677); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPtr.found()" ")"); do { *((volatile int*)__null) = 677; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
678 | MOZ_ASSERT(*aPtr != aNewValue)do { static_assert( mozilla::detail::AssertionConditionType< decltype(*aPtr != aNewValue)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(*aPtr != aNewValue))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("*aPtr != aNewValue" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 678); AnnotateMozCrashReason("MOZ_ASSERT" "(" "*aPtr != aNewValue" ")"); do { *((volatile int*)__null) = 678; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
679 | MOZ_ASSERT(HashPolicy::match(*aPtr, aLookup))do { static_assert( mozilla::detail::AssertionConditionType< decltype(HashPolicy::match(*aPtr, aLookup))>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(HashPolicy::match(*aPtr, aLookup )))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("HashPolicy::match(*aPtr, aLookup)", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 679); AnnotateMozCrashReason("MOZ_ASSERT" "(" "HashPolicy::match(*aPtr, aLookup)" ")"); do { *((volatile int*)__null) = 679; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
680 | MOZ_ASSERT(HashPolicy::match(aNewValue, aLookup))do { static_assert( mozilla::detail::AssertionConditionType< decltype(HashPolicy::match(aNewValue, aLookup))>::isValid, "invalid assertion condition"); if ((__builtin_expect(!!(!(! !(HashPolicy::match(aNewValue, aLookup)))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("HashPolicy::match(aNewValue, aLookup)" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 680); AnnotateMozCrashReason("MOZ_ASSERT" "(" "HashPolicy::match(aNewValue, aLookup)" ")"); do { *((volatile int*)__null) = 680; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
681 | const_cast<T&>(*aPtr) = aNewValue; | |||
682 | MOZ_ASSERT(*lookup(aLookup) == aNewValue)do { static_assert( mozilla::detail::AssertionConditionType< decltype(*lookup(aLookup) == aNewValue)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(*lookup(aLookup) == aNewValue ))), 0))) { do { } while (false); MOZ_ReportAssertionFailure( "*lookup(aLookup) == aNewValue", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 682); AnnotateMozCrashReason("MOZ_ASSERT" "(" "*lookup(aLookup) == aNewValue" ")"); do { *((volatile int*)__null) = 682; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
683 | } | |||
684 | void replaceKey(Ptr aPtr, const T& aNewValue) { | |||
685 | replaceKey(aPtr, aNewValue, aNewValue); | |||
686 | } | |||
687 | ||||
688 | // -- Iteration ------------------------------------------------------------ | |||
689 | ||||
690 | // |iter()| returns an Iterator: | |||
691 | // | |||
692 | // HashSet<int> h; | |||
693 | // for (auto iter = h.iter(); !iter.done(); iter.next()) { | |||
694 | // int i = iter.get(); | |||
695 | // } | |||
696 | // | |||
697 | using Iterator = typename Impl::Iterator; | |||
698 | Iterator iter() const { return mImpl.iter(); } | |||
699 | ||||
700 | // |modIter()| returns a ModIterator: | |||
701 | // | |||
702 | // HashSet<int> h; | |||
703 | // for (auto iter = h.modIter(); !iter.done(); iter.next()) { | |||
704 | // if (iter.get() == 42) { | |||
705 | // iter.remove(); | |||
706 | // } | |||
707 | // } | |||
708 | // | |||
709 | // Table resize may occur in ModIterator's destructor. | |||
710 | using ModIterator = typename Impl::ModIterator; | |||
711 | ModIterator modIter() { return mImpl.modIter(); } | |||
712 | ||||
713 | // These are similar to Iterator/ModIterator/iter(), but use different | |||
714 | // terminology. | |||
715 | using Range = typename Impl::Range; | |||
716 | using Enum = typename Impl::Enum; | |||
717 | Range all() const { return mImpl.all(); } | |||
718 | }; | |||
719 | ||||
720 | //--------------------------------------------------------------------------- | |||
721 | // Hash Policy | |||
722 | //--------------------------------------------------------------------------- | |||
723 | ||||
724 | // A hash policy |HP| for a hash table with key-type |Key| must provide: | |||
725 | // | |||
726 | // - a type |HP::Lookup| to use to lookup table entries; | |||
727 | // | |||
728 | // - a static member function |HP::hash| that hashes lookup values: | |||
729 | // | |||
730 | // static mozilla::HashNumber hash(const Lookup&); | |||
731 | // | |||
732 | // - a static member function |HP::match| that tests equality of key and | |||
733 | // lookup values: | |||
734 | // | |||
735 | // static bool match(const Key& aKey, const Lookup& aLookup); | |||
736 | // | |||
737 | // |aKey| and |aLookup| can have different hash numbers, only when a | |||
738 | // collision happens with |prepareHash| operation, which is less frequent. | |||
739 | // Thus, |HP::match| shouldn't assume the hash equality in the comparison, | |||
740 | // even if the hash numbers are almost always same between them. | |||
741 | // | |||
742 | // Normally, Lookup = Key. In general, though, different values and types of | |||
743 | // values can be used to lookup and store. If a Lookup value |l| is not equal | |||
744 | // to the added Key value |k|, the user must ensure that |HP::match(k,l)| is | |||
745 | // true. E.g.: | |||
746 | // | |||
747 | // mozilla::HashSet<Key, HP>::AddPtr p = h.lookup(l); | |||
748 | // if (!p) { | |||
749 | // assert(HP::match(k, l)); // must hold | |||
750 | // h.add(p, k); | |||
751 | // } | |||
752 | ||||
753 | // A pointer hashing policy that uses HashGeneric() to create good hashes for | |||
754 | // pointers. Note that we don't shift out the lowest k bits because we don't | |||
755 | // want to assume anything about the alignment of the pointers. | |||
756 | template <typename Key> | |||
757 | struct PointerHasher { | |||
758 | using Lookup = Key; | |||
759 | ||||
760 | static HashNumber hash(const Lookup& aLookup) { return HashGeneric(aLookup); } | |||
761 | ||||
762 | static bool match(const Key& aKey, const Lookup& aLookup) { | |||
763 | return aKey == aLookup; | |||
764 | } | |||
765 | ||||
766 | static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; } | |||
767 | }; | |||
768 | ||||
769 | // The default hash policy, which only works with integers. | |||
770 | template <class Key, typename> | |||
771 | struct DefaultHasher { | |||
772 | using Lookup = Key; | |||
773 | ||||
774 | static HashNumber hash(const Lookup& aLookup) { | |||
775 | // Just convert the integer to a HashNumber and use that as is. (This | |||
776 | // discards the high 32-bits of 64-bit integers!) ScrambleHashCode() is | |||
777 | // subsequently called on the value to improve the distribution. | |||
778 | return aLookup; | |||
779 | } | |||
780 | ||||
781 | static bool match(const Key& aKey, const Lookup& aLookup) { | |||
782 | // Use builtin or overloaded operator==. | |||
783 | return aKey == aLookup; | |||
784 | } | |||
785 | ||||
786 | static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; } | |||
787 | }; | |||
788 | ||||
789 | // A DefaultHasher specialization for enums. | |||
790 | template <class T> | |||
791 | struct DefaultHasher<T, std::enable_if_t<std::is_enum_v<T>>> { | |||
792 | using Key = T; | |||
793 | using Lookup = Key; | |||
794 | ||||
795 | static HashNumber hash(const Lookup& aLookup) { return HashGeneric(aLookup); } | |||
796 | ||||
797 | static bool match(const Key& aKey, const Lookup& aLookup) { | |||
798 | // Use builtin or overloaded operator==. | |||
799 | return aKey == static_cast<Key>(aLookup); | |||
800 | } | |||
801 | ||||
802 | static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; } | |||
803 | }; | |||
804 | ||||
805 | // A DefaultHasher specialization for pointers. | |||
806 | template <class T> | |||
807 | struct DefaultHasher<T*> : PointerHasher<T*> {}; | |||
808 | ||||
809 | // A DefaultHasher specialization for mozilla::UniquePtr. | |||
810 | template <class T, class D> | |||
811 | struct DefaultHasher<UniquePtr<T, D>> { | |||
812 | using Key = UniquePtr<T, D>; | |||
813 | using Lookup = Key; | |||
814 | using PtrHasher = PointerHasher<T*>; | |||
815 | ||||
816 | static HashNumber hash(const Lookup& aLookup) { | |||
817 | return PtrHasher::hash(aLookup.get()); | |||
818 | } | |||
819 | ||||
820 | static bool match(const Key& aKey, const Lookup& aLookup) { | |||
821 | return PtrHasher::match(aKey.get(), aLookup.get()); | |||
822 | } | |||
823 | ||||
824 | static void rekey(UniquePtr<T, D>& aKey, UniquePtr<T, D>&& aNewKey) { | |||
825 | aKey = std::move(aNewKey); | |||
826 | } | |||
827 | }; | |||
828 | ||||
829 | // A DefaultHasher specialization for doubles. | |||
830 | template <> | |||
831 | struct DefaultHasher<double> { | |||
832 | using Key = double; | |||
833 | using Lookup = Key; | |||
834 | ||||
835 | static HashNumber hash(const Lookup& aLookup) { | |||
836 | // Just xor the high bits with the low bits, and then treat the bits of the | |||
837 | // result as a uint32_t. | |||
838 | static_assert(sizeof(HashNumber) == 4, | |||
839 | "subsequent code assumes a four-byte hash"); | |||
840 | uint64_t u = BitwiseCast<uint64_t>(aLookup); | |||
841 | return HashNumber(u ^ (u >> 32)); | |||
842 | } | |||
843 | ||||
844 | static bool match(const Key& aKey, const Lookup& aLookup) { | |||
845 | return BitwiseCast<uint64_t>(aKey) == BitwiseCast<uint64_t>(aLookup); | |||
846 | } | |||
847 | }; | |||
848 | ||||
849 | // A DefaultHasher specialization for floats. | |||
850 | template <> | |||
851 | struct DefaultHasher<float> { | |||
852 | using Key = float; | |||
853 | using Lookup = Key; | |||
854 | ||||
855 | static HashNumber hash(const Lookup& aLookup) { | |||
856 | // Just use the value as if its bits form an integer. ScrambleHashCode() is | |||
857 | // subsequently called on the value to improve the distribution. | |||
858 | static_assert(sizeof(HashNumber) == 4, | |||
859 | "subsequent code assumes a four-byte hash"); | |||
860 | return HashNumber(BitwiseCast<uint32_t>(aLookup)); | |||
861 | } | |||
862 | ||||
863 | static bool match(const Key& aKey, const Lookup& aLookup) { | |||
864 | return BitwiseCast<uint32_t>(aKey) == BitwiseCast<uint32_t>(aLookup); | |||
865 | } | |||
866 | }; | |||
867 | ||||
868 | // A hash policy for C strings. | |||
869 | struct CStringHasher { | |||
870 | using Key = const char*; | |||
871 | using Lookup = const char*; | |||
872 | ||||
873 | static HashNumber hash(const Lookup& aLookup) { return HashString(aLookup); } | |||
874 | ||||
875 | static bool match(const Key& aKey, const Lookup& aLookup) { | |||
876 | return strcmp(aKey, aLookup) == 0; | |||
877 | } | |||
878 | }; | |||
879 | ||||
880 | //--------------------------------------------------------------------------- | |||
881 | // Fallible Hashing Interface | |||
882 | //--------------------------------------------------------------------------- | |||
883 | ||||
884 | // Most of the time generating a hash code is infallible, but sometimes it is | |||
885 | // necessary to generate hash codes on demand in a way that can fail. Specialize | |||
886 | // this class for your own hash policy to provide fallible hashing. | |||
887 | // | |||
888 | // This is used by MovableCellHasher to handle the fact that generating a unique | |||
889 | // ID for cell pointer may fail due to OOM. | |||
890 | // | |||
891 | // The default implementations of these methods delegate to the usual HashPolicy | |||
892 | // implementation and always succeed. | |||
893 | template <typename HashPolicy> | |||
894 | struct FallibleHashMethods { | |||
895 | // Return true if a hashcode is already available for its argument, and | |||
896 | // sets |aHashOut|. Once this succeeds for a specific argument it | |||
897 | // must continue to do so. | |||
898 | // | |||
899 | // Return false if a hashcode is not already available. This implies that any | |||
900 | // lookup must fail, as the hash code would have to have been successfully | |||
901 | // created on insertion. | |||
902 | template <typename Lookup> | |||
903 | static bool maybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) { | |||
904 | *aHashOut = HashPolicy::hash(aLookup); | |||
905 | return true; | |||
906 | } | |||
907 | ||||
908 | // Fallible method to ensure a hashcode exists for its argument and create one | |||
909 | // if not. Sets |aHashOut| to the hashcode and retuns true on success. Returns | |||
910 | // false on error, e.g. out of memory. | |||
911 | template <typename Lookup> | |||
912 | static bool ensureHash(Lookup&& aLookup, HashNumber* aHashOut) { | |||
913 | *aHashOut = HashPolicy::hash(aLookup); | |||
914 | return true; | |||
915 | } | |||
916 | }; | |||
917 | ||||
918 | template <typename HashPolicy, typename Lookup> | |||
919 | static bool MaybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) { | |||
920 | return FallibleHashMethods<typename HashPolicy::Base>::maybeGetHash( | |||
921 | std::forward<Lookup>(aLookup), aHashOut); | |||
922 | } | |||
923 | ||||
924 | template <typename HashPolicy, typename Lookup> | |||
925 | static bool EnsureHash(Lookup&& aLookup, HashNumber* aHashOut) { | |||
926 | return FallibleHashMethods<typename HashPolicy::Base>::ensureHash( | |||
927 | std::forward<Lookup>(aLookup), aHashOut); | |||
928 | } | |||
929 | ||||
930 | //--------------------------------------------------------------------------- | |||
931 | // Implementation Details (HashMapEntry, HashTableEntry, HashTable) | |||
932 | //--------------------------------------------------------------------------- | |||
933 | ||||
934 | // Both HashMap and HashSet are implemented by a single HashTable that is even | |||
935 | // more heavily parameterized than the other two. This leaves HashTable gnarly | |||
936 | // and extremely coupled to HashMap and HashSet; thus code should not use | |||
937 | // HashTable directly. | |||
938 | ||||
939 | template <class Key, class Value> | |||
940 | class HashMapEntry { | |||
941 | Key key_; | |||
942 | Value value_; | |||
943 | ||||
944 | template <class, class, class> | |||
945 | friend class detail::HashTable; | |||
946 | template <class> | |||
947 | friend class detail::HashTableEntry; | |||
948 | template <class, class, class, class> | |||
949 | friend class HashMap; | |||
950 | ||||
951 | public: | |||
952 | template <typename KeyInput, typename ValueInput> | |||
953 | HashMapEntry(KeyInput&& aKey, ValueInput&& aValue) | |||
954 | : key_(std::forward<KeyInput>(aKey)), | |||
955 | value_(std::forward<ValueInput>(aValue)) {} | |||
956 | ||||
957 | HashMapEntry(HashMapEntry&& aRhs) = default; | |||
958 | HashMapEntry& operator=(HashMapEntry&& aRhs) = default; | |||
959 | ||||
960 | using KeyType = Key; | |||
961 | using ValueType = Value; | |||
962 | ||||
963 | const Key& key() const { return key_; } | |||
964 | ||||
965 | // Use this method with caution! If the key is changed such that its hash | |||
966 | // value also changes, the map will be left in an invalid state. | |||
967 | Key& mutableKey() { return key_; } | |||
968 | ||||
969 | const Value& value() const { return value_; } | |||
970 | Value& value() { return value_; } | |||
971 | ||||
972 | private: | |||
973 | HashMapEntry(const HashMapEntry&) = delete; | |||
974 | void operator=(const HashMapEntry&) = delete; | |||
975 | }; | |||
976 | ||||
977 | namespace detail { | |||
978 | ||||
979 | template <class T, class HashPolicy, class AllocPolicy> | |||
980 | class HashTable; | |||
981 | ||||
982 | template <typename T> | |||
983 | class EntrySlot; | |||
984 | ||||
985 | template <typename T> | |||
986 | class HashTableEntry { | |||
987 | private: | |||
988 | using NonConstT = std::remove_const_t<T>; | |||
989 | ||||
990 | // Instead of having a hash table entry store that looks like this: | |||
991 | // | |||
992 | // +--------+--------+--------+--------+ | |||
993 | // | entry0 | entry1 | .... | entryN | | |||
994 | // +--------+--------+--------+--------+ | |||
995 | // | |||
996 | // where the entries contained their cached hash code, we're going to lay out | |||
997 | // the entry store thusly: | |||
998 | // | |||
999 | // +-------+-------+-------+-------+--------+--------+--------+--------+ | |||
1000 | // | hash0 | hash1 | ... | hashN | entry0 | entry1 | .... | entryN | | |||
1001 | // +-------+-------+-------+-------+--------+--------+--------+--------+ | |||
1002 | // | |||
1003 | // with all the cached hashes prior to the actual entries themselves. | |||
1004 | // | |||
1005 | // We do this because implementing the first strategy requires us to make | |||
1006 | // HashTableEntry look roughly like: | |||
1007 | // | |||
1008 | // template <typename T> | |||
1009 | // class HashTableEntry { | |||
1010 | // HashNumber mKeyHash; | |||
1011 | // T mValue; | |||
1012 | // }; | |||
1013 | // | |||
1014 | // The problem with this setup is that, depending on the layout of `T`, there | |||
1015 | // may be platform ABI-mandated padding between `mKeyHash` and the first | |||
1016 | // member of `T`. This ABI-mandated padding is wasted space, and can be | |||
1017 | // surprisingly common, e.g. when `T` is a single pointer on 64-bit platforms. | |||
1018 | // In such cases, we're throwing away a quarter of our entry store on padding, | |||
1019 | // which is undesirable. | |||
1020 | // | |||
1021 | // The second layout above, namely: | |||
1022 | // | |||
1023 | // +-------+-------+-------+-------+--------+--------+--------+--------+ | |||
1024 | // | hash0 | hash1 | ... | hashN | entry0 | entry1 | .... | entryN | | |||
1025 | // +-------+-------+-------+-------+--------+--------+--------+--------+ | |||
1026 | // | |||
1027 | // means there is no wasted space between the hashes themselves, and no wasted | |||
1028 | // space between the entries themselves. However, we would also like there to | |||
1029 | // be no gap between the last hash and the first entry. The memory allocator | |||
1030 | // guarantees the alignment of the start of the hashes. The use of a | |||
1031 | // power-of-two capacity of at least 4 guarantees that the alignment of the | |||
1032 | // *end* of the hash array is no less than the alignment of the start. | |||
1033 | // Finally, the static_asserts here guarantee that the entries themselves | |||
1034 | // don't need to be any more aligned than the alignment of the entry store | |||
1035 | // itself. | |||
1036 | // | |||
1037 | // This assertion is safe for 32-bit builds because on both Windows and Linux | |||
1038 | // (including Android), the minimum alignment for allocations larger than 8 | |||
1039 | // bytes is 8 bytes, and the actual data for entries in our entry store is | |||
1040 | // guaranteed to have that alignment as well, thanks to the power-of-two | |||
1041 | // number of cached hash values stored prior to the entry data. | |||
1042 | ||||
1043 | // The allocation policy must allocate a table with at least this much | |||
1044 | // alignment. | |||
1045 | static constexpr size_t kMinimumAlignment = 8; | |||
1046 | ||||
1047 | static_assert(alignof(HashNumber) <= kMinimumAlignment, | |||
1048 | "[N*2 hashes, N*2 T values] allocation's alignment must be " | |||
1049 | "enough to align each hash"); | |||
1050 | static_assert(alignof(NonConstT) <= 2 * sizeof(HashNumber), | |||
1051 | "subsequent N*2 T values must not require more than an even " | |||
1052 | "number of HashNumbers provides"); | |||
1053 | ||||
1054 | static const HashNumber sFreeKey = 0; | |||
1055 | static const HashNumber sRemovedKey = 1; | |||
1056 | static const HashNumber sCollisionBit = 1; | |||
1057 | ||||
1058 | alignas(NonConstT) unsigned char mValueData[sizeof(NonConstT)]; | |||
1059 | ||||
1060 | private: | |||
1061 | template <class, class, class> | |||
1062 | friend class HashTable; | |||
1063 | template <typename> | |||
1064 | friend class EntrySlot; | |||
1065 | ||||
1066 | // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a | |||
1067 | // -Werror compile error) to reinterpret_cast<> |mValueData| to |T*|, even | |||
1068 | // through |void*|. Placing the latter cast in these separate functions | |||
1069 | // breaks the chain such that affected GCC versions no longer warn/error. | |||
1070 | void* rawValuePtr() { return mValueData; } | |||
1071 | ||||
1072 | static bool isLiveHash(HashNumber hash) { return hash > sRemovedKey; } | |||
1073 | ||||
1074 | HashTableEntry(const HashTableEntry&) = delete; | |||
1075 | void operator=(const HashTableEntry&) = delete; | |||
1076 | ||||
1077 | NonConstT* valuePtr() { return reinterpret_cast<NonConstT*>(rawValuePtr()); } | |||
1078 | ||||
1079 | void destroyStoredT() { | |||
1080 | NonConstT* ptr = valuePtr(); | |||
1081 | ptr->~T(); | |||
1082 | MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr))do { } while (0); | |||
1083 | } | |||
1084 | ||||
1085 | public: | |||
1086 | HashTableEntry() = default; | |||
1087 | ||||
1088 | ~HashTableEntry() { MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this))do { } while (0); } | |||
1089 | ||||
1090 | void destroy() { destroyStoredT(); } | |||
1091 | ||||
1092 | void swap(HashTableEntry* aOther, bool aIsLive) { | |||
1093 | // This allows types to use Argument-Dependent-Lookup, and thus use a custom | |||
1094 | // std::swap, which is needed by types like JS::Heap and such. | |||
1095 | using std::swap; | |||
1096 | ||||
1097 | if (this == aOther) { | |||
1098 | return; | |||
1099 | } | |||
1100 | if (aIsLive) { | |||
1101 | swap(*valuePtr(), *aOther->valuePtr()); | |||
1102 | } else { | |||
1103 | *aOther->valuePtr() = std::move(*valuePtr()); | |||
1104 | destroy(); | |||
1105 | } | |||
1106 | } | |||
1107 | ||||
1108 | T& get() { return *valuePtr(); } | |||
1109 | ||||
1110 | NonConstT& getMutable() { return *valuePtr(); } | |||
1111 | }; | |||
1112 | ||||
1113 | // A slot represents a cached hash value and its associated entry stored | |||
1114 | // in the hash table. These two things are not stored in contiguous memory. | |||
1115 | template <class T> | |||
1116 | class EntrySlot { | |||
1117 | using NonConstT = std::remove_const_t<T>; | |||
1118 | ||||
1119 | using Entry = HashTableEntry<T>; | |||
1120 | ||||
1121 | Entry* mEntry; | |||
1122 | HashNumber* mKeyHash; | |||
1123 | ||||
1124 | template <class, class, class> | |||
1125 | friend class HashTable; | |||
1126 | ||||
1127 | EntrySlot(Entry* aEntry, HashNumber* aKeyHash) | |||
1128 | : mEntry(aEntry), mKeyHash(aKeyHash) {} | |||
1129 | ||||
1130 | public: | |||
1131 | static bool isLiveHash(HashNumber hash) { return hash > Entry::sRemovedKey; } | |||
1132 | ||||
1133 | EntrySlot(const EntrySlot&) = default; | |||
1134 | EntrySlot(EntrySlot&& aOther) = default; | |||
1135 | ||||
1136 | EntrySlot& operator=(const EntrySlot&) = default; | |||
1137 | EntrySlot& operator=(EntrySlot&&) = default; | |||
1138 | ||||
1139 | bool operator==(const EntrySlot& aRhs) const { return mEntry == aRhs.mEntry; } | |||
1140 | ||||
1141 | bool operator<(const EntrySlot& aRhs) const { return mEntry < aRhs.mEntry; } | |||
1142 | ||||
1143 | EntrySlot& operator++() { | |||
1144 | ++mEntry; | |||
1145 | ++mKeyHash; | |||
1146 | return *this; | |||
1147 | } | |||
1148 | ||||
1149 | void destroy() { mEntry->destroy(); } | |||
1150 | ||||
1151 | void swap(EntrySlot& aOther) { | |||
1152 | mEntry->swap(aOther.mEntry, aOther.isLive()); | |||
1153 | std::swap(*mKeyHash, *aOther.mKeyHash); | |||
1154 | } | |||
1155 | ||||
1156 | T& get() const { return mEntry->get(); } | |||
1157 | ||||
1158 | NonConstT& getMutable() { return mEntry->getMutable(); } | |||
1159 | ||||
1160 | bool isFree() const { return *mKeyHash == Entry::sFreeKey; } | |||
1161 | ||||
1162 | void clearLive() { | |||
1163 | MOZ_ASSERT(isLive())do { static_assert( mozilla::detail::AssertionConditionType< decltype(isLive())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(isLive()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("isLive()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1163); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isLive()" ")" ); do { *((volatile int*)__null) = 1163; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1164 | *mKeyHash = Entry::sFreeKey; | |||
1165 | mEntry->destroyStoredT(); | |||
1166 | } | |||
1167 | ||||
1168 | void clear() { | |||
1169 | if (isLive()) { | |||
1170 | mEntry->destroyStoredT(); | |||
1171 | } | |||
1172 | MOZ_MAKE_MEM_UNDEFINED(mEntry, sizeof(*mEntry))do { } while (0); | |||
1173 | *mKeyHash = Entry::sFreeKey; | |||
1174 | } | |||
1175 | ||||
1176 | bool isRemoved() const { return *mKeyHash == Entry::sRemovedKey; } | |||
1177 | ||||
1178 | void removeLive() { | |||
1179 | MOZ_ASSERT(isLive())do { static_assert( mozilla::detail::AssertionConditionType< decltype(isLive())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(isLive()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("isLive()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1179); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isLive()" ")" ); do { *((volatile int*)__null) = 1179; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1180 | *mKeyHash = Entry::sRemovedKey; | |||
1181 | mEntry->destroyStoredT(); | |||
1182 | } | |||
1183 | ||||
1184 | bool isLive() const { return isLiveHash(*mKeyHash); } | |||
1185 | ||||
1186 | void setCollision() { | |||
1187 | MOZ_ASSERT(isLive())do { static_assert( mozilla::detail::AssertionConditionType< decltype(isLive())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(isLive()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("isLive()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1187); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isLive()" ")" ); do { *((volatile int*)__null) = 1187; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1188 | *mKeyHash |= Entry::sCollisionBit; | |||
1189 | } | |||
1190 | void unsetCollision() { *mKeyHash &= ~Entry::sCollisionBit; } | |||
1191 | bool hasCollision() const { return *mKeyHash & Entry::sCollisionBit; } | |||
| ||||
1192 | bool matchHash(HashNumber hn) { | |||
1193 | return (*mKeyHash & ~Entry::sCollisionBit) == hn; | |||
1194 | } | |||
1195 | HashNumber getKeyHash() const { return *mKeyHash & ~Entry::sCollisionBit; } | |||
1196 | ||||
1197 | template <typename... Args> | |||
1198 | void setLive(HashNumber aHashNumber, Args&&... aArgs) { | |||
1199 | MOZ_ASSERT(!isLive())do { static_assert( mozilla::detail::AssertionConditionType< decltype(!isLive())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!isLive()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("!isLive()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1199); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!isLive()" ")" ); do { *((volatile int*)__null) = 1199; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1200 | *mKeyHash = aHashNumber; | |||
1201 | new (KnownNotNull, mEntry->valuePtr()) T(std::forward<Args>(aArgs)...); | |||
1202 | MOZ_ASSERT(isLive())do { static_assert( mozilla::detail::AssertionConditionType< decltype(isLive())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(isLive()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("isLive()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1202); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isLive()" ")" ); do { *((volatile int*)__null) = 1202; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1203 | } | |||
1204 | ||||
1205 | Entry* toEntry() const { return mEntry; } | |||
1206 | }; | |||
1207 | ||||
1208 | template <class T, class HashPolicy, class AllocPolicy> | |||
1209 | class HashTable : private AllocPolicy { | |||
1210 | friend class mozilla::ReentrancyGuard; | |||
1211 | ||||
1212 | using NonConstT = std::remove_const_t<T>; | |||
1213 | using Key = typename HashPolicy::KeyType; | |||
1214 | using Lookup = typename HashPolicy::Lookup; | |||
1215 | ||||
1216 | public: | |||
1217 | using Entry = HashTableEntry<T>; | |||
1218 | using Slot = EntrySlot<T>; | |||
1219 | ||||
1220 | template <typename F> | |||
1221 | static void forEachSlot(char* aTable, uint32_t aCapacity, F&& f) { | |||
1222 | auto hashes = reinterpret_cast<HashNumber*>(aTable); | |||
1223 | auto entries = reinterpret_cast<Entry*>(&hashes[aCapacity]); | |||
1224 | Slot slot(entries, hashes); | |||
1225 | for (size_t i = 0; i < size_t(aCapacity); ++i) { | |||
1226 | f(slot); | |||
1227 | ++slot; | |||
1228 | } | |||
1229 | } | |||
1230 | ||||
1231 | // A nullable pointer to a hash table element. A Ptr |p| can be tested | |||
1232 | // either explicitly |if (p.found()) p->...| or using boolean conversion | |||
1233 | // |if (p) p->...|. Ptr objects must not be used after any mutating hash | |||
1234 | // table operations unless |generation()| is tested. | |||
1235 | class Ptr { | |||
1236 | friend class HashTable; | |||
1237 | ||||
1238 | Slot mSlot; | |||
1239 | #ifdef DEBUG1 | |||
1240 | const HashTable* mTable; | |||
1241 | Generation mGeneration; | |||
1242 | #endif | |||
1243 | ||||
1244 | protected: | |||
1245 | Ptr(Slot aSlot, const HashTable& aTable) | |||
1246 | : mSlot(aSlot) | |||
1247 | #ifdef DEBUG1 | |||
1248 | , | |||
1249 | mTable(&aTable), | |||
1250 | mGeneration(aTable.generation()) | |||
1251 | #endif | |||
1252 | { | |||
1253 | } | |||
1254 | ||||
1255 | // This constructor is used only by AddPtr() within lookupForAdd(). | |||
1256 | explicit Ptr(const HashTable& aTable) | |||
1257 | : mSlot(nullptr, nullptr) | |||
1258 | #ifdef DEBUG1 | |||
1259 | , | |||
1260 | mTable(&aTable), | |||
1261 | mGeneration(aTable.generation()) | |||
1262 | #endif | |||
1263 | { | |||
1264 | } | |||
1265 | ||||
1266 | bool isValid() const { return !!mSlot.toEntry(); } | |||
1267 | ||||
1268 | public: | |||
1269 | Ptr() | |||
1270 | : mSlot(nullptr, nullptr) | |||
1271 | #ifdef DEBUG1 | |||
1272 | , | |||
1273 | mTable(nullptr), | |||
1274 | mGeneration(0) | |||
1275 | #endif | |||
1276 | { | |||
1277 | } | |||
1278 | ||||
1279 | bool found() const { | |||
1280 | if (!isValid()) { | |||
1281 | return false; | |||
1282 | } | |||
1283 | #ifdef DEBUG1 | |||
1284 | MOZ_ASSERT(mGeneration == mTable->generation())do { static_assert( mozilla::detail::AssertionConditionType< decltype(mGeneration == mTable->generation())>::isValid , "invalid assertion condition"); if ((__builtin_expect(!!(!( !!(mGeneration == mTable->generation()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mGeneration == mTable->generation()" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1284); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mGeneration == mTable->generation()" ")"); do { *((volatile int*)__null) = 1284; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1285 | #endif | |||
1286 | return mSlot.isLive(); | |||
1287 | } | |||
1288 | ||||
1289 | explicit operator bool() const { return found(); } | |||
1290 | ||||
1291 | bool operator==(const Ptr& aRhs) const { | |||
1292 | MOZ_ASSERT(found() && aRhs.found())do { static_assert( mozilla::detail::AssertionConditionType< decltype(found() && aRhs.found())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(found() && aRhs.found ()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("found() && aRhs.found()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1292); AnnotateMozCrashReason("MOZ_ASSERT" "(" "found() && aRhs.found()" ")"); do { *((volatile int*)__null) = 1292; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1293 | return mSlot == aRhs.mSlot; | |||
1294 | } | |||
1295 | ||||
1296 | bool operator!=(const Ptr& aRhs) const { | |||
1297 | #ifdef DEBUG1 | |||
1298 | MOZ_ASSERT(mGeneration == mTable->generation())do { static_assert( mozilla::detail::AssertionConditionType< decltype(mGeneration == mTable->generation())>::isValid , "invalid assertion condition"); if ((__builtin_expect(!!(!( !!(mGeneration == mTable->generation()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mGeneration == mTable->generation()" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1298); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mGeneration == mTable->generation()" ")"); do { *((volatile int*)__null) = 1298; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1299 | #endif | |||
1300 | return !(*this == aRhs); | |||
1301 | } | |||
1302 | ||||
1303 | T& operator*() const { | |||
1304 | #ifdef DEBUG1 | |||
1305 | MOZ_ASSERT(found())do { static_assert( mozilla::detail::AssertionConditionType< decltype(found())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(found()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("found()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1305); AnnotateMozCrashReason("MOZ_ASSERT" "(" "found()" ")" ); do { *((volatile int*)__null) = 1305; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1306 | MOZ_ASSERT(mGeneration == mTable->generation())do { static_assert( mozilla::detail::AssertionConditionType< decltype(mGeneration == mTable->generation())>::isValid , "invalid assertion condition"); if ((__builtin_expect(!!(!( !!(mGeneration == mTable->generation()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mGeneration == mTable->generation()" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1306); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mGeneration == mTable->generation()" ")"); do { *((volatile int*)__null) = 1306; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1307 | #endif | |||
1308 | return mSlot.get(); | |||
1309 | } | |||
1310 | ||||
1311 | T* operator->() const { | |||
1312 | #ifdef DEBUG1 | |||
1313 | MOZ_ASSERT(found())do { static_assert( mozilla::detail::AssertionConditionType< decltype(found())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(found()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("found()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1313); AnnotateMozCrashReason("MOZ_ASSERT" "(" "found()" ")" ); do { *((volatile int*)__null) = 1313; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1314 | MOZ_ASSERT(mGeneration == mTable->generation())do { static_assert( mozilla::detail::AssertionConditionType< decltype(mGeneration == mTable->generation())>::isValid , "invalid assertion condition"); if ((__builtin_expect(!!(!( !!(mGeneration == mTable->generation()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mGeneration == mTable->generation()" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1314); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mGeneration == mTable->generation()" ")"); do { *((volatile int*)__null) = 1314; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1315 | #endif | |||
1316 | return &mSlot.get(); | |||
1317 | } | |||
1318 | }; | |||
1319 | ||||
1320 | // A Ptr that can be used to add a key after a failed lookup. | |||
1321 | class AddPtr : public Ptr { | |||
1322 | friend class HashTable; | |||
1323 | ||||
1324 | HashNumber mKeyHash; | |||
1325 | #ifdef DEBUG1 | |||
1326 | uint64_t mMutationCount; | |||
1327 | #endif | |||
1328 | ||||
1329 | AddPtr(Slot aSlot, const HashTable& aTable, HashNumber aHashNumber) | |||
1330 | : Ptr(aSlot, aTable), | |||
1331 | mKeyHash(aHashNumber) | |||
1332 | #ifdef DEBUG1 | |||
1333 | , | |||
1334 | mMutationCount(aTable.mMutationCount) | |||
1335 | #endif | |||
1336 | { | |||
1337 | } | |||
1338 | ||||
1339 | // This constructor is used when lookupForAdd() is performed on a table | |||
1340 | // lacking entry storage; it leaves mSlot null but initializes everything | |||
1341 | // else. | |||
1342 | AddPtr(const HashTable& aTable, HashNumber aHashNumber) | |||
1343 | : Ptr(aTable), | |||
1344 | mKeyHash(aHashNumber) | |||
1345 | #ifdef DEBUG1 | |||
1346 | , | |||
1347 | mMutationCount(aTable.mMutationCount) | |||
1348 | #endif | |||
1349 | { | |||
1350 | MOZ_ASSERT(isLive())do { static_assert( mozilla::detail::AssertionConditionType< decltype(isLive())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(isLive()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("isLive()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1350); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isLive()" ")" ); do { *((volatile int*)__null) = 1350; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1351 | } | |||
1352 | ||||
1353 | bool isLive() const { return isLiveHash(mKeyHash); } | |||
1354 | ||||
1355 | public: | |||
1356 | AddPtr() : mKeyHash(0) {} | |||
1357 | }; | |||
1358 | ||||
1359 | // A hash table iterator that (mostly) doesn't allow table modifications. | |||
1360 | // As with Ptr/AddPtr, Iterator objects must not be used after any mutating | |||
1361 | // hash table operation unless the |generation()| is tested. | |||
1362 | class Iterator { | |||
1363 | void moveToNextLiveEntry() { | |||
1364 | while (++mCur < mEnd && !mCur.isLive()) { | |||
1365 | continue; | |||
1366 | } | |||
1367 | } | |||
1368 | ||||
1369 | protected: | |||
1370 | friend class HashTable; | |||
1371 | ||||
1372 | explicit Iterator(const HashTable& aTable) | |||
1373 | : mCur(aTable.slotForIndex(0)), | |||
1374 | mEnd(aTable.slotForIndex(aTable.capacity())) | |||
1375 | #ifdef DEBUG1 | |||
1376 | , | |||
1377 | mTable(aTable), | |||
1378 | mMutationCount(aTable.mMutationCount), | |||
1379 | mGeneration(aTable.generation()), | |||
1380 | mValidEntry(true) | |||
1381 | #endif | |||
1382 | { | |||
1383 | if (!done() && !mCur.isLive()) { | |||
1384 | moveToNextLiveEntry(); | |||
1385 | } | |||
1386 | } | |||
1387 | ||||
1388 | Slot mCur; | |||
1389 | Slot mEnd; | |||
1390 | #ifdef DEBUG1 | |||
1391 | const HashTable& mTable; | |||
1392 | uint64_t mMutationCount; | |||
1393 | Generation mGeneration; | |||
1394 | bool mValidEntry; | |||
1395 | #endif | |||
1396 | ||||
1397 | public: | |||
1398 | bool done() const { | |||
1399 | MOZ_ASSERT(mGeneration == mTable.generation())do { static_assert( mozilla::detail::AssertionConditionType< decltype(mGeneration == mTable.generation())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(mGeneration == mTable.generation ()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("mGeneration == mTable.generation()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1399); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mGeneration == mTable.generation()" ")"); do { *((volatile int*)__null) = 1399; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1400 | MOZ_ASSERT(mMutationCount == mTable.mMutationCount)do { static_assert( mozilla::detail::AssertionConditionType< decltype(mMutationCount == mTable.mMutationCount)>::isValid , "invalid assertion condition"); if ((__builtin_expect(!!(!( !!(mMutationCount == mTable.mMutationCount))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mMutationCount == mTable.mMutationCount" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1400); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mMutationCount == mTable.mMutationCount" ")"); do { *((volatile int*)__null) = 1400; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1401 | return mCur == mEnd; | |||
1402 | } | |||
1403 | ||||
1404 | T& get() const { | |||
1405 | MOZ_ASSERT(!done())do { static_assert( mozilla::detail::AssertionConditionType< decltype(!done())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!done()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("!done()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1405); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!done()" ")" ); do { *((volatile int*)__null) = 1405; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1406 | MOZ_ASSERT(mValidEntry)do { static_assert( mozilla::detail::AssertionConditionType< decltype(mValidEntry)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(mValidEntry))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mValidEntry", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1406); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mValidEntry" ")"); do { *((volatile int*)__null) = 1406; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1407 | MOZ_ASSERT(mGeneration == mTable.generation())do { static_assert( mozilla::detail::AssertionConditionType< decltype(mGeneration == mTable.generation())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(mGeneration == mTable.generation ()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("mGeneration == mTable.generation()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1407); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mGeneration == mTable.generation()" ")"); do { *((volatile int*)__null) = 1407; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1408 | MOZ_ASSERT(mMutationCount == mTable.mMutationCount)do { static_assert( mozilla::detail::AssertionConditionType< decltype(mMutationCount == mTable.mMutationCount)>::isValid , "invalid assertion condition"); if ((__builtin_expect(!!(!( !!(mMutationCount == mTable.mMutationCount))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mMutationCount == mTable.mMutationCount" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1408); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mMutationCount == mTable.mMutationCount" ")"); do { *((volatile int*)__null) = 1408; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1409 | return mCur.get(); | |||
1410 | } | |||
1411 | ||||
1412 | void next() { | |||
1413 | MOZ_ASSERT(!done())do { static_assert( mozilla::detail::AssertionConditionType< decltype(!done())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!done()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("!done()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1413); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!done()" ")" ); do { *((volatile int*)__null) = 1413; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1414 | MOZ_ASSERT(mGeneration == mTable.generation())do { static_assert( mozilla::detail::AssertionConditionType< decltype(mGeneration == mTable.generation())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(mGeneration == mTable.generation ()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("mGeneration == mTable.generation()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1414); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mGeneration == mTable.generation()" ")"); do { *((volatile int*)__null) = 1414; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1415 | MOZ_ASSERT(mMutationCount == mTable.mMutationCount)do { static_assert( mozilla::detail::AssertionConditionType< decltype(mMutationCount == mTable.mMutationCount)>::isValid , "invalid assertion condition"); if ((__builtin_expect(!!(!( !!(mMutationCount == mTable.mMutationCount))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mMutationCount == mTable.mMutationCount" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1415); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mMutationCount == mTable.mMutationCount" ")"); do { *((volatile int*)__null) = 1415; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1416 | moveToNextLiveEntry(); | |||
1417 | #ifdef DEBUG1 | |||
1418 | mValidEntry = true; | |||
1419 | #endif | |||
1420 | } | |||
1421 | }; | |||
1422 | ||||
1423 | // A hash table iterator that permits modification, removal and rekeying. | |||
1424 | // Since rehashing when elements were removed during enumeration would be | |||
1425 | // bad, it is postponed until the ModIterator is destructed. Since the | |||
1426 | // ModIterator's destructor touches the hash table, the user must ensure | |||
1427 | // that the hash table is still alive when the destructor runs. | |||
1428 | class ModIterator : public Iterator { | |||
1429 | friend class HashTable; | |||
1430 | ||||
1431 | HashTable& mTable; | |||
1432 | bool mRekeyed; | |||
1433 | bool mRemoved; | |||
1434 | ||||
1435 | // ModIterator is movable but not copyable. | |||
1436 | ModIterator(const ModIterator&) = delete; | |||
1437 | void operator=(const ModIterator&) = delete; | |||
1438 | ||||
1439 | protected: | |||
1440 | explicit ModIterator(HashTable& aTable) | |||
1441 | : Iterator(aTable), mTable(aTable), mRekeyed(false), mRemoved(false) {} | |||
1442 | ||||
1443 | public: | |||
1444 | MOZ_IMPLICIT ModIterator(ModIterator&& aOther) | |||
1445 | : Iterator(aOther), | |||
1446 | mTable(aOther.mTable), | |||
1447 | mRekeyed(aOther.mRekeyed), | |||
1448 | mRemoved(aOther.mRemoved) { | |||
1449 | aOther.mRekeyed = false; | |||
1450 | aOther.mRemoved = false; | |||
1451 | } | |||
1452 | ||||
1453 | // Removes the current element from the table, leaving |get()| | |||
1454 | // invalid until the next call to |next()|. | |||
1455 | void remove() { | |||
1456 | mTable.remove(this->mCur); | |||
1457 | mRemoved = true; | |||
1458 | #ifdef DEBUG1 | |||
1459 | this->mValidEntry = false; | |||
1460 | this->mMutationCount = mTable.mMutationCount; | |||
1461 | #endif | |||
1462 | } | |||
1463 | ||||
1464 | NonConstT& getMutable() { | |||
1465 | MOZ_ASSERT(!this->done())do { static_assert( mozilla::detail::AssertionConditionType< decltype(!this->done())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!this->done()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("!this->done()" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1465); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!this->done()" ")"); do { *((volatile int*)__null) = 1465; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1466 | MOZ_ASSERT(this->mValidEntry)do { static_assert( mozilla::detail::AssertionConditionType< decltype(this->mValidEntry)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(this->mValidEntry))), 0)) ) { do { } while (false); MOZ_ReportAssertionFailure("this->mValidEntry" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1466); AnnotateMozCrashReason("MOZ_ASSERT" "(" "this->mValidEntry" ")"); do { *((volatile int*)__null) = 1466; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1467 | MOZ_ASSERT(this->mGeneration == this->Iterator::mTable.generation())do { static_assert( mozilla::detail::AssertionConditionType< decltype(this->mGeneration == this->Iterator::mTable.generation ())>::isValid, "invalid assertion condition"); if ((__builtin_expect (!!(!(!!(this->mGeneration == this->Iterator::mTable.generation ()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("this->mGeneration == this->Iterator::mTable.generation()" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1467); AnnotateMozCrashReason("MOZ_ASSERT" "(" "this->mGeneration == this->Iterator::mTable.generation()" ")"); do { *((volatile int*)__null) = 1467; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1468 | MOZ_ASSERT(this->mMutationCount == this->Iterator::mTable.mMutationCount)do { static_assert( mozilla::detail::AssertionConditionType< decltype(this->mMutationCount == this->Iterator::mTable .mMutationCount)>::isValid, "invalid assertion condition") ; if ((__builtin_expect(!!(!(!!(this->mMutationCount == this ->Iterator::mTable.mMutationCount))), 0))) { do { } while ( false); MOZ_ReportAssertionFailure("this->mMutationCount == this->Iterator::mTable.mMutationCount" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1468); AnnotateMozCrashReason("MOZ_ASSERT" "(" "this->mMutationCount == this->Iterator::mTable.mMutationCount" ")"); do { *((volatile int*)__null) = 1468; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1469 | return this->mCur.getMutable(); | |||
1470 | } | |||
1471 | ||||
1472 | // Removes the current element and re-inserts it into the table with | |||
1473 | // a new key at the new Lookup position. |get()| is invalid after | |||
1474 | // this operation until the next call to |next()|. | |||
1475 | void rekey(const Lookup& l, const Key& k) { | |||
1476 | MOZ_ASSERT(&k != &HashPolicy::getKey(this->mCur.get()))do { static_assert( mozilla::detail::AssertionConditionType< decltype(&k != &HashPolicy::getKey(this->mCur.get( )))>::isValid, "invalid assertion condition"); if ((__builtin_expect (!!(!(!!(&k != &HashPolicy::getKey(this->mCur.get( ))))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("&k != &HashPolicy::getKey(this->mCur.get())", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1476); AnnotateMozCrashReason("MOZ_ASSERT" "(" "&k != &HashPolicy::getKey(this->mCur.get())" ")"); do { *((volatile int*)__null) = 1476; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1477 | Ptr p(this->mCur, mTable); | |||
1478 | mTable.rekeyWithoutRehash(p, l, k); | |||
1479 | mRekeyed = true; | |||
1480 | #ifdef DEBUG1 | |||
1481 | this->mValidEntry = false; | |||
1482 | this->mMutationCount = mTable.mMutationCount; | |||
1483 | #endif | |||
1484 | } | |||
1485 | ||||
1486 | void rekey(const Key& k) { rekey(k, k); } | |||
1487 | ||||
1488 | // Potentially rehashes the table. | |||
1489 | ~ModIterator() { | |||
1490 | if (mRekeyed) { | |||
1491 | mTable.mGen++; | |||
1492 | mTable.infallibleRehashIfOverloaded(); | |||
1493 | } | |||
1494 | ||||
1495 | if (mRemoved) { | |||
1496 | mTable.compact(); | |||
1497 | } | |||
1498 | } | |||
1499 | }; | |||
1500 | ||||
1501 | // Range is similar to Iterator, but uses different terminology. | |||
1502 | class Range { | |||
1503 | friend class HashTable; | |||
1504 | ||||
1505 | Iterator mIter; | |||
1506 | ||||
1507 | protected: | |||
1508 | explicit Range(const HashTable& table) : mIter(table) {} | |||
1509 | ||||
1510 | public: | |||
1511 | bool empty() const { return mIter.done(); } | |||
1512 | ||||
1513 | T& front() const { return mIter.get(); } | |||
1514 | ||||
1515 | void popFront() { return mIter.next(); } | |||
1516 | }; | |||
1517 | ||||
1518 | // Enum is similar to ModIterator, but uses different terminology. | |||
1519 | class Enum { | |||
1520 | ModIterator mIter; | |||
1521 | ||||
1522 | // Enum is movable but not copyable. | |||
1523 | Enum(const Enum&) = delete; | |||
1524 | void operator=(const Enum&) = delete; | |||
1525 | ||||
1526 | public: | |||
1527 | template <class Map> | |||
1528 | explicit Enum(Map& map) : mIter(map.mImpl) {} | |||
1529 | ||||
1530 | MOZ_IMPLICIT Enum(Enum&& other) : mIter(std::move(other.mIter)) {} | |||
1531 | ||||
1532 | bool empty() const { return mIter.done(); } | |||
1533 | ||||
1534 | T& front() const { return mIter.get(); } | |||
1535 | ||||
1536 | void popFront() { return mIter.next(); } | |||
1537 | ||||
1538 | void removeFront() { mIter.remove(); } | |||
1539 | ||||
1540 | NonConstT& mutableFront() { return mIter.getMutable(); } | |||
1541 | ||||
1542 | void rekeyFront(const Lookup& aLookup, const Key& aKey) { | |||
1543 | mIter.rekey(aLookup, aKey); | |||
1544 | } | |||
1545 | ||||
1546 | void rekeyFront(const Key& aKey) { mIter.rekey(aKey); } | |||
1547 | }; | |||
1548 | ||||
1549 | // HashTable is movable | |||
1550 | HashTable(HashTable&& aRhs) : AllocPolicy(std::move(aRhs)) { moveFrom(aRhs); } | |||
1551 | HashTable& operator=(HashTable&& aRhs) { | |||
1552 | MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited")do { static_assert( mozilla::detail::AssertionConditionType< decltype(this != &aRhs)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(this != &aRhs))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("this != &aRhs" " (" "self-move assignment is prohibited" ")", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1552); AnnotateMozCrashReason("MOZ_ASSERT" "(" "this != &aRhs" ") (" "self-move assignment is prohibited" ")"); do { *((volatile int*)__null) = 1552; __attribute__((nomerge)) ::abort(); } while (false); } } while (false); | |||
1553 | if (mTable) { | |||
1554 | destroyTable(*this, mTable, capacity()); | |||
1555 | } | |||
1556 | AllocPolicy::operator=(std::move(aRhs)); | |||
1557 | moveFrom(aRhs); | |||
1558 | return *this; | |||
1559 | } | |||
1560 | ||||
1561 | private: | |||
1562 | void moveFrom(HashTable& aRhs) { | |||
1563 | mGen = aRhs.mGen; | |||
1564 | mHashShift = aRhs.mHashShift; | |||
1565 | mTable = aRhs.mTable; | |||
1566 | mEntryCount = aRhs.mEntryCount; | |||
1567 | mRemovedCount = aRhs.mRemovedCount; | |||
1568 | #ifdef DEBUG1 | |||
1569 | mMutationCount = aRhs.mMutationCount; | |||
1570 | mEntered = aRhs.mEntered; | |||
1571 | #endif | |||
1572 | aRhs.mTable = nullptr; | |||
1573 | aRhs.clearAndCompact(); | |||
1574 | } | |||
1575 | ||||
1576 | // HashTable is not copyable or assignable | |||
1577 | HashTable(const HashTable&) = delete; | |||
1578 | void operator=(const HashTable&) = delete; | |||
1579 | ||||
1580 | static const uint32_t CAP_BITS = 30; | |||
1581 | ||||
1582 | public: | |||
1583 | uint64_t mGen : 56; // entry storage generation number | |||
1584 | uint64_t mHashShift : 8; // multiplicative hash shift | |||
1585 | char* mTable; // entry storage | |||
1586 | uint32_t mEntryCount; // number of entries in mTable | |||
1587 | uint32_t mRemovedCount; // removed entry sentinels in mTable | |||
1588 | ||||
1589 | #ifdef DEBUG1 | |||
1590 | uint64_t mMutationCount; | |||
1591 | mutable bool mEntered; | |||
1592 | #endif | |||
1593 | ||||
1594 | // The default initial capacity is 32 (enough to hold 16 elements), but it | |||
1595 | // can be as low as 4. | |||
1596 | static const uint32_t sDefaultLen = 16; | |||
1597 | static const uint32_t sMinCapacity = 4; | |||
1598 | // See the comments in HashTableEntry about this value. | |||
1599 | static_assert(sMinCapacity >= 4, "too-small sMinCapacity breaks assumptions"); | |||
1600 | static const uint32_t sMaxInit = 1u << (CAP_BITS - 1); | |||
1601 | static const uint32_t sMaxCapacity = 1u << CAP_BITS; | |||
1602 | ||||
1603 | // Hash-table alpha is conceptually a fraction, but to avoid floating-point | |||
1604 | // math we implement it as a ratio of integers. | |||
1605 | static const uint8_t sAlphaDenominator = 4; | |||
1606 | static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4 | |||
1607 | static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4 | |||
1608 | ||||
1609 | static const HashNumber sFreeKey = Entry::sFreeKey; | |||
1610 | static const HashNumber sRemovedKey = Entry::sRemovedKey; | |||
1611 | static const HashNumber sCollisionBit = Entry::sCollisionBit; | |||
1612 | ||||
1613 | static uint32_t bestCapacity(uint32_t aLen) { | |||
1614 | static_assert( | |||
1615 | (sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit, | |||
1616 | "multiplication in numerator below could overflow"); | |||
1617 | static_assert( | |||
1618 | sMaxInit * sAlphaDenominator <= UINT32_MAX(4294967295U) - sMaxAlphaNumerator, | |||
1619 | "numerator calculation below could potentially overflow"); | |||
1620 | ||||
1621 | // Callers should ensure this is true. | |||
1622 | MOZ_ASSERT(aLen <= sMaxInit)do { static_assert( mozilla::detail::AssertionConditionType< decltype(aLen <= sMaxInit)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(aLen <= sMaxInit))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("aLen <= sMaxInit" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1622); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aLen <= sMaxInit" ")"); do { *((volatile int*)__null) = 1622; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1623 | ||||
1624 | // Compute the smallest capacity allowing |aLen| elements to be | |||
1625 | // inserted without rehashing: ceil(aLen / max-alpha). (Ceiling | |||
1626 | // integral division: <http://stackoverflow.com/a/2745086>.) | |||
1627 | uint32_t capacity = (aLen * sAlphaDenominator + sMaxAlphaNumerator - 1) / | |||
1628 | sMaxAlphaNumerator; | |||
1629 | capacity = (capacity < sMinCapacity) ? sMinCapacity : RoundUpPow2(capacity); | |||
1630 | ||||
1631 | MOZ_ASSERT(capacity >= aLen)do { static_assert( mozilla::detail::AssertionConditionType< decltype(capacity >= aLen)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(capacity >= aLen))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("capacity >= aLen" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1631); AnnotateMozCrashReason("MOZ_ASSERT" "(" "capacity >= aLen" ")"); do { *((volatile int*)__null) = 1631; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1632 | MOZ_ASSERT(capacity <= sMaxCapacity)do { static_assert( mozilla::detail::AssertionConditionType< decltype(capacity <= sMaxCapacity)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(capacity <= sMaxCapacity) )), 0))) { do { } while (false); MOZ_ReportAssertionFailure("capacity <= sMaxCapacity" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1632); AnnotateMozCrashReason("MOZ_ASSERT" "(" "capacity <= sMaxCapacity" ")"); do { *((volatile int*)__null) = 1632; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1633 | ||||
1634 | return capacity; | |||
1635 | } | |||
1636 | ||||
1637 | static uint32_t hashShift(uint32_t aLen) { | |||
1638 | // Reject all lengths whose initial computed capacity would exceed | |||
1639 | // sMaxCapacity. Round that maximum aLen down to the nearest power of two | |||
1640 | // for speedier code. | |||
1641 | if (MOZ_UNLIKELY(aLen > sMaxInit)(__builtin_expect(!!(aLen > sMaxInit), 0))) { | |||
1642 | MOZ_CRASH("initial length is too large")do { do { } while (false); MOZ_ReportCrash("" "initial length is too large" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1642); AnnotateMozCrashReason("MOZ_CRASH(" "initial length is too large" ")"); do { *((volatile int*)__null) = 1642; __attribute__((nomerge )) ::abort(); } while (false); } while (false); | |||
1643 | } | |||
1644 | ||||
1645 | return kHashNumberBits - mozilla::CeilingLog2(bestCapacity(aLen)); | |||
1646 | } | |||
1647 | ||||
1648 | static bool isLiveHash(HashNumber aHash) { return Entry::isLiveHash(aHash); } | |||
1649 | ||||
1650 | static HashNumber prepareHash(HashNumber aInputHash) { | |||
1651 | HashNumber keyHash = ScrambleHashCode(aInputHash); | |||
1652 | ||||
1653 | // Avoid reserved hash codes. | |||
1654 | if (!isLiveHash(keyHash)) { | |||
1655 | keyHash -= (sRemovedKey + 1); | |||
1656 | } | |||
1657 | return keyHash & ~sCollisionBit; | |||
1658 | } | |||
1659 | ||||
1660 | enum FailureBehavior { DontReportFailure = false, ReportFailure = true }; | |||
1661 | ||||
1662 | // Fake a struct that we're going to alloc. See the comments in | |||
1663 | // HashTableEntry about how the table is laid out, and why it's safe. | |||
1664 | struct FakeSlot { | |||
1665 | unsigned char c[sizeof(HashNumber) + sizeof(typename Entry::NonConstT)]; | |||
1666 | }; | |||
1667 | ||||
1668 | static char* createTable(AllocPolicy& aAllocPolicy, uint32_t aCapacity, | |||
1669 | FailureBehavior aReportFailure = ReportFailure) { | |||
1670 | FakeSlot* fake = | |||
1671 | aReportFailure | |||
1672 | ? aAllocPolicy.template pod_malloc<FakeSlot>(aCapacity) | |||
1673 | : aAllocPolicy.template maybe_pod_malloc<FakeSlot>(aCapacity); | |||
1674 | ||||
1675 | MOZ_ASSERT((reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment) ==do { static_assert( mozilla::detail::AssertionConditionType< decltype((reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment ) == 0)>::isValid, "invalid assertion condition"); if ((__builtin_expect (!!(!(!!((reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment ) == 0))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("(reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment) == 0" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1676); AnnotateMozCrashReason("MOZ_ASSERT" "(" "(reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment) == 0" ")"); do { *((volatile int*)__null) = 1676; __attribute__((nomerge )) ::abort(); } while (false); } } while (false) | |||
1676 | 0)do { static_assert( mozilla::detail::AssertionConditionType< decltype((reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment ) == 0)>::isValid, "invalid assertion condition"); if ((__builtin_expect (!!(!(!!((reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment ) == 0))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("(reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment) == 0" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1676); AnnotateMozCrashReason("MOZ_ASSERT" "(" "(reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment) == 0" ")"); do { *((volatile int*)__null) = 1676; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1677 | ||||
1678 | char* table = reinterpret_cast<char*>(fake); | |||
1679 | if (table) { | |||
1680 | forEachSlot(table, aCapacity, [&](Slot& slot) { | |||
1681 | *slot.mKeyHash = sFreeKey; | |||
1682 | new (KnownNotNull, slot.toEntry()) Entry(); | |||
1683 | }); | |||
1684 | } | |||
1685 | return table; | |||
1686 | } | |||
1687 | ||||
1688 | static void destroyTable(AllocPolicy& aAllocPolicy, char* aOldTable, | |||
1689 | uint32_t aCapacity) { | |||
1690 | forEachSlot(aOldTable, aCapacity, [&](const Slot& slot) { | |||
1691 | if (slot.isLive()) { | |||
1692 | slot.toEntry()->destroyStoredT(); | |||
1693 | } | |||
1694 | }); | |||
1695 | freeTable(aAllocPolicy, aOldTable, aCapacity); | |||
1696 | } | |||
1697 | ||||
1698 | static void freeTable(AllocPolicy& aAllocPolicy, char* aOldTable, | |||
1699 | uint32_t aCapacity) { | |||
1700 | FakeSlot* fake = reinterpret_cast<FakeSlot*>(aOldTable); | |||
1701 | aAllocPolicy.free_(fake, aCapacity); | |||
1702 | } | |||
1703 | ||||
1704 | public: | |||
1705 | HashTable(AllocPolicy aAllocPolicy, uint32_t aLen) | |||
1706 | : AllocPolicy(std::move(aAllocPolicy)), | |||
1707 | mGen(0), | |||
1708 | mHashShift(hashShift(aLen)), | |||
1709 | mTable(nullptr), | |||
1710 | mEntryCount(0), | |||
1711 | mRemovedCount(0) | |||
1712 | #ifdef DEBUG1 | |||
1713 | , | |||
1714 | mMutationCount(0), | |||
1715 | mEntered(false) | |||
1716 | #endif | |||
1717 | { | |||
1718 | } | |||
1719 | ||||
1720 | explicit HashTable(AllocPolicy aAllocPolicy) | |||
1721 | : HashTable(aAllocPolicy, sDefaultLen) {} | |||
1722 | ||||
1723 | ~HashTable() { | |||
1724 | if (mTable) { | |||
1725 | destroyTable(*this, mTable, capacity()); | |||
1726 | } | |||
1727 | } | |||
1728 | ||||
1729 | private: | |||
1730 | HashNumber hash1(HashNumber aHash0) const { return aHash0 >> mHashShift; } | |||
1731 | ||||
1732 | struct DoubleHash { | |||
1733 | HashNumber mHash2; | |||
1734 | HashNumber mSizeMask; | |||
1735 | }; | |||
1736 | ||||
1737 | DoubleHash hash2(HashNumber aCurKeyHash) const { | |||
1738 | uint32_t sizeLog2 = kHashNumberBits - mHashShift; | |||
1739 | DoubleHash dh = {((aCurKeyHash << sizeLog2) >> mHashShift) | 1, | |||
1740 | (HashNumber(1) << sizeLog2) - 1}; | |||
1741 | return dh; | |||
1742 | } | |||
1743 | ||||
1744 | static HashNumber applyDoubleHash(HashNumber aHash1, | |||
1745 | const DoubleHash& aDoubleHash) { | |||
1746 | return WrappingSubtract(aHash1, aDoubleHash.mHash2) & aDoubleHash.mSizeMask; | |||
1747 | } | |||
1748 | ||||
1749 | static MOZ_ALWAYS_INLINEinline bool match(T& aEntry, const Lookup& aLookup) { | |||
1750 | return HashPolicy::match(HashPolicy::getKey(aEntry), aLookup); | |||
1751 | } | |||
1752 | ||||
1753 | enum LookupReason { ForNonAdd, ForAdd }; | |||
1754 | ||||
1755 | Slot slotForIndex(HashNumber aIndex) const { | |||
1756 | auto hashes = reinterpret_cast<HashNumber*>(mTable); | |||
1757 | auto entries = reinterpret_cast<Entry*>(&hashes[capacity()]); | |||
1758 | return Slot(&entries[aIndex], &hashes[aIndex]); | |||
1759 | } | |||
1760 | ||||
1761 | // Warning: in order for readonlyThreadsafeLookup() to be safe this | |||
1762 | // function must not modify the table in any way when Reason==ForNonAdd. | |||
1763 | template <LookupReason Reason> | |||
1764 | MOZ_ALWAYS_INLINEinline Slot lookup(const Lookup& aLookup, | |||
1765 | HashNumber aKeyHash) const { | |||
1766 | MOZ_ASSERT(isLiveHash(aKeyHash))do { static_assert( mozilla::detail::AssertionConditionType< decltype(isLiveHash(aKeyHash))>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(isLiveHash(aKeyHash)))), 0)) ) { do { } while (false); MOZ_ReportAssertionFailure("isLiveHash(aKeyHash)" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1766); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isLiveHash(aKeyHash)" ")"); do { *((volatile int*)__null) = 1766; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1767 | MOZ_ASSERT(!(aKeyHash & sCollisionBit))do { static_assert( mozilla::detail::AssertionConditionType< decltype(!(aKeyHash & sCollisionBit))>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!(aKeyHash & sCollisionBit )))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("!(aKeyHash & sCollisionBit)", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1767); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!(aKeyHash & sCollisionBit)" ")"); do { *((volatile int*)__null) = 1767; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1768 | MOZ_ASSERT(mTable)do { static_assert( mozilla::detail::AssertionConditionType< decltype(mTable)>::isValid, "invalid assertion condition") ; if ((__builtin_expect(!!(!(!!(mTable))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mTable", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1768); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mTable" ")" ); do { *((volatile int*)__null) = 1768; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1769 | ||||
1770 | // Compute the primary hash address. | |||
1771 | HashNumber h1 = hash1(aKeyHash); | |||
1772 | Slot slot = slotForIndex(h1); | |||
1773 | ||||
1774 | // Miss: return space for a new entry. | |||
1775 | if (slot.isFree()) { | |||
1776 | return slot; | |||
1777 | } | |||
1778 | ||||
1779 | // Hit: return entry. | |||
1780 | if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) { | |||
1781 | return slot; | |||
1782 | } | |||
1783 | ||||
1784 | // Collision: double hash. | |||
1785 | DoubleHash dh = hash2(aKeyHash); | |||
1786 | ||||
1787 | // Save the first removed entry pointer so we can recycle later. | |||
1788 | Maybe<Slot> firstRemoved; | |||
1789 | ||||
1790 | while (true) { | |||
1791 | if (Reason == ForAdd && !firstRemoved) { | |||
1792 | if (MOZ_UNLIKELY(slot.isRemoved())(__builtin_expect(!!(slot.isRemoved()), 0))) { | |||
1793 | firstRemoved.emplace(slot); | |||
1794 | } else { | |||
1795 | slot.setCollision(); | |||
1796 | } | |||
1797 | } | |||
1798 | ||||
1799 | h1 = applyDoubleHash(h1, dh); | |||
1800 | ||||
1801 | slot = slotForIndex(h1); | |||
1802 | if (slot.isFree()) { | |||
1803 | return firstRemoved.refOr(slot); | |||
1804 | } | |||
1805 | ||||
1806 | if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) { | |||
1807 | return slot; | |||
1808 | } | |||
1809 | } | |||
1810 | } | |||
1811 | ||||
1812 | // This is a copy of lookup() hardcoded to the assumptions: | |||
1813 | // 1. the lookup is for an add; | |||
1814 | // 2. the key, whose |keyHash| has been passed, is not in the table. | |||
1815 | Slot findNonLiveSlot(HashNumber aKeyHash) { | |||
1816 | MOZ_ASSERT(!(aKeyHash & sCollisionBit))do { static_assert( mozilla::detail::AssertionConditionType< decltype(!(aKeyHash & sCollisionBit))>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!(aKeyHash & sCollisionBit )))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("!(aKeyHash & sCollisionBit)", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1816); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!(aKeyHash & sCollisionBit)" ")"); do { *((volatile int*)__null) = 1816; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1817 | MOZ_ASSERT(mTable)do { static_assert( mozilla::detail::AssertionConditionType< decltype(mTable)>::isValid, "invalid assertion condition") ; if ((__builtin_expect(!!(!(!!(mTable))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mTable", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1817); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mTable" ")" ); do { *((volatile int*)__null) = 1817; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1818 | ||||
1819 | // We assume 'aKeyHash' has already been distributed. | |||
1820 | ||||
1821 | // Compute the primary hash address. | |||
1822 | HashNumber h1 = hash1(aKeyHash); | |||
1823 | Slot slot = slotForIndex(h1); | |||
1824 | ||||
1825 | // Miss: return space for a new entry. | |||
1826 | if (!slot.isLive()) { | |||
1827 | return slot; | |||
1828 | } | |||
1829 | ||||
1830 | // Collision: double hash. | |||
1831 | DoubleHash dh = hash2(aKeyHash); | |||
1832 | ||||
1833 | while (true) { | |||
1834 | slot.setCollision(); | |||
1835 | ||||
1836 | h1 = applyDoubleHash(h1, dh); | |||
1837 | ||||
1838 | slot = slotForIndex(h1); | |||
1839 | if (!slot.isLive()) { | |||
1840 | return slot; | |||
1841 | } | |||
1842 | } | |||
1843 | } | |||
1844 | ||||
1845 | enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed }; | |||
1846 | ||||
1847 | RebuildStatus changeTableSize( | |||
1848 | uint32_t newCapacity, FailureBehavior aReportFailure = ReportFailure) { | |||
1849 | MOZ_ASSERT(IsPowerOfTwo(newCapacity))do { static_assert( mozilla::detail::AssertionConditionType< decltype(IsPowerOfTwo(newCapacity))>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(IsPowerOfTwo(newCapacity)))) , 0))) { do { } while (false); MOZ_ReportAssertionFailure("IsPowerOfTwo(newCapacity)" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1849); AnnotateMozCrashReason("MOZ_ASSERT" "(" "IsPowerOfTwo(newCapacity)" ")"); do { *((volatile int*)__null) = 1849; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1850 | MOZ_ASSERT(!!mTable == !!capacity())do { static_assert( mozilla::detail::AssertionConditionType< decltype(!!mTable == !!capacity())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!!mTable == !!capacity()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("!!mTable == !!capacity()" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1850); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!!mTable == !!capacity()" ")"); do { *((volatile int*)__null) = 1850; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1851 | ||||
1852 | // Look, but don't touch, until we succeed in getting new entry store. | |||
1853 | char* oldTable = mTable; | |||
1854 | uint32_t oldCapacity = capacity(); | |||
1855 | uint32_t newLog2 = mozilla::CeilingLog2(newCapacity); | |||
1856 | ||||
1857 | if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)(__builtin_expect(!!(newCapacity > sMaxCapacity), 0))) { | |||
1858 | if (aReportFailure) { | |||
1859 | this->reportAllocOverflow(); | |||
1860 | } | |||
1861 | return RehashFailed; | |||
1862 | } | |||
1863 | ||||
1864 | char* newTable = createTable(*this, newCapacity, aReportFailure); | |||
1865 | if (!newTable) { | |||
1866 | return RehashFailed; | |||
1867 | } | |||
1868 | ||||
1869 | // We can't fail from here on, so update table parameters. | |||
1870 | mHashShift = kHashNumberBits - newLog2; | |||
1871 | mRemovedCount = 0; | |||
1872 | mGen++; | |||
1873 | mTable = newTable; | |||
1874 | ||||
1875 | // Copy only live entries, leaving removed ones behind. | |||
1876 | forEachSlot(oldTable, oldCapacity, [&](Slot& slot) { | |||
1877 | if (slot.isLive()) { | |||
1878 | HashNumber hn = slot.getKeyHash(); | |||
1879 | findNonLiveSlot(hn).setLive( | |||
1880 | hn, std::move(const_cast<typename Entry::NonConstT&>(slot.get()))); | |||
1881 | } | |||
1882 | ||||
1883 | slot.clear(); | |||
1884 | }); | |||
1885 | ||||
1886 | // All entries have been destroyed, no need to destroyTable. | |||
1887 | freeTable(*this, oldTable, oldCapacity); | |||
1888 | return Rehashed; | |||
1889 | } | |||
1890 | ||||
1891 | RebuildStatus rehashIfOverloaded( | |||
1892 | FailureBehavior aReportFailure = ReportFailure) { | |||
1893 | static_assert(sMaxCapacity <= UINT32_MAX(4294967295U) / sMaxAlphaNumerator, | |||
1894 | "multiplication below could overflow"); | |||
1895 | ||||
1896 | // Note: if capacity() is zero, this will always succeed, which is | |||
1897 | // what we want. | |||
1898 | bool overloaded = mEntryCount + mRemovedCount >= | |||
1899 | capacity() * sMaxAlphaNumerator / sAlphaDenominator; | |||
1900 | ||||
1901 | if (!overloaded) { | |||
1902 | return NotOverloaded; | |||
1903 | } | |||
1904 | ||||
1905 | // Succeed if a quarter or more of all entries are removed. Note that this | |||
1906 | // always succeeds if capacity() == 0 (i.e. entry storage has not been | |||
1907 | // allocated), which is what we want, because it means changeTableSize() | |||
1908 | // will allocate the requested capacity rather than doubling it. | |||
1909 | bool manyRemoved = mRemovedCount >= (capacity() >> 2); | |||
1910 | uint32_t newCapacity = manyRemoved ? rawCapacity() : rawCapacity() * 2; | |||
1911 | return changeTableSize(newCapacity, aReportFailure); | |||
1912 | } | |||
1913 | ||||
1914 | void infallibleRehashIfOverloaded() { | |||
1915 | if (rehashIfOverloaded(DontReportFailure) == RehashFailed) { | |||
1916 | rehashTableInPlace(); | |||
1917 | } | |||
1918 | } | |||
1919 | ||||
1920 | void remove(Slot& aSlot) { | |||
1921 | MOZ_ASSERT(mTable)do { static_assert( mozilla::detail::AssertionConditionType< decltype(mTable)>::isValid, "invalid assertion condition") ; if ((__builtin_expect(!!(!(!!(mTable))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mTable", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1921); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mTable" ")" ); do { *((volatile int*)__null) = 1921; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1922 | ||||
1923 | if (aSlot.hasCollision()) { | |||
1924 | aSlot.removeLive(); | |||
1925 | mRemovedCount++; | |||
1926 | } else { | |||
1927 | aSlot.clearLive(); | |||
1928 | } | |||
1929 | mEntryCount--; | |||
1930 | #ifdef DEBUG1 | |||
1931 | mMutationCount++; | |||
1932 | #endif | |||
1933 | } | |||
1934 | ||||
1935 | void shrinkIfUnderloaded() { | |||
1936 | static_assert(sMaxCapacity <= UINT32_MAX(4294967295U) / sMinAlphaNumerator, | |||
1937 | "multiplication below could overflow"); | |||
1938 | bool underloaded = | |||
1939 | capacity() > sMinCapacity && | |||
1940 | mEntryCount <= capacity() * sMinAlphaNumerator / sAlphaDenominator; | |||
1941 | ||||
1942 | if (underloaded) { | |||
1943 | (void)changeTableSize(capacity() / 2, DontReportFailure); | |||
1944 | } | |||
1945 | } | |||
1946 | ||||
1947 | // This is identical to changeTableSize(currentSize), but without requiring | |||
1948 | // a second table. We do this by recycling the collision bits to tell us if | |||
1949 | // the element is already inserted or still waiting to be inserted. Since | |||
1950 | // already-inserted elements win any conflicts, we get the same table as we | |||
1951 | // would have gotten through random insertion order. | |||
1952 | void rehashTableInPlace() { | |||
1953 | mRemovedCount = 0; | |||
1954 | mGen++; | |||
1955 | forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.unsetCollision(); }); | |||
1956 | for (uint32_t i = 0; i < capacity();) { | |||
1957 | Slot src = slotForIndex(i); | |||
1958 | ||||
1959 | if (!src.isLive() || src.hasCollision()) { | |||
1960 | ++i; | |||
1961 | continue; | |||
1962 | } | |||
1963 | ||||
1964 | HashNumber keyHash = src.getKeyHash(); | |||
1965 | HashNumber h1 = hash1(keyHash); | |||
1966 | DoubleHash dh = hash2(keyHash); | |||
1967 | Slot tgt = slotForIndex(h1); | |||
1968 | while (true) { | |||
1969 | if (!tgt.hasCollision()) { | |||
1970 | src.swap(tgt); | |||
1971 | tgt.setCollision(); | |||
1972 | break; | |||
1973 | } | |||
1974 | ||||
1975 | h1 = applyDoubleHash(h1, dh); | |||
1976 | tgt = slotForIndex(h1); | |||
1977 | } | |||
1978 | } | |||
1979 | ||||
1980 | // TODO: this algorithm leaves collision bits on *all* elements, even if | |||
1981 | // they are on no collision path. We have the option of setting the | |||
1982 | // collision bits correctly on a subsequent pass or skipping the rehash | |||
1983 | // unless we are totally filled with tombstones: benchmark to find out | |||
1984 | // which approach is best. | |||
1985 | } | |||
1986 | ||||
1987 | // Prefer to use putNewInfallible; this function does not check | |||
1988 | // invariants. | |||
1989 | template <typename... Args> | |||
1990 | void putNewInfallibleInternal(HashNumber aKeyHash, Args&&... aArgs) { | |||
1991 | MOZ_ASSERT(mTable)do { static_assert( mozilla::detail::AssertionConditionType< decltype(mTable)>::isValid, "invalid assertion condition") ; if ((__builtin_expect(!!(!(!!(mTable))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mTable", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 1991); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mTable" ")" ); do { *((volatile int*)__null) = 1991; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
1992 | ||||
1993 | Slot slot = findNonLiveSlot(aKeyHash); | |||
1994 | ||||
1995 | if (slot.isRemoved()) { | |||
1996 | mRemovedCount--; | |||
1997 | aKeyHash |= sCollisionBit; | |||
1998 | } | |||
1999 | ||||
2000 | slot.setLive(aKeyHash, std::forward<Args>(aArgs)...); | |||
2001 | mEntryCount++; | |||
2002 | #ifdef DEBUG1 | |||
2003 | mMutationCount++; | |||
2004 | #endif | |||
2005 | } | |||
2006 | ||||
2007 | public: | |||
2008 | void clear() { | |||
2009 | forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.clear(); }); | |||
2010 | mRemovedCount = 0; | |||
2011 | mEntryCount = 0; | |||
2012 | #ifdef DEBUG1 | |||
2013 | mMutationCount++; | |||
2014 | #endif | |||
2015 | } | |||
2016 | ||||
2017 | // Resize the table down to the smallest capacity that doesn't overload the | |||
2018 | // table. Since we call shrinkIfUnderloaded() on every remove, you only need | |||
2019 | // to call this after a bulk removal of items done without calling remove(). | |||
2020 | void compact() { | |||
2021 | if (empty()) { | |||
2022 | // Free the entry storage. | |||
2023 | freeTable(*this, mTable, capacity()); | |||
2024 | mGen++; | |||
2025 | mHashShift = hashShift(0); // gives minimum capacity on regrowth | |||
2026 | mTable = nullptr; | |||
2027 | mRemovedCount = 0; | |||
2028 | return; | |||
2029 | } | |||
2030 | ||||
2031 | uint32_t bestCapacity = this->bestCapacity(mEntryCount); | |||
2032 | MOZ_ASSERT(bestCapacity <= capacity())do { static_assert( mozilla::detail::AssertionConditionType< decltype(bestCapacity <= capacity())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(bestCapacity <= capacity( )))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("bestCapacity <= capacity()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2032); AnnotateMozCrashReason("MOZ_ASSERT" "(" "bestCapacity <= capacity()" ")"); do { *((volatile int*)__null) = 2032; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2033 | ||||
2034 | if (bestCapacity < capacity()) { | |||
2035 | (void)changeTableSize(bestCapacity, DontReportFailure); | |||
2036 | } | |||
2037 | } | |||
2038 | ||||
2039 | void clearAndCompact() { | |||
2040 | clear(); | |||
2041 | compact(); | |||
2042 | } | |||
2043 | ||||
2044 | [[nodiscard]] bool reserve(uint32_t aLen) { | |||
2045 | if (aLen == 0) { | |||
2046 | return true; | |||
2047 | } | |||
2048 | ||||
2049 | if (MOZ_UNLIKELY(aLen > sMaxInit)(__builtin_expect(!!(aLen > sMaxInit), 0))) { | |||
2050 | this->reportAllocOverflow(); | |||
2051 | return false; | |||
2052 | } | |||
2053 | ||||
2054 | uint32_t bestCapacity = this->bestCapacity(aLen); | |||
2055 | if (bestCapacity <= capacity()) { | |||
2056 | return true; // Capacity is already sufficient. | |||
2057 | } | |||
2058 | ||||
2059 | RebuildStatus status = changeTableSize(bestCapacity, ReportFailure); | |||
2060 | MOZ_ASSERT(status != NotOverloaded)do { static_assert( mozilla::detail::AssertionConditionType< decltype(status != NotOverloaded)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(status != NotOverloaded))), 0 ))) { do { } while (false); MOZ_ReportAssertionFailure("status != NotOverloaded" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2060); AnnotateMozCrashReason("MOZ_ASSERT" "(" "status != NotOverloaded" ")"); do { *((volatile int*)__null) = 2060; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2061 | return status != RehashFailed; | |||
2062 | } | |||
2063 | ||||
2064 | Iterator iter() const { return Iterator(*this); } | |||
2065 | ||||
2066 | ModIterator modIter() { return ModIterator(*this); } | |||
2067 | ||||
2068 | Range all() const { return Range(*this); } | |||
2069 | ||||
2070 | bool empty() const { return mEntryCount == 0; } | |||
2071 | ||||
2072 | uint32_t count() const { return mEntryCount; } | |||
2073 | ||||
2074 | uint32_t rawCapacity() const { return 1u << (kHashNumberBits - mHashShift); } | |||
2075 | ||||
2076 | uint32_t capacity() const { return mTable ? rawCapacity() : 0; } | |||
2077 | ||||
2078 | Generation generation() const { return Generation(mGen); } | |||
2079 | ||||
2080 | size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { | |||
2081 | return aMallocSizeOf(mTable); | |||
2082 | } | |||
2083 | ||||
2084 | size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { | |||
2085 | return aMallocSizeOf(this) + shallowSizeOfExcludingThis(aMallocSizeOf); | |||
2086 | } | |||
2087 | ||||
2088 | MOZ_ALWAYS_INLINEinline Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const { | |||
2089 | if (empty()) { | |||
2090 | return Ptr(); | |||
2091 | } | |||
2092 | ||||
2093 | HashNumber inputHash; | |||
2094 | if (!MaybeGetHash<HashPolicy>(aLookup, &inputHash)) { | |||
2095 | return Ptr(); | |||
2096 | } | |||
2097 | ||||
2098 | HashNumber keyHash = prepareHash(inputHash); | |||
2099 | return Ptr(lookup<ForNonAdd>(aLookup, keyHash), *this); | |||
2100 | } | |||
2101 | ||||
2102 | MOZ_ALWAYS_INLINEinline Ptr lookup(const Lookup& aLookup) const { | |||
2103 | ReentrancyGuard g(*this); | |||
2104 | return readonlyThreadsafeLookup(aLookup); | |||
2105 | } | |||
2106 | ||||
2107 | MOZ_ALWAYS_INLINEinline AddPtr lookupForAdd(const Lookup& aLookup) { | |||
2108 | ReentrancyGuard g(*this); | |||
2109 | ||||
2110 | HashNumber inputHash; | |||
2111 | if (!EnsureHash<HashPolicy>(aLookup, &inputHash)) { | |||
2112 | return AddPtr(); | |||
2113 | } | |||
2114 | ||||
2115 | HashNumber keyHash = prepareHash(inputHash); | |||
2116 | ||||
2117 | if (!mTable) { | |||
2118 | return AddPtr(*this, keyHash); | |||
2119 | } | |||
2120 | ||||
2121 | // Directly call the constructor in the return statement to avoid | |||
2122 | // excess copying when building with Visual Studio 2017. | |||
2123 | // See bug 1385181. | |||
2124 | return AddPtr(lookup<ForAdd>(aLookup, keyHash), *this, keyHash); | |||
2125 | } | |||
2126 | ||||
2127 | template <typename... Args> | |||
2128 | [[nodiscard]] bool add(AddPtr& aPtr, Args&&... aArgs) { | |||
2129 | ReentrancyGuard g(*this); | |||
2130 | MOZ_ASSERT_IF(aPtr.isValid(), mTable)do { if (aPtr.isValid()) { do { static_assert( mozilla::detail ::AssertionConditionType<decltype(mTable)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(mTable))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mTable", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2130); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mTable" ")" ); do { *((volatile int*)__null) = 2130; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); } } while ( false); | |||
2131 | MOZ_ASSERT_IF(aPtr.isValid(), aPtr.mTable == this)do { if (aPtr.isValid()) { do { static_assert( mozilla::detail ::AssertionConditionType<decltype(aPtr.mTable == this)> ::isValid, "invalid assertion condition"); if ((__builtin_expect (!!(!(!!(aPtr.mTable == this))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("aPtr.mTable == this", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2131); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPtr.mTable == this" ")"); do { *((volatile int*)__null) = 2131; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); } } while ( false); | |||
2132 | MOZ_ASSERT(!aPtr.found())do { static_assert( mozilla::detail::AssertionConditionType< decltype(!aPtr.found())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!aPtr.found()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("!aPtr.found()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2132); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!aPtr.found()" ")"); do { *((volatile int*)__null) = 2132; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2133 | MOZ_ASSERT(!(aPtr.mKeyHash & sCollisionBit))do { static_assert( mozilla::detail::AssertionConditionType< decltype(!(aPtr.mKeyHash & sCollisionBit))>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!(aPtr.mKeyHash & sCollisionBit )))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("!(aPtr.mKeyHash & sCollisionBit)", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2133); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!(aPtr.mKeyHash & sCollisionBit)" ")"); do { *((volatile int*)__null) = 2133; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2134 | ||||
2135 | // Check for error from ensureHash() here. | |||
2136 | if (!aPtr.isLive()) { | |||
2137 | return false; | |||
2138 | } | |||
2139 | ||||
2140 | MOZ_ASSERT(aPtr.mGeneration == generation())do { static_assert( mozilla::detail::AssertionConditionType< decltype(aPtr.mGeneration == generation())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(aPtr.mGeneration == generation ()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("aPtr.mGeneration == generation()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2140); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPtr.mGeneration == generation()" ")"); do { *((volatile int*)__null) = 2140; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2141 | #ifdef DEBUG1 | |||
2142 | MOZ_ASSERT(aPtr.mMutationCount == mMutationCount)do { static_assert( mozilla::detail::AssertionConditionType< decltype(aPtr.mMutationCount == mMutationCount)>::isValid, "invalid assertion condition"); if ((__builtin_expect(!!(!(! !(aPtr.mMutationCount == mMutationCount))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("aPtr.mMutationCount == mMutationCount" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2142); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPtr.mMutationCount == mMutationCount" ")"); do { *((volatile int*)__null) = 2142; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2143 | #endif | |||
2144 | ||||
2145 | if (!aPtr.isValid()) { | |||
2146 | MOZ_ASSERT(!mTable && mEntryCount == 0)do { static_assert( mozilla::detail::AssertionConditionType< decltype(!mTable && mEntryCount == 0)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!mTable && mEntryCount == 0))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("!mTable && mEntryCount == 0", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2146); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!mTable && mEntryCount == 0" ")"); do { *((volatile int*)__null) = 2146; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2147 | uint32_t newCapacity = rawCapacity(); | |||
2148 | RebuildStatus status = changeTableSize(newCapacity, ReportFailure); | |||
2149 | MOZ_ASSERT(status != NotOverloaded)do { static_assert( mozilla::detail::AssertionConditionType< decltype(status != NotOverloaded)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(status != NotOverloaded))), 0 ))) { do { } while (false); MOZ_ReportAssertionFailure("status != NotOverloaded" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2149); AnnotateMozCrashReason("MOZ_ASSERT" "(" "status != NotOverloaded" ")"); do { *((volatile int*)__null) = 2149; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2150 | if (status == RehashFailed) { | |||
2151 | return false; | |||
2152 | } | |||
2153 | aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash); | |||
2154 | ||||
2155 | } else if (aPtr.mSlot.isRemoved()) { | |||
2156 | // Changing an entry from removed to live does not affect whether we are | |||
2157 | // overloaded and can be handled separately. | |||
2158 | if (!this->checkSimulatedOOM()) { | |||
2159 | return false; | |||
2160 | } | |||
2161 | mRemovedCount--; | |||
2162 | aPtr.mKeyHash |= sCollisionBit; | |||
2163 | ||||
2164 | } else { | |||
2165 | // Preserve the validity of |aPtr.mSlot|. | |||
2166 | RebuildStatus status = rehashIfOverloaded(); | |||
2167 | if (status == RehashFailed) { | |||
2168 | return false; | |||
2169 | } | |||
2170 | if (status == NotOverloaded && !this->checkSimulatedOOM()) { | |||
2171 | return false; | |||
2172 | } | |||
2173 | if (status == Rehashed) { | |||
2174 | aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash); | |||
2175 | } | |||
2176 | } | |||
2177 | ||||
2178 | aPtr.mSlot.setLive(aPtr.mKeyHash, std::forward<Args>(aArgs)...); | |||
2179 | mEntryCount++; | |||
2180 | #ifdef DEBUG1 | |||
2181 | mMutationCount++; | |||
2182 | aPtr.mGeneration = generation(); | |||
2183 | aPtr.mMutationCount = mMutationCount; | |||
2184 | #endif | |||
2185 | return true; | |||
2186 | } | |||
2187 | ||||
2188 | // Note: |aLookup| may reference pieces of arguments in |aArgs|, so this | |||
2189 | // function must take care not to use |aLookup| after moving |aArgs|. | |||
2190 | template <typename... Args> | |||
2191 | void putNewInfallible(const Lookup& aLookup, Args&&... aArgs) { | |||
2192 | MOZ_ASSERT(!lookup(aLookup).found())do { static_assert( mozilla::detail::AssertionConditionType< decltype(!lookup(aLookup).found())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!lookup(aLookup).found()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("!lookup(aLookup).found()" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2192); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!lookup(aLookup).found()" ")"); do { *((volatile int*)__null) = 2192; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2193 | ReentrancyGuard g(*this); | |||
2194 | HashNumber keyHash = prepareHash(HashPolicy::hash(aLookup)); | |||
2195 | putNewInfallibleInternal(keyHash, std::forward<Args>(aArgs)...); | |||
2196 | } | |||
2197 | ||||
2198 | // Note: |aLookup| may alias arguments in |aArgs|, so this function must take | |||
2199 | // care not to use |aLookup| after moving |aArgs|. | |||
2200 | template <typename... Args> | |||
2201 | [[nodiscard]] bool putNew(const Lookup& aLookup, Args&&... aArgs) { | |||
2202 | MOZ_ASSERT(!lookup(aLookup).found())do { static_assert( mozilla::detail::AssertionConditionType< decltype(!lookup(aLookup).found())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(!lookup(aLookup).found()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("!lookup(aLookup).found()" , "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2202); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!lookup(aLookup).found()" ")"); do { *((volatile int*)__null) = 2202; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2203 | ReentrancyGuard g(*this); | |||
2204 | if (!this->checkSimulatedOOM()) { | |||
2205 | return false; | |||
2206 | } | |||
2207 | HashNumber inputHash; | |||
2208 | if (!EnsureHash<HashPolicy>(aLookup, &inputHash)) { | |||
2209 | return false; | |||
2210 | } | |||
2211 | HashNumber keyHash = prepareHash(inputHash); | |||
2212 | if (rehashIfOverloaded() == RehashFailed) { | |||
2213 | return false; | |||
2214 | } | |||
2215 | putNewInfallibleInternal(keyHash, std::forward<Args>(aArgs)...); | |||
2216 | return true; | |||
2217 | } | |||
2218 | ||||
2219 | // Note: |aLookup| may be a reference pieces of arguments in |aArgs|, so this | |||
2220 | // function must take care not to use |aLookup| after moving |aArgs|. | |||
2221 | template <typename... Args> | |||
2222 | [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup, | |||
2223 | Args&&... aArgs) { | |||
2224 | // Check for error from ensureHash() here. | |||
2225 | if (!aPtr.isLive()) { | |||
2226 | return false; | |||
2227 | } | |||
2228 | #ifdef DEBUG1 | |||
2229 | aPtr.mGeneration = generation(); | |||
2230 | aPtr.mMutationCount = mMutationCount; | |||
2231 | #endif | |||
2232 | if (mTable) { | |||
2233 | ReentrancyGuard g(*this); | |||
2234 | // Check that aLookup has not been destroyed. | |||
2235 | MOZ_ASSERT(prepareHash(HashPolicy::hash(aLookup)) == aPtr.mKeyHash)do { static_assert( mozilla::detail::AssertionConditionType< decltype(prepareHash(HashPolicy::hash(aLookup)) == aPtr.mKeyHash )>::isValid, "invalid assertion condition"); if ((__builtin_expect (!!(!(!!(prepareHash(HashPolicy::hash(aLookup)) == aPtr.mKeyHash ))), 0))) { do { } while (false); MOZ_ReportAssertionFailure( "prepareHash(HashPolicy::hash(aLookup)) == aPtr.mKeyHash", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2235); AnnotateMozCrashReason("MOZ_ASSERT" "(" "prepareHash(HashPolicy::hash(aLookup)) == aPtr.mKeyHash" ")"); do { *((volatile int*)__null) = 2235; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2236 | aPtr.mSlot = lookup<ForAdd>(aLookup, aPtr.mKeyHash); | |||
2237 | if (aPtr.found()) { | |||
2238 | return true; | |||
2239 | } | |||
2240 | } else { | |||
2241 | // Clear aPtr so it's invalid; add() will allocate storage and redo the | |||
2242 | // lookup. | |||
2243 | aPtr.mSlot = Slot(nullptr, nullptr); | |||
2244 | } | |||
2245 | return add(aPtr, std::forward<Args>(aArgs)...); | |||
2246 | } | |||
2247 | ||||
2248 | void remove(Ptr aPtr) { | |||
2249 | MOZ_ASSERT(mTable)do { static_assert( mozilla::detail::AssertionConditionType< decltype(mTable)>::isValid, "invalid assertion condition") ; if ((__builtin_expect(!!(!(!!(mTable))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mTable", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2249); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mTable" ")" ); do { *((volatile int*)__null) = 2249; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2250 | ReentrancyGuard g(*this); | |||
2251 | MOZ_ASSERT(aPtr.found())do { static_assert( mozilla::detail::AssertionConditionType< decltype(aPtr.found())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(aPtr.found()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("aPtr.found()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2251); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPtr.found()" ")"); do { *((volatile int*)__null) = 2251; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2252 | MOZ_ASSERT(aPtr.mGeneration == generation())do { static_assert( mozilla::detail::AssertionConditionType< decltype(aPtr.mGeneration == generation())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(aPtr.mGeneration == generation ()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("aPtr.mGeneration == generation()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2252); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPtr.mGeneration == generation()" ")"); do { *((volatile int*)__null) = 2252; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2253 | remove(aPtr.mSlot); | |||
2254 | shrinkIfUnderloaded(); | |||
2255 | } | |||
2256 | ||||
2257 | void rekeyWithoutRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) { | |||
2258 | MOZ_ASSERT(mTable)do { static_assert( mozilla::detail::AssertionConditionType< decltype(mTable)>::isValid, "invalid assertion condition") ; if ((__builtin_expect(!!(!(!!(mTable))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("mTable", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2258); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mTable" ")" ); do { *((volatile int*)__null) = 2258; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2259 | ReentrancyGuard g(*this); | |||
2260 | MOZ_ASSERT(aPtr.found())do { static_assert( mozilla::detail::AssertionConditionType< decltype(aPtr.found())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(aPtr.found()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("aPtr.found()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2260); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPtr.found()" ")"); do { *((volatile int*)__null) = 2260; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2261 | MOZ_ASSERT(aPtr.mGeneration == generation())do { static_assert( mozilla::detail::AssertionConditionType< decltype(aPtr.mGeneration == generation())>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(aPtr.mGeneration == generation ()))), 0))) { do { } while (false); MOZ_ReportAssertionFailure ("aPtr.mGeneration == generation()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h" , 2261); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPtr.mGeneration == generation()" ")"); do { *((volatile int*)__null) = 2261; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); | |||
2262 | typename HashTableEntry<T>::NonConstT t(std::move(*aPtr)); | |||
2263 | HashPolicy::setKey(t, const_cast<Key&>(aKey)); | |||
2264 | remove(aPtr.mSlot); | |||
2265 | HashNumber keyHash = prepareHash(HashPolicy::hash(aLookup)); | |||
2266 | putNewInfallibleInternal(keyHash, std::move(t)); | |||
2267 | } | |||
2268 | ||||
2269 | void rekeyAndMaybeRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) { | |||
2270 | rekeyWithoutRehash(aPtr, aLookup, aKey); | |||
2271 | infallibleRehashIfOverloaded(); | |||
2272 | } | |||
2273 | }; | |||
2274 | ||||
2275 | } // namespace detail | |||
2276 | } // namespace mozilla | |||
2277 | ||||
2278 | #endif /* mozilla_HashTable_h */ |