Bug Summary

File: /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h
Warning: line 1312, column 7
Called C++ object pointer is null
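
This class of warning (typically emitted by the core.CallAndMessage checker) means the analyzer found at least one feasible path on which a member function is called through a null object pointer. A minimal sketch of the pattern, not taken from HashTable.h:

struct Table {
  bool has(int key) const;
};

bool lookup(const Table* table, int key) {
  // If any feasible path reaches this call with table == nullptr, the
  // analyzer reports "Called C++ object pointer is null" at this line.
  return table->has(key);
}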

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name HeapSnapshot.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -ffp-contract=off -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/devtools/shared/heapsnapshot -fcoverage-compilation-dir=/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/devtools/shared/heapsnapshot -resource-dir /usr/lib/llvm-20/lib/clang/20 -include /var/lib/jenkins/workspace/firefox-scan-build/config/gcc_hidden.h -include /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/mozilla-config.h -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/stl_wrappers -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/system_wrappers -U _FORTIFY_SOURCE -D _FORTIFY_SOURCE=2 -D _GLIBCXX_ASSERTIONS -D DEBUG=1 -D GOOGLE_PROTOBUF_NO_RTTI -D MOZ_HAS_MOZGLUE -D MOZILLA_INTERNAL_API -D IMPL_LIBXUL -D MOZ_SUPPORT_LEAKCHECKING -D STATIC_EXPORTABLE_JS_API -I /var/lib/jenkins/workspace/firefox-scan-build/devtools/shared/heapsnapshot -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/devtools/shared/heapsnapshot -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/ipc/ipdl/_ipdlheaders -I /var/lib/jenkins/workspace/firefox-scan-build/ipc/chromium/src -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/nspr -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/nss -D MOZILLA_CLIENT -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/x86_64-linux-gnu/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14/backward -internal-isystem /usr/lib/llvm-20/lib/clang/20/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-error=tautological-type-limit-compare -Wno-invalid-offsetof -Wno-range-loop-analysis -Wno-deprecated-anon-enum-enum-conversion -Wno-deprecated-enum-enum-conversion -Wno-deprecated-this-capture -Wno-inline-new-delete -Wno-error=deprecated-declarations -Wno-error=array-bounds -Wno-error=free-nonheap-object -Wno-error=atomic-alignment -Wno-error=deprecated-builtins -Wno-psabi -Wno-error=builtin-macro-redefined -Wno-vla-cxx-extension -Wno-unknown-warning-option 
-fdeprecated-macro -ferror-limit 19 -fstrict-flex-arrays=1 -stack-protector 2 -fstack-clash-protection -ftrivial-auto-var-init=pattern -fno-rtti -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -fno-sized-deallocation -fno-aligned-allocation -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2025-01-20-090804-167946-1 -x c++ /var/lib/jenkins/workspace/firefox-scan-build/devtools/shared/heapsnapshot/HeapSnapshot.cpp

/var/lib/jenkins/workspace/firefox-scan-build/devtools/shared/heapsnapshot/HeapSnapshot.cpp

1/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
2/* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5
6#include "HeapSnapshot.h"
7
8#include <google/protobuf/io/coded_stream.h>
9#include <google/protobuf/io/gzip_stream.h>
10#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
11
12#include "js/Array.h" // JS::NewArrayObject
13#include "js/ColumnNumber.h" // JS::LimitedColumnNumberOneOrigin, JS::TaggedColumnNumberOneOrigin
14#include "js/Debug.h"
15#include "js/PropertyAndElement.h" // JS_DefineProperty
16#include "js/TypeDecls.h"
17#include "js/UbiNodeBreadthFirst.h"
18#include "js/UbiNodeCensus.h"
19#include "js/UbiNodeDominatorTree.h"
20#include "js/UbiNodeShortestPaths.h"
21#include "mozilla/Attributes.h"
22#include "mozilla/CycleCollectedJSContext.h"
23#include "mozilla/devtools/AutoMemMap.h"
24#include "mozilla/devtools/CoreDump.pb.h"
25#include "mozilla/devtools/DeserializedNode.h"
26#include "mozilla/devtools/DominatorTree.h"
27#include "mozilla/devtools/FileDescriptorOutputStream.h"
28#include "mozilla/devtools/HeapSnapshotTempFileHelperChild.h"
29#include "mozilla/devtools/ZeroCopyNSIOutputStream.h"
30#include "mozilla/dom/ChromeUtils.h"
31#include "mozilla/dom/ContentChild.h"
32#include "mozilla/dom/HeapSnapshotBinding.h"
33#include "mozilla/RangedPtr.h"
34#include "mozilla/Telemetry.h"
35#include "mozilla/Unused.h"
36
37#include "jsapi.h"
38#include "jsfriendapi.h"
39#include "js/GCVector.h"
40#include "js/MapAndSet.h"
41#include "js/Object.h" // JS::GetCompartment
42#include "nsComponentManagerUtils.h" // do_CreateInstance
43#include "nsCycleCollectionParticipant.h"
44#include "nsCRTGlue.h"
45#include "nsIFile.h"
46#include "nsIOutputStream.h"
47#include "nsISupportsImpl.h"
48#include "nsNetUtil.h"
49#include "nsPrintfCString.h"
50#include "prerror.h"
51#include "prio.h"
52#include "prtypes.h"
53#include "SpecialSystemDirectory.h"
54
55namespace mozilla {
56namespace devtools {
57
58using namespace JS;
59using namespace dom;
60
61using ::google::protobuf::io::ArrayInputStream;
62using ::google::protobuf::io::CodedInputStream;
63using ::google::protobuf::io::GzipInputStream;
64using ::google::protobuf::io::ZeroCopyInputStream;
65
66using JS::ubi::AtomOrTwoByteChars;
67using JS::ubi::ShortestPaths;
68
69MallocSizeOf GetCurrentThreadDebuggerMallocSizeOf() {
70 auto ccjscx = CycleCollectedJSContext::Get();
71 MOZ_ASSERT(ccjscx);
72 auto cx = ccjscx->Context();
73 MOZ_ASSERT(cx);
74 auto mallocSizeOf = JS::dbg::GetDebuggerMallocSizeOf(cx);
75 MOZ_ASSERT(mallocSizeOf);
76 return mallocSizeOf;
77}
78
79/*** Cycle Collection Boilerplate *********************************************/
80
81NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(HeapSnapshot, mParent)
82
83NS_IMPL_CYCLE_COLLECTING_ADDREF(HeapSnapshot)
84NS_IMPL_CYCLE_COLLECTING_RELEASE(HeapSnapshot)
85
86NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(HeapSnapshot)
87 NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
88 NS_INTERFACE_MAP_ENTRY(nsISupports)
89NS_INTERFACE_MAP_END
90
91/* virtual */
92JSObject* HeapSnapshot::WrapObject(JSContext* aCx,
93 JS::Handle<JSObject*> aGivenProto) {
94 return HeapSnapshot_Binding::Wrap(aCx, this, aGivenProto);
95}
96
97/*** Reading Heap Snapshots ***************************************************/
98
99/* static */
100already_AddRefed<HeapSnapshot> HeapSnapshot::Create(JSContext* cx,
101 GlobalObject& global,
102 const uint8_t* buffer,
103 uint32_t size,
104 ErrorResult& rv) {
105 RefPtr<HeapSnapshot> snapshot = new HeapSnapshot(cx, global.GetAsSupports());
106 if (!snapshot->init(cx, buffer, size)) {
107 rv.Throw(NS_ERROR_UNEXPECTED);
108 return nullptr;
109 }
110 return snapshot.forget();
111}
112
113template <typename MessageType>
114static bool parseMessage(ZeroCopyInputStream& stream, uint32_t sizeOfMessage,
115 MessageType& message) {
116 // We need to create a new `CodedInputStream` for each message so that the
117 // 64MB limit is applied per-message rather than to the whole stream.
118 CodedInputStream codedStream(&stream);
119
120 // The protobuf message nesting that core dumps exhibit is dominated by
121 // allocation stacks' frames. In the most deeply nested case, each frame has
122 // two messages: a StackFrame message and a StackFrame::Data message. These
123 // frames are on top of a small constant of other messages. There are a
124 // MAX_STACK_DEPTH number of frames, so we multiply this by 3 to make room for
125 // the two messages per frame plus some head room for the constant number of
126 // non-dominating messages.
127 codedStream.SetRecursionLimit(HeapSnapshot::MAX_STACK_DEPTH * 3);
128
129 auto limit = codedStream.PushLimit(sizeOfMessage);
130 if (NS_WARN_IF(!message.ParseFromCodedStream(&codedStream)) ||
131 NS_WARN_IF(!codedStream.ConsumedEntireMessage()) ||
132 NS_WARN_IF(codedStream.BytesUntilLimit() != 0)) {
133 return false;
134 }
135
136 codedStream.PopLimit(limit);
137 return true;
138}
139
140template <typename CharT, typename InternedStringSet>
141struct GetOrInternStringMatcher {
142 InternedStringSet& internedStrings;
143
144 explicit GetOrInternStringMatcher(InternedStringSet& strings)
145 : internedStrings(strings) {}
146
147 const CharT* operator()(const std::string* str) {
148 MOZ_ASSERT(str);
149 size_t length = str->length() / sizeof(CharT);
150 auto tempString = reinterpret_cast<const CharT*>(str->data());
151
152 UniqueFreePtr<CharT[]> owned(NS_xstrndup(tempString, length));
153 if (!internedStrings.append(std::move(owned))) return nullptr;
154
155 return internedStrings.back().get();
156 }
157
158 const CharT* operator()(uint64_t ref) {
159 if (MOZ_LIKELY(ref < internedStrings.length())) {
160 auto& string = internedStrings[ref];
161 MOZ_ASSERT(string);
162 return string.get();
163 }
164
165 return nullptr;
166 }
167};
168
169template <
170 // Either char or char16_t.
171 typename CharT,
172 // A reference to either `internedOneByteStrings` or
173 // `internedTwoByteStrings` if CharT is char or char16_t respectively.
174 typename InternedStringSet>
175const CharT* HeapSnapshot::getOrInternString(
176 InternedStringSet& internedStrings, Maybe<StringOrRef>& maybeStrOrRef) {
177 // Incomplete message: has neither a string nor a reference to an already
178 // interned string.
179 if (MOZ_UNLIKELY(maybeStrOrRef.isNothing())) return nullptr;
180
181 GetOrInternStringMatcher<CharT, InternedStringSet> m(internedStrings);
182 return maybeStrOrRef->match(m);
183}
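
The matcher and helper above implement the core dump's string de-duplication scheme: the first time a string is seen it is carried inline and appended to the interned list, and every later occurrence is encoded as an index into that list, which is why strings must be read back in exactly the order they were written (see the NB comments in saveNode and saveStackFrame below). A condensed sketch of the same idea, with hypothetical names that are not from the codebase:

#include <algorithm>
#include <cstddef>
#include <memory>
#include <vector>

// Hypothetical, simplified intern table mirroring the scheme above.
struct InternTable {
  std::vector<std::unique_ptr<char16_t[]>> strings;

  // First occurrence: copy the characters and hand back stable storage.
  const char16_t* internString(const char16_t* chars, size_t length) {
    auto owned = std::make_unique<char16_t[]>(length + 1);  // zero-filled
    std::copy_n(chars, length, owned.get());
    strings.push_back(std::move(owned));
    return strings.back().get();
  }

  // Back-reference: only valid if that string was already interned, in
  // exactly the order it was written to the core dump.
  const char16_t* internRef(size_t index) const {
    return index < strings.size() ? strings[index].get() : nullptr;
  }
};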
184
185// Get a de-duplicated string as a Maybe<StringOrRef> from the given `msg`.
186#define GET_STRING_OR_REF_WITH_PROP_NAMES(msg, strPropertyName, \
187 refPropertyName) \
188 (msg.has_##refPropertyName() ? Some(StringOrRef(msg.refPropertyName())) \
189 : msg.has_##strPropertyName() ? Some(StringOrRef(&msg.strPropertyName())) \
190 : Nothing())
191
192#define GET_STRING_OR_REF(msg, property) \
193 (msg.has_##property##ref() ? Some(StringOrRef(msg.property##ref())) \
194 : msg.has_##property() ? Some(StringOrRef(&msg.property())) \
195 : Nothing())
196
197bool HeapSnapshot::saveNode(const protobuf::Node& node,
198 NodeIdSet& edgeReferents) {
199 // NB: de-duplicated string properties must be read back and interned in the
200 // same order here as they are written and serialized in
201 // `CoreDumpWriter::writeNode` or else indices in references to already
202 // serialized strings will be off.
203
204 if (NS_WARN_IF(!node.has_id())) return false;
205 NodeId id = node.id();
206
207 // NodeIds are derived from pointers (at most 48 bits) and we rely on them
208 // fitting into JS numbers (IEEE 754 doubles, can precisely store 53 bit
209 // integers) despite storing them on disk as 64 bit integers.
210 if (NS_WARN_IF(!JS::Value::isNumberRepresentable(id))) return false;
211
212 // Should only deserialize each node once.
213 if (NS_WARN_IF(nodes.has(id))) return false;
214
215 if (NS_WARN_IF(!JS::ubi::Uint32IsValidCoarseType(node.coarsetype())))
216 return false;
217 auto coarseType = JS::ubi::Uint32ToCoarseType(node.coarsetype());
218
219 Maybe<StringOrRef> typeNameOrRef =
220 GET_STRING_OR_REF_WITH_PROP_NAMES(node, typename_, typenameref);
221 auto typeName =
222 getOrInternString<char16_t>(internedTwoByteStrings, typeNameOrRef);
223 if (NS_WARN_IF(!typeName)) return false;
224
225 if (NS_WARN_IF(!node.has_size())) return false;
226 uint64_t size = node.size();
227
228 auto edgesLength = node.edges_size();
229 DeserializedNode::EdgeVector edges;
230 if (NS_WARN_IF(!edges.reserve(edgesLength))) return false;
231 for (decltype(edgesLength) i = 0; i < edgesLength; i++) {
232 auto& protoEdge = node.edges(i);
233
234 if (NS_WARN_IF(!protoEdge.has_referent())) return false;
235 NodeId referent = protoEdge.referent();
236
237 if (NS_WARN_IF(!edgeReferents.put(referent))) return false;
238
239 const char16_t* edgeName = nullptr;
240 if (protoEdge.EdgeNameOrRef_case() !=
241 protobuf::Edge::EDGENAMEORREF_NOT_SET) {
242 Maybe<StringOrRef> edgeNameOrRef = GET_STRING_OR_REF(protoEdge, name);
243 edgeName =
244 getOrInternString<char16_t>(internedTwoByteStrings, edgeNameOrRef);
245 if (NS_WARN_IF(!edgeName)) return false;
246 }
247
248 edges.infallibleAppend(DeserializedEdge(referent, edgeName));
249 }
250
251 Maybe<StackFrameId> allocationStack;
252 if (node.has_allocationstack()) {
253 StackFrameId id = 0;
254 if (NS_WARN_IF(!saveStackFrame(node.allocationstack(), id))) return false;
255 allocationStack.emplace(id);
256 }
257 MOZ_ASSERT(allocationStack.isSome() == node.has_allocationstack());
258
259 const char* jsObjectClassName = nullptr;
260 if (node.JSObjectClassNameOrRef_case() !=
261 protobuf::Node::JSOBJECTCLASSNAMEORREF_NOT_SET) {
262 Maybe<StringOrRef> clsNameOrRef =
263 GET_STRING_OR_REF(node, jsobjectclassname);
264 jsObjectClassName =
265 getOrInternString<char>(internedOneByteStrings, clsNameOrRef);
266 if (NS_WARN_IF(!jsObjectClassName)) return false;
267 }
268
269 const char* scriptFilename = nullptr;
270 if (node.ScriptFilenameOrRef_case() !=
271 protobuf::Node::SCRIPTFILENAMEORREF_NOT_SET) {
272 Maybe<StringOrRef> scriptFilenameOrRef =
273 GET_STRING_OR_REF(node, scriptfilename);
274 scriptFilename =
275 getOrInternString<char>(internedOneByteStrings, scriptFilenameOrRef);
276 if (NS_WARN_IF(!scriptFilename)) return false;
277 }
278
279 const char16_t* descriptiveTypeName = nullptr;
280 if (node.descriptiveTypeNameOrRef_case() !=
281 protobuf::Node::DESCRIPTIVETYPENAMEORREF_NOT_SET) {
282 Maybe<StringOrRef> descriptiveTypeNameOrRef =
283 GET_STRING_OR_REF(node, descriptivetypename);
284 descriptiveTypeName = getOrInternString<char16_t>(internedTwoByteStrings,
285 descriptiveTypeNameOrRef);
286 if (NS_WARN_IF(!descriptiveTypeName)) return false;
287 }
288
289 if (NS_WARN_IF(!nodes.putNew(
290 id, DeserializedNode(id, coarseType, typeName, size, std::move(edges),
291 allocationStack, jsObjectClassName,
292 scriptFilename, descriptiveTypeName, *this)))) {
293 return false;
294 };
295
296 return true;
297}
298
299bool HeapSnapshot::saveStackFrame(const protobuf::StackFrame& frame,
300 StackFrameId& outFrameId) {
301 // NB: de-duplicated string properties must be read in the same order here as
302 // they are written in `CoreDumpWriter::getProtobufStackFrame` or else indices
303 // in references to already serialized strings will be off.
304
305 if (frame.has_ref()) {
306 // We should only get a reference to the previous frame if we have already
307 // seen the previous frame.
308 if (!frames.has(frame.ref())) return false;
309
310 outFrameId = frame.ref();
311 return true;
312 }
313
314 // Incomplete message.
315 if (!frame.has_data()) return false;
316
317 auto data = frame.data();
318
319 if (!data.has_id()) return false;
320 StackFrameId id = data.id();
321
322 // This should be the first and only time we see this frame.
323 if (frames.has(id)) return false;
324
325 if (!data.has_line()) return false;
326 uint32_t line = data.line();
327
328 if (!data.has_column()) return false;
329 JS::TaggedColumnNumberOneOrigin column(
330 JS::LimitedColumnNumberOneOrigin(data.column()));
331
332 if (!data.has_issystem()) return false;
333 bool isSystem = data.issystem();
334
335 if (!data.has_isselfhosted()) return false;
336 bool isSelfHosted = data.isselfhosted();
337
338 Maybe<StringOrRef> sourceOrRef = GET_STRING_OR_REF(data, source);
339 auto source =
340 getOrInternString<char16_t>(internedTwoByteStrings, sourceOrRef);
341 if (!source) return false;
342
343 const char16_t* functionDisplayName = nullptr;
344 if (data.FunctionDisplayNameOrRef_case() !=
345 protobuf::StackFrame_Data::FUNCTIONDISPLAYNAMEORREF_NOT_SET) {
346 Maybe<StringOrRef> nameOrRef = GET_STRING_OR_REF(data, functiondisplayname);
347 functionDisplayName =
348 getOrInternString<char16_t>(internedTwoByteStrings, nameOrRef);
349 if (!functionDisplayName) return false;
350 }
351
352 Maybe<StackFrameId> parent;
353 if (data.has_parent()) {
354 StackFrameId parentId = 0;
355 if (!saveStackFrame(data.parent(), parentId)) return false;
356 parent = Some(parentId);
357 }
358
359 if (!frames.putNew(id,
360 DeserializedStackFrame(id, parent, line, column, source,
361 functionDisplayName, isSystem,
362 isSelfHosted, *this))) {
363 return false;
364 }
365
366 outFrameId = id;
367 return true;
368}
369
370#undef GET_STRING_OR_REF_WITH_PROP_NAMES
371#undef GET_STRING_OR_REF
372
373// Because protobuf messages aren't self-delimiting, we serialize each message
374// preceded by its size in bytes. When deserializing, we read this size and then
375// limit reading from the stream to the given byte size. If we didn't, then the
376// first message would consume the entire stream.
377static bool readSizeOfNextMessage(ZeroCopyInputStream& stream,
378 uint32_t* sizep) {
379 MOZ_ASSERT(sizep);
380 CodedInputStream codedStream(&stream);
381 return codedStream.ReadVarint32(sizep) && *sizep > 0;
382}
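
Taken together with parseMessage above, the core dump is a gzip-compressed sequence of varint-length-prefixed protobuf messages: each size returned by readSizeOfNextMessage bounds the next parse via PushLimit. A freestanding sketch of that reader-side framing, assuming the protobuf headers already included at the top of this file plus <vector> and <cstdint>; MessageType stands in for any generated message class:

// Hypothetical reader for a stream of length-prefixed messages.
template <typename MessageType>
bool readAllMessages(google::protobuf::io::ZeroCopyInputStream& stream,
                     std::vector<MessageType>& out) {
  for (;;) {
    uint32_t size = 0;
    {
      // A fresh CodedInputStream per read keeps its byte limit per message.
      google::protobuf::io::CodedInputStream sizeStream(&stream);
      if (!sizeStream.ReadVarint32(&size) || size == 0) {
        // A failed size read is treated as end of stream; the library gives
        // no way to tell it apart from an underlying read error.
        return true;
      }
    }
    google::protobuf::io::CodedInputStream body(&stream);
    auto limit = body.PushLimit(size);
    MessageType message;
    if (!message.ParseFromCodedStream(&body) ||
        !body.ConsumedEntireMessage() || body.BytesUntilLimit() != 0) {
      return false;
    }
    body.PopLimit(limit);
    out.push_back(std::move(message));
  }
}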
383
384bool HeapSnapshot::init(JSContext* cx, const uint8_t* buffer, uint32_t size) {
385 ArrayInputStream stream(buffer, size);
386 GzipInputStream gzipStream(&stream);
387 uint32_t sizeOfMessage = 0;
388
389 // First is the metadata.
390
391 protobuf::Metadata metadata;
392 if (NS_WARN_IF(!readSizeOfNextMessage(gzipStream, &sizeOfMessage)))
393 return false;
394 if (!parseMessage(gzipStream, sizeOfMessage, metadata)) return false;
395 if (metadata.has_timestamp()) timestamp.emplace(metadata.timestamp());
396
397 // Next is the root node.
398
399 protobuf::Node root;
400 if (NS_WARN_IF(!readSizeOfNextMessage(gzipStream, &sizeOfMessage)))
401 return false;
402 if (!parseMessage(gzipStream, sizeOfMessage, root)) return false;
403
404 // Although the id is optional in the protobuf format for future proofing, we
405 // can't currently do anything without it.
406 if (NS_WARN_IF(!root.has_id())) return false;
407 rootId = root.id();
408
409 // The set of all node ids we've found edges pointing to.
410 NodeIdSet edgeReferents(cx);
411
412 if (NS_WARN_IF(!saveNode(root, edgeReferents))) return false;
413
414 // Finally, the rest of the nodes in the core dump.
415
416 // Test for the end of the stream. The protobuf library gives no way to tell
417 // the difference between an underlying read error and the stream being
418 // done. All we can do is attempt to read the size of the next message and
419 // extrapolate guestimations from the result of that operation.
420 while (readSizeOfNextMessage(gzipStream, &sizeOfMessage)) {
421 protobuf::Node node;
422 if (!parseMessage(gzipStream, sizeOfMessage, node)) return false;
423 if (NS_WARN_IF(!saveNode(node, edgeReferents))) return false;
424 }
425
426 // Check the set of node ids referred to by edges we found and ensure that we
427 // have the node corresponding to each id. If we don't have all of them, it is
428 // unsafe to perform analyses of this heap snapshot.
429 for (auto iter = edgeReferents.iter(); !iter.done(); iter.next()) {
430 if (NS_WARN_IF(!nodes.has(iter.get()))) return false;
431 }
432
433 return true;
434}
435
436/*** Heap Snapshot Analyses ***************************************************/
437
438void HeapSnapshot::TakeCensus(JSContext* cx, JS::Handle<JSObject*> options,
439 JS::MutableHandle<JS::Value> rval,
440 ErrorResult& rv) {
441 JS::ubi::Census census(cx);
442
443 JS::ubi::CountTypePtr rootType;
444 if (NS_WARN_IF(!JS::ubi::ParseCensusOptions(cx, census, options, rootType))) {
445 rv.Throw(NS_ERROR_UNEXPECTED);
446 return;
447 }
448
449 JS::ubi::RootedCount rootCount(cx, rootType->makeCount());
450 if (NS_WARN_IF(!rootCount)) {
451 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
452 return;
453 }
454
455 JS::ubi::CensusHandler handler(census, rootCount,
456 GetCurrentThreadDebuggerMallocSizeOf());
457
458 {
459 JS::AutoCheckCannotGC nogc;
460
461 JS::ubi::CensusTraversal traversal(cx, handler, nogc);
462
463 if (NS_WARN_IF(!traversal.addStart(getRoot()))) {
464 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
465 return;
466 }
467
468 if (NS_WARN_IF(!traversal.traverse())) {
469 rv.Throw(NS_ERROR_UNEXPECTED);
470 return;
471 }
472 }
473
474 if (NS_WARN_IF(!handler.report(cx, rval))) {
475 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
476 return;
477 }
478}
479
480void HeapSnapshot::DescribeNode(JSContext* cx, JS::Handle<JSObject*> breakdown,
481 uint64_t nodeId,
482 JS::MutableHandle<JS::Value> rval,
483 ErrorResult& rv) {
484 MOZ_ASSERT(breakdown);
485 JS::Rooted<JS::Value> breakdownVal(cx, JS::ObjectValue(*breakdown));
486 JS::Rooted<JS::GCVector<JSLinearString*>> seen(cx, cx);
487 JS::ubi::CountTypePtr rootType =
488 JS::ubi::ParseBreakdown(cx, breakdownVal, &seen);
489 if (NS_WARN_IF(!rootType)) {
490 rv.Throw(NS_ERROR_UNEXPECTED);
491 return;
492 }
493
494 JS::ubi::RootedCount rootCount(cx, rootType->makeCount());
495 if (NS_WARN_IF(!rootCount)) {
496 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
497 return;
498 }
499
500 JS::ubi::Node::Id id(nodeId);
501 Maybe<JS::ubi::Node> node = getNodeById(id);
502 if (NS_WARN_IF(node.isNothing())) {
503 rv.Throw(NS_ERROR_INVALID_ARG);
504 return;
505 }
506
507 MallocSizeOf mallocSizeOf = GetCurrentThreadDebuggerMallocSizeOf();
508 if (NS_WARN_IF(!rootCount->count(mallocSizeOf, *node))) {
509 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
510 return;
511 }
512
513 if (NS_WARN_IF(!rootCount->report(cx, rval))) {
514 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
515 return;
516 }
517}
518
519already_AddRefed<DominatorTree> HeapSnapshot::ComputeDominatorTree(
520 ErrorResult& rv) {
521 Maybe<JS::ubi::DominatorTree> maybeTree;
522 {
523 auto ccjscx = CycleCollectedJSContext::Get();
524 MOZ_ASSERT(ccjscx);
525 auto cx = ccjscx->Context();
526 MOZ_ASSERT(cx);
527 JS::AutoCheckCannotGC nogc(cx);
528 maybeTree = JS::ubi::DominatorTree::Create(cx, nogc, getRoot());
529 }
530
531 if (NS_WARN_IF(maybeTree.isNothing())) {
532 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
533 return nullptr;
534 }
535
536 return MakeAndAddRef<DominatorTree>(std::move(*maybeTree), this, mParent);
537}
538
539void HeapSnapshot::ComputeShortestPaths(JSContext* cx, uint64_t start,
540 const Sequence<uint64_t>& targets,
541 uint64_t maxNumPaths,
542 JS::MutableHandle<JSObject*> results,
543 ErrorResult& rv) {
544 // First ensure that our inputs are valid.
545
546 if (NS_WARN_IF(maxNumPaths == 0)) {
1 Assuming 'maxNumPaths' is not equal to 0
2 Taking false branch
547 rv.Throw(NS_ERROR_INVALID_ARG);
548 return;
549 }
550
551 Maybe<JS::ubi::Node> startNode = getNodeById(start);
3 Calling 'HeapSnapshot::getNodeById'
552 if (NS_WARN_IF(startNode.isNothing())) {
553 rv.Throw(NS_ERROR_INVALID_ARG);
554 return;
555 }
556
557 if (NS_WARN_IF(targets.Length() == 0)) {
558 rv.Throw(NS_ERROR_INVALID_ARG);
559 return;
560 }
561
562 // Aggregate the targets into a set and make sure that they exist in the heap
563 // snapshot.
564
565 JS::ubi::NodeSet targetsSet;
566
567 for (const auto& target : targets) {
568 Maybe<JS::ubi::Node> targetNode = getNodeById(target);
569 if (NS_WARN_IF(targetNode.isNothing())) {
570 rv.Throw(NS_ERROR_INVALID_ARG);
571 return;
572 }
573
574 if (NS_WARN_IF(!targetsSet.put(*targetNode))) {
575 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
576 return;
577 }
578 }
579
580 // Walk the heap graph and find the shortest paths.
581
582 Maybe<ShortestPaths> maybeShortestPaths;
583 {
584 JS::AutoCheckCannotGC nogc(cx);
585 maybeShortestPaths = ShortestPaths::Create(
586 cx, nogc, maxNumPaths, *startNode, std::move(targetsSet));
587 }
588
589 if (NS_WARN_IF(maybeShortestPaths.isNothing())) {
590 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
591 return;
592 }
593
594 auto& shortestPaths = *maybeShortestPaths;
595
596 // Convert the results into a Map object mapping target node IDs to arrays of
597 // paths found.
598
599 JS::Rooted<JSObject*> resultsMap(cx, JS::NewMapObject(cx));
600 if (NS_WARN_IF(!resultsMap)) {
601 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
602 return;
603 }
604
605 for (auto iter = shortestPaths.targetIter(); !iter.done(); iter.next()) {
606 JS::Rooted<JS::Value> key(cx, JS::NumberValue(iter.get().identifier()));
607 JS::RootedVector<JS::Value> paths(cx);
608
609 bool ok = shortestPaths.forEachPath(iter.get(), [&](JS::ubi::Path& path) {
610 JS::RootedVector<JS::Value> pathValues(cx);
611
612 for (JS::ubi::BackEdge* edge : path) {
613 JS::Rooted<JSObject*> pathPart(cx, JS_NewPlainObject(cx));
614 if (!pathPart) {
615 return false;
616 }
617
618 JS::Rooted<JS::Value> predecessor(
619 cx, NumberValue(edge->predecessor().identifier()));
620 if (!JS_DefineProperty(cx, pathPart, "predecessor", predecessor,
621 JSPROP_ENUMERATE)) {
622 return false;
623 }
624
625 JS::Rooted<JS::Value> edgeNameVal(cx, NullValue());
626 if (edge->name()) {
627 JS::Rooted<JSString*> edgeName(
628 cx, JS_AtomizeUCString(cx, edge->name().get()));
629 if (!edgeName) {
630 return false;
631 }
632 edgeNameVal = StringValue(edgeName);
633 }
634
635 if (!JS_DefineProperty(cx, pathPart, "edge", edgeNameVal,
636 JSPROP_ENUMERATE)) {
637 return false;
638 }
639
640 if (!pathValues.append(ObjectValue(*pathPart))) {
641 return false;
642 }
643 }
644
645 JS::Rooted<JSObject*> pathObj(cx, JS::NewArrayObject(cx, pathValues));
646 return pathObj && paths.append(ObjectValue(*pathObj));
647 });
648
649 if (NS_WARN_IF(!ok)) {
650 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
651 return;
652 }
653
654 JS::Rooted<JSObject*> pathsArray(cx, JS::NewArrayObject(cx, paths));
655 if (NS_WARN_IF(!pathsArray)) {
656 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
657 return;
658 }
659
660 JS::Rooted<JS::Value> pathsVal(cx, ObjectValue(*pathsArray));
661 if (NS_WARN_IF(!JS::MapSet(cx, resultsMap, key, pathsVal))) {
662 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
663 return;
664 }
665 }
666
667 results.set(resultsMap);
668}
669
670/*** Saving Heap Snapshots ****************************************************/
671
672// If we are only taking a snapshot of the heap affected by the given set of
673// globals, find the set of compartments the globals are allocated
674// within. Returns false on OOM failure.
675static bool PopulateCompartmentsWithGlobals(
676 CompartmentSet& compartments, JS::HandleVector<JSObject*> globals) {
677 unsigned length = globals.length();
678 for (unsigned i = 0; i < length; i++) {
679 if (!compartments.put(JS::GetCompartment(globals[i]))) return false;
680 }
681
682 return true;
683}
684
685// Add the given set of globals as explicit roots in the given roots
686// list. Returns false on OOM failure.
687static bool AddGlobalsAsRoots(JS::HandleVector<JSObject*> globals,
688 ubi::RootList& roots) {
689 unsigned length = globals.length();
690 for (unsigned i = 0; i < length; i++) {
691 if (!roots.addRoot(ubi::Node(globals[i].get()), u"heap snapshot global")) {
692 return false;
693 }
694 }
695 return true;
696}
697
698// Choose roots and limits for a traversal, given `boundaries`. Set `roots` to
699// the set of nodes within the boundaries that are referred to by nodes
700// outside. If `boundaries` does not include all JS compartments, initialize
701// `compartments` to the set of included compartments; otherwise, leave
702// `compartments` uninitialized. (You can use compartments.initialized() to
703// check.)
704//
705// If `boundaries` is incoherent, or we encounter an error while trying to
706// handle it, or we run out of memory, set `rv` appropriately and return
707// `false`.
708//
709// Return value is a pair of the status and an AutoCheckCannotGC token,
710// forwarded from ubi::RootList::init(), to ensure that the caller does
711// not GC while the RootList is live and initialized.
712static std::pair<bool, AutoCheckCannotGC> EstablishBoundaries(
713 JSContext* cx, ErrorResult& rv, const HeapSnapshotBoundaries& boundaries,
714 ubi::RootList& roots, CompartmentSet& compartments) {
715 MOZ_ASSERT(!roots.initialized());
716 MOZ_ASSERT(compartments.empty());
717
718 bool foundBoundaryProperty = false;
719
720 if (boundaries.mRuntime.WasPassed()) {
721 foundBoundaryProperty = true;
722
723 if (!boundaries.mRuntime.Value()) {
724 rv.Throw(NS_ERROR_INVALID_ARG);
725 return {false, AutoCheckCannotGC(cx)};
726 }
727
728 auto [ok, nogc] = roots.init();
729 if (!ok) {
730 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
731 return {false, nogc};
732 }
733 }
734
735 if (boundaries.mDebugger.WasPassed()) {
736 if (foundBoundaryProperty) {
737 rv.Throw(NS_ERROR_INVALID_ARG);
738 return {false, AutoCheckCannotGC(cx)};
739 }
740 foundBoundaryProperty = true;
741
742 JSObject* dbgObj = boundaries.mDebugger.Value();
743 if (!dbgObj || !dbg::IsDebugger(*dbgObj)) {
744 rv.Throw(NS_ERROR_INVALID_ARG);
745 return {false, AutoCheckCannotGC(cx)};
746 }
747
748 JS::RootedVector<JSObject*> globals(cx);
749 if (!dbg::GetDebuggeeGlobals(cx, *dbgObj, &globals) ||
750 !PopulateCompartmentsWithGlobals(compartments, globals) ||
751 !roots.init(compartments).first || !AddGlobalsAsRoots(globals, roots)) {
752 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
753 return {false, AutoCheckCannotGC(cx)};
754 }
755 }
756
757 if (boundaries.mGlobals.WasPassed()) {
758 if (foundBoundaryProperty) {
759 rv.Throw(NS_ERROR_INVALID_ARG);
760 return {false, AutoCheckCannotGC(cx)};
761 }
762 foundBoundaryProperty = true;
763
764 uint32_t length = boundaries.mGlobals.Value().Length();
765 if (length == 0) {
766 rv.Throw(NS_ERROR_INVALID_ARG);
767 return {false, AutoCheckCannotGC(cx)};
768 }
769
770 JS::RootedVector<JSObject*> globals(cx);
771 for (uint32_t i = 0; i < length; i++) {
772 JSObject* global = boundaries.mGlobals.Value().ElementAt(i);
773 if (!JS_IsGlobalObject(global)) {
774 rv.Throw(NS_ERROR_INVALID_ARG);
775 return {false, AutoCheckCannotGC(cx)};
776 }
777 if (!globals.append(global)) {
778 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
779 return {false, AutoCheckCannotGC(cx)};
780 }
781 }
782
783 if (!PopulateCompartmentsWithGlobals(compartments, globals) ||
784 !roots.init(compartments).first || !AddGlobalsAsRoots(globals, roots)) {
785 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
786 return {false, AutoCheckCannotGC(cx)};
787 }
788 }
789 AutoCheckCannotGC nogc(cx);
790
791 if (!foundBoundaryProperty) {
792 rv.Throw(NS_ERROR_INVALID_ARG);
793 return {false, nogc};
794 }
795
796 MOZ_ASSERT(roots.initialized());
797 return {true, nogc};
798}
799
800// A variant covering all the various two-byte strings that we can get from the
801// ubi::Node API.
802class TwoByteString
803 : public Variant<JSAtom*, const char16_t*, JS::ubi::EdgeName> {
804 using Base = Variant<JSAtom*, const char16_t*, JS::ubi::EdgeName>;
805
806 struct CopyToBufferMatcher {
807 RangedPtr<char16_t> destination;
808 size_t maxLength;
809
810 CopyToBufferMatcher(RangedPtr<char16_t> destination, size_t maxLength)
811 : destination(destination), maxLength(maxLength) {}
812
813 size_t operator()(JS::ubi::EdgeName& ptr) {
814 return ptr ? operator()(ptr.get()) : 0;
815 }
816
817 size_t operator()(JSAtom* atom) {
818 MOZ_ASSERT(atom);
819 JS::ubi::AtomOrTwoByteChars s(atom);
820 return s.copyToBuffer(destination, maxLength);
821 }
822
823 size_t operator()(const char16_t* chars) {
824 MOZ_ASSERT(chars);
825 JS::ubi::AtomOrTwoByteChars s(chars);
826 return s.copyToBuffer(destination, maxLength);
827 }
828 };
829
830 public:
831 template <typename T>
832 MOZ_IMPLICIT TwoByteString(T&& rhs) : Base(std::forward<T>(rhs)) {}
833
834 template <typename T>
835 TwoByteString& operator=(T&& rhs) {
836 MOZ_ASSERT(this != &rhs, "self-move disallowed");
837 this->~TwoByteString();
838 new (this) TwoByteString(std::forward<T>(rhs));
839 return *this;
840 }
841
842 TwoByteString(const TwoByteString&) = delete;
843 TwoByteString& operator=(const TwoByteString&) = delete;
844
845 // Rewrap the inner value of a JS::ubi::AtomOrTwoByteChars as a TwoByteString.
846 static TwoByteString from(JS::ubi::AtomOrTwoByteChars&& s) {
847 return s.match([](auto* a) { return TwoByteString(a); });
848 }
849
850 // Returns true if the given TwoByteString is non-null, false otherwise.
851 bool isNonNull() const {
852 return match([](auto& t) { return t != nullptr; });
853 }
854
855 // Return the length of the string, 0 if it is null.
856 size_t length() const {
857 return match(
858 [](JSAtom* atom) -> size_t {
859           MOZ_ASSERT(atom);
860 JS::ubi::AtomOrTwoByteChars s(atom);
861 return s.length();
862 },
863 [](const char16_t* chars) -> size_t {
864           MOZ_ASSERT(chars);
865 return NS_strlen(chars);
866 },
867 [](const JS::ubi::EdgeName& ptr) -> size_t {
868           MOZ_ASSERT(ptr);
869 return NS_strlen(ptr.get());
870 });
871 }
872
873 // Copy the contents of a TwoByteString into the provided buffer. The buffer
874 // is NOT null terminated. The number of characters written is returned.
875 size_t copyToBuffer(RangedPtr<char16_t> destination, size_t maxLength) {
876 CopyToBufferMatcher m(destination, maxLength);
877 return match(m);
878 }
879
880 struct HashPolicy;
881};
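
TwoByteString above is a thin Variant wrapper whose match() calls dispatch on whichever alternative is active. As a rough, standalone illustration of the same pattern (not the Firefox code; mozilla::Variant is replaced by std::variant and the alternatives are placeholder types), a visitor that copies whichever representation is active into a caller-supplied buffer might look like this:

#include <cstddef>
#include <cstring>
#include <string>
#include <type_traits>
#include <variant>

// Hypothetical alternatives: an interned string, a borrowed C string, and an
// owned buffer -- loosely analogous to atom / char16_t* / EdgeName.
using AnyString = std::variant<const std::string*, const char*, std::string>;

// Copy at most maxLength characters of whichever alternative is active into
// destination and return the number of characters written.
static std::size_t copyToBuffer(const AnyString& s, char* destination,
                                std::size_t maxLength) {
  return std::visit(
      [&](const auto& value) -> std::size_t {
        using T = std::decay_t<decltype(value)>;
        const char* chars = nullptr;
        std::size_t length = 0;
        if constexpr (std::is_same_v<T, const std::string*>) {
          chars = value->data();
          length = value->size();
        } else if constexpr (std::is_same_v<T, const char*>) {
          chars = value;
          length = std::strlen(value);
        } else {
          chars = value.data();
          length = value.size();
        }
        std::size_t n = length < maxLength ? length : maxLength;
        std::memcpy(destination, chars, n);
        return n;
      },
      s);
}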
882
883// A hashing policy for TwoByteString.
884//
885// Atoms are pointer hashed and use pointer equality, which means that we
886// tolerate some duplication across atoms and the other two types of two-byte
887// strings. In practice, we expect the amount of this duplication to be very low
888// because each type is generally a different semantic thing in addition to
889// having a slightly different representation. For example, the set of edge
890 // names and the set of stack frames' source names naturally tend not to overlap
891// very much if at all.
892struct TwoByteString::HashPolicy {
893 using Lookup = TwoByteString;
894
895 static js::HashNumber hash(const Lookup& l) {
896 return l.match(
897 [](const JSAtom* atom) {
898 return js::DefaultHasher<const JSAtom*>::hash(atom);
899 },
900 [](const char16_t* chars) {
901           MOZ_ASSERT(chars);
902 auto length = NS_strlen(chars);
903 return HashString(chars, length);
904 },
905 [](const JS::ubi::EdgeName& ptr) {
906 const char16_t* chars = ptr.get();
907           MOZ_ASSERT(chars);
908 auto length = NS_strlen(chars);
909 return HashString(chars, length);
910 });
911 }
912
913 struct EqualityMatcher {
914 const TwoByteString& rhs;
915 explicit EqualityMatcher(const TwoByteString& rhs) : rhs(rhs) {}
916
917 bool operator()(const JSAtom* atom) {
918 return rhs.is<JSAtom*>() && rhs.as<JSAtom*>() == atom;
919 }
920
921 bool operator()(const char16_t* chars) {
922       MOZ_ASSERT(chars);
923
924 const char16_t* rhsChars = nullptr;
925 if (rhs.is<const char16_t*>())
926 rhsChars = rhs.as<const char16_t*>();
927 else if (rhs.is<JS::ubi::EdgeName>())
928 rhsChars = rhs.as<JS::ubi::EdgeName>().get();
929 else
930 return false;
931       MOZ_ASSERT(rhsChars);
932
933 auto length = NS_strlen(chars);
934 if (NS_strlen(rhsChars) != length) return false;
935
936 return memcmp(chars, rhsChars, length * sizeof(char16_t)) == 0;
937 }
938
939 bool operator()(const JS::ubi::EdgeName& ptr) {
940       MOZ_ASSERT(ptr);
941 return operator()(ptr.get());
942 }
943 };
944
945 static bool match(const TwoByteString& k, const Lookup& l) {
946 EqualityMatcher eq(l);
947 return k.match(eq);
948 }
949
950 static void rekey(TwoByteString& k, TwoByteString&& newKey) {
951 k = std::move(newKey);
952 }
953};
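
The policy above hashes JSAtom* keys by pointer identity and the other two alternatives by their character contents. A simplified standalone sketch of the same split, using std::unordered_set with a custom hasher and comparator (all types here are hypothetical stand-ins, and unlike the EqualityMatcher above it does not compare across alternatives):

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_set>
#include <variant>

// Placeholder key: either an interned token compared by pointer identity or a
// plain string compared by contents.
struct Interned { const char* id; };
using Key = std::variant<Interned, std::string>;

struct KeyHash {
  std::size_t operator()(const Key& k) const {
    if (const auto* i = std::get_if<Interned>(&k))
      return std::hash<const void*>{}(i->id);                   // pointer hash
    return std::hash<std::string>{}(std::get<std::string>(k));  // content hash
  }
};

struct KeyEq {
  bool operator()(const Key& a, const Key& b) const {
    if (a.index() != b.index()) return false;  // simplified: no cross-type equality
    if (const auto* i = std::get_if<Interned>(&a))
      return i->id == std::get<Interned>(b).id;
    return std::get<std::string>(a) == std::get<std::string>(b);
  }
};

using KeySet = std::unordered_set<Key, KeyHash, KeyEq>;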
954
955// Returns whether `edge` should be included in a heap snapshot of
956// `compartments`. The optional `policy` out-param is set to INCLUDE_EDGES
957// if we want to include the referent's edges, or EXCLUDE_EDGES if we don't
958// want to include them.
959static bool ShouldIncludeEdge(JS::CompartmentSet* compartments,
960 const ubi::Node& origin, const ubi::Edge& edge,
961 CoreDumpWriter::EdgePolicy* policy = nullptr) {
962 if (policy) {
963 *policy = CoreDumpWriter::INCLUDE_EDGES;
964 }
965
966 if (!compartments) {
967 // We aren't targeting a particular set of compartments, so serialize all
968 // the things!
969 return true;
970 }
971
972 // We are targeting a particular set of compartments. If this node is in our
973 // target set, serialize it and all of its edges. If this node is _not_ in our
974 // target set, we also serialize under the assumption that it is a shared
975 // resource being used by something in our target compartments since we
976 // reached it by traversing the heap graph. However, we do not serialize its
977 // outgoing edges and we abandon further traversal from this node.
978 //
979 // If the node does not belong to any compartment, we also serialize its
980 // outgoing edges. This case is relevant for Shapes: they don't belong to a
981 // specific compartment and contain edges to parent/kids Shapes we want to
982 // include. Note that these Shapes may contain pointers into our target
983 // compartment (the Shape's getter/setter JSObjects). However, we do not
984 // serialize nodes in other compartments that are reachable from these
985 // non-compartment nodes.
986
987 JS::Compartment* compartment = edge.referent.compartment();
988
989 if (!compartment || compartments->has(compartment)) {
990 return true;
991 }
992
993 if (policy) {
994 *policy = CoreDumpWriter::EXCLUDE_EDGES;
995 }
996
997 return !!origin.compartment();
998}
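
Condensed, the rule above is: with no target set, include everything; otherwise include in-set or compartment-less referents along with their edges, include out-of-set referents without their edges, and drop the edge entirely only when the origin itself has no compartment. A minimal sketch of that decision with placeholder types (std::set instead of JS::CompartmentSet, not the JS::ubi API):

#include <set>

enum class EdgePolicy { IncludeEdges, ExcludeEdges };

struct Compartment {};
struct Node { const Compartment* compartment = nullptr; };
struct Edge { Node referent; };

// Mirrors the decision order of ShouldIncludeEdge above, with simplified types.
static bool shouldIncludeEdge(const std::set<const Compartment*>* targets,
                              const Node& origin, const Edge& edge,
                              EdgePolicy* policy) {
  *policy = EdgePolicy::IncludeEdges;
  if (!targets) return true;  // no target set: serialize all the things

  const Compartment* c = edge.referent.compartment;
  if (!c || targets->count(c)) return true;  // in-set or compartment-less referent

  // Out-of-set referent: keep the node but not its outgoing edges, and only
  // when the origin belongs to some compartment.
  *policy = EdgePolicy::ExcludeEdges;
  return origin.compartment != nullptr;
}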
999
1000// A `CoreDumpWriter` that serializes nodes to protobufs and writes them to the
1001// given `ZeroCopyOutputStream`.
1002class MOZ_STACK_CLASS StreamWriter : public CoreDumpWriter {
1003 using FrameSet = js::HashSet<uint64_t>;
1004 using TwoByteStringMap =
1005 js::HashMap<TwoByteString, uint64_t, TwoByteString::HashPolicy>;
1006 using OneByteStringMap = js::HashMap<const char*, uint64_t>;
1007
1008 JSContext* cx;
1009 bool wantNames;
1010 // The set of |JS::ubi::StackFrame::identifier()|s that have already been
1011 // serialized and written to the core dump.
1012 FrameSet framesAlreadySerialized;
1013 // The set of two-byte strings that have already been serialized and written
1014 // to the core dump.
1015 TwoByteStringMap twoByteStringsAlreadySerialized;
1016 // The set of one-byte strings that have already been serialized and written
1017 // to the core dump.
1018 OneByteStringMap oneByteStringsAlreadySerialized;
1019
1020 ::google::protobuf::io::ZeroCopyOutputStream& stream;
1021
1022 JS::CompartmentSet* compartments;
1023
1024 bool writeMessage(const ::google::protobuf::MessageLite& message) {
1025 // We have to create a new CodedOutputStream when writing each message so
1026 // that the 64MB size limit used by Coded{Output,Input}Stream to prevent
1027 // integer overflow is enforced per message rather than on the whole stream.
1028 ::google::protobuf::io::CodedOutputStream codedStream(&stream);
1029 codedStream.WriteVarint32(message.ByteSizeLong());
1030 message.SerializeWithCachedSizes(&codedStream);
1031 return !codedStream.HadError();
1032 }
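
The framing writeMessage produces is simply a varint byte count followed by the serialized message, with one CodedOutputStream per message so the size limit applies per message rather than to the whole dump. A dependency-free sketch of that length-delimited wire format (this is just the byte layout, not the protobuf API):

#include <cstdint>
#include <string>
#include <vector>

// Append an unsigned value in varint form: low 7 bits first, high bit set on
// every byte except the last.
static void appendVarint32(std::vector<uint8_t>& out, uint32_t value) {
  while (value >= 0x80) {
    out.push_back(static_cast<uint8_t>(value) | 0x80);
    value >>= 7;
  }
  out.push_back(static_cast<uint8_t>(value));
}

// Append one length-delimited record: varint byte count, then the payload.
// `payload` stands in for a serialized protobuf message.
static void appendMessage(std::vector<uint8_t>& out, const std::string& payload) {
  appendVarint32(out, static_cast<uint32_t>(payload.size()));
  out.insert(out.end(), payload.begin(), payload.end());
}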
1033
1034 // Attach the full two-byte string or a reference to a two-byte string that
1035 // has already been serialized to a protobuf message.
1036 template <typename SetStringFunction, typename SetRefFunction>
1037 bool attachTwoByteString(TwoByteString& string, SetStringFunction setString,
1038 SetRefFunction setRef) {
1039 auto ptr = twoByteStringsAlreadySerialized.lookupForAdd(string);
1040 if (ptr) {
1041 setRef(ptr->value());
1042 return true;
1043 }
1044
1045 auto length = string.length();
1046 auto stringData = MakeUnique<std::string>(length * sizeof(char16_t), '\0');
1047 if (!stringData) return false;
1048
1049 auto buf = const_cast<char16_t*>(
1050 reinterpret_cast<const char16_t*>(stringData->data()));
1051 string.copyToBuffer(RangedPtr<char16_t>(buf, length), length);
1052
1053 uint64_t ref = twoByteStringsAlreadySerialized.count();
1054 if (!twoByteStringsAlreadySerialized.add(ptr, std::move(string), ref))
1055 return false;
1056
1057 setString(stringData.release());
1058 return true;
1059 }
1060
1061 // Attach the full one-byte string or a reference to a one-byte string that
1062 // has already been serialized to a protobuf message.
1063 template <typename SetStringFunction, typename SetRefFunction>
1064 bool attachOneByteString(const char* string, SetStringFunction setString,
1065 SetRefFunction setRef) {
1066 auto ptr = oneByteStringsAlreadySerialized.lookupForAdd(string);
1067 if (ptr) {
1068 setRef(ptr->value());
1069 return true;
1070 }
1071
1072 auto length = strlen(string);
1073 auto stringData = MakeUnique<std::string>(string, length);
1074 if (!stringData) return false;
1075
1076 uint64_t ref = oneByteStringsAlreadySerialized.count();
1077 if (!oneByteStringsAlreadySerialized.add(ptr, string, ref)) return false;
1078
1079 setString(stringData.release());
1080 return true;
1081 }
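
Both attach helpers rely on the same convention: the first occurrence of a string is written in full and implicitly assigned the next index (the map's current count), while later occurrences are written as that index; the reader rebuilds the table by numbering full strings in the order it encounters them. A small sketch of that interning scheme with std::unordered_map (hypothetical names, one-byte strings only):

#include <cstdint>
#include <string>
#include <unordered_map>
#include <variant>

// What gets written for a string: either its full contents (first occurrence)
// or the index assigned to an earlier occurrence.
using StringOrRef = std::variant<std::string, uint64_t>;

class StringTable {
  std::unordered_map<std::string, uint64_t> seen_;

 public:
  StringOrRef intern(const std::string& s) {
    auto it = seen_.find(s);
    if (it != seen_.end()) return it->second;  // already serialized: emit a ref
    uint64_t ref = seen_.size();               // next index == current count
    seen_.emplace(s, ref);
    return s;                                  // first occurrence: emit the string
  }
};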
1082
1083 protobuf::StackFrame* getProtobufStackFrame(JS::ubi::StackFrame& frame,
1084 size_t depth = 1) {
1085 // NB: de-duplicated string properties must be written in the same order
1086 // here as they are read in `HeapSnapshot::saveStackFrame` or else indices
1087 // in references to already serialized strings will be off.
1088
1089     MOZ_ASSERT(frame,
1090                "null frames should be represented as the lack of a serialized "
1091                "stack frame");
1092
1093 auto id = frame.identifier();
1094 auto protobufStackFrame = MakeUnique<protobuf::StackFrame>();
1095 if (!protobufStackFrame) return nullptr;
1096
1097 if (framesAlreadySerialized.has(id)) {
1098 protobufStackFrame->set_ref(id);
1099 return protobufStackFrame.release();
1100 }
1101
1102 auto data = MakeUnique<protobuf::StackFrame_Data>();
1103 if (!data) return nullptr;
1104
1105 data->set_id(id);
1106 data->set_line(frame.line());
1107 data->set_column(frame.column().oneOriginValue());
1108 data->set_issystem(frame.isSystem());
1109 data->set_isselfhosted(frame.isSelfHosted(cx));
1110
1111 auto dupeSource = TwoByteString::from(frame.source());
1112 if (!attachTwoByteString(
1113 dupeSource,
1114 [&](std::string* source) { data->set_allocated_source(source); },
1115 [&](uint64_t ref) { data->set_sourceref(ref); })) {
1116 return nullptr;
1117 }
1118
1119 auto dupeName = TwoByteString::from(frame.functionDisplayName());
1120 if (dupeName.isNonNull()) {
1121 if (!attachTwoByteString(
1122 dupeName,
1123 [&](std::string* name) {
1124 data->set_allocated_functiondisplayname(name);
1125 },
1126 [&](uint64_t ref) { data->set_functiondisplaynameref(ref); })) {
1127 return nullptr;
1128 }
1129 }
1130
1131 auto parent = frame.parent();
1132 if (parent && depth < HeapSnapshot::MAX_STACK_DEPTH) {
1133 auto protobufParent = getProtobufStackFrame(parent, depth + 1);
1134 if (!protobufParent) return nullptr;
1135 data->set_allocated_parent(protobufParent);
1136 }
1137
1138 protobufStackFrame->set_allocated_data(data.release());
1139
1140 if (!framesAlreadySerialized.put(id)) return nullptr;
1141
1142 return protobufStackFrame.release();
1143 }
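
getProtobufStackFrame walks the parent chain recursively, emits a bare reference for any frame already present in framesAlreadySerialized, and stops descending at HeapSnapshot::MAX_STACK_DEPTH. A simplified sketch of that shape with plain structs instead of protobuf messages (names are illustrative, not the real types):

#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_set>

struct Frame {            // stands in for JS::ubi::StackFrame
  uint64_t id;
  const Frame* parent;    // nullptr for the root frame
};

struct SerializedFrame {  // stands in for protobuf::StackFrame
  uint64_t id = 0;
  bool isRef = false;     // true: back-reference to an already written frame
  std::unique_ptr<SerializedFrame> parent;
};

static constexpr std::size_t kMaxStackDepth = 60;  // cf. HeapSnapshot::MAX_STACK_DEPTH

static std::unique_ptr<SerializedFrame> serializeFrame(
    const Frame& frame, std::unordered_set<uint64_t>& written,
    std::size_t depth = 1) {
  auto out = std::make_unique<SerializedFrame>();
  out->id = frame.id;
  if (written.count(frame.id)) {  // already in the dump: reference only
    out->isRef = true;
    return out;
  }
  if (frame.parent && depth < kMaxStackDepth) {
    out->parent = serializeFrame(*frame.parent, written, depth + 1);
  }
  written.insert(frame.id);
  return out;
}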
1144
1145 public:
1146 StreamWriter(JSContext* cx,
1147 ::google::protobuf::io::ZeroCopyOutputStream& stream,
1148 bool wantNames, JS::CompartmentSet* compartments)
1149 : cx(cx),
1150 wantNames(wantNames),
1151 framesAlreadySerialized(cx),
1152 twoByteStringsAlreadySerialized(cx),
1153 oneByteStringsAlreadySerialized(cx),
1154 stream(stream),
1155 compartments(compartments) {}
1156
1157 ~StreamWriter() override {}
1158
1159 bool writeMetadata(uint64_t timestamp) final {
1160 protobuf::Metadata metadata;
1161 metadata.set_timestamp(timestamp);
1162 return writeMessage(metadata);
1163 }
1164
1165 bool writeNode(const JS::ubi::Node& ubiNode, EdgePolicy includeEdges) final {
1166 // NB: de-duplicated string properties must be written in the same order
1167 // here as they are read in `HeapSnapshot::saveNode` or else indices in
1168 // references to already serialized strings will be off.
1169
1170 protobuf::Node protobufNode;
1171 protobufNode.set_id(ubiNode.identifier());
1172
1173 protobufNode.set_coarsetype(
1174 JS::ubi::CoarseTypeToUint32(ubiNode.coarseType()));
1175
1176 auto typeName = TwoByteString(ubiNode.typeName());
1177     if (NS_WARN_IF(!attachTwoByteString(
1178             typeName,
1179             [&](std::string* name) {
1180               protobufNode.set_allocated_typename_(name);
1181             },
1182             [&](uint64_t ref) { protobufNode.set_typenameref(ref); }))) {
1183 return false;
1184 }
1185
1186 mozilla::MallocSizeOf mallocSizeOf = dbg::GetDebuggerMallocSizeOf(cx);
1187     MOZ_ASSERT(mallocSizeOf);
1188 protobufNode.set_size(ubiNode.size(mallocSizeOf));
1189
1190 if (includeEdges) {
1191 auto edges = ubiNode.edges(cx, wantNames);
1192       if (NS_WARN_IF(!edges)) return false;
1193
1194 for (; !edges->empty(); edges->popFront()) {
1195 ubi::Edge& ubiEdge = edges->front();
1196 if (!ShouldIncludeEdge(compartments, ubiNode, ubiEdge)) {
1197 continue;
1198 }
1199
1200 protobuf::Edge* protobufEdge = protobufNode.add_edges();
1201         if (NS_WARN_IF(!protobufEdge)) {
1202 return false;
1203 }
1204
1205 protobufEdge->set_referent(ubiEdge.referent.identifier());
1206
1207 if (wantNames && ubiEdge.name) {
1208 TwoByteString edgeName(std::move(ubiEdge.name));
1209           if (NS_WARN_IF(!attachTwoByteString(
1210                   edgeName,
1211                   [&](std::string* name) {
1212                     protobufEdge->set_allocated_name(name);
1213                   },
1214                   [&](uint64_t ref) { protobufEdge->set_nameref(ref); }))) {
1215 return false;
1216 }
1217 }
1218 }
1219 }
1220
1221 if (ubiNode.hasAllocationStack()) {
1222 auto ubiStackFrame = ubiNode.allocationStack();
1223 auto protoStackFrame = getProtobufStackFrame(ubiStackFrame);
1224       if (NS_WARN_IF(!protoStackFrame)) return false;
1225 protobufNode.set_allocated_allocationstack(protoStackFrame);
1226 }
1227
1228 if (auto className = ubiNode.jsObjectClassName()) {
1229       if (NS_WARN_IF(!attachOneByteString(
1230               className,
1231               [&](std::string* name) {
1232                 protobufNode.set_allocated_jsobjectclassname(name);
1233               },
1234               [&](uint64_t ref) {
1235                 protobufNode.set_jsobjectclassnameref(ref);
1236               }))) {
1237 return false;
1238 }
1239 }
1240
1241 if (auto scriptFilename = ubiNode.scriptFilename()) {
1242       if (NS_WARN_IF(!attachOneByteString(
1243               scriptFilename,
1244               [&](std::string* name) {
1245                 protobufNode.set_allocated_scriptfilename(name);
1246               },
1247               [&](uint64_t ref) {
1248                 protobufNode.set_scriptfilenameref(ref);
1249               }))) {
1250 return false;
1251 }
1252 }
1253
1254 if (ubiNode.descriptiveTypeName()) {
1255 auto descriptiveTypeName = TwoByteString(ubiNode.descriptiveTypeName());
1256       if (NS_WARN_IF(!attachTwoByteString(
1257               descriptiveTypeName,
1258               [&](std::string* name) {
1259                 protobufNode.set_allocated_descriptivetypename(name);
1260               },
1261               [&](uint64_t ref) {
1262                 protobufNode.set_descriptivetypenameref(ref);
1263               }))) {
1264 return false;
1265 }
1266 }
1267
1268 return writeMessage(protobufNode);
1269 }
1270};
1271
1272// A JS::ubi::BreadthFirst handler that serializes a snapshot of the heap into a
1273// core dump.
1274class MOZ_STACK_CLASS HeapSnapshotHandler {
1275 CoreDumpWriter& writer;
1276 JS::CompartmentSet* compartments;
1277
1278 public:
1279 // For telemetry.
1280 uint32_t nodeCount;
1281 uint32_t edgeCount;
1282
1283 HeapSnapshotHandler(CoreDumpWriter& writer, JS::CompartmentSet* compartments)
1284 : writer(writer),
1285 compartments(compartments),
1286 nodeCount(0),
1287 edgeCount(0) {}
1288
1289 // JS::ubi::BreadthFirst handler interface.
1290
1291 class NodeData {};
1292 typedef JS::ubi::BreadthFirst<HeapSnapshotHandler> Traversal;
1293 bool operator()(Traversal& traversal, JS::ubi::Node origin,
1294 const JS::ubi::Edge& edge, NodeData*, bool first) {
1295 edgeCount++;
1296
1297 // We're only interested in the first time we reach edge.referent, not in
1298 // every edge arriving at that node. "But, don't we want to serialize every
1299 // edge in the heap graph?" you ask. Don't worry! This edge is still
1300 // serialized into the core dump. Serializing a node also serializes each of
1301 // its edges, and if we are traversing a given edge, we must have already
1302 // visited and serialized the origin node and its edges.
1303 if (!first) return true;
1304
1305 CoreDumpWriter::EdgePolicy policy;
1306 if (!ShouldIncludeEdge(compartments, origin, edge, &policy)) {
1307 // Because ShouldIncludeEdge considers the |origin| node as well, we don't
1308 // want to consider this node 'visited' until we write it to the core
1309 // dump.
1310 traversal.doNotMarkReferentAsVisited();
1311 return true;
1312 }
1313
1314 nodeCount++;
1315
1316 if (policy == CoreDumpWriter::EXCLUDE_EDGES) traversal.abandonReferent();
1317
1318 return writer.writeNode(edge.referent, policy);
1319 }
1320};
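
The handler above is invoked once per traversed edge but serializes a node only on the first visit; every edge still ends up in the dump because writeNode emits all of a node's outgoing edges. A toy breadth-first traversal with the same first-visit bookkeeping, ignoring edge policies and serialization (assumed adjacency-list graph, not the JS::ubi API):

#include <cstdint>
#include <queue>
#include <unordered_map>
#include <unordered_set>
#include <vector>

using NodeId = uint64_t;
using Graph = std::unordered_map<NodeId, std::vector<NodeId>>;  // adjacency list

// Count every edge reached from `start`, but visit (and "serialize") each node
// only the first time it is seen. The start node itself is written separately,
// as in WriteHeapGraph, so it is not counted here.
static void traverse(const Graph& g, NodeId start, uint32_t& nodeCount,
                     uint32_t& edgeCount) {
  nodeCount = 0;
  edgeCount = 0;
  std::unordered_set<NodeId> visited{start};
  std::queue<NodeId> pending;
  pending.push(start);

  while (!pending.empty()) {
    NodeId current = pending.front();
    pending.pop();
    auto it = g.find(current);
    if (it == g.end()) continue;
    for (NodeId referent : it->second) {
      ++edgeCount;                                     // every edge is seen
      if (!visited.insert(referent).second) continue;  // nodes only once
      ++nodeCount;
      pending.push(referent);
    }
  }
}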
1321
1322bool WriteHeapGraph(JSContext* cx, const JS::ubi::Node& node,
1323 CoreDumpWriter& writer, bool wantNames,
1324 JS::CompartmentSet* compartments,
1325 JS::AutoCheckCannotGC& noGC, uint32_t& outNodeCount,
1326 uint32_t& outEdgeCount) {
1327 // Serialize the starting node to the core dump.
1328
1329   if (NS_WARN_IF(!writer.writeNode(node, CoreDumpWriter::INCLUDE_EDGES))) {
1330 return false;
1331 }
1332
1333 // Walk the heap graph starting from the given node and serialize it into the
1334 // core dump.
1335
1336 HeapSnapshotHandler handler(writer, compartments);
1337 HeapSnapshotHandler::Traversal traversal(cx, handler, noGC);
1338 traversal.wantNames = wantNames;
1339
1340 bool ok = traversal.addStartVisited(node) && traversal.traverse();
1341
1342 if (ok) {
1343 outNodeCount = handler.nodeCount;
1344 outEdgeCount = handler.edgeCount;
1345 }
1346
1347 return ok;
1348}
1349
1350static unsigned long msSinceProcessCreation(const TimeStamp& now) {
1351 auto duration = now - TimeStamp::ProcessCreation();
1352 return (unsigned long)duration.ToMilliseconds();
1353}
1354
1355/* static */
1356already_AddRefed<nsIFile> HeapSnapshot::CreateUniqueCoreDumpFile(
1357 ErrorResult& rv, const TimeStamp& now, nsAString& outFilePath,
1358 nsAString& outSnapshotId) {
1359   MOZ_RELEASE_ASSERT(XRE_IsParentProcess());
1360 nsCOMPtr<nsIFile> file;
1361 rv = GetSpecialSystemDirectory(OS_TemporaryDirectory, getter_AddRefs(file));
1362   if (NS_WARN_IF(rv.Failed())) return nullptr;
1363
1364 nsAutoString tempPath;
1365 rv = file->GetPath(tempPath);
1366   if (NS_WARN_IF(rv.Failed())) return nullptr;
1367
1368 auto ms = msSinceProcessCreation(now);
1369 rv = file->AppendNative(nsPrintfCString("%lu.fxsnapshot", ms));
1370   if (NS_WARN_IF(rv.Failed())) return nullptr;
1371
1372 rv = file->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0666);
1373   if (NS_WARN_IF(rv.Failed())) return nullptr;
1374
1375 rv = file->GetPath(outFilePath);
1376   if (NS_WARN_IF(rv.Failed())) return nullptr;
1377
1378 // The snapshot ID must be computed in the process that created the
1379 // temp file, because TmpD may not be the same in all processes.
1380 outSnapshotId.Assign(Substring(
1381 outFilePath, tempPath.Length() + 1,
1382 outFilePath.Length() - tempPath.Length() - sizeof(".fxsnapshot")));
1383
1384 return file.forget();
1385}
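
The Substring arithmetic above recovers the snapshot id as everything between the temp directory's path separator and the trailing ".fxsnapshot"; sizeof(".fxsnapshot") is 12 because it counts the terminating NUL, which together with the +1 start offset removes exactly the separator plus the 11-character extension. A plain std::string sketch of the same computation (it assumes the path really has that shape):

#include <cstddef>
#include <string>

// Recover the snapshot id -- the basename without its ".fxsnapshot" extension --
// given the temp directory path and the full file path. Assumes filePath is
// exactly tempPath + separator + id + ".fxsnapshot".
static std::string snapshotIdFromPath(const std::string& tempPath,
                                      const std::string& filePath) {
  static const std::string kExt = ".fxsnapshot";
  std::size_t start = tempPath.size() + 1;                    // skip directory + separator
  std::size_t count = filePath.size() - start - kExt.size();  // drop the extension
  return filePath.substr(start, count);
}

// e.g. snapshotIdFromPath("/tmp", "/tmp/12345-1.fxsnapshot") yields "12345-1"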
1386
1387// Deletion policy for cleaning up PHeapSnapshotTempFileHelperChild pointers.
1388class DeleteHeapSnapshotTempFileHelperChild {
1389 public:
1390 constexpr DeleteHeapSnapshotTempFileHelperChild() {}
1391
1392 void operator()(PHeapSnapshotTempFileHelperChild* ptr) const {
1393     Unused << NS_WARN_IF(!HeapSnapshotTempFileHelperChild::Send__delete__(ptr));
1394 }
1395};
1396
1397// A UniquePtr alias to automatically manage PHeapSnapshotTempFileHelperChild
1398// pointers.
1399using UniqueHeapSnapshotTempFileHelperChild =
1400 UniquePtr<PHeapSnapshotTempFileHelperChild,
1401 DeleteHeapSnapshotTempFileHelperChild>;
1402
1403// Get an nsIOutputStream that we can write the heap snapshot to. In non-e10s
1404// and in the e10s parent process, open a file directly and create an output
1405// stream for it. In e10s child processes, we are sandboxed without access to
1406// the filesystem. Use IPDL to request a file descriptor from the parent
1407// process.
1408static already_AddRefed<nsIOutputStream> getCoreDumpOutputStream(
1409 ErrorResult& rv, TimeStamp& start, nsAString& outFilePath,
1410 nsAString& outSnapshotId) {
1411 if (XRE_IsParentProcess()) {
1412 // Create the file and open the output stream directly.
1413
1414 nsCOMPtr<nsIFile> file = HeapSnapshot::CreateUniqueCoreDumpFile(
1415 rv, start, outFilePath, outSnapshotId);
1416     if (NS_WARN_IF(rv.Failed())) return nullptr;
1417
1418 nsCOMPtr<nsIOutputStream> outputStream;
1419 rv = NS_NewLocalFileOutputStream(getter_AddRefs(outputStream), file,
1420                                      PR_WRONLY, -1, 0);
1421     if (NS_WARN_IF(rv.Failed())) return nullptr;
1422
1423 return outputStream.forget();
1424 }
1425 // Request a file descriptor from the parent process over IPDL.
1426
1427 auto cc = ContentChild::GetSingleton();
1428 if (!cc) {
1429 rv.Throw(NS_ERROR_UNEXPECTED);
1430 return nullptr;
1431 }
1432
1433 UniqueHeapSnapshotTempFileHelperChild helper(
1434 cc->SendPHeapSnapshotTempFileHelperConstructor());
1435   if (NS_WARN_IF(!helper)) {
1436 rv.Throw(NS_ERROR_UNEXPECTED);
1437 return nullptr;
1438 }
1439
1440 OpenHeapSnapshotTempFileResponse response;
1441 if (!helper->SendOpenHeapSnapshotTempFile(&response)) {
1442 rv.Throw(NS_ERROR_UNEXPECTED);
1443 return nullptr;
1444 }
1445 if (response.type() == OpenHeapSnapshotTempFileResponse::Tnsresult) {
1446 rv.Throw(response.get_nsresult());
1447 return nullptr;
1448 }
1449
1450 auto opened = response.get_OpenedFile();
1451 outFilePath = opened.path();
1452 outSnapshotId = opened.snapshotId();
1453 nsCOMPtr<nsIOutputStream> outputStream =
1454 FileDescriptorOutputStream::Create(opened.descriptor());
1455   if (NS_WARN_IF(!outputStream)) {
1456 rv.Throw(NS_ERROR_UNEXPECTED);
1457 return nullptr;
1458 }
1459
1460 return outputStream.forget();
1461}
1462
1463} // namespace devtools
1464
1465namespace dom {
1466
1467using namespace JS;
1468using namespace devtools;
1469
1470/* static */
1471void ChromeUtils::SaveHeapSnapshotShared(
1472 GlobalObject& global, const HeapSnapshotBoundaries& boundaries,
1473 nsAString& outFilePath, nsAString& outSnapshotId, ErrorResult& rv) {
1474 auto start = TimeStamp::Now();
1475
1476 bool wantNames = true;
1477 CompartmentSet compartments;
1478 uint32_t nodeCount = 0;
1479 uint32_t edgeCount = 0;
1480
1481 nsCOMPtr<nsIOutputStream> outputStream =
1482 getCoreDumpOutputStream(rv, start, outFilePath, outSnapshotId);
1483   if (NS_WARN_IF(rv.Failed())) return;
1484
1485 ZeroCopyNSIOutputStream zeroCopyStream(outputStream);
1486 ::google::protobuf::io::GzipOutputStream gzipStream(&zeroCopyStream);
1487
1488 JSContext* cx = global.Context();
1489
1490 {
1491 ubi::RootList rootList(cx, wantNames);
1492 auto [ok, nogc] =
1493 EstablishBoundaries(cx, rv, boundaries, rootList, compartments);
1494 if (!ok) {
1495 return;
1496 }
1497
1498 StreamWriter writer(cx, gzipStream, wantNames,
1499 !compartments.empty() ? &compartments : nullptr);
1500
1501 ubi::Node roots(&rootList);
1502
1503 // Serialize the initial heap snapshot metadata to the core dump.
1504 if (!writer.writeMetadata(PR_Now()) ||
1505 // Serialize the heap graph to the core dump, starting from our list of
1506 // roots.
1507 !WriteHeapGraph(cx, roots, writer, wantNames,
1508 !compartments.empty() ? &compartments : nullptr, nogc,
1509 nodeCount, edgeCount)) {
1510 rv.Throw(zeroCopyStream.failed() ? zeroCopyStream.result()
1511 : NS_ERROR_UNEXPECTED);
1512 return;
1513 }
1514 }
1515
1516 Telemetry::AccumulateTimeDelta(Telemetry::DEVTOOLS_SAVE_HEAP_SNAPSHOT_MS,
1517 start);
1518 Telemetry::Accumulate(Telemetry::DEVTOOLS_HEAP_SNAPSHOT_NODE_COUNT,
1519 nodeCount);
1520 Telemetry::Accumulate(Telemetry::DEVTOOLS_HEAP_SNAPSHOT_EDGE_COUNT,
1521 edgeCount);
1522}
1523
1524/* static */
1525uint64_t ChromeUtils::GetObjectNodeId(GlobalObject& global,
1526 JS::Handle<JSObject*> val) {
1527 JS::Rooted<JSObject*> obj(global.Context(), val);
1528
1529 JS::ubi::Node node(obj);
1530 return node.identifier();
1531}
1532
1533/* static */
1534void ChromeUtils::SaveHeapSnapshot(GlobalObject& global,
1535 const HeapSnapshotBoundaries& boundaries,
1536 nsAString& outFilePath, ErrorResult& rv) {
1537 nsAutoString snapshotId;
1538 SaveHeapSnapshotShared(global, boundaries, outFilePath, snapshotId, rv);
1539}
1540
1541/* static */
1542void ChromeUtils::SaveHeapSnapshotGetId(
1543 GlobalObject& global, const HeapSnapshotBoundaries& boundaries,
1544 nsAString& outSnapshotId, ErrorResult& rv) {
1545 nsAutoString filePath;
1546 SaveHeapSnapshotShared(global, boundaries, filePath, outSnapshotId, rv);
1547}
1548
1549/* static */
1550already_AddRefed<HeapSnapshot> ChromeUtils::ReadHeapSnapshot(
1551 GlobalObject& global, const nsAString& filePath, ErrorResult& rv) {
1552 auto start = TimeStamp::Now();
1553
1554 nsCOMPtr<nsIFile> snapshotFile;
1555 rv = NS_NewLocalFile(filePath, getter_AddRefs(snapshotFile));
1556 if (rv.Failed()) {
1557 return nullptr;
1558 }
1559
1560 AutoMemMap mm;
1561 rv = mm.init(snapshotFile);
1562 if (rv.Failed()) return nullptr;
1563
1564 RefPtr<HeapSnapshot> snapshot = HeapSnapshot::Create(
1565 global.Context(), global, reinterpret_cast<const uint8_t*>(mm.address()),
1566 mm.size(), rv);
1567
1568 if (!rv.Failed())
1569 Telemetry::AccumulateTimeDelta(Telemetry::DEVTOOLS_READ_HEAP_SNAPSHOT_MS,
1570 start);
1571
1572 return snapshot.forget();
1573}
1574
1575} // namespace dom
1576} // namespace mozilla

/var/lib/jenkins/workspace/firefox-scan-build/devtools/shared/heapsnapshot/HeapSnapshot.h

1/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
2/* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5
6#ifndef mozilla_devtools_HeapSnapshot__
7#define mozilla_devtools_HeapSnapshot__
8
9#include "js/HashTable.h"
10#include "mozilla/devtools/DeserializedNode.h"
11#include "mozilla/dom/BindingDeclarations.h"
12#include "mozilla/dom/Nullable.h"
13#include "mozilla/HashFunctions.h"
14#include "mozilla/Maybe.h"
15#include "mozilla/RefCounted.h"
16#include "mozilla/RefPtr.h"
17#include "mozilla/TimeStamp.h"
18#include "mozilla/UniquePtrExtensions.h"
19
20#include "CoreDump.pb.h"
21#include "nsCOMPtr.h"
22#include "nsCRTGlue.h"
23#include "nsCycleCollectionParticipant.h"
24#include "nsISupports.h"
25#include "nsWrapperCache.h"
26#include "nsXPCOM.h"
27
28namespace mozilla {
29class ErrorResult;
30
31namespace devtools {
32
33class DominatorTree;
34
35using UniqueTwoByteString = UniqueFreePtr<char16_t[]>;
36using UniqueOneByteString = UniqueFreePtr<char[]>;
37
38class HeapSnapshot final : public nsISupports, public nsWrapperCache {
39 friend struct DeserializedNode;
40 friend struct DeserializedEdge;
41 friend struct DeserializedStackFrame;
42 friend class JS::ubi::Concrete<JS::ubi::DeserializedNode>;
43
44 explicit HeapSnapshot(JSContext* cx, nsISupports* aParent)
45 : timestamp(Nothing()),
46 rootId(0),
47 nodes(cx),
48 frames(cx),
49 mParent(aParent) {
50     MOZ_ASSERT(aParent);
51 };
52
53 // Initialize this HeapSnapshot from the given buffer that contains a
54 // serialized core dump. Do NOT take ownership of the buffer, only borrow it
55 // for the duration of the call. Return false on failure.
56 bool init(JSContext* cx, const uint8_t* buffer, uint32_t size);
57
58 using NodeIdSet = js::HashSet<NodeId>;
59
60 // Save the given `protobuf::Node` message in this `HeapSnapshot` as a
61 // `DeserializedNode`.
62 bool saveNode(const protobuf::Node& node, NodeIdSet& edgeReferents);
63
64 // Save the given `protobuf::StackFrame` message in this `HeapSnapshot` as a
65 // `DeserializedStackFrame`. The saved stack frame's id is returned via the
66 // out parameter.
67 bool saveStackFrame(const protobuf::StackFrame& frame,
68 StackFrameId& outFrameId);
69
70 public:
71 // The maximum number of stack frames that we will serialize into a core
72 // dump. This helps prevent over-recursion in the protobuf library when
73 // deserializing stacks.
74 static const size_t MAX_STACK_DEPTH = 60;
75
76 private:
77 // If present, a timestamp in the same units that `PR_Now` gives.
78 Maybe<uint64_t> timestamp;
79
80 // The id of the root node for this deserialized heap graph.
81 NodeId rootId;
82
83 // The set of nodes in this deserialized heap graph, keyed by id.
84 using NodeSet = js::HashSet<DeserializedNode, DeserializedNode::HashPolicy>;
85 NodeSet nodes;
86
87 // The set of stack frames in this deserialized heap graph, keyed by id.
88 using FrameSet =
89 js::HashSet<DeserializedStackFrame, DeserializedStackFrame::HashPolicy>;
90 FrameSet frames;
91
92 Vector<UniqueTwoByteString> internedTwoByteStrings;
93 Vector<UniqueOneByteString> internedOneByteStrings;
94
95 using StringOrRef = Variant<const std::string*, uint64_t>;
96
97 template <typename CharT, typename InternedStringSet>
98 const CharT* getOrInternString(InternedStringSet& internedStrings,
99 Maybe<StringOrRef>& maybeStrOrRef);
100
101 protected:
102 nsCOMPtr<nsISupports> mParent;
103
104 virtual ~HeapSnapshot() {}
105
106 public:
107 // Create a `HeapSnapshot` from the given buffer that contains a serialized
108 // core dump. Do NOT take ownership of the buffer, only borrow it for the
109 // duration of the call.
110 static already_AddRefed<HeapSnapshot> Create(JSContext* cx,
111 dom::GlobalObject& global,
112 const uint8_t* buffer,
113 uint32_t size, ErrorResult& rv);
114
115 // Creates the `$TEMP_DIR/XXXXXX-XXX.fxsnapshot` core dump file that heap
116 // snapshots are serialized into.
117 static already_AddRefed<nsIFile> CreateUniqueCoreDumpFile(
118 ErrorResult& rv, const TimeStamp& now, nsAString& outFilePath,
119 nsAString& outSnapshotId);
120
121   NS_DECL_CYCLE_COLLECTING_ISUPPORTS
122   NS_DECL_CYCLE_COLLECTION_WRAPPERCACHE_CLASS(HeapSnapshot)
123 MOZ_DECLARE_REFCOUNTED_TYPENAME(HeapSnapshot)const char* typeName() const { return "HeapSnapshot"; } size_t
typeSize() const { return sizeof(*this); }
124
125 nsISupports* GetParentObject() const { return mParent; }
126
127 virtual JSObject* WrapObject(JSContext* aCx,
128 JS::Handle<JSObject*> aGivenProto) override;
129
130 const char16_t* borrowUniqueString(const char16_t* duplicateString,
131 size_t length);
132
133 // Get the root node of this heap snapshot's graph.
134 JS::ubi::Node getRoot() {
135 auto p = nodes.lookup(rootId);
136 MOZ_ASSERT(p)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(p)>::isValid, "invalid assertion condition"); if (
(__builtin_expect(!!(!(!!(p))), 0))) { do { } while (false); MOZ_ReportAssertionFailure
("p", "/var/lib/jenkins/workspace/firefox-scan-build/devtools/shared/heapsnapshot/HeapSnapshot.h"
, 136); AnnotateMozCrashReason("MOZ_ASSERT" "(" "p" ")"); do {
*((volatile int*)__null) = 136; __attribute__((nomerge)) ::abort
(); } while (false); } } while (false)
;
137 const DeserializedNode& node = *p;
138 return JS::ubi::Node(const_cast<DeserializedNode*>(&node));
139 }
140
141 Maybe<JS::ubi::Node> getNodeById(JS::ubi::Node::Id nodeId) {
142 auto p = nodes.lookup(nodeId);
 4. Calling 'HashSet::lookup'
 13. Returning from 'HashSet::lookup'
143 if (!p) return Nothing();
 14. Assuming the condition is false
 15. Taking false branch
144 return Some(JS::ubi::Node(const_cast<DeserializedNode*>(&*p)));
 16. Calling 'Ptr::operator*'
145 }
146
147 void TakeCensus(JSContext* cx, JS::Handle<JSObject*> options,
148 JS::MutableHandle<JS::Value> rval, ErrorResult& rv);
149
150 void DescribeNode(JSContext* cx, JS::Handle<JSObject*> breakdown,
151 uint64_t nodeId, JS::MutableHandle<JS::Value> rval,
152 ErrorResult& rv);
153
154 already_AddRefed<DominatorTree> ComputeDominatorTree(ErrorResult& rv);
155
156 void ComputeShortestPaths(JSContext* cx, uint64_t start,
157 const dom::Sequence<uint64_t>& targets,
158 uint64_t maxNumPaths,
159 JS::MutableHandle<JSObject*> results,
160 ErrorResult& rv);
161
162 dom::Nullable<uint64_t> GetCreationTime() {
163 static const uint64_t maxTime = uint64_t(1) << 53;
164 if (timestamp.isSome() && timestamp.ref() <= maxTime) {
165 return dom::Nullable<uint64_t>(timestamp.ref());
166 }
167
168 return dom::Nullable<uint64_t>();
169 }
170};
171
172// A `CoreDumpWriter` is given the data we wish to save in a core dump and
173// serializes it to disk, or memory, or a socket, etc.
174class CoreDumpWriter {
175 public:
176 virtual ~CoreDumpWriter() {};
177
178 // Write the given bits of metadata we would like to associate with this core
179 // dump.
180 virtual bool writeMetadata(uint64_t timestamp) = 0;
181
182 enum EdgePolicy : bool { INCLUDE_EDGES = true, EXCLUDE_EDGES = false };
183
184 // Write the given `JS::ubi::Node` to the core dump. The given `EdgePolicy`
185 // dictates whether its outgoing edges should also be written to the core
186 // dump, or excluded.
187 virtual bool writeNode(const JS::ubi::Node& node,
188 EdgePolicy includeEdges) = 0;
189};
190
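For illustration, a minimal CoreDumpWriter implementation might look like the sketch below. `CountingCoreDumpWriter` is a hypothetical name used here only to show the shape of the interface; real writers serialize protobuf messages to disk, memory, or a socket as described above.

class CountingCoreDumpWriter final : public mozilla::devtools::CoreDumpWriter {
 public:
  uint64_t mTimestamp = 0;
  uint32_t mNodeCount = 0;

  bool writeMetadata(uint64_t timestamp) override {
    // Just remember the metadata instead of serializing it.
    mTimestamp = timestamp;
    return true;
  }

  bool writeNode(const JS::ubi::Node& node, EdgePolicy includeEdges) override {
    // Count nodes; a real writer would emit a protobuf::Node message here.
    (void)node;
    (void)includeEdges;
    mNodeCount++;
    return true;
  }
};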
191// Serialize the heap graph as seen from `node` with the given `CoreDumpWriter`.
 192// If `wantNames` is true, capture edge names. If `compartments` is non-null,
 193// only capture the sub-graph within that compartment set, otherwise capture
 194// the whole heap graph. Returns false on failure.
195bool WriteHeapGraph(JSContext* cx, const JS::ubi::Node& node,
196 CoreDumpWriter& writer, bool wantNames,
197 JS::CompartmentSet* compartments,
198 JS::AutoCheckCannotGC& noGC, uint32_t& outNodeCount,
199 uint32_t& outEdgeCount);
200inline bool WriteHeapGraph(JSContext* cx, const JS::ubi::Node& node,
201 CoreDumpWriter& writer, bool wantNames,
202 JS::CompartmentSet* compartments,
203 JS::AutoCheckCannotGC& noGC) {
204 uint32_t ignoreNodeCount;
205 uint32_t ignoreEdgeCount;
206 return WriteHeapGraph(cx, node, writer, wantNames, compartments, noGC,
207 ignoreNodeCount, ignoreEdgeCount);
208}
209
210// Get the mozilla::MallocSizeOf for the current thread's JSRuntime.
211MallocSizeOf GetCurrentThreadDebuggerMallocSizeOf();
212
213} // namespace devtools
214} // namespace mozilla
215
216#endif // mozilla_devtools_HeapSnapshot__

/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h

1/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2/* vim: set ts=8 sts=2 et sw=2 tw=80: */
3/* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7//---------------------------------------------------------------------------
8// Overview
9//---------------------------------------------------------------------------
10//
11// This file defines HashMap<Key, Value> and HashSet<T>, hash tables that are
12// fast and have a nice API.
13//
14// Both hash tables have two optional template parameters.
15//
16// - HashPolicy. This defines the operations for hashing and matching keys. The
17// default HashPolicy is appropriate when both of the following two
18// conditions are true.
19//
20// - The key type stored in the table (|Key| for |HashMap<Key, Value>|, |T|
21// for |HashSet<T>|) is an integer, pointer, UniquePtr, float, or double.
22//
23// - The type used for lookups (|Lookup|) is the same as the key type. This
24// is usually the case, but not always.
25//
26// There is also a |CStringHasher| policy for |char*| keys. If your keys
27// don't match any of the above cases, you must provide your own hash policy;
28// see the "Hash Policy" section below.
29//
30// - AllocPolicy. This defines how allocations are done by the table.
31//
32// - |MallocAllocPolicy| is the default and is usually appropriate; note that
33// operations (such as insertions) that might cause allocations are
34// fallible and must be checked for OOM. These checks are enforced by the
35// use of [[nodiscard]].
36//
37// - |InfallibleAllocPolicy| is another possibility; it allows the
38// abovementioned OOM checks to be done with MOZ_ALWAYS_TRUE().
39//
40// Note that entry storage allocation is lazy, and not done until the first
41// lookupForAdd(), put(), or putNew() is performed.
42//
43// See AllocPolicy.h for more details.
44//
45// Documentation on how to use HashMap and HashSet, including examples, is
46// present within those classes. Search for "class HashMap" and "class
47// HashSet".
48//
49// Both HashMap and HashSet are implemented on top of a third class, HashTable.
50// You only need to look at HashTable if you want to understand the
51// implementation.
52//
53// How does mozilla::HashTable (this file) compare with PLDHashTable (and its
54// subclasses, such as nsTHashtable)?
55//
56// - mozilla::HashTable is a lot faster, largely because it uses templates
57// throughout *and* inlines everything. PLDHashTable inlines operations much
58// less aggressively, and also uses "virtual ops" for operations like hashing
59// and matching entries that require function calls.
60//
61// - Correspondingly, mozilla::HashTable use is likely to increase executable
62// size much more than PLDHashTable.
63//
64// - mozilla::HashTable has a nicer API, with a proper HashSet vs. HashMap
65// distinction.
66//
67// - mozilla::HashTable requires more explicit OOM checking. As mentioned
68// above, the use of |InfallibleAllocPolicy| can simplify things.
69//
70// - mozilla::HashTable has a default capacity on creation of 32 and a minimum
71// capacity of 4. PLDHashTable has a default capacity on creation of 8 and a
72// minimum capacity of 8.
73
74#ifndef mozilla_HashTable_h
75#define mozilla_HashTable_h
76
77#include <utility>
78#include <type_traits>
79
80#include "mozilla/AllocPolicy.h"
81#include "mozilla/Assertions.h"
82#include "mozilla/Attributes.h"
83#include "mozilla/Casting.h"
84#include "mozilla/HashFunctions.h"
85#include "mozilla/MathAlgorithms.h"
86#include "mozilla/Maybe.h"
87#include "mozilla/MemoryChecking.h"
88#include "mozilla/MemoryReporting.h"
89#include "mozilla/Opaque.h"
90#include "mozilla/OperatorNewExtensions.h"
91#include "mozilla/ReentrancyGuard.h"
92#include "mozilla/UniquePtr.h"
93#include "mozilla/WrappingOperations.h"
94
95namespace mozilla {
96
97template <class, class = void>
98struct DefaultHasher;
99
100template <class, class>
101class HashMapEntry;
102
103namespace detail {
104
105template <typename T>
106class HashTableEntry;
107
108template <class T, class HashPolicy, class AllocPolicy>
109class HashTable;
110
111} // namespace detail
112
113// The "generation" of a hash table is an opaque value indicating the state of
114// modification of the hash table through its lifetime. If the generation of
115// a hash table compares equal at times T1 and T2, then lookups in the hash
116// table, pointers to (or into) hash table entries, etc. at time T1 are valid
117// at time T2. If the generation compares unequal, these computations are all
118// invalid and must be performed again to be used.
119//
120// Generations are meaningfully comparable only with respect to a single hash
121// table. It's always nonsensical to compare the generation of distinct hash
122// tables H1 and H2.
123using Generation = Opaque<uint64_t>;
124
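As a sketch of the generation contract just described, hypothetical caller code (not part of this header) might look like this:

#include "mozilla/Assertions.h"
#include "mozilla/HashTable.h"

void generationSketch() {
  mozilla::HashMap<int, char> map;
  MOZ_ALWAYS_TRUE(map.put(1, 'a'));

  mozilla::Generation before = map.generation();
  auto ptr = map.lookup(1);

  MOZ_ALWAYS_TRUE(map.put(2, 'b'));  // may restructure the table

  if (map.generation() == before && ptr) {
    char value = ptr->value();  // |ptr| is still valid at this generation
    (void)value;
  } else {
    // |ptr| must be discarded; look the key up again before touching it.
    if (auto fresh = map.lookup(1)) {
      char value = fresh->value();
      (void)value;
    }
  }
}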
125//---------------------------------------------------------------------------
126// HashMap
127//---------------------------------------------------------------------------
128
129// HashMap is a fast hash-based map from keys to values.
130//
131// Template parameter requirements:
132// - Key/Value: movable, destructible, assignable.
133// - HashPolicy: see the "Hash Policy" section below.
134// - AllocPolicy: see AllocPolicy.h.
135//
136// Note:
137// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
138// called by HashMap must not call back into the same HashMap object.
139//
140template <class Key, class Value, class HashPolicy = DefaultHasher<Key>,
141 class AllocPolicy = MallocAllocPolicy>
142class HashMap {
143 // -- Implementation details -----------------------------------------------
144
145 // HashMap is not copyable or assignable.
146 HashMap(const HashMap& hm) = delete;
147 HashMap& operator=(const HashMap& hm) = delete;
148
149 using TableEntry = HashMapEntry<Key, Value>;
150
151 struct MapHashPolicy : HashPolicy {
152 using Base = HashPolicy;
153 using KeyType = Key;
154
155 static const Key& getKey(TableEntry& aEntry) { return aEntry.key(); }
156
157 static void setKey(TableEntry& aEntry, Key& aKey) {
158 HashPolicy::rekey(aEntry.mutableKey(), aKey);
159 }
160 };
161
162 using Impl = detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy>;
163 Impl mImpl;
164
165 friend class Impl::Enum;
166
167 public:
168 using Lookup = typename HashPolicy::Lookup;
169 using Entry = TableEntry;
170
171 // -- Initialization -------------------------------------------------------
172
173 explicit HashMap(AllocPolicy aAllocPolicy = AllocPolicy(),
174 uint32_t aLen = Impl::sDefaultLen)
175 : mImpl(std::move(aAllocPolicy), aLen) {}
176
177 explicit HashMap(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {}
178
179 // HashMap is movable.
180 HashMap(HashMap&& aRhs) = default;
181 HashMap& operator=(HashMap&& aRhs) = default;
182
183 // Swap the contents of this hash map with another.
184 void swap(HashMap& aOther) { mImpl.swap(aOther.mImpl); }
185
186 // -- Status and sizing ----------------------------------------------------
187
188 // The map's current generation.
189 Generation generation() const { return mImpl.generation(); }
190
191 // Is the map empty?
192 bool empty() const { return mImpl.empty(); }
193
194 // Number of keys/values in the map.
195 uint32_t count() const { return mImpl.count(); }
196
197 // Number of key/value slots in the map. Note: resize will happen well before
198 // count() == capacity().
199 uint32_t capacity() const { return mImpl.capacity(); }
200
201 // The size of the map's entry storage, in bytes. If the keys/values contain
202 // pointers to other heap blocks, you must iterate over the map and measure
203 // them separately; hence the "shallow" prefix.
204 size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
205 return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
206 }
207 size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
208 return aMallocSizeOf(this) +
209 mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
210 }
211
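As noted above, these measurements are shallow. A minimal sketch of calling them follows; the stub callback below is only a stand-in for a real MallocSizeOf function supplied by a memory reporter.

static size_t StubMallocSizeOf(const void*) { return 0; }

void sizeOfSketch(const mozilla::HashMap<int, char>& map) {
  // Measures only the entry storage; heap blocks owned by keys/values must be
  // measured separately.
  size_t shallow = map.shallowSizeOfExcludingThis(StubMallocSizeOf);
  (void)shallow;
}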
212 // Attempt to minimize the capacity(). If the table is empty, this will free
213 // the empty storage and upon regrowth it will be given the minimum capacity.
214 void compact() { mImpl.compact(); }
215
216 // Attempt to reserve enough space to fit at least |aLen| elements. This is
217 // total capacity, including elements already present. Does nothing if the
218 // map already has sufficient capacity.
219 [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); }
220
221 // -- Lookups --------------------------------------------------------------
222
223 // Does the map contain a key/value matching |aLookup|?
224 bool has(const Lookup& aLookup) const {
225 return mImpl.lookup(aLookup).found();
226 }
227
228 // Return a Ptr indicating whether a key/value matching |aLookup| is
229 // present in the map. E.g.:
230 //
231 // using HM = HashMap<int,char>;
232 // HM h;
233 // if (HM::Ptr p = h.lookup(3)) {
234 // assert(p->key() == 3);
235 // char val = p->value();
236 // }
237 //
238 using Ptr = typename Impl::Ptr;
239 MOZ_ALWAYS_INLINEinline Ptr lookup(const Lookup& aLookup) const {
240 return mImpl.lookup(aLookup);
241 }
242
243 // Like lookup(), but does not assert if two threads call it at the same
244 // time. Only use this method when none of the threads will modify the map.
245 MOZ_ALWAYS_INLINEinline Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
246 return mImpl.readonlyThreadsafeLookup(aLookup);
247 }
248
249 // -- Insertions -----------------------------------------------------------
250
251 // Overwrite existing value with |aValue|, or add it if not present. Returns
252 // false on OOM.
253 template <typename KeyInput, typename ValueInput>
254 [[nodiscard]] bool put(KeyInput&& aKey, ValueInput&& aValue) {
255 return put(aKey, std::forward<KeyInput>(aKey),
256 std::forward<ValueInput>(aValue));
257 }
258
259 template <typename KeyInput, typename ValueInput>
260 [[nodiscard]] bool put(const Lookup& aLookup, KeyInput&& aKey,
261 ValueInput&& aValue) {
262 AddPtr p = lookupForAdd(aLookup);
263 if (p) {
264 p->value() = std::forward<ValueInput>(aValue);
265 return true;
266 }
267 return add(p, std::forward<KeyInput>(aKey),
268 std::forward<ValueInput>(aValue));
269 }
270
271 // Like put(), but slightly faster. Must only be used when the given key is
272 // not already present. (In debug builds, assertions check this.)
273 template <typename KeyInput, typename ValueInput>
274 [[nodiscard]] bool putNew(KeyInput&& aKey, ValueInput&& aValue) {
275 return mImpl.putNew(aKey, std::forward<KeyInput>(aKey),
276 std::forward<ValueInput>(aValue));
277 }
278
279 template <typename KeyInput, typename ValueInput>
280 [[nodiscard]] bool putNew(const Lookup& aLookup, KeyInput&& aKey,
281 ValueInput&& aValue) {
282 return mImpl.putNew(aLookup, std::forward<KeyInput>(aKey),
283 std::forward<ValueInput>(aValue));
284 }
285
286 // Like putNew(), but should be only used when the table is known to be big
287 // enough for the insertion, and hashing cannot fail. Typically this is used
288 // to populate an empty map with known-unique keys after reserving space with
289 // reserve(), e.g.
290 //
291 // using HM = HashMap<int,char>;
292 // HM h;
293 // if (!h.reserve(3)) {
294 // MOZ_CRASH("OOM");
295 // }
296 // h.putNewInfallible(1, 'a'); // unique key
297 // h.putNewInfallible(2, 'b'); // unique key
298 // h.putNewInfallible(3, 'c'); // unique key
299 //
300 template <typename KeyInput, typename ValueInput>
301 void putNewInfallible(KeyInput&& aKey, ValueInput&& aValue) {
302 mImpl.putNewInfallible(aKey, std::forward<KeyInput>(aKey),
303 std::forward<ValueInput>(aValue));
304 }
305
306 // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
307 // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
308 // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new key/value. E.g.:
309 //
310 // using HM = HashMap<int,char>;
311 // HM h;
312 // HM::AddPtr p = h.lookupForAdd(3);
313 // if (!p) {
314 // if (!h.add(p, 3, 'a')) {
315 // return false;
316 // }
317 // }
318 // assert(p->key() == 3);
319 // char val = p->value();
320 //
321 // N.B. The caller must ensure that no mutating hash table operations occur
322 // between a pair of lookupForAdd() and add() calls. To avoid looking up the
323 // key a second time, the caller may use the more efficient relookupOrAdd()
324 // method. This method reuses part of the hashing computation to more
325 // efficiently insert the key if it has not been added. For example, a
326 // mutation-handling version of the previous example:
327 //
328 // HM::AddPtr p = h.lookupForAdd(3);
329 // if (!p) {
330 // call_that_may_mutate_h();
331 // if (!h.relookupOrAdd(p, 3, 'a')) {
332 // return false;
333 // }
334 // }
335 // assert(p->key() == 3);
336 // char val = p->value();
337 //
338 using AddPtr = typename Impl::AddPtr;
339 MOZ_ALWAYS_INLINEinline AddPtr lookupForAdd(const Lookup& aLookup) {
340 return mImpl.lookupForAdd(aLookup);
341 }
342
343 // Add a key/value. Returns false on OOM.
344 template <typename KeyInput, typename ValueInput>
345 [[nodiscard]] bool add(AddPtr& aPtr, KeyInput&& aKey, ValueInput&& aValue) {
346 return mImpl.add(aPtr, std::forward<KeyInput>(aKey),
347 std::forward<ValueInput>(aValue));
348 }
349
350 // See the comment above lookupForAdd() for details.
351 template <typename KeyInput, typename ValueInput>
352 [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, KeyInput&& aKey,
353 ValueInput&& aValue) {
354 return mImpl.relookupOrAdd(aPtr, aKey, std::forward<KeyInput>(aKey),
355 std::forward<ValueInput>(aValue));
356 }
357
358 // -- Removal --------------------------------------------------------------
359
360 // Lookup and remove the key/value matching |aLookup|, if present.
361 void remove(const Lookup& aLookup) {
362 if (Ptr p = lookup(aLookup)) {
363 remove(p);
364 }
365 }
366
367 // Remove a previously found key/value (assuming aPtr.found()). The map must
368 // not have been mutated in the interim.
369 void remove(Ptr aPtr) { mImpl.remove(aPtr); }
370
371 // Remove all keys/values without changing the capacity.
372 void clear() { mImpl.clear(); }
373
374 // Like clear() followed by compact().
375 void clearAndCompact() { mImpl.clearAndCompact(); }
376
377 // -- Rekeying -------------------------------------------------------------
378
379 // Infallibly rekey one entry, if necessary. Requires that template
380 // parameters Key and HashPolicy::Lookup are the same type.
381 void rekeyIfMoved(const Key& aOldKey, const Key& aNewKey) {
382 if (aOldKey != aNewKey) {
383 rekeyAs(aOldKey, aNewKey, aNewKey);
384 }
385 }
386
387 // Infallibly rekey one entry if present, and return whether that happened.
388 bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup,
389 const Key& aNewKey) {
390 if (Ptr p = lookup(aOldLookup)) {
391 mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewKey);
392 return true;
393 }
394 return false;
395 }
396
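A minimal sketch of the rekeying calls above (hypothetical caller code; it assumes |newKey| is not already present in the map):

void rekeySketch(mozilla::HashMap<int, char>& map, int oldKey, int newKey) {
  // Re-file the entry stored under |oldKey|, if any, under |newKey|.
  bool rekeyed = map.rekeyAs(oldKey, newKey, newKey);
  (void)rekeyed;

  // rekeyIfMoved(oldKey, newKey) is shorthand for the same call when Key and
  // HashPolicy::Lookup are the same type; it is a no-op if the keys are equal.
}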
397 // -- Iteration ------------------------------------------------------------
398
399 // |iter()| returns an Iterator:
400 //
401 // HashMap<int, char> h;
402 // for (auto iter = h.iter(); !iter.done(); iter.next()) {
403 // char c = iter.get().value();
404 // }
405 //
406 using Iterator = typename Impl::Iterator;
407 Iterator iter() const { return mImpl.iter(); }
408
409 // |modIter()| returns a ModIterator:
410 //
411 // HashMap<int, char> h;
412 // for (auto iter = h.modIter(); !iter.done(); iter.next()) {
413 // if (iter.get().value() == 'l') {
414 // iter.remove();
415 // }
416 // }
417 //
418 // Table resize may occur in ModIterator's destructor.
419 using ModIterator = typename Impl::ModIterator;
420 ModIterator modIter() { return mImpl.modIter(); }
421
422 // These are similar to Iterator/ModIterator/iter(), but use different
423 // terminology.
424 using Range = typename Impl::Range;
425 using Enum = typename Impl::Enum;
426 Range all() const { return mImpl.all(); }
427};
428
429//---------------------------------------------------------------------------
430// HashSet
431//---------------------------------------------------------------------------
432
433// HashSet is a fast hash-based set of values.
434//
435// Template parameter requirements:
436// - T: movable, destructible, assignable.
437// - HashPolicy: see the "Hash Policy" section below.
438// - AllocPolicy: see AllocPolicy.h
439//
440// Note:
441// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
442// HashSet must not call back into the same HashSet object.
443//
444template <class T, class HashPolicy = DefaultHasher<T>,
445 class AllocPolicy = MallocAllocPolicy>
446class HashSet {
447 // -- Implementation details -----------------------------------------------
448
449 // HashSet is not copyable or assignable.
450 HashSet(const HashSet& hs) = delete;
451 HashSet& operator=(const HashSet& hs) = delete;
452
453 struct SetHashPolicy : HashPolicy {
454 using Base = HashPolicy;
455 using KeyType = T;
456
457 static const KeyType& getKey(const T& aT) { return aT; }
458
459 static void setKey(T& aT, KeyType& aKey) { HashPolicy::rekey(aT, aKey); }
460 };
461
462 using Impl = detail::HashTable<const T, SetHashPolicy, AllocPolicy>;
463 Impl mImpl;
464
465 friend class Impl::Enum;
466
467 public:
468 using Lookup = typename HashPolicy::Lookup;
469 using Entry = T;
470
471 // -- Initialization -------------------------------------------------------
472
473 explicit HashSet(AllocPolicy aAllocPolicy = AllocPolicy(),
474 uint32_t aLen = Impl::sDefaultLen)
475 : mImpl(std::move(aAllocPolicy), aLen) {}
476
477 explicit HashSet(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {}
478
479 // HashSet is movable.
480 HashSet(HashSet&& aRhs) = default;
481 HashSet& operator=(HashSet&& aRhs) = default;
482
483 // Swap the contents of this hash set with another.
484 void swap(HashSet& aOther) { mImpl.swap(aOther.mImpl); }
485
486 // -- Status and sizing ----------------------------------------------------
487
488 // The set's current generation.
489 Generation generation() const { return mImpl.generation(); }
490
491 // Is the set empty?
492 bool empty() const { return mImpl.empty(); }
493
494 // Number of elements in the set.
495 uint32_t count() const { return mImpl.count(); }
496
497 // Number of element slots in the set. Note: resize will happen well before
498 // count() == capacity().
499 uint32_t capacity() const { return mImpl.capacity(); }
500
501 // The size of the set's entry storage, in bytes. If the elements contain
502 // pointers to other heap blocks, you must iterate over the set and measure
503 // them separately; hence the "shallow" prefix.
504 size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
505 return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
506 }
507 size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
508 return aMallocSizeOf(this) +
509 mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
510 }
511
512 // Attempt to minimize the capacity(). If the table is empty, this will free
513 // the empty storage and upon regrowth it will be given the minimum capacity.
514 void compact() { mImpl.compact(); }
515
516 // Attempt to reserve enough space to fit at least |aLen| elements. This is
517 // total capacity, including elements already present. Does nothing if the
 518 // set already has sufficient capacity.
519 [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); }
520
521 // -- Lookups --------------------------------------------------------------
522
523 // Does the set contain an element matching |aLookup|?
524 bool has(const Lookup& aLookup) const {
525 return mImpl.lookup(aLookup).found();
526 }
527
528 // Return a Ptr indicating whether an element matching |aLookup| is present
529 // in the set. E.g.:
530 //
531 // using HS = HashSet<int>;
532 // HS h;
533 // if (HS::Ptr p = h.lookup(3)) {
534 // assert(*p == 3); // p acts like a pointer to int
535 // }
536 //
537 using Ptr = typename Impl::Ptr;
538 MOZ_ALWAYS_INLINEinline Ptr lookup(const Lookup& aLookup) const {
539 return mImpl.lookup(aLookup);
 5. Calling 'HashTable::lookup'
 12. Returning from 'HashTable::lookup'
540 }
541
542 // Like lookup(), but does not assert if two threads call it at the same
543 // time. Only use this method when none of the threads will modify the set.
544 MOZ_ALWAYS_INLINEinline Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
545 return mImpl.readonlyThreadsafeLookup(aLookup);
546 }
547
548 // -- Insertions -----------------------------------------------------------
549
550 // Add |aU| if it is not present already. Returns false on OOM.
551 template <typename U>
552 [[nodiscard]] bool put(U&& aU) {
553 AddPtr p = lookupForAdd(aU);
554 return p ? true : add(p, std::forward<U>(aU));
555 }
556
557 // Like put(), but slightly faster. Must only be used when the given element
558 // is not already present. (In debug builds, assertions check this.)
559 template <typename U>
560 [[nodiscard]] bool putNew(U&& aU) {
561 return mImpl.putNew(aU, std::forward<U>(aU));
562 }
563
564 // Like the other putNew(), but for when |Lookup| is different to |T|.
565 template <typename U>
566 [[nodiscard]] bool putNew(const Lookup& aLookup, U&& aU) {
567 return mImpl.putNew(aLookup, std::forward<U>(aU));
568 }
569
570 // Like putNew(), but should be only used when the table is known to be big
571 // enough for the insertion, and hashing cannot fail. Typically this is used
572 // to populate an empty set with known-unique elements after reserving space
573 // with reserve(), e.g.
574 //
 575// using HS = HashSet<int>;
576 // HS h;
577 // if (!h.reserve(3)) {
578 // MOZ_CRASH("OOM");
579 // }
580 // h.putNewInfallible(1); // unique element
581 // h.putNewInfallible(2); // unique element
582 // h.putNewInfallible(3); // unique element
583 //
584 template <typename U>
585 void putNewInfallible(const Lookup& aLookup, U&& aU) {
586 mImpl.putNewInfallible(aLookup, std::forward<U>(aU));
587 }
588
589 // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
590 // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
591 // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
592 //
593 // using HS = HashSet<int>;
594 // HS h;
595 // HS::AddPtr p = h.lookupForAdd(3);
596 // if (!p) {
597 // if (!h.add(p, 3)) {
598 // return false;
599 // }
600 // }
601 // assert(*p == 3); // p acts like a pointer to int
602 //
603 // N.B. The caller must ensure that no mutating hash table operations occur
604 // between a pair of lookupForAdd() and add() calls. To avoid looking up the
605 // key a second time, the caller may use the more efficient relookupOrAdd()
606 // method. This method reuses part of the hashing computation to more
607 // efficiently insert the key if it has not been added. For example, a
608 // mutation-handling version of the previous example:
609 //
610 // HS::AddPtr p = h.lookupForAdd(3);
611 // if (!p) {
612 // call_that_may_mutate_h();
613 // if (!h.relookupOrAdd(p, 3, 3)) {
614 // return false;
615 // }
616 // }
617 // assert(*p == 3);
618 //
619 // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
620 // entry |t|, where the caller ensures match(l,t).
621 using AddPtr = typename Impl::AddPtr;
622 MOZ_ALWAYS_INLINEinline AddPtr lookupForAdd(const Lookup& aLookup) {
623 return mImpl.lookupForAdd(aLookup);
624 }
625
626 // Add an element. Returns false on OOM.
627 template <typename U>
628 [[nodiscard]] bool add(AddPtr& aPtr, U&& aU) {
629 return mImpl.add(aPtr, std::forward<U>(aU));
630 }
631
632 // See the comment above lookupForAdd() for details.
633 template <typename U>
634 [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup,
635 U&& aU) {
636 return mImpl.relookupOrAdd(aPtr, aLookup, std::forward<U>(aU));
637 }
638
639 // -- Removal --------------------------------------------------------------
640
641 // Lookup and remove the element matching |aLookup|, if present.
642 void remove(const Lookup& aLookup) {
643 if (Ptr p = lookup(aLookup)) {
644 remove(p);
645 }
646 }
647
648 // Remove a previously found element (assuming aPtr.found()). The set must
649 // not have been mutated in the interim.
650 void remove(Ptr aPtr) { mImpl.remove(aPtr); }
651
 652 // Remove all elements without changing the capacity.
653 void clear() { mImpl.clear(); }
654
655 // Like clear() followed by compact().
656 void clearAndCompact() { mImpl.clearAndCompact(); }
657
658 // -- Rekeying -------------------------------------------------------------
659
660 // Infallibly rekey one entry, if present. Requires that template parameters
661 // T and HashPolicy::Lookup are the same type.
662 void rekeyIfMoved(const Lookup& aOldValue, const T& aNewValue) {
663 if (aOldValue != aNewValue) {
664 rekeyAs(aOldValue, aNewValue, aNewValue);
665 }
666 }
667
668 // Infallibly rekey one entry if present, and return whether that happened.
669 bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup,
670 const T& aNewValue) {
671 if (Ptr p = lookup(aOldLookup)) {
672 mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewValue);
673 return true;
674 }
675 return false;
676 }
677
678 // Infallibly replace the current key at |aPtr| with an equivalent key.
679 // Specifically, both HashPolicy::hash and HashPolicy::match must return
680 // identical results for the new and old key when applied against all
681 // possible matching values.
682 void replaceKey(Ptr aPtr, const Lookup& aLookup, const T& aNewValue) {
683 MOZ_ASSERT(aPtr.found())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(aPtr.found())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(aPtr.found()))), 0))) { do {
} while (false); MOZ_ReportAssertionFailure("aPtr.found()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h"
, 683); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPtr.found()"
")"); do { *((volatile int*)__null) = 683; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
684 MOZ_ASSERT(*aPtr != aNewValue)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(*aPtr != aNewValue)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(*aPtr != aNewValue))), 0))) {
do { } while (false); MOZ_ReportAssertionFailure("*aPtr != aNewValue"
, "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h"
, 684); AnnotateMozCrashReason("MOZ_ASSERT" "(" "*aPtr != aNewValue"
")"); do { *((volatile int*)__null) = 684; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
685 MOZ_ASSERT(HashPolicy::match(*aPtr, aLookup))do { static_assert( mozilla::detail::AssertionConditionType<
decltype(HashPolicy::match(*aPtr, aLookup))>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(HashPolicy::match(*aPtr, aLookup
)))), 0))) { do { } while (false); MOZ_ReportAssertionFailure
("HashPolicy::match(*aPtr, aLookup)", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h"
, 685); AnnotateMozCrashReason("MOZ_ASSERT" "(" "HashPolicy::match(*aPtr, aLookup)"
")"); do { *((volatile int*)__null) = 685; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
686 MOZ_ASSERT(HashPolicy::match(aNewValue, aLookup))do { static_assert( mozilla::detail::AssertionConditionType<
decltype(HashPolicy::match(aNewValue, aLookup))>::isValid,
"invalid assertion condition"); if ((__builtin_expect(!!(!(!
!(HashPolicy::match(aNewValue, aLookup)))), 0))) { do { } while
(false); MOZ_ReportAssertionFailure("HashPolicy::match(aNewValue, aLookup)"
, "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h"
, 686); AnnotateMozCrashReason("MOZ_ASSERT" "(" "HashPolicy::match(aNewValue, aLookup)"
")"); do { *((volatile int*)__null) = 686; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
687 const_cast<T&>(*aPtr) = aNewValue;
688 MOZ_ASSERT(*lookup(aLookup) == aNewValue)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(*lookup(aLookup) == aNewValue)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(*lookup(aLookup) == aNewValue
))), 0))) { do { } while (false); MOZ_ReportAssertionFailure(
"*lookup(aLookup) == aNewValue", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h"
, 688); AnnotateMozCrashReason("MOZ_ASSERT" "(" "*lookup(aLookup) == aNewValue"
")"); do { *((volatile int*)__null) = 688; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
689 }
690 void replaceKey(Ptr aPtr, const T& aNewValue) {
691 replaceKey(aPtr, aNewValue, aNewValue);
692 }
693
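A sketch of replaceKey() using the CStringHasher policy defined later in this file, where two distinct char* buffers holding equal text hash and match identically (hypothetical caller code):

void replaceKeySketch(mozilla::HashSet<const char*, mozilla::CStringHasher>& set,
                      const char* oldBuffer, const char* newBuffer) {
  // Precondition, per the comment above: strcmp(oldBuffer, newBuffer) == 0
  // but oldBuffer != newBuffer (e.g. the string was re-interned).
  if (auto p = set.lookup(oldBuffer)) {
    set.replaceKey(p, oldBuffer, newBuffer);  // the set now stores newBuffer
  }
}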
694 // -- Iteration ------------------------------------------------------------
695
696 // |iter()| returns an Iterator:
697 //
698 // HashSet<int> h;
699 // for (auto iter = h.iter(); !iter.done(); iter.next()) {
700 // int i = iter.get();
701 // }
702 //
703 using Iterator = typename Impl::Iterator;
704 Iterator iter() const { return mImpl.iter(); }
705
706 // |modIter()| returns a ModIterator:
707 //
708 // HashSet<int> h;
709 // for (auto iter = h.modIter(); !iter.done(); iter.next()) {
710 // if (iter.get() == 42) {
711 // iter.remove();
712 // }
713 // }
714 //
715 // Table resize may occur in ModIterator's destructor.
716 using ModIterator = typename Impl::ModIterator;
717 ModIterator modIter() { return mImpl.modIter(); }
718
719 // These are similar to Iterator/ModIterator/iter(), but use different
720 // terminology.
721 using Range = typename Impl::Range;
722 using Enum = typename Impl::Enum;
723 Range all() const { return mImpl.all(); }
724};
725
726//---------------------------------------------------------------------------
727// Hash Policy
728//---------------------------------------------------------------------------
729
730// A hash policy |HP| for a hash table with key-type |Key| must provide:
731//
732// - a type |HP::Lookup| to use to lookup table entries;
733//
734// - a static member function |HP::hash| that hashes lookup values:
735//
736// static mozilla::HashNumber hash(const Lookup&);
737//
738// - a static member function |HP::match| that tests equality of key and
739// lookup values:
740//
741// static bool match(const Key& aKey, const Lookup& aLookup);
742//
 743// |aKey| and |aLookup| can have different hash numbers, but only when a
 744// collision happens in the |prepareHash| operation, which is infrequent.
 745// Thus, |HP::match| must not assume the hashes are equal when comparing,
 746// even though the hash numbers are almost always the same.
747//
748// Normally, Lookup = Key. In general, though, different values and types of
749// values can be used to lookup and store. If a Lookup value |l| is not equal
750// to the added Key value |k|, the user must ensure that |HP::match(k,l)| is
751// true. E.g.:
752//
753// mozilla::HashSet<Key, HP>::AddPtr p = h.lookup(l);
754// if (!p) {
755// assert(HP::match(k, l)); // must hold
756// h.add(p, k);
757// }
758
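Putting the requirements above together, a user-defined hash policy might look like the following sketch; PixelKey and PixelKeyHasher are illustrative names, not part of this header.

#include <cstdint>
#include "mozilla/HashFunctions.h"

struct PixelKey {
  int32_t mX;
  int32_t mY;
};

struct PixelKeyHasher {
  using Lookup = PixelKey;

  static mozilla::HashNumber hash(const Lookup& aLookup) {
    return mozilla::HashGeneric(aLookup.mX, aLookup.mY);
  }

  static bool match(const PixelKey& aKey, const Lookup& aLookup) {
    return aKey.mX == aLookup.mX && aKey.mY == aLookup.mY;
  }

  // Needed only if the rekey*() methods are used on the table.
  static void rekey(PixelKey& aKey, const PixelKey& aNewKey) { aKey = aNewKey; }
};

// Usage: mozilla::HashMap<PixelKey, uint32_t, PixelKeyHasher> colors;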
759// A pointer hashing policy that uses HashGeneric() to create good hashes for
760// pointers. Note that we don't shift out the lowest k bits because we don't
761// want to assume anything about the alignment of the pointers.
762template <typename Key>
763struct PointerHasher {
764 using Lookup = Key;
765
766 static HashNumber hash(const Lookup& aLookup) { return HashGeneric(aLookup); }
767
768 static bool match(const Key& aKey, const Lookup& aLookup) {
769 return aKey == aLookup;
770 }
771
772 static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
773};
774
775// The default hash policy, which only works with integers.
776template <class Key, typename>
777struct DefaultHasher {
778 using Lookup = Key;
779
780 static HashNumber hash(const Lookup& aLookup) {
781 // Just convert the integer to a HashNumber and use that as is. (This
782 // discards the high 32-bits of 64-bit integers!) ScrambleHashCode() is
783 // subsequently called on the value to improve the distribution.
784 return aLookup;
785 }
786
787 static bool match(const Key& aKey, const Lookup& aLookup) {
788 // Use builtin or overloaded operator==.
789 return aKey == aLookup;
790 }
791
792 static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
793};
794
795// A DefaultHasher specialization for enums.
796template <class T>
797struct DefaultHasher<T, std::enable_if_t<std::is_enum_v<T>>> {
798 using Key = T;
799 using Lookup = Key;
800
801 static HashNumber hash(const Lookup& aLookup) { return HashGeneric(aLookup); }
802
803 static bool match(const Key& aKey, const Lookup& aLookup) {
804 // Use builtin or overloaded operator==.
805 return aKey == static_cast<Key>(aLookup);
806 }
807
808 static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
809};
810
811// A DefaultHasher specialization for pointers.
812template <class T>
813struct DefaultHasher<T*> : PointerHasher<T*> {};
814
815// A DefaultHasher specialization for mozilla::UniquePtr.
816template <class T, class D>
817struct DefaultHasher<UniquePtr<T, D>> {
818 using Key = UniquePtr<T, D>;
819 using Lookup = Key;
820 using PtrHasher = PointerHasher<T*>;
821
822 static HashNumber hash(const Lookup& aLookup) {
823 return PtrHasher::hash(aLookup.get());
824 }
825
826 static bool match(const Key& aKey, const Lookup& aLookup) {
827 return PtrHasher::match(aKey.get(), aLookup.get());
828 }
829
830 static void rekey(UniquePtr<T, D>& aKey, UniquePtr<T, D>&& aNewKey) {
831 aKey = std::move(aNewKey);
832 }
833};
834
835// A DefaultHasher specialization for doubles.
836template <>
837struct DefaultHasher<double> {
838 using Key = double;
839 using Lookup = Key;
840
841 static HashNumber hash(const Lookup& aLookup) {
842 // Just xor the high bits with the low bits, and then treat the bits of the
843 // result as a uint32_t.
844 static_assert(sizeof(HashNumber) == 4,
845 "subsequent code assumes a four-byte hash");
846 uint64_t u = BitwiseCast<uint64_t>(aLookup);
847 return HashNumber(u ^ (u >> 32));
848 }
849
850 static bool match(const Key& aKey, const Lookup& aLookup) {
851 return BitwiseCast<uint64_t>(aKey) == BitwiseCast<uint64_t>(aLookup);
852 }
853};
854
855// A DefaultHasher specialization for floats.
856template <>
857struct DefaultHasher<float> {
858 using Key = float;
859 using Lookup = Key;
860
861 static HashNumber hash(const Lookup& aLookup) {
862 // Just use the value as if its bits form an integer. ScrambleHashCode() is
863 // subsequently called on the value to improve the distribution.
864 static_assert(sizeof(HashNumber) == 4,
865 "subsequent code assumes a four-byte hash");
866 return HashNumber(BitwiseCast<uint32_t>(aLookup));
867 }
868
869 static bool match(const Key& aKey, const Lookup& aLookup) {
870 return BitwiseCast<uint32_t>(aKey) == BitwiseCast<uint32_t>(aLookup);
871 }
872};
873
874// A hash policy for C strings.
875struct CStringHasher {
876 using Key = const char*;
877 using Lookup = const char*;
878
879 static HashNumber hash(const Lookup& aLookup) { return HashString(aLookup); }
880
881 static bool match(const Key& aKey, const Lookup& aLookup) {
882 return strcmp(aKey, aLookup) == 0;
883 }
884};
885
886//---------------------------------------------------------------------------
887// Fallible Hashing Interface
888//---------------------------------------------------------------------------
889
890// Most of the time generating a hash code is infallible, but sometimes it is
891// necessary to generate hash codes on demand in a way that can fail. Specialize
892// this class for your own hash policy to provide fallible hashing.
893//
894// This is used by MovableCellHasher to handle the fact that generating a unique
 895// ID for a cell pointer may fail due to OOM.
896//
897// The default implementations of these methods delegate to the usual HashPolicy
898// implementation and always succeed.
899template <typename HashPolicy>
900struct FallibleHashMethods {
901 // Return true if a hashcode is already available for its argument, and
 902 // set |aHashOut|. Once this succeeds for a specific argument it
903 // must continue to do so.
904 //
905 // Return false if a hashcode is not already available. This implies that any
906 // lookup must fail, as the hash code would have to have been successfully
907 // created on insertion.
908 template <typename Lookup>
909 static bool maybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) {
910 *aHashOut = HashPolicy::hash(aLookup);
911 return true;
912 }
913
914 // Fallible method to ensure a hashcode exists for its argument and create one
 915 // if not. Sets |aHashOut| to the hashcode and returns true on success. Returns
916 // false on error, e.g. out of memory.
917 template <typename Lookup>
918 static bool ensureHash(Lookup&& aLookup, HashNumber* aHashOut) {
919 *aHashOut = HashPolicy::hash(aLookup);
920 return true;
921 }
922};
923
924template <typename HashPolicy, typename Lookup>
925static bool MaybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) {
926 return FallibleHashMethods<typename HashPolicy::Base>::maybeGetHash(
927 std::forward<Lookup>(aLookup), aHashOut);
928}
929
930template <typename HashPolicy, typename Lookup>
931static bool EnsureHash(Lookup&& aLookup, HashNumber* aHashOut) {
932 return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(
933 std::forward<Lookup>(aLookup), aHashOut);
934}
935
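As a sketch of the specialization hook described above, consider a hypothetical key type whose unique id is assigned lazily. Everything below except the FallibleHashMethods interface itself is illustrative.

#include <cstdint>
#include "mozilla/HashFunctions.h"

struct LazyIdThing {
  mutable uint64_t mId = 0;  // 0 means "no id assigned yet"
};

struct LazyIdPolicy {
  using Lookup = const LazyIdThing*;

  static mozilla::HashNumber hash(const Lookup& aLookup) {
    return mozilla::HashGeneric(aLookup->mId);
  }

  static bool match(const LazyIdThing* aKey, const Lookup& aLookup) {
    return aKey == aLookup;
  }
};

namespace mozilla {

template <>
struct FallibleHashMethods<LazyIdPolicy> {
  template <typename Lookup>
  static bool maybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) {
    if (aLookup->mId == 0) {
      return false;  // no id yet, so the key cannot already be in a table
    }
    *aHashOut = HashGeneric(aLookup->mId);
    return true;
  }

  template <typename Lookup>
  static bool ensureHash(Lookup&& aLookup, HashNumber* aHashOut) {
    if (aLookup->mId == 0) {
      static uint64_t sNextId = 1;  // stand-in for a fallible id allocator
      aLookup->mId = sNextId++;     // a real one would return false on OOM
    }
    *aHashOut = HashGeneric(aLookup->mId);
    return true;
  }
};

}  // namespace mozilla

// Usage: mozilla::HashSet<const LazyIdThing*, LazyIdPolicy> things;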
936//---------------------------------------------------------------------------
937// Implementation Details (HashMapEntry, HashTableEntry, HashTable)
938//---------------------------------------------------------------------------
939
940// Both HashMap and HashSet are implemented by a single HashTable that is even
941// more heavily parameterized than the other two. This leaves HashTable gnarly
942// and extremely coupled to HashMap and HashSet; thus code should not use
943// HashTable directly.
944
945template <class Key, class Value>
946class HashMapEntry {
947 Key key_;
948 Value value_;
949
950 template <class, class, class>
951 friend class detail::HashTable;
952 template <class>
953 friend class detail::HashTableEntry;
954 template <class, class, class, class>
955 friend class HashMap;
956
957 public:
958 template <typename KeyInput, typename ValueInput>
959 HashMapEntry(KeyInput&& aKey, ValueInput&& aValue)
960 : key_(std::forward<KeyInput>(aKey)),
961 value_(std::forward<ValueInput>(aValue)) {}
962
963 HashMapEntry(HashMapEntry&& aRhs) = default;
964 HashMapEntry& operator=(HashMapEntry&& aRhs) = default;
965
966 using KeyType = Key;
967 using ValueType = Value;
968
969 const Key& key() const { return key_; }
970
971 // Use this method with caution! If the key is changed such that its hash
972 // value also changes, the map will be left in an invalid state.
973 Key& mutableKey() { return key_; }
974
975 const Value& value() const { return value_; }
976 Value& value() { return value_; }
977
978 private:
979 HashMapEntry(const HashMapEntry&) = delete;
980 void operator=(const HashMapEntry&) = delete;
981};
982
983namespace detail {
984
985template <class T, class HashPolicy, class AllocPolicy>
986class HashTable;
987
988template <typename T>
989class EntrySlot;
990
991template <typename T>
992class HashTableEntry {
993 private:
994 using NonConstT = std::remove_const_t<T>;
995
996 // Instead of having a hash table entry store that looks like this:
997 //
998 // +--------+--------+--------+--------+
999 // | entry0 | entry1 | .... | entryN |
1000 // +--------+--------+--------+--------+
1001 //
1002 // where the entries contained their cached hash code, we're going to lay out
1003 // the entry store thusly:
1004 //
1005 // +-------+-------+-------+-------+--------+--------+--------+--------+
1006 // | hash0 | hash1 | ... | hashN | entry0 | entry1 | .... | entryN |
1007 // +-------+-------+-------+-------+--------+--------+--------+--------+
1008 //
1009 // with all the cached hashes prior to the actual entries themselves.
1010 //
1011 // We do this because implementing the first strategy requires us to make
1012 // HashTableEntry look roughly like:
1013 //
1014 // template <typename T>
1015 // class HashTableEntry {
1016 // HashNumber mKeyHash;
1017 // T mValue;
1018 // };
1019 //
1020 // The problem with this setup is that, depending on the layout of `T`, there
1021 // may be platform ABI-mandated padding between `mKeyHash` and the first
1022 // member of `T`. This ABI-mandated padding is wasted space, and can be
1023 // surprisingly common, e.g. when `T` is a single pointer on 64-bit platforms.
1024 // In such cases, we're throwing away a quarter of our entry store on padding,
1025 // which is undesirable.
1026 //
1027 // The second layout above, namely:
1028 //
1029 // +-------+-------+-------+-------+--------+--------+--------+--------+
1030 // | hash0 | hash1 | ... | hashN | entry0 | entry1 | .... | entryN |
1031 // +-------+-------+-------+-------+--------+--------+--------+--------+
1032 //
1033 // means there is no wasted space between the hashes themselves, and no wasted
1034 // space between the entries themselves. However, we would also like there to
1035 // be no gap between the last hash and the first entry. The memory allocator
1036 // guarantees the alignment of the start of the hashes. The use of a
1037 // power-of-two capacity of at least 4 guarantees that the alignment of the
1038 // *end* of the hash array is no less than the alignment of the start.
1039 // Finally, the static_asserts here guarantee that the entries themselves
1040 // don't need to be any more aligned than the alignment of the entry store
1041 // itself.
1042 //
1043 // This assertion is safe for 32-bit builds because on both Windows and Linux
1044 // (including Android), the minimum alignment for allocations larger than 8
1045 // bytes is 8 bytes, and the actual data for entries in our entry store is
1046 // guaranteed to have that alignment as well, thanks to the power-of-two
1047 // number of cached hash values stored prior to the entry data.
1048
1049 // The allocation policy must allocate a table with at least this much
1050 // alignment.
1051 static constexpr size_t kMinimumAlignment = 8;
1052
1053 static_assert(alignof(HashNumber) <= kMinimumAlignment,
1054 "[N*2 hashes, N*2 T values] allocation's alignment must be "
1055 "enough to align each hash");
1056 static_assert(alignof(NonConstT) <= 2 * sizeof(HashNumber),
1057 "subsequent N*2 T values must not require more than an even "
1058 "number of HashNumbers provides");
1059
1060 static const HashNumber sFreeKey = 0;
1061 static const HashNumber sRemovedKey = 1;
1062 static const HashNumber sCollisionBit = 1;
1063
1064 alignas(NonConstT) unsigned char mValueData[sizeof(NonConstT)];
1065
1066 private:
1067 template <class, class, class>
1068 friend class HashTable;
1069 template <typename>
1070 friend class EntrySlot;
1071
1072 // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a
1073 // -Werror compile error) to reinterpret_cast<> |mValueData| to |T*|, even
1074 // through |void*|. Placing the latter cast in these separate functions
1075 // breaks the chain such that affected GCC versions no longer warn/error.
1076 void* rawValuePtr() { return mValueData; }
1077
1078 static bool isLiveHash(HashNumber hash) { return hash > sRemovedKey; }
1079
1080 HashTableEntry(const HashTableEntry&) = delete;
1081 void operator=(const HashTableEntry&) = delete;
1082
1083 NonConstT* valuePtr() { return reinterpret_cast<NonConstT*>(rawValuePtr()); }
1084
1085 void destroyStoredT() {
1086 NonConstT* ptr = valuePtr();
1087 ptr->~T();
1088 MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr))do { } while (0);
1089 }
1090
1091 public:
1092 HashTableEntry() = default;
1093
1094 ~HashTableEntry() { MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this))do { } while (0); }
1095
1096 void destroy() { destroyStoredT(); }
1097
1098 void swap(HashTableEntry* aOther, bool aIsLive) {
1099 // This allows types to use Argument-Dependent-Lookup, and thus use a custom
1100 // std::swap, which is needed by types like JS::Heap and such.
1101 using std::swap;
1102
1103 if (this == aOther) {
1104 return;
1105 }
1106 if (aIsLive) {
1107 swap(*valuePtr(), *aOther->valuePtr());
1108 } else {
1109 *aOther->valuePtr() = std::move(*valuePtr());
1110 destroy();
1111 }
1112 }
1113
1114 T& get() { return *valuePtr(); }
1115
1116 NonConstT& getMutable() { return *valuePtr(); }
1117};
1118
1119// A slot represents a cached hash value and its associated entry stored
1120// in the hash table. These two things are not stored in contiguous memory.
1121template <class T>
1122class EntrySlot {
1123 using NonConstT = std::remove_const_t<T>;
1124
1125 using Entry = HashTableEntry<T>;
1126
1127 Entry* mEntry;
1128 HashNumber* mKeyHash;
1129
1130 template <class, class, class>
1131 friend class HashTable;
1132
1133 EntrySlot(Entry* aEntry, HashNumber* aKeyHash)
1134 : mEntry(aEntry), mKeyHash(aKeyHash) {}
1135
1136 public:
1137 static bool isLiveHash(HashNumber hash) { return hash > Entry::sRemovedKey; }
1138
1139 EntrySlot(const EntrySlot&) = default;
1140 EntrySlot(EntrySlot&& aOther) = default;
1141
1142 EntrySlot& operator=(const EntrySlot&) = default;
1143 EntrySlot& operator=(EntrySlot&&) = default;
1144
1145 bool operator==(const EntrySlot& aRhs) const { return mEntry == aRhs.mEntry; }
1146
1147 bool operator<(const EntrySlot& aRhs) const { return mEntry < aRhs.mEntry; }
1148
1149 EntrySlot& operator++() {
1150 ++mEntry;
1151 ++mKeyHash;
1152 return *this;
1153 }
1154
1155 void destroy() { mEntry->destroy(); }
1156
1157 void swap(EntrySlot& aOther) {
1158 mEntry->swap(aOther.mEntry, aOther.isLive());
1159 std::swap(*mKeyHash, *aOther.mKeyHash);
1160 }
1161
1162 T& get() const { return mEntry->get(); }
1163
1164 NonConstT& getMutable() { return mEntry->getMutable(); }
1165
1166 bool isFree() const { return *mKeyHash == Entry::sFreeKey; }
1167
1168 void clearLive() {
1169 MOZ_ASSERT(isLive())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(isLive())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(isLive()))), 0))) { do { } while
(false); MOZ_ReportAssertionFailure("isLive()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h"
, 1169); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isLive()" ")"
); do { *((volatile int*)__null) = 1169; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1170 *mKeyHash = Entry::sFreeKey;
1171 mEntry->destroyStoredT();
1172 }
1173
1174 void clear() {
1175 if (isLive()) {
1176 mEntry->destroyStoredT();
1177 }
1178 MOZ_MAKE_MEM_UNDEFINED(mEntry, sizeof(*mEntry))do { } while (0);
1179 *mKeyHash = Entry::sFreeKey;
1180 }
1181
1182 bool isRemoved() const { return *mKeyHash == Entry::sRemovedKey; }
1183
1184 void removeLive() {
1185 MOZ_ASSERT(isLive())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(isLive())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(isLive()))), 0))) { do { } while
(false); MOZ_ReportAssertionFailure("isLive()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h"
, 1185); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isLive()" ")"
); do { *((volatile int*)__null) = 1185; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1186 *mKeyHash = Entry::sRemovedKey;
1187 mEntry->destroyStoredT();
1188 }
1189
1190 bool isLive() const { return isLiveHash(*mKeyHash); }
1191
1192 void setCollision() {
1193 MOZ_ASSERT(isLive())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(isLive())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(isLive()))), 0))) { do { } while
(false); MOZ_ReportAssertionFailure("isLive()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h"
, 1193); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isLive()" ")"
); do { *((volatile int*)__null) = 1193; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1194 *mKeyHash |= Entry::sCollisionBit;
1195 }
1196 void unsetCollision() { *mKeyHash &= ~Entry::sCollisionBit; }
1197 bool hasCollision() const { return *mKeyHash & Entry::sCollisionBit; }
1198 bool matchHash(HashNumber hn) {
1199 return (*mKeyHash & ~Entry::sCollisionBit) == hn;
1200 }
1201 HashNumber getKeyHash() const { return *mKeyHash & ~Entry::sCollisionBit; }
1202
1203 template <typename... Args>
1204 void setLive(HashNumber aHashNumber, Args&&... aArgs) {
1205 MOZ_ASSERT(!isLive())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(!isLive())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(!isLive()))), 0))) { do { } while
(false); MOZ_ReportAssertionFailure("!isLive()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h"
, 1205); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!isLive()" ")"
); do { *((volatile int*)__null) = 1205; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1206 *mKeyHash = aHashNumber;
1207 new (KnownNotNull, mEntry->valuePtr()) T(std::forward<Args>(aArgs)...);
1208 MOZ_ASSERT(isLive())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(isLive())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(isLive()))), 0))) { do { } while
(false); MOZ_ReportAssertionFailure("isLive()", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h"
, 1208); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isLive()" ")"
); do { *((volatile int*)__null) = 1208; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1209 }
1210
1211 Entry* toEntry() const { return mEntry; }
1212};
1213
1214template <class T, class HashPolicy, class AllocPolicy>
1215class HashTable : private AllocPolicy {
1216 friend class mozilla::ReentrancyGuard;
1217
1218 using NonConstT = std::remove_const_t<T>;
1219 using Key = typename HashPolicy::KeyType;
1220 using Lookup = typename HashPolicy::Lookup;
1221
1222 public:
1223 using Entry = HashTableEntry<T>;
1224 using Slot = EntrySlot<T>;
1225
1226 template <typename F>
1227 static void forEachSlot(char* aTable, uint32_t aCapacity, F&& f) {
1228 auto hashes = reinterpret_cast<HashNumber*>(aTable);
1229 auto entries = reinterpret_cast<Entry*>(&hashes[aCapacity]);
1230 Slot slot(entries, hashes);
1231 for (size_t i = 0; i < size_t(aCapacity); ++i) {
1232 f(slot);
1233 ++slot;
1234 }
1235 }
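forEachSlot() above makes the storage layout concrete: the single allocation holds all aCapacity key hashes first, followed by all aCapacity entries, and a Slot pairs one pointer into each block. A hedged sketch of how slot i's two pointers fall out of that layout, mirroring slotForIndex() further below; Entry here is any stand-in entry type:

    #include <cstdint>
    #include <utility>
    using HashNumber = uint32_t;

    template <typename Entry>
    std::pair<HashNumber*, Entry*> slotPointers(char* table, uint32_t capacity, uint32_t i) {
      auto* hashes  = reinterpret_cast<HashNumber*>(table);         // block 1: capacity hashes
      auto* entries = reinterpret_cast<Entry*>(hashes + capacity);  // block 2: capacity entries
      return {hashes + i, entries + i};                             // same arithmetic as slotForIndex()
    }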
1236
1237 // A nullable pointer to a hash table element. A Ptr |p| can be tested
1238 // either explicitly |if (p.found()) p->...| or using boolean conversion
1239 // |if (p) p->...|. Ptr objects must not be used after any mutating hash
1240 // table operations unless |generation()| is tested.
1241 class Ptr {
1242 friend class HashTable;
1243
1244 Slot mSlot;
1245#ifdef DEBUG
1246 const HashTable* mTable;
1247 Generation mGeneration;
1248#endif
1249
1250 protected:
1251 Ptr(Slot aSlot, const HashTable& aTable)
1252 : mSlot(aSlot)
1253#ifdef DEBUG
1254 ,
1255 mTable(&aTable),
1256 mGeneration(aTable.generation())
1257#endif
1258 {
1259 }
1260
1261 // This constructor is used only by AddPtr() within lookupForAdd().
1262 explicit Ptr(const HashTable& aTable)
1263 : mSlot(nullptr, nullptr)
1264#ifdef DEBUG
1265 ,
1266 mTable(&aTable),
1267 mGeneration(aTable.generation())
1268#endif
1269 {
1270 }
1271
1272 bool isValid() const { return !!mSlot.toEntry(); }
1273
1274 public:
1275 Ptr()
1276 : mSlot(nullptr, nullptr)
1277#ifdef DEBUG
1278 ,
1279 mTable(nullptr),
9
Null pointer value stored to 'p.mTable'
1280 mGeneration(0)
1281#endif
1282 {
1283 }
1284
1285 bool found() const {
1286 if (!isValid()) {
1287 return false;
1288 }
1289#ifdef DEBUG
1290 MOZ_ASSERT(mGeneration == mTable->generation());
1291#endif
1292 return mSlot.isLive();
1293 }
1294
1295 explicit operator bool() const { return found(); }
1296
1297 bool operator==(const Ptr& aRhs) const {
1298 MOZ_ASSERT(found() && aRhs.found());
1299 return mSlot == aRhs.mSlot;
1300 }
1301
1302 bool operator!=(const Ptr& aRhs) const {
1303#ifdef DEBUG
1304 MOZ_ASSERT(mGeneration == mTable->generation());
1305#endif
1306 return !(*this == aRhs);
1307 }
1308
1309 T& operator*() const {
1310#ifdef DEBUG
1311 MOZ_ASSERT(found());
17
Assuming the condition is false
18
Taking false branch
19
Loop condition is false. Exiting loop
1312 MOZ_ASSERT(mGeneration == mTable->generation());
20
Called C++ object pointer is null
1313#endif
1314 return mSlot.get();
1315 }
1316
1317 T* operator->() const {
1318#ifdef DEBUG
1319 MOZ_ASSERT(found());
1320 MOZ_ASSERT(mGeneration == mTable->generation());
1321#endif
1322 return &mSlot.get();
1323 }
1324 };
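The warning reported at line 1312 follows from this class: a default-constructed Ptr (steps 8-10 above) stores a null mTable in DEBUG builds, and operator* then reads mTable->generation() at line 1312 (steps 17-20). On that path, found() at line 1311 would normally return false for an invalid slot without ever touching mTable and the assertion would abort first, so the report appears to hinge on the analyzer treating that check as passing. The intended calling pattern, as a hedged sketch; t, k and use() are hypothetical, and lookup() is declared further below:

    // Always test a Ptr before dereferencing it; operator bool() calls found().
    if (auto p = t.lookup(k)) {
      use(*p);   // safe: the element is live and the generation matches
    }
    // Dereferencing a not-found or stale Ptr violates the contract documented at
    // source lines 1237-1240, which is what the DEBUG-only assertions guard.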
1325
1326 // A Ptr that can be used to add a key after a failed lookup.
1327 class AddPtr : public Ptr {
1328 friend class HashTable;
1329
1330 HashNumber mKeyHash;
1331#ifdef DEBUG
1332 uint64_t mMutationCount;
1333#endif
1334
1335 AddPtr(Slot aSlot, const HashTable& aTable, HashNumber aHashNumber)
1336 : Ptr(aSlot, aTable),
1337 mKeyHash(aHashNumber)
1338#ifdef DEBUG
1339 ,
1340 mMutationCount(aTable.mMutationCount)
1341#endif
1342 {
1343 }
1344
1345 // This constructor is used when lookupForAdd() is performed on a table
1346 // lacking entry storage; it leaves mSlot null but initializes everything
1347 // else.
1348 AddPtr(const HashTable& aTable, HashNumber aHashNumber)
1349 : Ptr(aTable),
1350 mKeyHash(aHashNumber)
1351#ifdef DEBUG
1352 ,
1353 mMutationCount(aTable.mMutationCount)
1354#endif
1355 {
1356 MOZ_ASSERT(isLive());
1357 }
1358
1359 bool isLive() const { return isLiveHash(mKeyHash); }
1360
1361 public:
1362 AddPtr() : mKeyHash(0) {}
1363 };
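AddPtr carries the already-computed key hash so that a failed lookup can be turned into an insertion without hashing or probing twice. A hedged sketch of that pattern using lookupForAdd() and add(), both declared further below; t, k and the element arguments are hypothetical:

    if (auto p = t.lookupForAdd(k)) {
      // Hit: *p is the existing element.
    } else if (!t.add(p, k)) {
      // Miss, and the fallible add() could not allocate (or hit a simulated OOM).
    }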
1364
1365 // A hash table iterator that (mostly) doesn't allow table modifications.
1366 // As with Ptr/AddPtr, Iterator objects must not be used after any mutating
1367 // hash table operation unless the |generation()| is tested.
1368 class Iterator {
1369 void moveToNextLiveEntry() {
1370 while (++mCur < mEnd && !mCur.isLive()) {
1371 continue;
1372 }
1373 }
1374
1375 protected:
1376 friend class HashTable;
1377
1378 explicit Iterator(const HashTable& aTable)
1379 : mCur(aTable.slotForIndex(0)),
1380 mEnd(aTable.slotForIndex(aTable.capacity()))
1381#ifdef DEBUG
1382 ,
1383 mTable(aTable),
1384 mMutationCount(aTable.mMutationCount),
1385 mGeneration(aTable.generation()),
1386 mValidEntry(true)
1387#endif
1388 {
1389 if (!done() && !mCur.isLive()) {
1390 moveToNextLiveEntry();
1391 }
1392 }
1393
1394 Slot mCur;
1395 Slot mEnd;
1396#ifdef DEBUG
1397 const HashTable& mTable;
1398 uint64_t mMutationCount;
1399 Generation mGeneration;
1400 bool mValidEntry;
1401#endif
1402
1403 public:
1404 bool done() const {
1405 MOZ_ASSERT(mGeneration == mTable.generation());
1406 MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
1407 return mCur == mEnd;
1408 }
1409
1410 T& get() const {
1411 MOZ_ASSERT(!done());
1412 MOZ_ASSERT(mValidEntry);
1413 MOZ_ASSERT(mGeneration == mTable.generation());
1414 MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
1415 return mCur.get();
1416 }
1417
1418 void next() {
1419 MOZ_ASSERT(!done());
1420 MOZ_ASSERT(mGeneration == mTable.generation());
1421 MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
1422 moveToNextLiveEntry();
1423#ifdef DEBUG
1424 mValidEntry = true;
1425#endif
1426 }
1427 };
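A hedged usage sketch of the read-only Iterator above via iter(), declared further below; t is a hypothetical, fully instantiated table and T its element type:

    for (auto it = t.iter(); !it.done(); it.next()) {
      const T& element = it.get();   // no mutating table operation is allowed here
      // ... read element ...
    }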
1428
1429 // A hash table iterator that permits modification, removal and rekeying.
1430 // Since rehashing when elements were removed during enumeration would be
1431 // bad, it is postponed until the ModIterator is destructed. Since the
1432 // ModIterator's destructor touches the hash table, the user must ensure
1433 // that the hash table is still alive when the destructor runs.
1434 class ModIterator : public Iterator {
1435 friend class HashTable;
1436
1437 HashTable& mTable;
1438 bool mRekeyed;
1439 bool mRemoved;
1440
1441 // ModIterator is movable but not copyable.
1442 ModIterator(const ModIterator&) = delete;
1443 void operator=(const ModIterator&) = delete;
1444
1445 protected:
1446 explicit ModIterator(HashTable& aTable)
1447 : Iterator(aTable), mTable(aTable), mRekeyed(false), mRemoved(false) {}
1448
1449 public:
1450 MOZ_IMPLICIT ModIterator(ModIterator&& aOther)
1451 : Iterator(aOther),
1452 mTable(aOther.mTable),
1453 mRekeyed(aOther.mRekeyed),
1454 mRemoved(aOther.mRemoved) {
1455 aOther.mRekeyed = false;
1456 aOther.mRemoved = false;
1457 }
1458
1459 // Removes the current element from the table, leaving |get()|
1460 // invalid until the next call to |next()|.
1461 void remove() {
1462 mTable.remove(this->mCur);
1463 mRemoved = true;
1464#ifdef DEBUG
1465 this->mValidEntry = false;
1466 this->mMutationCount = mTable.mMutationCount;
1467#endif
1468 }
1469
1470 NonConstT& getMutable() {
1471 MOZ_ASSERT(!this->done());
1472 MOZ_ASSERT(this->mValidEntry);
1473 MOZ_ASSERT(this->mGeneration == this->Iterator::mTable.generation());
1474 MOZ_ASSERT(this->mMutationCount == this->Iterator::mTable.mMutationCount);
1475 return this->mCur.getMutable();
1476 }
1477
1478 // Removes the current element and re-inserts it into the table with
1479 // a new key at the new Lookup position. |get()| is invalid after
1480 // this operation until the next call to |next()|.
1481 void rekey(const Lookup& l, const Key& k) {
1482 MOZ_ASSERT(&k != &HashPolicy::getKey(this->mCur.get()));
1483 Ptr p(this->mCur, mTable);
1484 mTable.rekeyWithoutRehash(p, l, k);
1485 mRekeyed = true;
1486#ifdef DEBUG
1487 this->mValidEntry = false;
1488 this->mMutationCount = mTable.mMutationCount;
1489#endif
1490 }
1491
1492 void rekey(const Key& k) { rekey(k, k); }
1493
1494 // Potentially rehashes the table.
1495 ~ModIterator() {
1496 if (mRekeyed) {
1497 mTable.mGen++;
1498 mTable.infallibleRehashIfOverloaded();
1499 }
1500
1501 if (mRemoved) {
1502 mTable.compact();
1503 }
1504 }
1505 };
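A hedged sketch of removal during enumeration with ModIterator via modIter(), declared further below; t and the shouldDrop() predicate are hypothetical. The deferred compaction described in the comment above runs when the iterator goes out of scope:

    {
      for (auto it = t.modIter(); !it.done(); it.next()) {
        if (shouldDrop(it.get())) {
          it.remove();               // get() is invalid until the next call to next()
        }
      }
    }                                // ~ModIterator() may shrink the table here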
1506
1507 // Range is similar to Iterator, but uses different terminology.
1508 class Range {
1509 friend class HashTable;
1510
1511 Iterator mIter;
1512
1513 protected:
1514 explicit Range(const HashTable& table) : mIter(table) {}
1515
1516 public:
1517 bool empty() const { return mIter.done(); }
1518
1519 T& front() const { return mIter.get(); }
1520
1521 void popFront() { return mIter.next(); }
1522 };
1523
1524 // Enum is similar to ModIterator, but uses different terminology.
1525 class Enum {
1526 ModIterator mIter;
1527
1528 // Enum is movable but not copyable.
1529 Enum(const Enum&) = delete;
1530 void operator=(const Enum&) = delete;
1531
1532 public:
1533 template <class Map>
1534 explicit Enum(Map& map) : mIter(map.mImpl) {}
1535
1536 MOZ_IMPLICIT Enum(Enum&& other) : mIter(std::move(other.mIter)) {}
1537
1538 bool empty() const { return mIter.done(); }
1539
1540 T& front() const { return mIter.get(); }
1541
1542 void popFront() { return mIter.next(); }
1543
1544 void removeFront() { mIter.remove(); }
1545
1546 NonConstT& mutableFront() { return mIter.getMutable(); }
1547
1548 void rekeyFront(const Lookup& aLookup, const Key& aKey) {
1549 mIter.rekey(aLookup, aKey);
1550 }
1551
1552 void rekeyFront(const Key& aKey) { mIter.rekey(aKey); }
1553 };
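Range and Enum wrap the same two iterators under empty()/front()/popFront() terminology. A hedged sketch using all(), declared further below; t and T are hypothetical as before:

    for (auto r = t.all(); !r.empty(); r.popFront()) {
      const T& element = r.front();  // read-only access, like Iterator::get()
      // ... read element ...
    }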
1554
1555 // HashTable is movable
1556 HashTable(HashTable&& aRhs) : AllocPolicy(std::move(aRhs)) { moveFrom(aRhs); }
1557 HashTable& operator=(HashTable&& aRhs) {
1558 MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
1559 if (mTable) {
1560 destroyTable(*this, mTable, capacity());
1561 }
1562 AllocPolicy::operator=(std::move(aRhs));
1563 moveFrom(aRhs);
1564 return *this;
1565 }
1566
1567 void swap(HashTable& aOther) {
1568 ReentrancyGuard g1(*this);
1569 ReentrancyGuard g2(aOther);
1570
1571 // Manual swap of generation because it's a bitfield
1572 uint64_t generation = mGen;
1573 mGen = aOther.mGen;
1574 aOther.mGen = generation;
1575
1576 // Manual swap of hashShift because it's a bitfield
1577 uint64_t hashShift = mHashShift;
1578 mHashShift = aOther.mHashShift;
1579 aOther.mHashShift = hashShift;
1580
1581 std::swap(mTable, aOther.mTable);
1582 std::swap(mEntryCount, aOther.mEntryCount);
1583 std::swap(mRemovedCount, aOther.mRemovedCount);
1584#ifdef DEBUG
1585 std::swap(mMutationCount, aOther.mMutationCount);
1586 std::swap(mEntered, aOther.mEntered);
1587#endif
1588 }
1589
1590 private:
1591 void moveFrom(HashTable& aRhs) {
1592 mGen = aRhs.mGen;
1593 mHashShift = aRhs.mHashShift;
1594 mTable = aRhs.mTable;
1595 mEntryCount = aRhs.mEntryCount;
1596 mRemovedCount = aRhs.mRemovedCount;
1597#ifdef DEBUG
1598 mMutationCount = aRhs.mMutationCount;
1599 mEntered = aRhs.mEntered;
1600#endif
1601 aRhs.mTable = nullptr;
1602 aRhs.clearAndCompact();
1603 }
1604
1605 // HashTable is not copyable or assignable
1606 HashTable(const HashTable&) = delete;
1607 void operator=(const HashTable&) = delete;
1608
1609 static const uint32_t CAP_BITS = 30;
1610
1611 public:
1612 uint64_t mGen : 56; // entry storage generation number
1613 uint64_t mHashShift : 8; // multiplicative hash shift
1614 char* mTable; // entry storage
1615 uint32_t mEntryCount; // number of entries in mTable
1616 uint32_t mRemovedCount; // removed entry sentinels in mTable
1617
1618#ifdef DEBUG
1619 uint64_t mMutationCount;
1620 mutable bool mEntered;
1621#endif
1622
1623 // The default initial capacity is 32 (enough to hold 16 elements), but it
1624 // can be as low as 4.
1625 static const uint32_t sDefaultLen = 16;
1626 static const uint32_t sMinCapacity = 4;
1627 // See the comments in HashTableEntry about this value.
1628 static_assert(sMinCapacity >= 4, "too-small sMinCapacity breaks assumptions");
1629 static const uint32_t sMaxInit = 1u << (CAP_BITS - 1);
1630 static const uint32_t sMaxCapacity = 1u << CAP_BITS;
1631
1632 // Hash-table alpha is conceptually a fraction, but to avoid floating-point
1633 // math we implement it as a ratio of integers.
1634 static const uint8_t sAlphaDenominator = 4;
1635 static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
1636 static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
1637
1638 static const HashNumber sFreeKey = Entry::sFreeKey;
1639 static const HashNumber sRemovedKey = Entry::sRemovedKey;
1640 static const HashNumber sCollisionBit = Entry::sCollisionBit;
1641
1642 static uint32_t bestCapacity(uint32_t aLen) {
1643 static_assert(
1644 (sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
1645 "multiplication in numerator below could overflow");
1646 static_assert(
1647 sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
1648 "numerator calculation below could potentially overflow");
1649
1650 // Callers should ensure this is true.
1651 MOZ_ASSERT(aLen <= sMaxInit);
1652
1653 // Compute the smallest capacity allowing |aLen| elements to be
1654 // inserted without rehashing: ceil(aLen / max-alpha). (Ceiling
1655 // integral division: <http://stackoverflow.com/a/2745086>.)
1656 uint32_t capacity = (aLen * sAlphaDenominator + sMaxAlphaNumerator - 1) /
1657 sMaxAlphaNumerator;
1658 capacity = (capacity < sMinCapacity) ? sMinCapacity : RoundUpPow2(capacity);
1659
1660 MOZ_ASSERT(capacity >= aLen);
1661 MOZ_ASSERT(capacity <= sMaxCapacity);
1662
1663 return capacity;
1664 }
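A worked instance of the computation above, using the constants already defined (max alpha 3/4, sMinCapacity 4): for the default length of 16, ceil(16 * 4 / 3) = 22, and RoundUpPow2(22) = 32, matching the earlier comment that the default initial capacity is 32. The same arithmetic as a small self-contained check; the helper name is illustrative:

    #include <cstdint>
    constexpr uint32_t ceilOverMaxAlpha(uint32_t aLen) {
      return (aLen * 4 + 3 - 1) / 3;   // ceil(aLen / (3/4)), as in bestCapacity()
    }
    static_assert(ceilOverMaxAlpha(16) == 22, "then RoundUpPow2(22) == 32");
    static_assert(ceilOverMaxAlpha(3) == 4, "small lengths land at sMinCapacity");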
1665
1666 static uint32_t hashShift(uint32_t aLen) {
1667 // Reject all lengths whose initial computed capacity would exceed
1668 // sMaxCapacity. Round that maximum aLen down to the nearest power of two
1669 // for speedier code.
1670 if (MOZ_UNLIKELY(aLen > sMaxInit)) {
1671 MOZ_CRASH("initial length is too large");
1672 }
1673
1674 return kHashNumberBits - mozilla::CeilingLog2(bestCapacity(aLen));
1675 }
1676
1677 static bool isLiveHash(HashNumber aHash) { return Entry::isLiveHash(aHash); }
1678
1679 static HashNumber prepareHash(HashNumber aInputHash) {
1680 HashNumber keyHash = ScrambleHashCode(aInputHash);
1681
1682 // Avoid reserved hash codes.
1683 if (!isLiveHash(keyHash)) {
1684 keyHash -= (sRemovedKey + 1);
1685 }
1686 return keyHash & ~sCollisionBit;
1687 }
1688
1689 enum FailureBehavior { DontReportFailure = false, ReportFailure = true };
1690
1691 // Fake a struct that we're going to alloc. See the comments in
1692 // HashTableEntry about how the table is laid out, and why it's safe.
1693 struct FakeSlot {
1694 unsigned char c[sizeof(HashNumber) + sizeof(typename Entry::NonConstT)];
1695 };
1696
1697 static char* createTable(AllocPolicy& aAllocPolicy, uint32_t aCapacity,
1698 FailureBehavior aReportFailure = ReportFailure) {
1699 FakeSlot* fake =
1700 aReportFailure
1701 ? aAllocPolicy.template pod_malloc<FakeSlot>(aCapacity)
1702 : aAllocPolicy.template maybe_pod_malloc<FakeSlot>(aCapacity);
1703
1704 MOZ_ASSERT((reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment) ==
1705 0);
1706
1707 char* table = reinterpret_cast<char*>(fake);
1708 if (table) {
1709 forEachSlot(table, aCapacity, [&](Slot& slot) {
1710 *slot.mKeyHash = sFreeKey;
1711 new (KnownNotNull, slot.toEntry()) Entry();
1712 });
1713 }
1714 return table;
1715 }
1716
1717 static void destroyTable(AllocPolicy& aAllocPolicy, char* aOldTable,
1718 uint32_t aCapacity) {
1719 forEachSlot(aOldTable, aCapacity, [&](const Slot& slot) {
1720 if (slot.isLive()) {
1721 slot.toEntry()->destroyStoredT();
1722 }
1723 });
1724 freeTable(aAllocPolicy, aOldTable, aCapacity);
1725 }
1726
1727 static void freeTable(AllocPolicy& aAllocPolicy, char* aOldTable,
1728 uint32_t aCapacity) {
1729 FakeSlot* fake = reinterpret_cast<FakeSlot*>(aOldTable);
1730 aAllocPolicy.free_(fake, aCapacity);
1731 }
1732
1733 public:
1734 HashTable(AllocPolicy aAllocPolicy, uint32_t aLen)
1735 : AllocPolicy(std::move(aAllocPolicy)),
1736 mGen(0),
1737 mHashShift(hashShift(aLen)),
1738 mTable(nullptr),
1739 mEntryCount(0),
1740 mRemovedCount(0)
1741#ifdef DEBUG
1742 ,
1743 mMutationCount(0),
1744 mEntered(false)
1745#endif
1746 {
1747 }
1748
1749 explicit HashTable(AllocPolicy aAllocPolicy)
1750 : HashTable(aAllocPolicy, sDefaultLen) {}
1751
1752 ~HashTable() {
1753 if (mTable) {
1754 destroyTable(*this, mTable, capacity());
1755 }
1756 }
1757
1758 private:
1759 HashNumber hash1(HashNumber aHash0) const { return aHash0 >> mHashShift; }
1760
1761 struct DoubleHash {
1762 HashNumber mHash2;
1763 HashNumber mSizeMask;
1764 };
1765
1766 DoubleHash hash2(HashNumber aCurKeyHash) const {
1767 uint32_t sizeLog2 = kHashNumberBits - mHashShift;
1768 DoubleHash dh = {((aCurKeyHash << sizeLog2) >> mHashShift) | 1,
1769 (HashNumber(1) << sizeLog2) - 1};
1770 return dh;
1771 }
1772
1773 static HashNumber applyDoubleHash(HashNumber aHash1,
1774 const DoubleHash& aDoubleHash) {
1775 return WrappingSubtract(aHash1, aDoubleHash.mHash2) & aDoubleHash.mSizeMask;
1776 }
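A hedged walk-through of the double hashing above for a 32-slot table, assuming kHashNumberBits is 32 (so mHashShift is 27 and sizeLog2 is 5): hash1() takes the top five bits of the prepared hash as the starting index, hash2() takes the next five bits and forces the result odd to form the stride, and applyDoubleHash() steps by that stride modulo the capacity; an odd stride in a power-of-two table visits every slot. The same arithmetic, specialized to capacity 32:

    #include <cstdint>
    constexpr uint32_t kShift = 27, kMask = 31;                                      // capacity 32
    constexpr uint32_t h1(uint32_t keyHash)     { return keyHash >> kShift; }        // top 5 bits
    constexpr uint32_t stride(uint32_t keyHash) { return ((keyHash << 5) >> kShift) | 1; }  // next 5 bits, made odd
    constexpr uint32_t next(uint32_t i, uint32_t s) { return (i - s) & kMask; }      // wraps like WrappingSubtract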
1777
1778 static MOZ_ALWAYS_INLINE bool match(T& aEntry, const Lookup& aLookup) {
1779 return HashPolicy::match(HashPolicy::getKey(aEntry), aLookup);
1780 }
1781
1782 enum LookupReason { ForNonAdd, ForAdd };
1783
1784 Slot slotForIndex(HashNumber aIndex) const {
1785 auto hashes = reinterpret_cast<HashNumber*>(mTable);
1786 auto entries = reinterpret_cast<Entry*>(&hashes[capacity()]);
1787 return Slot(&entries[aIndex], &hashes[aIndex]);
1788 }
1789
1790 // Warning: in order for readonlyThreadsafeLookup() to be safe this
1791 // function must not modify the table in any way when Reason==ForNonAdd.
1792 template <LookupReason Reason>
1793 MOZ_ALWAYS_INLINE Slot lookup(const Lookup& aLookup,
1794 HashNumber aKeyHash) const {
1795 MOZ_ASSERT(isLiveHash(aKeyHash));
1796 MOZ_ASSERT(!(aKeyHash & sCollisionBit));
1797 MOZ_ASSERT(mTable);
1798
1799 // Compute the primary hash address.
1800 HashNumber h1 = hash1(aKeyHash);
1801 Slot slot = slotForIndex(h1);
1802
1803 // Miss: return space for a new entry.
1804 if (slot.isFree()) {
1805 return slot;
1806 }
1807
1808 // Hit: return entry.
1809 if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) {
1810 return slot;
1811 }
1812
1813 // Collision: double hash.
1814 DoubleHash dh = hash2(aKeyHash);
1815
1816 // Save the first removed entry pointer so we can recycle later.
1817 Maybe<Slot> firstRemoved;
1818
1819 while (true) {
1820 if (Reason == ForAdd && !firstRemoved) {
1821 if (MOZ_UNLIKELY(slot.isRemoved())) {
1822 firstRemoved.emplace(slot);
1823 } else {
1824 slot.setCollision();
1825 }
1826 }
1827
1828 h1 = applyDoubleHash(h1, dh);
1829
1830 slot = slotForIndex(h1);
1831 if (slot.isFree()) {
1832 return firstRemoved.refOr(slot);
1833 }
1834
1835 if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) {
1836 return slot;
1837 }
1838 }
1839 }
1840
1841 // This is a copy of lookup() hardcoded to the assumptions:
1842 // 1. the lookup is for an add;
1843 // 2. the key, whose |keyHash| has been passed, is not in the table.
1844 Slot findNonLiveSlot(HashNumber aKeyHash) {
1845 MOZ_ASSERT(!(aKeyHash & sCollisionBit));
1846 MOZ_ASSERT(mTable);
1847
1848 // We assume 'aKeyHash' has already been distributed.
1849
1850 // Compute the primary hash address.
1851 HashNumber h1 = hash1(aKeyHash);
1852 Slot slot = slotForIndex(h1);
1853
1854 // Miss: return space for a new entry.
1855 if (!slot.isLive()) {
1856 return slot;
1857 }
1858
1859 // Collision: double hash.
1860 DoubleHash dh = hash2(aKeyHash);
1861
1862 while (true) {
1863 slot.setCollision();
1864
1865 h1 = applyDoubleHash(h1, dh);
1866
1867 slot = slotForIndex(h1);
1868 if (!slot.isLive()) {
1869 return slot;
1870 }
1871 }
1872 }
1873
1874 enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
1875
1876 RebuildStatus changeTableSize(
1877 uint32_t newCapacity, FailureBehavior aReportFailure = ReportFailure) {
1878 MOZ_ASSERT(IsPowerOfTwo(newCapacity));
1879 MOZ_ASSERT(!!mTable == !!capacity());
1880
1881 // Look, but don't touch, until we succeed in getting new entry store.
1882 char* oldTable = mTable;
1883 uint32_t oldCapacity = capacity();
1884 uint32_t newLog2 = mozilla::CeilingLog2(newCapacity);
1885
1886 if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
1887 if (aReportFailure) {
1888 this->reportAllocOverflow();
1889 }
1890 return RehashFailed;
1891 }
1892
1893 char* newTable = createTable(*this, newCapacity, aReportFailure);
1894 if (!newTable) {
1895 return RehashFailed;
1896 }
1897
1898 // We can't fail from here on, so update table parameters.
1899 mHashShift = kHashNumberBits - newLog2;
1900 mRemovedCount = 0;
1901 mGen++;
1902 mTable = newTable;
1903
1904 // Copy only live entries, leaving removed ones behind.
1905 forEachSlot(oldTable, oldCapacity, [&](Slot& slot) {
1906 if (slot.isLive()) {
1907 HashNumber hn = slot.getKeyHash();
1908 findNonLiveSlot(hn).setLive(
1909 hn, std::move(const_cast<typename Entry::NonConstT&>(slot.get())));
1910 }
1911
1912 slot.clear();
1913 });
1914
1915 // All entries have been destroyed, no need to destroyTable.
1916 freeTable(*this, oldTable, oldCapacity);
1917 return Rehashed;
1918 }
1919
1920 RebuildStatus rehashIfOverloaded(
1921 FailureBehavior aReportFailure = ReportFailure) {
1922 static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
1923 "multiplication below could overflow");
1924
1925 // Note: if capacity() is zero, this will always succeed, which is
1926 // what we want.
1927 bool overloaded = mEntryCount + mRemovedCount >=
1928 capacity() * sMaxAlphaNumerator / sAlphaDenominator;
1929
1930 if (!overloaded) {
1931 return NotOverloaded;
1932 }
1933
1934 // Succeed if a quarter or more of all entries are removed. Note that this
1935 // always succeeds if capacity() == 0 (i.e. entry storage has not been
1936 // allocated), which is what we want, because it means changeTableSize()
1937 // will allocate the requested capacity rather than doubling it.
1938 bool manyRemoved = mRemovedCount >= (capacity() >> 2);
1939 uint32_t newCapacity = manyRemoved ? rawCapacity() : rawCapacity() * 2;
1940 return changeTableSize(newCapacity, aReportFailure);
1941 }
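Concretely, with the alpha constants above: a 32-slot table counts as overloaded once mEntryCount + mRemovedCount reaches 32 * 3 / 4 = 24; the rebuild then doubles the raw capacity unless at least 32 / 4 = 8 of those slots are tombstones, in which case it rehashes at the current size to reclaim them. The threshold as a small self-contained check; the helper name is illustrative:

    #include <cstdint>
    constexpr bool overloaded(uint32_t entries, uint32_t removed, uint32_t capacity) {
      return entries + removed >= capacity * 3 / 4;   // sMaxAlphaNumerator / sAlphaDenominator
    }
    static_assert(overloaded(20, 4, 32), "24 of 32 slots in use");
    static_assert(!overloaded(20, 3, 32), "23 of 32 slots in use");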
1942
1943 void infallibleRehashIfOverloaded() {
1944 if (rehashIfOverloaded(DontReportFailure) == RehashFailed) {
1945 rehashTableInPlace();
1946 }
1947 }
1948
1949 void remove(Slot& aSlot) {
1950 MOZ_ASSERT(mTable);
1951
1952 if (aSlot.hasCollision()) {
1953 aSlot.removeLive();
1954 mRemovedCount++;
1955 } else {
1956 aSlot.clearLive();
1957 }
1958 mEntryCount--;
1959#ifdef DEBUG
1960 mMutationCount++;
1961#endif
1962 }
1963
1964 void shrinkIfUnderloaded() {
1965 static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
1966 "multiplication below could overflow");
1967 bool underloaded =
1968 capacity() > sMinCapacity &&
1969 mEntryCount <= capacity() * sMinAlphaNumerator / sAlphaDenominator;
1970
1971 if (underloaded) {
1972 (void)changeTableSize(capacity() / 2, DontReportFailure);
1973 }
1974 }
1975
1976 // This is identical to changeTableSize(currentSize), but without requiring
1977 // a second table. We do this by recycling the collision bits to tell us if
1978 // the element is already inserted or still waiting to be inserted. Since
1979 // already-inserted elements win any conflicts, we get the same table as we
1980 // would have gotten through random insertion order.
1981 void rehashTableInPlace() {
1982 mRemovedCount = 0;
1983 mGen++;
1984 forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.unsetCollision(); });
1985 for (uint32_t i = 0; i < capacity();) {
1986 Slot src = slotForIndex(i);
1987
1988 if (!src.isLive() || src.hasCollision()) {
1989 ++i;
1990 continue;
1991 }
1992
1993 HashNumber keyHash = src.getKeyHash();
1994 HashNumber h1 = hash1(keyHash);
1995 DoubleHash dh = hash2(keyHash);
1996 Slot tgt = slotForIndex(h1);
1997 while (true) {
1998 if (!tgt.hasCollision()) {
1999 src.swap(tgt);
2000 tgt.setCollision();
2001 break;
2002 }
2003
2004 h1 = applyDoubleHash(h1, dh);
2005 tgt = slotForIndex(h1);
2006 }
2007 }
2008
2009 // TODO: this algorithm leaves collision bits on *all* elements, even if
2010 // they are on no collision path. We have the option of setting the
2011 // collision bits correctly on a subsequent pass or skipping the rehash
2012 // unless we are totally filled with tombstones: benchmark to find out
2013 // which approach is best.
2014 }
2015
2016 // Prefer to use putNewInfallible; this function does not check
2017 // invariants.
2018 template <typename... Args>
2019 void putNewInfallibleInternal(HashNumber aKeyHash, Args&&... aArgs) {
2020 MOZ_ASSERT(mTable);
2021
2022 Slot slot = findNonLiveSlot(aKeyHash);
2023
2024 if (slot.isRemoved()) {
2025 mRemovedCount--;
2026 aKeyHash |= sCollisionBit;
2027 }
2028
2029 slot.setLive(aKeyHash, std::forward<Args>(aArgs)...);
2030 mEntryCount++;
2031#ifdef DEBUG
2032 mMutationCount++;
2033#endif
2034 }
2035
2036 public:
2037 void clear() {
2038 forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.clear(); });
2039 mRemovedCount = 0;
2040 mEntryCount = 0;
2041#ifdef DEBUG
2042 mMutationCount++;
2043#endif
2044 }
2045
2046 // Resize the table down to the smallest capacity that doesn't overload the
2047 // table. Since we call shrinkIfUnderloaded() on every remove, you only need
2048 // to call this after a bulk removal of items done without calling remove().
2049 void compact() {
2050 if (empty()) {
2051 // Free the entry storage.
2052 freeTable(*this, mTable, capacity());
2053 mGen++;
2054 mHashShift = hashShift(0); // gives minimum capacity on regrowth
2055 mTable = nullptr;
2056 mRemovedCount = 0;
2057 return;
2058 }
2059
2060 uint32_t bestCapacity = this->bestCapacity(mEntryCount);
2061 MOZ_ASSERT(bestCapacity <= capacity());
2062
2063 if (bestCapacity < capacity()) {
2064 (void)changeTableSize(bestCapacity, DontReportFailure);
2065 }
2066 }
2067
2068 void clearAndCompact() {
2069 clear();
2070 compact();
2071 }
2072
2073 [[nodiscard]] bool reserve(uint32_t aLen) {
2074 if (aLen == 0) {
2075 return true;
2076 }
2077
2078 if (MOZ_UNLIKELY(aLen > sMaxInit)(__builtin_expect(!!(aLen > sMaxInit), 0))) {
2079 this->reportAllocOverflow();
2080 return false;
2081 }
2082
2083 uint32_t bestCapacity = this->bestCapacity(aLen);
2084 if (bestCapacity <= capacity()) {
2085 return true; // Capacity is already sufficient.
2086 }
2087
2088 RebuildStatus status = changeTableSize(bestCapacity, ReportFailure);
2089 MOZ_ASSERT(status != NotOverloaded);
2090 return status != RehashFailed;
2091 }
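A hedged usage sketch of the fallible reserve() above; t is a hypothetical table and n an element count known in advance:

    if (!t.reserve(n)) {
      return false;   // allocation failed, or n exceeded sMaxInit
    }
    // Up to n subsequent insertions should not need to grow the table again.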
2092
2093 Iterator iter() const { return Iterator(*this); }
2094
2095 ModIterator modIter() { return ModIterator(*this); }
2096
2097 Range all() const { return Range(*this); }
2098
2099 bool empty() const { return mEntryCount == 0; }
2100
2101 uint32_t count() const { return mEntryCount; }
2102
2103 uint32_t rawCapacity() const { return 1u << (kHashNumberBits - mHashShift); }
2104
2105 uint32_t capacity() const { return mTable ? rawCapacity() : 0; }
2106
2107 Generation generation() const { return Generation(mGen); }
2108
2109 size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
2110 return aMallocSizeOf(mTable);
2111 }
2112
2113 size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
2114 return aMallocSizeOf(this) + shallowSizeOfExcludingThis(aMallocSizeOf);
2115 }
2116
2117 MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
2118 if (empty()) {
7
Taking true branch
2119 return Ptr();
8
Calling default constructor for 'Ptr'
10
Returning from default constructor for 'Ptr'
2120 }
2121
2122 HashNumber inputHash;
2123 if (!MaybeGetHash<HashPolicy>(aLookup, &inputHash)) {
2124 return Ptr();
2125 }
2126
2127 HashNumber keyHash = prepareHash(inputHash);
2128 return Ptr(lookup<ForNonAdd>(aLookup, keyHash), *this);
2129 }
2130
2131 MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const {
2132 ReentrancyGuard g(*this);
2133 return readonlyThreadsafeLookup(aLookup);
6
Calling 'HashTable::readonlyThreadsafeLookup'
11
Returning from 'HashTable::readonlyThreadsafeLookup'
2134 }
2135
2136 MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
2137 ReentrancyGuard g(*this);
2138
2139 HashNumber inputHash;
2140 if (!EnsureHash<HashPolicy>(aLookup, &inputHash)) {
2141 return AddPtr();
2142 }
2143
2144 HashNumber keyHash = prepareHash(inputHash);
2145
2146 if (!mTable) {
2147 return AddPtr(*this, keyHash);
2148 }
2149
2150 // Directly call the constructor in the return statement to avoid
2151 // excess copying when building with Visual Studio 2017.
2152 // See bug 1385181.
2153 return AddPtr(lookup<ForAdd>(aLookup, keyHash), *this, keyHash);
2154 }
2155
2156 template <typename... Args>
2157 [[nodiscard]] bool add(AddPtr& aPtr, Args&&... aArgs) {
2158 ReentrancyGuard g(*this);
2159 MOZ_ASSERT_IF(aPtr.isValid(), mTable);
2160 MOZ_ASSERT_IF(aPtr.isValid(), aPtr.mTable == this);
2161 MOZ_ASSERT(!aPtr.found());
2162 MOZ_ASSERT(!(aPtr.mKeyHash & sCollisionBit));
2163
2164 // Check for error from ensureHash() here.
2165 if (!aPtr.isLive()) {
2166 return false;
2167 }
2168
2169 MOZ_ASSERT(aPtr.mGeneration == generation());
2170#ifdef DEBUG
2171 MOZ_ASSERT(aPtr.mMutationCount == mMutationCount);
2172#endif
2173
2174 if (!aPtr.isValid()) {
2175 MOZ_ASSERT(!mTable && mEntryCount == 0);
2176 uint32_t newCapacity = rawCapacity();
2177 RebuildStatus status = changeTableSize(newCapacity, ReportFailure);
2178 MOZ_ASSERT(status != NotOverloaded);
2179 if (status == RehashFailed) {
2180 return false;
2181 }
2182 aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash);
2183
2184 } else if (aPtr.mSlot.isRemoved()) {
2185 // Changing an entry from removed to live does not affect whether we are
2186 // overloaded and can be handled separately.
2187 if (!this->checkSimulatedOOM()) {
2188 return false;
2189 }
2190 mRemovedCount--;
2191 aPtr.mKeyHash |= sCollisionBit;
2192
2193 } else {
2194 // Preserve the validity of |aPtr.mSlot|.
2195 RebuildStatus status = rehashIfOverloaded();
2196 if (status == RehashFailed) {
2197 return false;
2198 }
2199 if (status == NotOverloaded && !this->checkSimulatedOOM()) {
2200 return false;
2201 }
2202 if (status == Rehashed) {
2203 aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash);
2204 }
2205 }
2206
2207 aPtr.mSlot.setLive(aPtr.mKeyHash, std::forward<Args>(aArgs)...);
2208 mEntryCount++;
2209#ifdef DEBUG
2210 mMutationCount++;
2211 aPtr.mGeneration = generation();
2212 aPtr.mMutationCount = mMutationCount;
2213#endif
2214 return true;
2215 }
2216
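
add() above handles three cases: with no storage yet it allocates a table of rawCapacity() and re-finds a non-live slot; a previously removed slot is simply revived; otherwise it rehashes if the table is overloaded before writing the entry. A minimal sketch of how this path is normally reached through the public mozilla::HashMap wrapper, assuming the usual lookupForAdd()/add() API; the CountOccurrence helper and the uint32_t counter map are illustrative only, not part of this header.

  // Illustrative sketch, not part of HashTable.h: the fallible add() above
  // backs the lookupForAdd()/add() idiom on the public mozilla::HashMap.
  #include <cstdint>
  #include "mozilla/HashTable.h"

  static bool CountOccurrence(mozilla::HashMap<uint32_t, uint32_t>& aCounts,
                              uint32_t aKey) {
    auto p = aCounts.lookupForAdd(aKey);  // hashes once and remembers the slot
    if (p) {
      p->value()++;                       // entry already live: just update it
      return true;
    }
    // Not found: add() may revive a removed slot or grow the table, and it
    // returns false on OOM or a failed rehash, matching add() above.
    return aCounts.add(p, aKey, 1u);
  }
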
2217 // Note: |aLookup| may reference pieces of arguments in |aArgs|, so this
2218 // function must take care not to use |aLookup| after moving |aArgs|.
2219 template <typename... Args>
2220 void putNewInfallible(const Lookup& aLookup, Args&&... aArgs) {
2221 MOZ_ASSERT(!lookup(aLookup).found())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(!lookup(aLookup).found())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(!lookup(aLookup).found()))),
0))) { do { } while (false); MOZ_ReportAssertionFailure("!lookup(aLookup).found()"
, "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h"
, 2221); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!lookup(aLookup).found()"
")"); do { *((volatile int*)__null) = 2221; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
2222 ReentrancyGuard g(*this);
2223 HashNumber keyHash = prepareHash(HashPolicy::hash(aLookup));
2224 putNewInfallibleInternal(keyHash, std::forward<Args>(aArgs)...);
2225 }
2226
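
putNewInfallible() asserts that the key is absent and writes the entry with no fallible growth, so it is only safe once capacity has been secured up front. A short sketch under the assumption that the public mozilla::HashMap exposes matching reserve() and putNewInfallible() wrappers; the FillSquares helper is illustrative.

  // Illustrative sketch, not part of HashTable.h: reserve first, then use the
  // infallible insertion for keys known to be new.
  #include <cstdint>
  #include "mozilla/HashTable.h"

  static bool FillSquares(mozilla::HashMap<uint32_t, uint32_t>& aMap,
                          uint32_t aCount) {
    if (!aMap.reserve(aCount)) {
      return false;                     // the only fallible step
    }
    for (uint32_t i = 0; i < aCount; ++i) {
      aMap.putNewInfallible(i, i * i);  // distinct keys, capacity guaranteed
    }
    return true;
  }
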
2227 // Note: |aLookup| may alias arguments in |aArgs|, so this function must take
2228 // care not to use |aLookup| after moving |aArgs|.
2229 template <typename... Args>
2230 [[nodiscard]] bool putNew(const Lookup& aLookup, Args&&... aArgs) {
2231    MOZ_ASSERT(!lookup(aLookup).found());
2232 ReentrancyGuard g(*this);
2233 if (!this->checkSimulatedOOM()) {
2234 return false;
2235 }
2236 HashNumber inputHash;
2237 if (!EnsureHash<HashPolicy>(aLookup, &inputHash)) {
2238 return false;
2239 }
2240 HashNumber keyHash = prepareHash(inputHash);
2241 if (rehashIfOverloaded() == RehashFailed) {
2242 return false;
2243 }
2244 putNewInfallibleInternal(keyHash, std::forward<Args>(aArgs)...);
2245 return true;
2246 }
2247
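
putNew() is the fallible counterpart: it checks for simulated OOM, computes and prepares the hash, rehashes if overloaded, and only then inserts, asserting that the key is not already present. A sketch through the public mozilla::HashMap wrapper, assuming its putNew() behaves as in current mfbt; RecordId is an illustrative helper, and put() would be the choice if the key might already exist.

  // Illustrative sketch, not part of HashTable.h: putNew() for keys the
  // caller guarantees are absent.
  #include <cstdint>
  #include "mozilla/HashTable.h"

  static bool RecordId(mozilla::HashMap<uint32_t, uint32_t>& aMap,
                       uint32_t aId, uint32_t aValue) {
    // Returns false on OOM or a failed rehash, true once the entry is live.
    return aMap.putNew(aId, aValue);
  }
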
2248  // Note: |aLookup| may reference pieces of arguments in |aArgs|, so this
2249 // function must take care not to use |aLookup| after moving |aArgs|.
2250 template <typename... Args>
2251 [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup,
2252 Args&&... aArgs) {
2253 // Check for error from ensureHash() here.
2254 if (!aPtr.isLive()) {
2255 return false;
2256 }
2257#ifdef DEBUG
2258 aPtr.mGeneration = generation();
2259 aPtr.mMutationCount = mMutationCount;
2260#endif
2261 if (mTable) {
2262 ReentrancyGuard g(*this);
2263 // Check that aLookup has not been destroyed.
2264      MOZ_ASSERT(prepareHash(HashPolicy::hash(aLookup)) == aPtr.mKeyHash);
2265 aPtr.mSlot = lookup<ForAdd>(aLookup, aPtr.mKeyHash);
2266 if (aPtr.found()) {
2267 return true;
2268 }
2269 } else {
2270 // Clear aPtr so it's invalid; add() will allocate storage and redo the
2271 // lookup.
2272 aPtr.mSlot = Slot(nullptr, nullptr);
2273 }
2274 return add(aPtr, std::forward<Args>(aArgs)...);
2275 }
2276
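
relookupOrAdd() exists because an AddPtr from lookupForAdd() is invalidated by any table mutation that happens before add() (the DEBUG generation and mutation counters above catch exactly that); it redoes the lookup with the cached hash and then falls through to add(). A sketch of the usual pattern, assuming a matching relookupOrAdd(AddPtr&, key, value) wrapper on the public mozilla::HashMap; the EnsureEntry helper and its value-producing callback are hypothetical.

  // Illustrative sketch, not part of HashTable.h: build the value fallibly
  // between the initial lookup and the insertion, then relookupOrAdd().
  #include <cstdint>
  #include "mozilla/HashTable.h"

  static bool EnsureEntry(mozilla::HashMap<uint32_t, uint32_t>& aMap,
                          uint32_t aKey, bool (*aMakeValue)(uint32_t*)) {
    auto p = aMap.lookupForAdd(aKey);
    if (p) {
      return true;                // already present
    }
    uint32_t value;
    if (!aMakeValue(&value)) {    // may allocate or even touch aMap itself
      return false;
    }
    // The table may have changed since lookupForAdd(); redo the lookup using
    // the cached hash, then add.
    return aMap.relookupOrAdd(p, aKey, value);
  }
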
2277 void remove(Ptr aPtr) {
2278    MOZ_ASSERT(mTable);
2279 ReentrancyGuard g(*this);
2280    MOZ_ASSERT(aPtr.found());
2281    MOZ_ASSERT(aPtr.mGeneration == generation());
2282 remove(aPtr.mSlot);
2283 shrinkIfUnderloaded();
2284 }
2285
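
remove() requires a Ptr that was found against the current generation; it clears the slot and may shrink the table afterwards. A sketch of the lookup-then-remove pattern, assuming the public mozilla::HashMap exposes lookup() and a remove(Ptr) overload as in current mfbt; ForgetId is an illustrative helper.

  // Illustrative sketch, not part of HashTable.h: remove an entry via the Ptr
  // returned by lookup(); the table may shrink once the entry is gone.
  #include <cstdint>
  #include "mozilla/HashTable.h"

  static bool ForgetId(mozilla::HashMap<uint32_t, uint32_t>& aMap,
                       uint32_t aId) {
    if (auto p = aMap.lookup(aId)) {
      aMap.remove(p);
      return true;
    }
    return false;  // nothing to remove
  }
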
2286 void rekeyWithoutRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) {
2287    MOZ_ASSERT(mTable);
2288 ReentrancyGuard g(*this);
2289    MOZ_ASSERT(aPtr.found());
2290    MOZ_ASSERT(aPtr.mGeneration == generation());
2291 typename HashTableEntry<T>::NonConstT t(std::move(*aPtr));
2292 HashPolicy::setKey(t, const_cast<Key&>(aKey));
2293 remove(aPtr.mSlot);
2294 HashNumber keyHash = prepareHash(HashPolicy::hash(aLookup));
2295 putNewInfallibleInternal(keyHash, std::move(t));
2296 }
2297
2298 void rekeyAndMaybeRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) {
2299 rekeyWithoutRehash(aPtr, aLookup, aKey);
2300 infallibleRehashIfOverloaded();
2301 }
2302};
2303
2304} // namespace detail
2305} // namespace mozilla
2306
2307#endif /* mozilla_HashTable_h */