File: /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/HashTable.h
Warning: line 1730, column 61: The result of right shift is undefined because the right operand '32' is not smaller than 32, the capacity of 'HashNumber'
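The flagged expression shifts a 32-bit HashNumber by a count that can reach 32, and in C++ a shift by an amount greater than or equal to the width of the left operand is undefined behavior. A minimal sketch of the problem and of a typical guard, assuming a 32-bit HashNumber as the warning states (this snippet is not taken from HashTable.h):

    #include <cstdint>

    using HashNumber = uint32_t;  // assumption: 32-bit hash, per the warning text

    // Undefined behavior when shift == 32, which is what the analyzer reports.
    HashNumber badShift(HashNumber h, uint32_t shift) { return h >> shift; }

    // Guarded version: treat a full-width shift as "all bits shifted out".
    HashNumber safeShift(HashNumber h, uint32_t shift) {
      return shift < 32 ? (h >> shift) : 0;
    }

The listing below is the source file the analyzer was processing when it reached the flagged header.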
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "HeapSnapshot.h"

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/gzip_stream.h>
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>

#include "js/Array.h"         // JS::NewArrayObject
#include "js/ColumnNumber.h"  // JS::LimitedColumnNumberOneOrigin, JS::TaggedColumnNumberOneOrigin
#include "js/Debug.h"
#include "js/PropertyAndElement.h"  // JS_DefineProperty
#include "js/TypeDecls.h"
#include "js/UbiNodeBreadthFirst.h"
#include "js/UbiNodeCensus.h"
#include "js/UbiNodeDominatorTree.h"
#include "js/UbiNodeShortestPaths.h"
#include "mozilla/Attributes.h"
#include "mozilla/CycleCollectedJSContext.h"
#include "mozilla/devtools/AutoMemMap.h"
#include "mozilla/devtools/CoreDump.pb.h"
#include "mozilla/devtools/DeserializedNode.h"
#include "mozilla/devtools/DominatorTree.h"
#include "mozilla/devtools/FileDescriptorOutputStream.h"
#include "mozilla/devtools/HeapSnapshotTempFileHelperChild.h"
#include "mozilla/devtools/ZeroCopyNSIOutputStream.h"
#include "mozilla/dom/ChromeUtils.h"
#include "mozilla/dom/ContentChild.h"
#include "mozilla/dom/HeapSnapshotBinding.h"
#include "mozilla/RangedPtr.h"
#include "mozilla/Telemetry.h"
#include "mozilla/Unused.h"

#include "jsapi.h"
#include "jsfriendapi.h"
#include "js/GCVector.h"
#include "js/MapAndSet.h"
#include "js/Object.h"  // JS::GetCompartment
#include "nsComponentManagerUtils.h"  // do_CreateInstance
#include "nsCycleCollectionParticipant.h"
#include "nsCRTGlue.h"
#include "nsIFile.h"
#include "nsIOutputStream.h"
#include "nsISupportsImpl.h"
#include "nsNetUtil.h"
#include "nsPrintfCString.h"
#include "prerror.h"
#include "prio.h"
#include "prtypes.h"
#include "SpecialSystemDirectory.h"

namespace mozilla {
namespace devtools {

using namespace JS;
using namespace dom;

using ::google::protobuf::io::ArrayInputStream;
using ::google::protobuf::io::CodedInputStream;
using ::google::protobuf::io::GzipInputStream;
using ::google::protobuf::io::ZeroCopyInputStream;

using JS::ubi::AtomOrTwoByteChars;
using JS::ubi::ShortestPaths;

MallocSizeOf GetCurrentThreadDebuggerMallocSizeOf() {
  auto ccjscx = CycleCollectedJSContext::Get();
  MOZ_ASSERT(ccjscx);
  auto cx = ccjscx->Context();
  MOZ_ASSERT(cx);
  auto mallocSizeOf = JS::dbg::GetDebuggerMallocSizeOf(cx);
  MOZ_ASSERT(mallocSizeOf);
  return mallocSizeOf;
}

/*** Cycle Collection Boilerplate *********************************************/

NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(HeapSnapshot, mParent)

NS_IMPL_CYCLE_COLLECTING_ADDREF(HeapSnapshot)
NS_IMPL_CYCLE_COLLECTING_RELEASE(HeapSnapshot)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(HeapSnapshot)
  NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
  NS_INTERFACE_MAP_ENTRY(nsISupports)
NS_INTERFACE_MAP_END

/* virtual */
JSObject* HeapSnapshot::WrapObject(JSContext* aCx,
                                   JS::Handle<JSObject*> aGivenProto) {
  return HeapSnapshot_Binding::Wrap(aCx, this, aGivenProto);
}

/*** Reading Heap Snapshots ***************************************************/

/* static */
already_AddRefed<HeapSnapshot> HeapSnapshot::Create(JSContext* cx,
                                                    GlobalObject& global,
                                                    const uint8_t* buffer,
                                                    uint32_t size,
                                                    ErrorResult& rv) {
  RefPtr<HeapSnapshot> snapshot = new HeapSnapshot(cx, global.GetAsSupports());
  if (!snapshot->init(cx, buffer, size)) {
    rv.Throw(NS_ERROR_UNEXPECTED);
    return nullptr;
  }
  return snapshot.forget();
}

template <typename MessageType>
static bool parseMessage(ZeroCopyInputStream& stream, uint32_t sizeOfMessage,
                         MessageType& message) {
  // We need to create a new `CodedInputStream` for each message so that the
  // 64MB limit is applied per-message rather than to the whole stream.
  CodedInputStream codedStream(&stream);

  // The protobuf message nesting that core dumps exhibit is dominated by
  // allocation stacks' frames. In the most deeply nested case, each frame has
  // two messages: a StackFrame message and a StackFrame::Data message. These
  // frames are on top of a small constant of other messages. There are a
  // MAX_STACK_DEPTH number of frames, so we multiply this by 3 to make room for
  // the two messages per frame plus some head room for the constant number of
  // non-dominating messages.
  codedStream.SetRecursionLimit(HeapSnapshot::MAX_STACK_DEPTH * 3);

  auto limit = codedStream.PushLimit(sizeOfMessage);
  if (NS_WARN_IF(!message.ParseFromCodedStream(&codedStream)) ||
      NS_WARN_IF(!codedStream.ConsumedEntireMessage()) ||
      NS_WARN_IF(codedStream.BytesUntilLimit() != 0)) {
    return false;
  }

  codedStream.PopLimit(limit);
  return true;
}

template <typename CharT, typename InternedStringSet>
struct GetOrInternStringMatcher {
  InternedStringSet& internedStrings;

  explicit GetOrInternStringMatcher(InternedStringSet& strings)
      : internedStrings(strings) {}

  const CharT* operator()(const std::string* str) {
    MOZ_ASSERT(str);
    size_t length = str->length() / sizeof(CharT);
    auto tempString = reinterpret_cast<const CharT*>(str->data());

    UniqueFreePtr<CharT[]> owned(NS_xstrndup(tempString, length));
    if (!internedStrings.append(std::move(owned))) return nullptr;

    return internedStrings.back().get();
  }

  const CharT* operator()(uint64_t ref) {
    if (MOZ_LIKELY(ref < internedStrings.length())) {
      auto& string = internedStrings[ref];
      MOZ_ASSERT(string);
      return string.get();
    }

    return nullptr;
  }
};

template <
    // Either char or char16_t.
    typename CharT,
    // A reference to either `internedOneByteStrings` or
    // `internedTwoByteStrings` if CharT is char or char16_t respectively.
    typename InternedStringSet>
const CharT* HeapSnapshot::getOrInternString(
    InternedStringSet& internedStrings, Maybe<StringOrRef>& maybeStrOrRef) {
  // Incomplete message: has neither a string nor a reference to an already
  // interned string.
  if (MOZ_UNLIKELY(maybeStrOrRef.isNothing())) return nullptr;

  GetOrInternStringMatcher<CharT, InternedStringSet> m(internedStrings);
  return maybeStrOrRef->match(m);
}

// Get a de-duplicated string as a Maybe<StringOrRef> from the given `msg`.
#define GET_STRING_OR_REF_WITH_PROP_NAMES(msg, strPropertyName,               \
                                          refPropertyName)                    \
  (msg.has_##refPropertyName() ? Some(StringOrRef(msg.refPropertyName()))     \
   : msg.has_##strPropertyName() ? Some(StringOrRef(&msg.strPropertyName()))  \
                                 : Nothing())

#define GET_STRING_OR_REF(msg, property)                               \
  (msg.has_##property##ref() ? Some(StringOrRef(msg.property##ref()))  \
   : msg.has_##property() ? Some(StringOrRef(&msg.property()))         \
                          : Nothing())
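
// Illustrative expansion (not in the original source): for the `source`
// property of a StackFrame::Data message, GET_STRING_OR_REF(data, source)
// expands to
//
//   (data.has_sourceref() ? Some(StringOrRef(data.sourceref()))
//    : data.has_source()  ? Some(StringOrRef(&data.source()))
//                         : Nothing())
//
// i.e. a back-reference into the interned-string table takes precedence over
// an inline copy of the string, and an absent property yields Nothing().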

bool HeapSnapshot::saveNode(const protobuf::Node& node,
                            NodeIdSet& edgeReferents) {
  // NB: de-duplicated string properties must be read back and interned in the
  // same order here as they are written and serialized in
  // `CoreDumpWriter::writeNode` or else indices in references to already
  // serialized strings will be off.

  if (NS_WARN_IF(!node.has_id())) return false;
  NodeId id = node.id();

  // NodeIds are derived from pointers (at most 48 bits) and we rely on them
  // fitting into JS numbers (IEEE 754 doubles, can precisely store 53 bit
  // integers) despite storing them on disk as 64 bit integers.
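  // (Illustrative arithmetic, not part of the original comment: a 48-bit id is
  //  at most 2^48 - 1 = 281474976710655, well below 2^53 = 9007199254740992,
  //  up to which a double represents every integer exactly.)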
  if (NS_WARN_IF(!JS::Value::isNumberRepresentable(id))) return false;

  // Should only deserialize each node once.
  if (NS_WARN_IF(nodes.has(id))) return false;

  if (NS_WARN_IF(!JS::ubi::Uint32IsValidCoarseType(node.coarsetype())))
    return false;
  auto coarseType = JS::ubi::Uint32ToCoarseType(node.coarsetype());

  Maybe<StringOrRef> typeNameOrRef =
      GET_STRING_OR_REF_WITH_PROP_NAMES(node, typename_, typenameref);
  auto typeName =
      getOrInternString<char16_t>(internedTwoByteStrings, typeNameOrRef);
  if (NS_WARN_IF(!typeName)) return false;

  if (NS_WARN_IF(!node.has_size())) return false;
  uint64_t size = node.size();

  auto edgesLength = node.edges_size();
  DeserializedNode::EdgeVector edges;
  if (NS_WARN_IF(!edges.reserve(edgesLength))) return false;
  for (decltype(edgesLength) i = 0; i < edgesLength; i++) {
    auto& protoEdge = node.edges(i);

    if (NS_WARN_IF(!protoEdge.has_referent())) return false;
    NodeId referent = protoEdge.referent();

    if (NS_WARN_IF(!edgeReferents.put(referent))) return false;

    const char16_t* edgeName = nullptr;
    if (protoEdge.EdgeNameOrRef_case() !=
        protobuf::Edge::EDGENAMEORREF_NOT_SET) {
      Maybe<StringOrRef> edgeNameOrRef = GET_STRING_OR_REF(protoEdge, name);
      edgeName =
          getOrInternString<char16_t>(internedTwoByteStrings, edgeNameOrRef);
      if (NS_WARN_IF(!edgeName)) return false;
    }

    edges.infallibleAppend(DeserializedEdge(referent, edgeName));
  }

  Maybe<StackFrameId> allocationStack;
  if (node.has_allocationstack()) {
    StackFrameId id = 0;
    if (NS_WARN_IF(!saveStackFrame(node.allocationstack(), id))) return false;
    allocationStack.emplace(id);
  }
  MOZ_ASSERT(allocationStack.isSome() == node.has_allocationstack());

  const char* jsObjectClassName = nullptr;
  if (node.JSObjectClassNameOrRef_case() !=
      protobuf::Node::JSOBJECTCLASSNAMEORREF_NOT_SET) {
    Maybe<StringOrRef> clsNameOrRef =
        GET_STRING_OR_REF(node, jsobjectclassname);
    jsObjectClassName =
        getOrInternString<char>(internedOneByteStrings, clsNameOrRef);
    if (NS_WARN_IF(!jsObjectClassName)) return false;
  }

  const char* scriptFilename = nullptr;
  if (node.ScriptFilenameOrRef_case() !=
      protobuf::Node::SCRIPTFILENAMEORREF_NOT_SET) {
    Maybe<StringOrRef> scriptFilenameOrRef =
        GET_STRING_OR_REF(node, scriptfilename);
    scriptFilename =
        getOrInternString<char>(internedOneByteStrings, scriptFilenameOrRef);
    if (NS_WARN_IF(!scriptFilename)) return false;
  }

  const char16_t* descriptiveTypeName = nullptr;
  if (node.descriptiveTypeNameOrRef_case() !=
      protobuf::Node::DESCRIPTIVETYPENAMEORREF_NOT_SET) {
    Maybe<StringOrRef> descriptiveTypeNameOrRef =
        GET_STRING_OR_REF(node, descriptivetypename);
    descriptiveTypeName = getOrInternString<char16_t>(internedTwoByteStrings,
                                                      descriptiveTypeNameOrRef);
    if (NS_WARN_IF(!descriptiveTypeName)) return false;
  }

  if (NS_WARN_IF(!nodes.putNew(
          id, DeserializedNode(id, coarseType, typeName, size, std::move(edges),
                               allocationStack, jsObjectClassName,
                               scriptFilename, descriptiveTypeName, *this)))) {
    return false;
  };

  return true;
}

bool HeapSnapshot::saveStackFrame(const protobuf::StackFrame& frame,
                                  StackFrameId& outFrameId) {
  // NB: de-duplicated string properties must be read in the same order here as
  // they are written in `CoreDumpWriter::getProtobufStackFrame` or else indices
  // in references to already serialized strings will be off.

  if (frame.has_ref()) {
    // We should only get a reference to the previous frame if we have already
    // seen the previous frame.
    if (!frames.has(frame.ref())) return false;

    outFrameId = frame.ref();
    return true;
  }

  // Incomplete message.
  if (!frame.has_data()) return false;

  auto data = frame.data();

  if (!data.has_id()) return false;
  StackFrameId id = data.id();

  // This should be the first and only time we see this frame.
  if (frames.has(id)) return false;

  if (!data.has_line()) return false;
  uint32_t line = data.line();

  if (!data.has_column()) return false;
  JS::TaggedColumnNumberOneOrigin column(
      JS::LimitedColumnNumberOneOrigin(data.column()));

  if (!data.has_issystem()) return false;
  bool isSystem = data.issystem();

  if (!data.has_isselfhosted()) return false;
  bool isSelfHosted = data.isselfhosted();

  Maybe<StringOrRef> sourceOrRef = GET_STRING_OR_REF(data, source);
  auto source =
      getOrInternString<char16_t>(internedTwoByteStrings, sourceOrRef);
  if (!source) return false;

  const char16_t* functionDisplayName = nullptr;
  if (data.FunctionDisplayNameOrRef_case() !=
      protobuf::StackFrame_Data::FUNCTIONDISPLAYNAMEORREF_NOT_SET) {
    Maybe<StringOrRef> nameOrRef = GET_STRING_OR_REF(data, functiondisplayname);
    functionDisplayName =
        getOrInternString<char16_t>(internedTwoByteStrings, nameOrRef);
    if (!functionDisplayName) return false;
  }

  Maybe<StackFrameId> parent;
  if (data.has_parent()) {
    StackFrameId parentId = 0;
    if (!saveStackFrame(data.parent(), parentId)) return false;
    parent = Some(parentId);
  }

  if (!frames.putNew(id,
                     DeserializedStackFrame(id, parent, line, column, source,
                                            functionDisplayName, isSystem,
                                            isSelfHosted, *this))) {
    return false;
  }

  outFrameId = id;
  return true;
}

#undef GET_STRING_OR_REF_WITH_PROP_NAMES
#undef GET_STRING_OR_REF

// Because protobuf messages aren't self-delimiting, we serialize each message
// preceded by its size in bytes. When deserializing, we read this size and then
// limit reading from the stream to the given byte size. If we didn't, then the
// first message would consume the entire stream.
static bool readSizeOfNextMessage(ZeroCopyInputStream& stream,
                                  uint32_t* sizep) {
  MOZ_ASSERT(sizep);
  CodedInputStream codedStream(&stream);
  return codedStream.ReadVarint32(sizep) && *sizep > 0;
}
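
// Sketch of the matching writer side (illustrative only; the real
// serialization lives in CoreDumpWriter, not in this file). Each message is
// written as a varint byte count followed by the message body, which is what
// readSizeOfNextMessage() and parseMessage() above consume:
//
//   template <typename MessageType>
//   static bool writeSizedMessage(
//       ::google::protobuf::io::ZeroCopyOutputStream& stream,
//       const MessageType& message) {
//     ::google::protobuf::io::CodedOutputStream codedStream(&stream);
//     codedStream.WriteVarint32(static_cast<uint32_t>(message.ByteSizeLong()));
//     return message.SerializeToCodedStream(&codedStream) &&
//            !codedStream.HadError();
//   }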

bool HeapSnapshot::init(JSContext* cx, const uint8_t* buffer, uint32_t size) {
  ArrayInputStream stream(buffer, size);
  GzipInputStream gzipStream(&stream);
  uint32_t sizeOfMessage = 0;

  // First is the metadata.

  protobuf::Metadata metadata;
  if (NS_WARN_IF(!readSizeOfNextMessage(gzipStream, &sizeOfMessage)))
    return false;
  if (!parseMessage(gzipStream, sizeOfMessage, metadata)) return false;
  if (metadata.has_timestamp()) timestamp.emplace(metadata.timestamp());

  // Next is the root node.

  protobuf::Node root;
  if (NS_WARN_IF(!readSizeOfNextMessage(gzipStream, &sizeOfMessage)))
    return false;
  if (!parseMessage(gzipStream, sizeOfMessage, root)) return false;

  // Although the id is optional in the protobuf format for future proofing, we
  // can't currently do anything without it.
  if (NS_WARN_IF(!root.has_id())) return false;
  rootId = root.id();

  // The set of all node ids we've found edges pointing to.
  NodeIdSet edgeReferents(cx);

  if (NS_WARN_IF(!saveNode(root, edgeReferents))) return false;

  // Finally, the rest of the nodes in the core dump.

  // Test for the end of the stream. The protobuf library gives no way to tell
  // the difference between an underlying read error and the stream being
  // done. All we can do is attempt to read the size of the next message and
  // extrapolate guestimations from the result of that operation.
  while (readSizeOfNextMessage(gzipStream, &sizeOfMessage)) {
    protobuf::Node node;
    if (!parseMessage(gzipStream, sizeOfMessage, node)) return false;
    if (NS_WARN_IF(!saveNode(node, edgeReferents))) return false;
  }

  // Check the set of node ids referred to by edges we found and ensure that we
  // have the node corresponding to each id. If we don't have all of them, it is
  // unsafe to perform analyses of this heap snapshot.
  for (auto iter = edgeReferents.iter(); !iter.done(); iter.next()) {
    if (NS_WARN_IF(!nodes.has(iter.get()))) return false;
  }

  return true;
}

/*** Heap Snapshot Analyses ***************************************************/

void HeapSnapshot::TakeCensus(JSContext* cx, JS::Handle<JSObject*> options,
                              JS::MutableHandle<JS::Value> rval,
                              ErrorResult& rv) {
  JS::ubi::Census census(cx);

  JS::ubi::CountTypePtr rootType;
  if (NS_WARN_IF(!JS::ubi::ParseCensusOptions(cx, census, options, rootType))) {
    rv.Throw(NS_ERROR_UNEXPECTED);
    return;
  }

  JS::ubi::RootedCount rootCount(cx, rootType->makeCount());
  if (NS_WARN_IF(!rootCount)) {
    rv.Throw(NS_ERROR_OUT_OF_MEMORY);
    return;
  }

  JS::ubi::CensusHandler handler(census, rootCount,
                                 GetCurrentThreadDebuggerMallocSizeOf());

  {
    JS::AutoCheckCannotGC nogc;

    JS::ubi::CensusTraversal traversal(cx, handler, nogc);

    if (NS_WARN_IF(!traversal.addStart(getRoot()))) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return;
    }

    if (NS_WARN_IF(!traversal.traverse())) {
      rv.Throw(NS_ERROR_UNEXPECTED);
      return;
    }
  }

  if (NS_WARN_IF(!handler.report(cx, rval))) {
    rv.Throw(NS_ERROR_OUT_OF_MEMORY);
    return;
  }
}

void HeapSnapshot::DescribeNode(JSContext* cx, JS::Handle<JSObject*> breakdown,
                                uint64_t nodeId,
                                JS::MutableHandle<JS::Value> rval,
                                ErrorResult& rv) {
  MOZ_ASSERT(breakdown);
  JS::Rooted<JS::Value> breakdownVal(cx, JS::ObjectValue(*breakdown));
  JS::Rooted<JS::GCVector<JSLinearString*>> seen(cx, cx);
  JS::ubi::CountTypePtr rootType =
      JS::ubi::ParseBreakdown(cx, breakdownVal, &seen);
  if (NS_WARN_IF(!rootType)) {
    rv.Throw(NS_ERROR_UNEXPECTED);
    return;
  }

  JS::ubi::RootedCount rootCount(cx, rootType->makeCount());
  if (NS_WARN_IF(!rootCount)) {
    rv.Throw(NS_ERROR_OUT_OF_MEMORY);
    return;
  }

  JS::ubi::Node::Id id(nodeId);
  Maybe<JS::ubi::Node> node = getNodeById(id);
  if (NS_WARN_IF(node.isNothing())) {
    rv.Throw(NS_ERROR_INVALID_ARG);
    return;
  }

  MallocSizeOf mallocSizeOf = GetCurrentThreadDebuggerMallocSizeOf();
  if (NS_WARN_IF(!rootCount->count(mallocSizeOf, *node))) {
    rv.Throw(NS_ERROR_OUT_OF_MEMORY);
    return;
  }

  if (NS_WARN_IF(!rootCount->report(cx, rval))) {
    rv.Throw(NS_ERROR_OUT_OF_MEMORY);
    return;
  }
}

already_AddRefed<DominatorTree> HeapSnapshot::ComputeDominatorTree(
    ErrorResult& rv) {
  Maybe<JS::ubi::DominatorTree> maybeTree;
  {
    auto ccjscx = CycleCollectedJSContext::Get();
    MOZ_ASSERT(ccjscx);
    auto cx = ccjscx->Context();
    MOZ_ASSERT(cx);
    JS::AutoCheckCannotGC nogc(cx);
    maybeTree = JS::ubi::DominatorTree::Create(cx, nogc, getRoot());
  }

  if (NS_WARN_IF(maybeTree.isNothing())) {
    rv.Throw(NS_ERROR_OUT_OF_MEMORY);
    return nullptr;
  }

  return MakeAndAddRef<DominatorTree>(std::move(*maybeTree), this, mParent);
}

void HeapSnapshot::ComputeShortestPaths(JSContext* cx, uint64_t start,
                                        const Sequence<uint64_t>& targets,
                                        uint64_t maxNumPaths,
                                        JS::MutableHandle<JSObject*> results,
                                        ErrorResult& rv) {
  // First ensure that our inputs are valid.

  if (NS_WARN_IF(maxNumPaths == 0)) {
    rv.Throw(NS_ERROR_INVALID_ARG);
    return;
  }

  Maybe<JS::ubi::Node> startNode = getNodeById(start);
  if (NS_WARN_IF(startNode.isNothing())) {
    rv.Throw(NS_ERROR_INVALID_ARG);
    return;
  }

  if (NS_WARN_IF(targets.Length() == 0)) {
    rv.Throw(NS_ERROR_INVALID_ARG);
    return;
  }

  // Aggregate the targets into a set and make sure that they exist in the heap
  // snapshot.

  JS::ubi::NodeSet targetsSet;

  for (const auto& target : targets) {
    Maybe<JS::ubi::Node> targetNode = getNodeById(target);
    if (NS_WARN_IF(targetNode.isNothing())) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return;
    }

    if (NS_WARN_IF(!targetsSet.put(*targetNode))) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return;
    }
  }

  // Walk the heap graph and find the shortest paths.

  Maybe<ShortestPaths> maybeShortestPaths;
  {
    JS::AutoCheckCannotGC nogc(cx);
    maybeShortestPaths = ShortestPaths::Create(
        cx, nogc, maxNumPaths, *startNode, std::move(targetsSet));
  }

  if (NS_WARN_IF(maybeShortestPaths.isNothing())) {
    rv.Throw(NS_ERROR_OUT_OF_MEMORY);
    return;
  }

  auto& shortestPaths = *maybeShortestPaths;

  // Convert the results into a Map object mapping target node IDs to arrays of
  // paths found.

  JS::Rooted<JSObject*> resultsMap(cx, JS::NewMapObject(cx));
  if (NS_WARN_IF(!resultsMap)) {
    rv.Throw(NS_ERROR_OUT_OF_MEMORY);
    return;
  }

  for (auto iter = shortestPaths.targetIter(); !iter.done(); iter.next()) {
    JS::Rooted<JS::Value> key(cx, JS::NumberValue(iter.get().identifier()));
    JS::RootedVector<JS::Value> paths(cx);

    bool ok = shortestPaths.forEachPath(iter.get(), [&](JS::ubi::Path& path) {
      JS::RootedVector<JS::Value> pathValues(cx);

      for (JS::ubi::BackEdge* edge : path) {
        JS::Rooted<JSObject*> pathPart(cx, JS_NewPlainObject(cx));
        if (!pathPart) {
          return false;
        }

        JS::Rooted<JS::Value> predecessor(
            cx, NumberValue(edge->predecessor().identifier()));
        if (!JS_DefineProperty(cx, pathPart, "predecessor", predecessor,
                               JSPROP_ENUMERATE)) {
          return false;
        }

        JS::Rooted<JS::Value> edgeNameVal(cx, NullValue());
        if (edge->name()) {
          JS::Rooted<JSString*> edgeName(
              cx, JS_AtomizeUCString(cx, edge->name().get()));
          if (!edgeName) {
            return false;
          }
          edgeNameVal = StringValue(edgeName);
        }

        if (!JS_DefineProperty(cx, pathPart, "edge", edgeNameVal,
                               JSPROP_ENUMERATE)) {
          return false;
        }

        if (!pathValues.append(ObjectValue(*pathPart))) {
          return false;
        }
      }

      JS::Rooted<JSObject*> pathObj(cx, JS::NewArrayObject(cx, pathValues));
      return pathObj && paths.append(ObjectValue(*pathObj));
    });

    if (NS_WARN_IF(!ok)) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return;
    }

    JS::Rooted<JSObject*> pathsArray(cx, JS::NewArrayObject(cx, paths));
    if (NS_WARN_IF(!pathsArray)) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return;
    }

    JS::Rooted<JS::Value> pathsVal(cx, ObjectValue(*pathsArray));
    if (NS_WARN_IF(!JS::MapSet(cx, resultsMap, key, pathsVal))) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return;
    }
  }

  results.set(resultsMap);
}
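
// Illustrative result shape (not emitted verbatim anywhere in this file): for
// a start node S, a single target T, and maxNumPaths = 2, `results` is a Map
// along the lines of
//
//   Map { T => [ [ { predecessor: S, edge: "edgeName" } ],
//                [ { predecessor: S, edge: null }, ... ] ] }
//
// where each inner array is one path, and each path part is a plain object
// with a numeric `predecessor` node id and a string-or-null `edge` name.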

/*** Saving Heap Snapshots ****************************************************/

// If we are only taking a snapshot of the heap affected by the given set of
// globals, find the set of compartments the globals are allocated
// within. Returns false on OOM failure.
static bool PopulateCompartmentsWithGlobals(
    CompartmentSet& compartments, JS::HandleVector<JSObject*> globals) {
  unsigned length = globals.length();
  for (unsigned i = 0; i < length; i++) {
    if (!compartments.put(JS::GetCompartment(globals[i]))) return false;
  }

  return true;
}

// Add the given set of globals as explicit roots in the given roots
// list. Returns false on OOM failure.
static bool AddGlobalsAsRoots(JS::HandleVector<JSObject*> globals,
                              ubi::RootList& roots) {
  unsigned length = globals.length();
  for (unsigned i = 0; i < length; i++) {
    if (!roots.addRoot(ubi::Node(globals[i].get()), u"heap snapshot global")) {
      return false;
    }
  }
  return true;
}

// Choose roots and limits for a traversal, given `boundaries`. Set `roots` to
// the set of nodes within the boundaries that are referred to by nodes
// outside. If `boundaries` does not include all JS compartments, initialize
// `compartments` to the set of included compartments; otherwise, leave
// `compartments` uninitialized. (You can use compartments.initialized() to
// check.)
//
// If `boundaries` is incoherent, or we encounter an error while trying to
// handle it, or we run out of memory, set `rv` appropriately and return
// `false`.
//
// Return value is a pair of the status and an AutoCheckCannotGC token,
// forwarded from ubi::RootList::init(), to ensure that the caller does
// not GC while the RootList is live and initialized.
static std::pair<bool, AutoCheckCannotGC> EstablishBoundaries(
    JSContext* cx, ErrorResult& rv, const HeapSnapshotBoundaries& boundaries,
    ubi::RootList& roots, CompartmentSet& compartments) {
  MOZ_ASSERT(!roots.initialized());
  MOZ_ASSERT(compartments.empty());

  bool foundBoundaryProperty = false;

  if (boundaries.mRuntime.WasPassed()) {
    foundBoundaryProperty = true;

    if (!boundaries.mRuntime.Value()) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return {false, AutoCheckCannotGC(cx)};
    }

    auto [ok, nogc] = roots.init();
    if (!ok) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return {false, nogc};
    }
  }

  if (boundaries.mDebugger.WasPassed()) {
    if (foundBoundaryProperty) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return {false, AutoCheckCannotGC(cx)};
    }
    foundBoundaryProperty = true;

    JSObject* dbgObj = boundaries.mDebugger.Value();
    if (!dbgObj || !dbg::IsDebugger(*dbgObj)) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return {false, AutoCheckCannotGC(cx)};
    }

    JS::RootedVector<JSObject*> globals(cx);
    if (!dbg::GetDebuggeeGlobals(cx, *dbgObj, &globals) ||
        !PopulateCompartmentsWithGlobals(compartments, globals) ||
        !roots.init(compartments).first || !AddGlobalsAsRoots(globals, roots)) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return {false, AutoCheckCannotGC(cx)};
    }
  }

  if (boundaries.mGlobals.WasPassed()) {
    if (foundBoundaryProperty) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return {false, AutoCheckCannotGC(cx)};
    }
    foundBoundaryProperty = true;

    uint32_t length = boundaries.mGlobals.Value().Length();
    if (length == 0) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return {false, AutoCheckCannotGC(cx)};
    }

    JS::RootedVector<JSObject*> globals(cx);
    for (uint32_t i = 0; i < length; i++) {
      JSObject* global = boundaries.mGlobals.Value().ElementAt(i);
      if (!JS_IsGlobalObject(global)) {
        rv.Throw(NS_ERROR_INVALID_ARG);
        return {false, AutoCheckCannotGC(cx)};
      }
      if (!globals.append(global)) {
        rv.Throw(NS_ERROR_OUT_OF_MEMORY);
        return {false, AutoCheckCannotGC(cx)};
      }
    }

    if (!PopulateCompartmentsWithGlobals(compartments, globals) ||
        !roots.init(compartments).first || !AddGlobalsAsRoots(globals, roots)) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return {false, AutoCheckCannotGC(cx)};
    }
  }
  AutoCheckCannotGC nogc(cx);

  if (!foundBoundaryProperty) {
    rv.Throw(NS_ERROR_INVALID_ARG);
    return {false, nogc};
  }

  MOZ_ASSERT(roots.initialized());
  return {true, nogc};
}
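
// Illustrative boundary values (assumed shapes, mirroring the three branches
// above): exactly one of the following properties may be passed, e.g.
//
//   { runtime: true }            // snapshot the entire runtime
//   { debugger: someDebugger }   // only that Debugger's debuggee globals
//   { globals: [g1, g2, ...] }   // only the listed global objects
//
// Passing none of them, or more than one, is rejected with
// NS_ERROR_INVALID_ARG.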
799 | ||||
800 | // A variant covering all the various two-byte strings that we can get from the | |||
801 | // ubi::Node API. | |||
802 | class TwoByteString | |||
803 | : public Variant<JSAtom*, const char16_t*, JS::ubi::EdgeName> { | |||
804 | using Base = Variant<JSAtom*, const char16_t*, JS::ubi::EdgeName>; | |||
805 | ||||
806 | struct CopyToBufferMatcher { | |||
807 | RangedPtr<char16_t> destination; | |||
808 | size_t maxLength; | |||
809 | ||||
810 | CopyToBufferMatcher(RangedPtr<char16_t> destination, size_t maxLength) | |||
811 | : destination(destination), maxLength(maxLength) {} | |||
812 | ||||
813 | size_t operator()(JS::ubi::EdgeName& ptr) { | |||
814 | return ptr ? operator()(ptr.get()) : 0; | |||
815 | } | |||
816 | ||||
817 | size_t operator()(JSAtom* atom) { | |||
818 | MOZ_ASSERT(atom)do { static_assert( mozilla::detail::AssertionConditionType< decltype(atom)>::isValid, "invalid assertion condition"); if ((__builtin_expect(!!(!(!!(atom))), 0))) { do { } while (false ); MOZ_ReportAssertionFailure("atom", "/var/lib/jenkins/workspace/firefox-scan-build/devtools/shared/heapsnapshot/HeapSnapshot.cpp" , 818); AnnotateMozCrashReason("MOZ_ASSERT" "(" "atom" ")"); do { *((volatile int*)__null) = 818; __attribute__((nomerge)) :: abort(); } while (false); } } while (false); | |||
819 | JS::ubi::AtomOrTwoByteChars s(atom); | |||
820 | return s.copyToBuffer(destination, maxLength); | |||
821 | } | |||
822 | ||||
823 | size_t operator()(const char16_t* chars) { | |||
824 | MOZ_ASSERT(chars); | |||
825 | JS::ubi::AtomOrTwoByteChars s(chars); | |||
826 | return s.copyToBuffer(destination, maxLength); | |||
827 | } | |||
828 | }; | |||
829 | ||||
830 | public: | |||
831 | template <typename T> | |||
832 | MOZ_IMPLICIT TwoByteString(T&& rhs) : Base(std::forward<T>(rhs)) {} | |||
833 | ||||
834 | template <typename T> | |||
835 | TwoByteString& operator=(T&& rhs) { | |||
836 | MOZ_ASSERT(this != &rhs, "self-move disallowed"); | |||
837 | this->~TwoByteString(); | |||
838 | new (this) TwoByteString(std::forward<T>(rhs)); | |||
839 | return *this; | |||
840 | } | |||
841 | ||||
842 | TwoByteString(const TwoByteString&) = delete; | |||
843 | TwoByteString& operator=(const TwoByteString&) = delete; | |||
844 | ||||
845 | // Rewrap the inner value of a JS::ubi::AtomOrTwoByteChars as a TwoByteString. | |||
846 | static TwoByteString from(JS::ubi::AtomOrTwoByteChars&& s) { | |||
847 | return s.match([](auto* a) { return TwoByteString(a); }); | |||
848 | } | |||
849 | ||||
850 | // Returns true if the given TwoByteString is non-null, false otherwise. | |||
851 | bool isNonNull() const { | |||
852 | return match([](auto& t) { return t != nullptr; }); | |||
853 | } | |||
854 | ||||
855 | // Return the length of the string, 0 if it is null. | |||
856 | size_t length() const { | |||
857 | return match( | |||
858 | [](JSAtom* atom) -> size_t { | |||
859 | MOZ_ASSERT(atom); | |||
860 | JS::ubi::AtomOrTwoByteChars s(atom); | |||
861 | return s.length(); | |||
862 | }, | |||
863 | [](const char16_t* chars) -> size_t { | |||
864 | MOZ_ASSERT(chars); | |||
865 | return NS_strlen(chars); | |||
866 | }, | |||
867 | [](const JS::ubi::EdgeName& ptr) -> size_t { | |||
868 | MOZ_ASSERT(ptr); | |||
869 | return NS_strlen(ptr.get()); | |||
870 | }); | |||
871 | } | |||
872 | ||||
873 | // Copy the contents of a TwoByteString into the provided buffer. The buffer | |||
874 | // is NOT null terminated. The number of characters written is returned. | |||
875 | size_t copyToBuffer(RangedPtr<char16_t> destination, size_t maxLength) { | |||
876 | CopyToBufferMatcher m(destination, maxLength); | |||
877 | return match(m); | |||
878 | } | |||
879 | ||||
880 | struct HashPolicy; | |||
881 | }; | |||
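// Illustrative usage sketch (hypothetical buffer and edge name; EdgeName is
// owning, so it must be moved into the variant):
//
//   char16_t buf[64];
//   TwoByteString name(std::move(edgeName));
//   size_t written = name.copyToBuffer(RangedPtr<char16_t>(buf, 64), 64);
//   // `written` <= 64 and the buffer is not null terminated.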
882 | ||||
883 | // A hashing policy for TwoByteString. | |||
884 | // | |||
885 | // Atoms are pointer hashed and use pointer equality, which means that we | |||
886 | // tolerate some duplication across atoms and the other two types of two-byte | |||
887 | // strings. In practice, we expect the amount of this duplication to be very low | |||
888 | // because each type is generally a different semantic thing in addition to | |||
889 | // having a slightly different representation. For example, the set of edge | |||
890 | names and the set of stack frames' source names naturally tend not to overlap | |||
891 | // very much if at all. | |||
892 | struct TwoByteString::HashPolicy { | |||
893 | using Lookup = TwoByteString; | |||
894 | ||||
895 | static js::HashNumber hash(const Lookup& l) { | |||
896 | return l.match( | |||
897 | [](const JSAtom* atom) { | |||
898 | return js::DefaultHasher<const JSAtom*>::hash(atom); | |||
899 | }, | |||
900 | [](const char16_t* chars) { | |||
901 | MOZ_ASSERT(chars); | |||
902 | auto length = NS_strlen(chars); | |||
903 | return HashString(chars, length); | |||
904 | }, | |||
905 | [](const JS::ubi::EdgeName& ptr) { | |||
906 | const char16_t* chars = ptr.get(); | |||
907 | MOZ_ASSERT(chars); | |||
908 | auto length = NS_strlen(chars); | |||
909 | return HashString(chars, length); | |||
910 | }); | |||
911 | } | |||
912 | ||||
913 | struct EqualityMatcher { | |||
914 | const TwoByteString& rhs; | |||
915 | explicit EqualityMatcher(const TwoByteString& rhs) : rhs(rhs) {} | |||
916 | ||||
917 | bool operator()(const JSAtom* atom) { | |||
918 | return rhs.is<JSAtom*>() && rhs.as<JSAtom*>() == atom; | |||
919 | } | |||
920 | ||||
921 | bool operator()(const char16_t* chars) { | |||
922 | MOZ_ASSERT(chars); | |||
923 | ||||
924 | const char16_t* rhsChars = nullptr; | |||
925 | if (rhs.is<const char16_t*>()) | |||
926 | rhsChars = rhs.as<const char16_t*>(); | |||
927 | else if (rhs.is<JS::ubi::EdgeName>()) | |||
928 | rhsChars = rhs.as<JS::ubi::EdgeName>().get(); | |||
929 | else | |||
930 | return false; | |||
931 | MOZ_ASSERT(rhsChars); | |||
932 | ||||
933 | auto length = NS_strlen(chars); | |||
934 | if (NS_strlen(rhsChars) != length) return false; | |||
935 | ||||
936 | return memcmp(chars, rhsChars, length * sizeof(char16_t)) == 0; | |||
937 | } | |||
938 | ||||
939 | bool operator()(const JS::ubi::EdgeName& ptr) { | |||
940 | MOZ_ASSERT(ptr); | |||
941 | return operator()(ptr.get()); | |||
942 | } | |||
943 | }; | |||
944 | ||||
945 | static bool match(const TwoByteString& k, const Lookup& l) { | |||
946 | EqualityMatcher eq(l); | |||
947 | return k.match(eq); | |||
948 | } | |||
949 | ||||
950 | static void rekey(TwoByteString& k, TwoByteString&& newKey) { | |||
951 | k = std::move(newKey); | |||
952 | } | |||
953 | }; | |||
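// Illustrative sketch of keying a hash map with this policy, assuming `cx` and
// a `useRef` callback that are not part of this file. This is essentially what
// StreamWriter::twoByteStringsAlreadySerialized does below:
//
//   js::HashMap<TwoByteString, uint64_t, TwoByteString::HashPolicy> seen(cx);
//   auto p = seen.lookupForAdd(str);
//   if (p) {
//     useRef(p->value());                          // already serialized
//   } else if (!seen.add(p, std::move(str), seen.count())) {
//     return false;                                // OOM
//   }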
954 | ||||
955 | // Returns whether `edge` should be included in a heap snapshot of | |||
956 | // `compartments`. The optional `policy` out-param is set to INCLUDE_EDGES | |||
957 | // if we want to include the referent's edges, or EXCLUDE_EDGES if we don't | |||
958 | // want to include them. | |||
959 | static bool ShouldIncludeEdge(JS::CompartmentSet* compartments, | |||
960 | const ubi::Node& origin, const ubi::Edge& edge, | |||
961 | CoreDumpWriter::EdgePolicy* policy = nullptr) { | |||
962 | if (policy) { | |||
963 | *policy = CoreDumpWriter::INCLUDE_EDGES; | |||
964 | } | |||
965 | ||||
966 | if (!compartments) { | |||
967 | // We aren't targeting a particular set of compartments, so serialize all | |||
968 | // the things! | |||
969 | return true; | |||
970 | } | |||
971 | ||||
972 | // We are targeting a particular set of compartments. If this node is in our | |||
973 | // target set, serialize it and all of its edges. If this node is _not_ in our | |||
974 | target set, we also serialize it under the assumption that it is a shared | |||
975 | // resource being used by something in our target compartments since we | |||
976 | // reached it by traversing the heap graph. However, we do not serialize its | |||
977 | // outgoing edges and we abandon further traversal from this node. | |||
978 | // | |||
979 | // If the node does not belong to any compartment, we also serialize its | |||
980 | // outgoing edges. This case is relevant for Shapes: they don't belong to a | |||
981 | // specific compartment and contain edges to parent/kids Shapes we want to | |||
982 | // include. Note that these Shapes may contain pointers into our target | |||
983 | // compartment (the Shape's getter/setter JSObjects). However, we do not | |||
984 | // serialize nodes in other compartments that are reachable from these | |||
985 | // non-compartment nodes. | |||
986 | ||||
987 | JS::Compartment* compartment = edge.referent.compartment(); | |||
988 | ||||
989 | if (!compartment || compartments->has(compartment)) { | |||
990 | return true; | |||
991 | } | |||
992 | ||||
993 | if (policy) { | |||
994 | *policy = CoreDumpWriter::EXCLUDE_EDGES; | |||
995 | } | |||
996 | ||||
997 | return !!origin.compartment(); | |||
998 | } | |||
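// Illustrative sketch of the two ways ShouldIncludeEdge is used below: the
// serializer skips excluded edges outright, while the traversal additionally
// honors the policy out-param to stop walking past excluded referents.
//
//   // In StreamWriter::writeNode (no policy needed):
//   if (!ShouldIncludeEdge(compartments, ubiNode, ubiEdge)) continue;
//
//   // In HeapSnapshotHandler::operator() (policy drives the traversal):
//   CoreDumpWriter::EdgePolicy policy;
//   if (!ShouldIncludeEdge(compartments, origin, edge, &policy)) { /* skip */ }
//   if (policy == CoreDumpWriter::EXCLUDE_EDGES) traversal.abandonReferent();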
999 | ||||
1000 | // A `CoreDumpWriter` that serializes nodes to protobufs and writes them to the | |||
1001 | // given `ZeroCopyOutputStream`. | |||
1002 | class MOZ_STACK_CLASS StreamWriter : public CoreDumpWriter { | |||
1003 | using FrameSet = js::HashSet<uint64_t>; | |||
1004 | using TwoByteStringMap = | |||
1005 | js::HashMap<TwoByteString, uint64_t, TwoByteString::HashPolicy>; | |||
1006 | using OneByteStringMap = js::HashMap<const char*, uint64_t>; | |||
1007 | ||||
1008 | JSContext* cx; | |||
1009 | bool wantNames; | |||
1010 | // The set of |JS::ubi::StackFrame::identifier()|s that have already been | |||
1011 | // serialized and written to the core dump. | |||
1012 | FrameSet framesAlreadySerialized; | |||
1013 | // The set of two-byte strings that have already been serialized and written | |||
1014 | // to the core dump. | |||
1015 | TwoByteStringMap twoByteStringsAlreadySerialized; | |||
1016 | // The set of one-byte strings that have already been serialized and written | |||
1017 | // to the core dump. | |||
1018 | OneByteStringMap oneByteStringsAlreadySerialized; | |||
1019 | ||||
1020 | ::google::protobuf::io::ZeroCopyOutputStream& stream; | |||
1021 | ||||
1022 | JS::CompartmentSet* compartments; | |||
1023 | ||||
1024 | bool writeMessage(const ::google::protobuf::MessageLite& message) { | |||
1025 | // We have to create a new CodedOutputStream when writing each message so | |||
1026 | // that the 64MB size limit used by Coded{Output,Input}Stream to prevent | |||
1027 | // integer overflow is enforced per message rather than on the whole stream. | |||
1028 | ::google::protobuf::io::CodedOutputStream codedStream(&stream); | |||
1029 | codedStream.WriteVarint32(message.ByteSizeLong()); | |||
1030 | message.SerializeWithCachedSizes(&codedStream); | |||
1031 | return !codedStream.HadError(); | |||
1032 | } | |||
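// Illustrative sketch of the assumed reader side of this framing (not part of
// this file): each message is prefixed with a varint32 byte count, so a
// consumer reads the size, caps the stream, parses, and pops the limit.
//
//   uint32_t size = 0;
//   if (!codedInput.ReadVarint32(&size)) return false;
//   auto limit = codedInput.PushLimit(size);
//   bool ok = message.ParseFromCodedStream(&codedInput);
//   codedInput.PopLimit(limit);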
1033 | ||||
1034 | // Attach the full two-byte string or a reference to a two-byte string that | |||
1035 | // has already been serialized to a protobuf message. | |||
1036 | template <typename SetStringFunction, typename SetRefFunction> | |||
1037 | bool attachTwoByteString(TwoByteString& string, SetStringFunction setString, | |||
1038 | SetRefFunction setRef) { | |||
1039 | auto ptr = twoByteStringsAlreadySerialized.lookupForAdd(string); | |||
1040 | if (ptr) { | |||
1041 | setRef(ptr->value()); | |||
1042 | return true; | |||
1043 | } | |||
1044 | ||||
1045 | auto length = string.length(); | |||
1046 | auto stringData = MakeUnique<std::string>(length * sizeof(char16_t), '\0'); | |||
1047 | if (!stringData) return false; | |||
1048 | ||||
1049 | auto buf = const_cast<char16_t*>( | |||
1050 | reinterpret_cast<const char16_t*>(stringData->data())); | |||
1051 | string.copyToBuffer(RangedPtr<char16_t>(buf, length), length); | |||
1052 | ||||
1053 | uint64_t ref = twoByteStringsAlreadySerialized.count(); | |||
1054 | if (!twoByteStringsAlreadySerialized.add(ptr, std::move(string), ref)) | |||
1055 | return false; | |||
1056 | ||||
1057 | setString(stringData.release()); | |||
1058 | return true; | |||
1059 | } | |||
1060 | ||||
1061 | // Attach the full one-byte string or a reference to a one-byte string that | |||
1062 | // has already been serialized to a protobuf message. | |||
1063 | template <typename SetStringFunction, typename SetRefFunction> | |||
1064 | bool attachOneByteString(const char* string, SetStringFunction setString, | |||
1065 | SetRefFunction setRef) { | |||
1066 | auto ptr = oneByteStringsAlreadySerialized.lookupForAdd(string); | |||
1067 | if (ptr) { | |||
1068 | setRef(ptr->value()); | |||
1069 | return true; | |||
1070 | } | |||
1071 | ||||
1072 | auto length = strlen(string); | |||
1073 | auto stringData = MakeUnique<std::string>(string, length); | |||
1074 | if (!stringData) return false; | |||
1075 | ||||
1076 | uint64_t ref = oneByteStringsAlreadySerialized.count(); | |||
1077 | if (!oneByteStringsAlreadySerialized.add(ptr, string, ref)) return false; | |||
1078 | ||||
1079 | setString(stringData.release()); | |||
1080 | return true; | |||
1081 | } | |||
1082 | ||||
1083 | protobuf::StackFrame* getProtobufStackFrame(JS::ubi::StackFrame& frame, | |||
1084 | size_t depth = 1) { | |||
1085 | // NB: de-duplicated string properties must be written in the same order | |||
1086 | // here as they are read in `HeapSnapshot::saveStackFrame` or else indices | |||
1087 | // in references to already serialized strings will be off. | |||
1088 | ||||
1089 | MOZ_ASSERT(frame, | |||
1090 | "null frames should be represented as the lack of a serialized " | |||
1091 | "stack frame"); | |||
1092 | ||||
1093 | auto id = frame.identifier(); | |||
1094 | auto protobufStackFrame = MakeUnique<protobuf::StackFrame>(); | |||
1095 | if (!protobufStackFrame) return nullptr; | |||
1096 | ||||
1097 | if (framesAlreadySerialized.has(id)) { | |||
1098 | protobufStackFrame->set_ref(id); | |||
1099 | return protobufStackFrame.release(); | |||
1100 | } | |||
1101 | ||||
1102 | auto data = MakeUnique<protobuf::StackFrame_Data>(); | |||
1103 | if (!data) return nullptr; | |||
1104 | ||||
1105 | data->set_id(id); | |||
1106 | data->set_line(frame.line()); | |||
1107 | data->set_column(frame.column().oneOriginValue()); | |||
1108 | data->set_issystem(frame.isSystem()); | |||
1109 | data->set_isselfhosted(frame.isSelfHosted(cx)); | |||
1110 | ||||
1111 | auto dupeSource = TwoByteString::from(frame.source()); | |||
1112 | if (!attachTwoByteString( | |||
1113 | dupeSource, | |||
1114 | [&](std::string* source) { data->set_allocated_source(source); }, | |||
1115 | [&](uint64_t ref) { data->set_sourceref(ref); })) { | |||
1116 | return nullptr; | |||
1117 | } | |||
1118 | ||||
1119 | auto dupeName = TwoByteString::from(frame.functionDisplayName()); | |||
1120 | if (dupeName.isNonNull()) { | |||
1121 | if (!attachTwoByteString( | |||
1122 | dupeName, | |||
1123 | [&](std::string* name) { | |||
1124 | data->set_allocated_functiondisplayname(name); | |||
1125 | }, | |||
1126 | [&](uint64_t ref) { data->set_functiondisplaynameref(ref); })) { | |||
1127 | return nullptr; | |||
1128 | } | |||
1129 | } | |||
1130 | ||||
1131 | auto parent = frame.parent(); | |||
1132 | if (parent && depth < HeapSnapshot::MAX_STACK_DEPTH) { | |||
1133 | auto protobufParent = getProtobufStackFrame(parent, depth + 1); | |||
1134 | if (!protobufParent) return nullptr; | |||
1135 | data->set_allocated_parent(protobufParent); | |||
1136 | } | |||
1137 | ||||
1138 | protobufStackFrame->set_allocated_data(data.release()); | |||
1139 | ||||
1140 | if (!framesAlreadySerialized.put(id)) return nullptr; | |||
1141 | ||||
1142 | return protobufStackFrame.release(); | |||
1143 | } | |||
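// Illustrative note: stack frames are de-duplicated by ubi identifier. The
// first occurrence of an id serializes a full StackFrame_Data (with its parent
// chain, capped at MAX_STACK_DEPTH); later occurrences serialize only a ref.
// Informally (hypothetical id):
//
//   frame id 7 seen first  -> StackFrame { data: { id: 7, line: ..., ... } }
//   frame id 7 seen again  -> StackFrame { ref: 7 }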
1144 | ||||
1145 | public: | |||
1146 | StreamWriter(JSContext* cx, | |||
1147 | ::google::protobuf::io::ZeroCopyOutputStream& stream, | |||
1148 | bool wantNames, JS::CompartmentSet* compartments) | |||
1149 | : cx(cx), | |||
1150 | wantNames(wantNames), | |||
1151 | framesAlreadySerialized(cx), | |||
1152 | twoByteStringsAlreadySerialized(cx), | |||
1153 | oneByteStringsAlreadySerialized(cx), | |||
1154 | stream(stream), | |||
1155 | compartments(compartments) {} | |||
1156 | ||||
1157 | ~StreamWriter() override {} | |||
1158 | ||||
1159 | bool writeMetadata(uint64_t timestamp) final { | |||
1160 | protobuf::Metadata metadata; | |||
1161 | metadata.set_timestamp(timestamp); | |||
1162 | return writeMessage(metadata); | |||
1163 | } | |||
1164 | ||||
1165 | bool writeNode(const JS::ubi::Node& ubiNode, EdgePolicy includeEdges) final { | |||
1166 | // NB: de-duplicated string properties must be written in the same order | |||
1167 | // here as they are read in `HeapSnapshot::saveNode` or else indices in | |||
1168 | // references to already serialized strings will be off. | |||
1169 | ||||
1170 | protobuf::Node protobufNode; | |||
1171 | protobufNode.set_id(ubiNode.identifier()); | |||
1172 | ||||
1173 | protobufNode.set_coarsetype( | |||
1174 | JS::ubi::CoarseTypeToUint32(ubiNode.coarseType())); | |||
1175 | ||||
1176 | auto typeName = TwoByteString(ubiNode.typeName()); | |||
1177 | if (NS_WARN_IF(!attachTwoByteString( | |||
1178 | typeName, | |||
1179 | [&](std::string* name) { | |||
1180 | protobufNode.set_allocated_typename_(name); | |||
1181 | }, | |||
1182 | [&](uint64_t ref) { protobufNode.set_typenameref(ref); }))) { | |||
1183 | return false; | |||
1184 | } | |||
1185 | ||||
1186 | mozilla::MallocSizeOf mallocSizeOf = dbg::GetDebuggerMallocSizeOf(cx); | |||
1187 | MOZ_ASSERT(mallocSizeOf); | |||
1188 | protobufNode.set_size(ubiNode.size(mallocSizeOf)); | |||
1189 | ||||
1190 | if (includeEdges) { | |||
1191 | auto edges = ubiNode.edges(cx, wantNames); | |||
1192 | if (NS_WARN_IF(!edges)) return false; | |||
1193 | ||||
1194 | for (; !edges->empty(); edges->popFront()) { | |||
1195 | ubi::Edge& ubiEdge = edges->front(); | |||
1196 | if (!ShouldIncludeEdge(compartments, ubiNode, ubiEdge)) { | |||
1197 | continue; | |||
1198 | } | |||
1199 | ||||
1200 | protobuf::Edge* protobufEdge = protobufNode.add_edges(); | |||
1201 | if (NS_WARN_IF(!protobufEdge)) { | |||
1202 | return false; | |||
1203 | } | |||
1204 | ||||
1205 | protobufEdge->set_referent(ubiEdge.referent.identifier()); | |||
1206 | ||||
1207 | if (wantNames && ubiEdge.name) { | |||
1208 | TwoByteString edgeName(std::move(ubiEdge.name)); | |||
1209 | if (NS_WARN_IF(!attachTwoByteString( | |||
1210 | edgeName, | |||
1211 | [&](std::string* name) { | |||
1212 | protobufEdge->set_allocated_name(name); | |||
1213 | }, | |||
1214 | [&](uint64_t ref) { protobufEdge->set_nameref(ref); }))) { | |||
1215 | return false; | |||
1216 | } | |||
1217 | } | |||
1218 | } | |||
1219 | } | |||
1220 | ||||
1221 | if (ubiNode.hasAllocationStack()) { | |||
1222 | auto ubiStackFrame = ubiNode.allocationStack(); | |||
1223 | auto protoStackFrame = getProtobufStackFrame(ubiStackFrame); | |||
1224 | if (NS_WARN_IF(!protoStackFrame)) return false; | |||
1225 | protobufNode.set_allocated_allocationstack(protoStackFrame); | |||
1226 | } | |||
1227 | ||||
1228 | if (auto className = ubiNode.jsObjectClassName()) { | |||
1229 | if (NS_WARN_IF(!attachOneByteString( | |||
1230 | className, | |||
1231 | [&](std::string* name) { | |||
1232 | protobufNode.set_allocated_jsobjectclassname(name); | |||
1233 | }, | |||
1234 | [&](uint64_t ref) { | |||
1235 | protobufNode.set_jsobjectclassnameref(ref); | |||
1236 | }))) { | |||
1237 | return false; | |||
1238 | } | |||
1239 | } | |||
1240 | ||||
1241 | if (auto scriptFilename = ubiNode.scriptFilename()) { | |||
1242 | if (NS_WARN_IF(!attachOneByteString( | |||
1243 | scriptFilename, | |||
1244 | [&](std::string* name) { | |||
1245 | protobufNode.set_allocated_scriptfilename(name); | |||
1246 | }, | |||
1247 | [&](uint64_t ref) { | |||
1248 | protobufNode.set_scriptfilenameref(ref); | |||
1249 | }))) { | |||
1250 | return false; | |||
1251 | } | |||
1252 | } | |||
1253 | ||||
1254 | if (ubiNode.descriptiveTypeName()) { | |||
1255 | auto descriptiveTypeName = TwoByteString(ubiNode.descriptiveTypeName()); | |||
1256 | if (NS_WARN_IF(!attachTwoByteString( | |||
1257 | descriptiveTypeName, | |||
1258 | [&](std::string* name) { | |||
1259 | protobufNode.set_allocated_descriptivetypename(name); | |||
1260 | }, | |||
1261 | [&](uint64_t ref) { | |||
1262 | protobufNode.set_descriptivetypenameref(ref); | |||
1263 | }))) { | |||
1264 | return false; | |||
1265 | } | |||
1266 | } | |||
1267 | ||||
1268 | return writeMessage(protobufNode); | |||
1269 | } | |||
1270 | }; | |||
1271 | ||||
1272 | // A JS::ubi::BreadthFirst handler that serializes a snapshot of the heap into a | |||
1273 | // core dump. | |||
1274 | class MOZ_STACK_CLASS HeapSnapshotHandler { | |||
1275 | CoreDumpWriter& writer; | |||
1276 | JS::CompartmentSet* compartments; | |||
1277 | ||||
1278 | public: | |||
1279 | // For telemetry. | |||
1280 | uint32_t nodeCount; | |||
1281 | uint32_t edgeCount; | |||
1282 | ||||
1283 | HeapSnapshotHandler(CoreDumpWriter& writer, JS::CompartmentSet* compartments) | |||
1284 | : writer(writer), | |||
1285 | compartments(compartments), | |||
1286 | nodeCount(0), | |||
1287 | edgeCount(0) {} | |||
1288 | ||||
1289 | // JS::ubi::BreadthFirst handler interface. | |||
1290 | ||||
1291 | class NodeData {}; | |||
1292 | typedef JS::ubi::BreadthFirst<HeapSnapshotHandler> Traversal; | |||
1293 | bool operator()(Traversal& traversal, JS::ubi::Node origin, | |||
1294 | const JS::ubi::Edge& edge, NodeData*, bool first) { | |||
1295 | edgeCount++; | |||
1296 | ||||
1297 | // We're only interested in the first time we reach edge.referent, not in | |||
1298 | // every edge arriving at that node. "But, don't we want to serialize every | |||
1299 | // edge in the heap graph?" you ask. Don't worry! This edge is still | |||
1300 | // serialized into the core dump. Serializing a node also serializes each of | |||
1301 | // its edges, and if we are traversing a given edge, we must have already | |||
1302 | // visited and serialized the origin node and its edges. | |||
1303 | if (!first) return true; | |||
1304 | ||||
1305 | CoreDumpWriter::EdgePolicy policy; | |||
1306 | if (!ShouldIncludeEdge(compartments, origin, edge, &policy)) { | |||
1307 | // Because ShouldIncludeEdge considers the |origin| node as well, we don't | |||
1308 | // want to consider this node 'visited' until we write it to the core | |||
1309 | // dump. | |||
1310 | traversal.doNotMarkReferentAsVisited(); | |||
1311 | return true; | |||
1312 | } | |||
1313 | ||||
1314 | nodeCount++; | |||
1315 | ||||
1316 | if (policy == CoreDumpWriter::EXCLUDE_EDGES) traversal.abandonReferent(); | |||
1317 | ||||
1318 | return writer.writeNode(edge.referent, policy); | |||
1319 | } | |||
1320 | }; | |||
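// Illustrative note on the telemetry counters above: edgeCount is incremented
// for every edge the breadth-first traversal reports, while nodeCount is only
// incremented when a referent is actually written, so nodeCount roughly tracks
// the number of unique serialized nodes (the start node is written separately
// in WriteHeapGraph below and is not counted here).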
1321 | ||||
1322 | bool WriteHeapGraph(JSContext* cx, const JS::ubi::Node& node, | |||
1323 | CoreDumpWriter& writer, bool wantNames, | |||
1324 | JS::CompartmentSet* compartments, | |||
1325 | JS::AutoCheckCannotGC& noGC, uint32_t& outNodeCount, | |||
1326 | uint32_t& outEdgeCount) { | |||
1327 | // Serialize the starting node to the core dump. | |||
1328 | ||||
1329 | if (NS_WARN_IF(!writer.writeNode(node, CoreDumpWriter::INCLUDE_EDGES))) { | |||
1330 | return false; | |||
1331 | } | |||
1332 | ||||
1333 | // Walk the heap graph starting from the given node and serialize it into the | |||
1334 | // core dump. | |||
1335 | ||||
1336 | HeapSnapshotHandler handler(writer, compartments); | |||
1337 | HeapSnapshotHandler::Traversal traversal(cx, handler, noGC); | |||
1338 | traversal.wantNames = wantNames; | |||
1339 | ||||
1340 | bool ok = traversal.addStartVisited(node) && traversal.traverse(); | |||
1341 | ||||
1342 | if (ok) { | |||
1343 | outNodeCount = handler.nodeCount; | |||
1344 | outEdgeCount = handler.edgeCount; | |||
1345 | } | |||
1346 | ||||
1347 | return ok; | |||
1348 | } | |||
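// Illustrative sketch of a complete dump, mirroring SaveHeapSnapshotShared
// below (writer, roots, and nogc are assumed to be set up as shown there):
//
//   uint32_t nodeCount = 0, edgeCount = 0;
//   bool ok = writer.writeMetadata(PR_Now()) &&
//             WriteHeapGraph(cx, roots, writer, /* wantNames = */ true,
//                            /* compartments = */ nullptr, nogc,
//                            nodeCount, edgeCount);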
1349 | ||||
1350 | static unsigned long msSinceProcessCreation(const TimeStamp& now) { | |||
1351 | auto duration = now - TimeStamp::ProcessCreation(); | |||
1352 | return (unsigned long)duration.ToMilliseconds(); | |||
1353 | } | |||
1354 | ||||
1355 | /* static */ | |||
1356 | already_AddRefed<nsIFile> HeapSnapshot::CreateUniqueCoreDumpFile( | |||
1357 | ErrorResult& rv, const TimeStamp& now, nsAString& outFilePath, | |||
1358 | nsAString& outSnapshotId) { | |||
1359 | MOZ_RELEASE_ASSERT(XRE_IsParentProcess()); | |||
1360 | nsCOMPtr<nsIFile> file; | |||
1361 | rv = GetSpecialSystemDirectory(OS_TemporaryDirectory, getter_AddRefs(file)); | |||
1362 | if (NS_WARN_IF(rv.Failed())) return nullptr; | |||
1363 | ||||
1364 | nsAutoString tempPath; | |||
1365 | rv = file->GetPath(tempPath); | |||
1366 | if (NS_WARN_IF(rv.Failed())) return nullptr; | |||
1367 | ||||
1368 | auto ms = msSinceProcessCreation(now); | |||
1369 | rv = file->AppendNative(nsPrintfCString("%lu.fxsnapshot", ms)); | |||
1370 | if (NS_WARN_IF(rv.Failed())) return nullptr; | |||
1371 | ||||
1372 | rv = file->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0666); | |||
1373 | if (NS_WARN_IF(rv.Failed())) return nullptr; | |||
1374 | ||||
1375 | rv = file->GetPath(outFilePath); | |||
1376 | if (NS_WARN_IF(rv.Failed())) return nullptr; | |||
1377 | ||||
1378 | // The snapshot ID must be computed in the process that created the | |||
1379 | // temp file, because TmpD may not be the same in all processes. | |||
1380 | outSnapshotId.Assign(Substring( | |||
1381 | outFilePath, tempPath.Length() + 1, | |||
1382 | outFilePath.Length() - tempPath.Length() - sizeof(".fxsnapshot"))); | |||
1383 | ||||
1384 | return file.forget(); | |||
1385 | } | |||
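// Illustrative worked example (hypothetical paths): if TmpD is "/tmp" and the
// unique file comes out as "/tmp/12345-1.fxsnapshot", the Substring() above
// starts after the trailing '/' and drops ".fxsnapshot" (sizeof includes the
// terminating NUL, which also accounts for that '/'), yielding the snapshot
// id "12345-1".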
1386 | ||||
1387 | // Deletion policy for cleaning up PHeapSnapshotTempFileHelperChild pointers. | |||
1388 | class DeleteHeapSnapshotTempFileHelperChild { | |||
1389 | public: | |||
1390 | constexpr DeleteHeapSnapshotTempFileHelperChild() {} | |||
1391 | ||||
1392 | void operator()(PHeapSnapshotTempFileHelperChild* ptr) const { | |||
1393 | Unused << NS_WARN_IF(!HeapSnapshotTempFileHelperChild::Send__delete__(ptr)); | |||
1394 | } | |||
1395 | }; | |||
1396 | ||||
1397 | // A UniquePtr alias to automatically manage PHeapSnapshotTempFileHelperChild | |||
1398 | // pointers. | |||
1399 | using UniqueHeapSnapshotTempFileHelperChild = | |||
1400 | UniquePtr<PHeapSnapshotTempFileHelperChild, | |||
1401 | DeleteHeapSnapshotTempFileHelperChild>; | |||
1402 | ||||
1403 | // Get an nsIOutputStream that we can write the heap snapshot to. In non-e10s | |||
1404 | // and in the e10s parent process, open a file directly and create an output | |||
1405 | // stream for it. In e10s child processes, we are sandboxed without access to | |||
1406 | // the filesystem. Use IPDL to request a file descriptor from the parent | |||
1407 | // process. | |||
1408 | static already_AddRefed<nsIOutputStream> getCoreDumpOutputStream( | |||
1409 | ErrorResult& rv, TimeStamp& start, nsAString& outFilePath, | |||
1410 | nsAString& outSnapshotId) { | |||
1411 | if (XRE_IsParentProcess()) { | |||
1412 | // Create the file and open the output stream directly. | |||
1413 | ||||
1414 | nsCOMPtr<nsIFile> file = HeapSnapshot::CreateUniqueCoreDumpFile( | |||
1415 | rv, start, outFilePath, outSnapshotId); | |||
1416 | if (NS_WARN_IF(rv.Failed())) return nullptr; | |||
1417 | ||||
1418 | nsCOMPtr<nsIOutputStream> outputStream; | |||
1419 | rv = NS_NewLocalFileOutputStream(getter_AddRefs(outputStream), file, | |||
1420 | PR_WRONLY, -1, 0); | |||
1421 | if (NS_WARN_IF(rv.Failed())) return nullptr; | |||
1422 | ||||
1423 | return outputStream.forget(); | |||
1424 | } | |||
1425 | // Request a file descriptor from the parent process over IPDL. | |||
1426 | ||||
1427 | auto cc = ContentChild::GetSingleton(); | |||
1428 | if (!cc) { | |||
1429 | rv.Throw(NS_ERROR_UNEXPECTED); | |||
1430 | return nullptr; | |||
1431 | } | |||
1432 | ||||
1433 | UniqueHeapSnapshotTempFileHelperChild helper( | |||
1434 | cc->SendPHeapSnapshotTempFileHelperConstructor()); | |||
1435 | if (NS_WARN_IF(!helper)) { | |||
1436 | rv.Throw(NS_ERROR_UNEXPECTED); | |||
1437 | return nullptr; | |||
1438 | } | |||
1439 | ||||
1440 | OpenHeapSnapshotTempFileResponse response; | |||
1441 | if (!helper->SendOpenHeapSnapshotTempFile(&response)) { | |||
1442 | rv.Throw(NS_ERROR_UNEXPECTED); | |||
1443 | return nullptr; | |||
1444 | } | |||
1445 | if (response.type() == OpenHeapSnapshotTempFileResponse::Tnsresult) { | |||
1446 | rv.Throw(response.get_nsresult()); | |||
1447 | return nullptr; | |||
1448 | } | |||
1449 | ||||
1450 | auto opened = response.get_OpenedFile(); | |||
1451 | outFilePath = opened.path(); | |||
1452 | outSnapshotId = opened.snapshotId(); | |||
1453 | nsCOMPtr<nsIOutputStream> outputStream = | |||
1454 | FileDescriptorOutputStream::Create(opened.descriptor()); | |||
1455 | if (NS_WARN_IF(!outputStream)) { | |||
1456 | rv.Throw(NS_ERROR_UNEXPECTED); | |||
1457 | return nullptr; | |||
1458 | } | |||
1459 | ||||
1460 | return outputStream.forget(); | |||
1461 | } | |||
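// Illustrative sketch of calling the helper above (mirrors
// SaveHeapSnapshotShared below):
//
//   nsAutoString filePath, snapshotId;
//   nsCOMPtr<nsIOutputStream> out =
//       getCoreDumpOutputStream(rv, start, filePath, snapshotId);
//   if (NS_WARN_IF(rv.Failed())) return;  // no stream; rv carries the reason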
1462 | ||||
1463 | } // namespace devtools | |||
1464 | ||||
1465 | namespace dom { | |||
1466 | ||||
1467 | using namespace JS; | |||
1468 | using namespace devtools; | |||
1469 | ||||
1470 | /* static */ | |||
1471 | void ChromeUtils::SaveHeapSnapshotShared( | |||
1472 | GlobalObject& global, const HeapSnapshotBoundaries& boundaries, | |||
1473 | nsAString& outFilePath, nsAString& outSnapshotId, ErrorResult& rv) { | |||
1474 | auto start = TimeStamp::Now(); | |||
1475 | ||||
1476 | bool wantNames = true; | |||
1477 | CompartmentSet compartments; | |||
1478 | uint32_t nodeCount = 0; | |||
1479 | uint32_t edgeCount = 0; | |||
1480 | ||||
1481 | nsCOMPtr<nsIOutputStream> outputStream = | |||
1482 | getCoreDumpOutputStream(rv, start, outFilePath, outSnapshotId); | |||
1483 | if (NS_WARN_IF(rv.Failed())) return; | |||
1484 | ||||
1485 | ZeroCopyNSIOutputStream zeroCopyStream(outputStream); | |||
1486 | ::google::protobuf::io::GzipOutputStream gzipStream(&zeroCopyStream); | |||
1487 | ||||
1488 | JSContext* cx = global.Context(); | |||
1489 | ||||
1490 | { | |||
1491 | ubi::RootList rootList(cx, wantNames); | |||
1492 | auto [ok, nogc] = | |||
1493 | EstablishBoundaries(cx, rv, boundaries, rootList, compartments); | |||
1494 | if (!ok) { | |||
1495 | return; | |||
1496 | } | |||
1497 | ||||
1498 | StreamWriter writer(cx, gzipStream, wantNames, | |||
1499 | !compartments.empty() ? &compartments : nullptr); | |||
1500 | ||||
1501 | ubi::Node roots(&rootList); | |||
1502 | ||||
1503 | // Serialize the initial heap snapshot metadata to the core dump. | |||
1504 | if (!writer.writeMetadata(PR_Now()) || | |||
1505 | // Serialize the heap graph to the core dump, starting from our list of | |||
1506 | // roots. | |||
1507 | !WriteHeapGraph(cx, roots, writer, wantNames, | |||
1508 | !compartments.empty() ? &compartments : nullptr, nogc, | |||
1509 | nodeCount, edgeCount)) { | |||
1510 | rv.Throw(zeroCopyStream.failed() ? zeroCopyStream.result() | |||
1511 | : NS_ERROR_UNEXPECTED); | |||
1512 | return; | |||
1513 | } | |||
1514 | } | |||
1515 | ||||
1516 | Telemetry::AccumulateTimeDelta(Telemetry::DEVTOOLS_SAVE_HEAP_SNAPSHOT_MS, | |||
1517 | start); | |||
1518 | Telemetry::Accumulate(Telemetry::DEVTOOLS_HEAP_SNAPSHOT_NODE_COUNT, | |||
1519 | nodeCount); | |||
1520 | Telemetry::Accumulate(Telemetry::DEVTOOLS_HEAP_SNAPSHOT_EDGE_COUNT, | |||
1521 | edgeCount); | |||
1522 | } | |||
1523 | ||||
1524 | /* static */ | |||
1525 | uint64_t ChromeUtils::GetObjectNodeId(GlobalObject& global, | |||
1526 | JS::Handle<JSObject*> val) { | |||
1527 | JS::Rooted<JSObject*> obj(global.Context(), val); | |||
1528 | ||||
1529 | JS::ubi::Node node(obj); | |||
1530 | return node.identifier(); | |||
1531 | } | |||
1532 | ||||
1533 | /* static */ | |||
1534 | void ChromeUtils::SaveHeapSnapshot(GlobalObject& global, | |||
1535 | const HeapSnapshotBoundaries& boundaries, | |||
1536 | nsAString& outFilePath, ErrorResult& rv) { | |||
1537 | nsAutoString snapshotId; | |||
1538 | SaveHeapSnapshotShared(global, boundaries, outFilePath, snapshotId, rv); | |||
1539 | } | |||
1540 | ||||
1541 | /* static */ | |||
1542 | void ChromeUtils::SaveHeapSnapshotGetId( | |||
1543 | GlobalObject& global, const HeapSnapshotBoundaries& boundaries, | |||
1544 | nsAString& outSnapshotId, ErrorResult& rv) { | |||
1545 | nsAutoString filePath; | |||
1546 | SaveHeapSnapshotShared(global, boundaries, filePath, outSnapshotId, rv); | |||
1547 | } | |||
1548 | ||||
1549 | /* static */ | |||
1550 | already_AddRefed<HeapSnapshot> ChromeUtils::ReadHeapSnapshot( | |||
1551 | GlobalObject& global, const nsAString& filePath, ErrorResult& rv) { | |||
1552 | auto start = TimeStamp::Now(); | |||
1553 | ||||
1554 | nsresult nsrv; | |||
1555 | nsCOMPtr<nsIFile> snapshotFile = | |||
1556 | do_CreateInstance("@mozilla.org/file/local;1", &nsrv); | |||
1557 | ||||
1558 | if (NS_FAILED(nsrv)) { | |||
1559 | rv = nsrv; | |||
1560 | return nullptr; | |||
1561 | } | |||
1562 | ||||
1563 | rv = snapshotFile->InitWithPath(filePath); | |||
1564 | if (rv.Failed()) { | |||
1565 | return nullptr; | |||
1566 | } | |||
1567 | ||||
1568 | AutoMemMap mm; | |||
1569 | rv = mm.init(snapshotFile); | |||
1570 | if (rv.Failed()) return nullptr; | |||
1571 | ||||
1572 | RefPtr<HeapSnapshot> snapshot = HeapSnapshot::Create( | |||
1573 | global.Context(), global, reinterpret_cast<const uint8_t*>(mm.address()), | |||
1574 | mm.size(), rv); | |||
1575 | ||||
1576 | if (!rv.Failed()) | |||
1577 | Telemetry::AccumulateTimeDelta(Telemetry::DEVTOOLS_READ_HEAP_SNAPSHOT_MS, | |||
1578 | start); | |||
1579 | ||||
1580 | return snapshot.forget(); | |||
1581 | } | |||
1582 | ||||
1583 | } // namespace dom | |||
1584 | } // namespace mozilla |
1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ | ||||
2 | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ | ||||
3 | /* This Source Code Form is subject to the terms of the Mozilla Public | ||||
4 | * License, v. 2.0. If a copy of the MPL was not distributed with this | ||||
5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ | ||||
6 | |||||
7 | //--------------------------------------------------------------------------- | ||||
8 | // Overview | ||||
9 | //--------------------------------------------------------------------------- | ||||
10 | // | ||||
11 | // This file defines HashMap<Key, Value> and HashSet<T>, hash tables that are | ||||
12 | // fast and have a nice API. | ||||
13 | // | ||||
14 | // Both hash tables have two optional template parameters. | ||||
15 | // | ||||
16 | // - HashPolicy. This defines the operations for hashing and matching keys. The | ||||
17 | // default HashPolicy is appropriate when both of the following two | ||||
18 | // conditions are true. | ||||
19 | // | ||||
20 | // - The key type stored in the table (|Key| for |HashMap<Key, Value>|, |T| | ||||
21 | // for |HashSet<T>|) is an integer, pointer, UniquePtr, float, or double. | ||||
22 | // | ||||
23 | // - The type used for lookups (|Lookup|) is the same as the key type. This | ||||
24 | // is usually the case, but not always. | ||||
25 | // | ||||
26 | // There is also a |CStringHasher| policy for |char*| keys. If your keys | ||||
27 | // don't match any of the above cases, you must provide your own hash policy; | ||||
28 | // see the "Hash Policy" section below. | ||||
29 | // | ||||
30 | // - AllocPolicy. This defines how allocations are done by the table. | ||||
31 | // | ||||
32 | // - |MallocAllocPolicy| is the default and is usually appropriate; note that | ||||
33 | // operations (such as insertions) that might cause allocations are | ||||
34 | // fallible and must be checked for OOM. These checks are enforced by the | ||||
35 | // use of [[nodiscard]]. | ||||
36 | // | ||||
37 | // - |InfallibleAllocPolicy| is another possibility; it allows the | ||||
38 | // abovementioned OOM checks to be done with MOZ_ALWAYS_TRUE(). | ||||
39 | // | ||||
40 | // Note that entry storage allocation is lazy, and not done until the first | ||||
41 | // lookupForAdd(), put(), or putNew() is performed. | ||||
42 | // | ||||
43 | // See AllocPolicy.h for more details. | ||||
44 | // | ||||
45 | // Documentation on how to use HashMap and HashSet, including examples, is | ||||
46 | // present within those classes. Search for "class HashMap" and "class | ||||
47 | // HashSet". | ||||
48 | // | ||||
49 | // Both HashMap and HashSet are implemented on top of a third class, HashTable. | ||||
50 | // You only need to look at HashTable if you want to understand the | ||||
51 | // implementation. | ||||
52 | // | ||||
53 | // How does mozilla::HashTable (this file) compare with PLDHashTable (and its | ||||
54 | // subclasses, such as nsTHashtable)? | ||||
55 | // | ||||
56 | // - mozilla::HashTable is a lot faster, largely because it uses templates | ||||
57 | // throughout *and* inlines everything. PLDHashTable inlines operations much | ||||
58 | // less aggressively, and also uses "virtual ops" for operations like hashing | ||||
59 | // and matching entries that require function calls. | ||||
60 | // | ||||
61 | // - Correspondingly, mozilla::HashTable use is likely to increase executable | ||||
62 | // size much more than PLDHashTable. | ||||
63 | // | ||||
64 | // - mozilla::HashTable has a nicer API, with a proper HashSet vs. HashMap | ||||
65 | // distinction. | ||||
66 | // | ||||
67 | // - mozilla::HashTable requires more explicit OOM checking. As mentioned | ||||
68 | // above, the use of |InfallibleAllocPolicy| can simplify things. | ||||
69 | // | ||||
70 | // - mozilla::HashTable has a default capacity on creation of 32 and a minimum | ||||
71 | // capacity of 4. PLDHashTable has a default capacity on creation of 8 and a | ||||
72 | // minimum capacity of 8. | ||||
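//
// A minimal end-to-end sketch (illustrative only; the map |ages| and the
// useAge() call are hypothetical stand-ins for caller code):
//
//   mozilla::HashMap<const char*, int, mozilla::CStringHasher> ages;
//   if (!ages.put("alice", 37)) {
//     return false;  // put() is fallible with MallocAllocPolicy
//   }
//   if (auto p = ages.lookup("alice")) {
//     useAge(p->value());
//   }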
73 | |||||
74 | #ifndef mozilla_HashTable_h | ||||
75 | #define mozilla_HashTable_h | ||||
76 | |||||
77 | #include <utility> | ||||
78 | #include <type_traits> | ||||
79 | |||||
80 | #include "mozilla/AllocPolicy.h" | ||||
81 | #include "mozilla/Assertions.h" | ||||
82 | #include "mozilla/Attributes.h" | ||||
83 | #include "mozilla/Casting.h" | ||||
84 | #include "mozilla/HashFunctions.h" | ||||
85 | #include "mozilla/MathAlgorithms.h" | ||||
86 | #include "mozilla/Maybe.h" | ||||
87 | #include "mozilla/MemoryChecking.h" | ||||
88 | #include "mozilla/MemoryReporting.h" | ||||
89 | #include "mozilla/Opaque.h" | ||||
90 | #include "mozilla/OperatorNewExtensions.h" | ||||
91 | #include "mozilla/ReentrancyGuard.h" | ||||
92 | #include "mozilla/UniquePtr.h" | ||||
93 | #include "mozilla/WrappingOperations.h" | ||||
94 | |||||
95 | namespace mozilla { | ||||
96 | |||||
97 | template <class, class = void> | ||||
98 | struct DefaultHasher; | ||||
99 | |||||
100 | template <class, class> | ||||
101 | class HashMapEntry; | ||||
102 | |||||
103 | namespace detail { | ||||
104 | |||||
105 | template <typename T> | ||||
106 | class HashTableEntry; | ||||
107 | |||||
108 | template <class T, class HashPolicy, class AllocPolicy> | ||||
109 | class HashTable; | ||||
110 | |||||
111 | } // namespace detail | ||||
112 | |||||
113 | // The "generation" of a hash table is an opaque value indicating the state of | ||||
114 | // modification of the hash table through its lifetime. If the generation of | ||||
115 | // a hash table compares equal at times T1 and T2, then lookups in the hash | ||||
116 | // table, pointers to (or into) hash table entries, etc. at time T1 are valid | ||||
117 | // at time T2. If the generation compares unequal, these computations are all | ||||
118 | // invalid and must be performed again to be used. | ||||
119 | // | ||||
120 | // Generations are meaningfully comparable only with respect to a single hash | ||||
121 | // table. It's always nonsensical to compare the generation of distinct hash | ||||
122 | // tables H1 and H2. | ||||
123 | using Generation = Opaque<uint64_t>; | ||||
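//
// A sketch of the intended use (the map |m| and mutateSomehow() are
// hypothetical stand-ins):
//
//   Generation g = m.generation();
//   auto p = m.lookup(3);
//   mutateSomehow(m);              // may add or remove entries
//   if (m.generation() != g) {
//     p = m.lookup(3);             // |p| may have been invalidated; redo it
//   }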
124 | |||||
125 | //--------------------------------------------------------------------------- | ||||
126 | // HashMap | ||||
127 | //--------------------------------------------------------------------------- | ||||
128 | |||||
129 | // HashMap is a fast hash-based map from keys to values. | ||||
130 | // | ||||
131 | // Template parameter requirements: | ||||
132 | // - Key/Value: movable, destructible, assignable. | ||||
133 | // - HashPolicy: see the "Hash Policy" section below. | ||||
134 | // - AllocPolicy: see AllocPolicy.h. | ||||
135 | // | ||||
136 | // Note: | ||||
137 | // - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members | ||||
138 | // called by HashMap must not call back into the same HashMap object. | ||||
139 | // | ||||
140 | template <class Key, class Value, class HashPolicy = DefaultHasher<Key>, | ||||
141 | class AllocPolicy = MallocAllocPolicy> | ||||
142 | class HashMap { | ||||
143 | // -- Implementation details ----------------------------------------------- | ||||
144 | |||||
145 | // HashMap is not copyable or assignable. | ||||
146 | HashMap(const HashMap& hm) = delete; | ||||
147 | HashMap& operator=(const HashMap& hm) = delete; | ||||
148 | |||||
149 | using TableEntry = HashMapEntry<Key, Value>; | ||||
150 | |||||
151 | struct MapHashPolicy : HashPolicy { | ||||
152 | using Base = HashPolicy; | ||||
153 | using KeyType = Key; | ||||
154 | |||||
155 | static const Key& getKey(TableEntry& aEntry) { return aEntry.key(); } | ||||
156 | |||||
157 | static void setKey(TableEntry& aEntry, Key& aKey) { | ||||
158 | HashPolicy::rekey(aEntry.mutableKey(), aKey); | ||||
159 | } | ||||
160 | }; | ||||
161 | |||||
162 | using Impl = detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy>; | ||||
163 | Impl mImpl; | ||||
164 | |||||
165 | friend class Impl::Enum; | ||||
166 | |||||
167 | public: | ||||
168 | using Lookup = typename HashPolicy::Lookup; | ||||
169 | using Entry = TableEntry; | ||||
170 | |||||
171 | // -- Initialization ------------------------------------------------------- | ||||
172 | |||||
173 | explicit HashMap(AllocPolicy aAllocPolicy = AllocPolicy(), | ||||
174 | uint32_t aLen = Impl::sDefaultLen) | ||||
175 | : mImpl(std::move(aAllocPolicy), aLen) {} | ||||
176 | |||||
177 | explicit HashMap(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {} | ||||
178 | |||||
179 | // HashMap is movable. | ||||
180 | HashMap(HashMap&& aRhs) = default; | ||||
181 | HashMap& operator=(HashMap&& aRhs) = default; | ||||
182 | |||||
183 | // -- Status and sizing ---------------------------------------------------- | ||||
184 | |||||
185 | // The map's current generation. | ||||
186 | Generation generation() const { return mImpl.generation(); } | ||||
187 | |||||
188 | // Is the map empty? | ||||
189 | bool empty() const { return mImpl.empty(); } | ||||
190 | |||||
191 | // Number of keys/values in the map. | ||||
192 | uint32_t count() const { return mImpl.count(); } | ||||
193 | |||||
194 | // Number of key/value slots in the map. Note: resize will happen well before | ||||
195 | // count() == capacity(). | ||||
196 | uint32_t capacity() const { return mImpl.capacity(); } | ||||
197 | |||||
198 | // The size of the map's entry storage, in bytes. If the keys/values contain | ||||
199 | // pointers to other heap blocks, you must iterate over the map and measure | ||||
200 | // them separately; hence the "shallow" prefix. | ||||
201 | size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { | ||||
202 | return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf); | ||||
203 | } | ||||
204 | size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { | ||||
205 | return aMallocSizeOf(this) + | ||||
206 | mImpl.shallowSizeOfExcludingThis(aMallocSizeOf); | ||||
207 | } | ||||
208 | |||||
209 | // Attempt to minimize the capacity(). If the table is empty, this will free | ||||
210 | // the empty storage and upon regrowth it will be given the minimum capacity. | ||||
211 | void compact() { mImpl.compact(); } | ||||
212 | |||||
213 | // Attempt to reserve enough space to fit at least |aLen| elements. This is | ||||
214 | // total capacity, including elements already present. Does nothing if the | ||||
215 | // map already has sufficient capacity. | ||||
216 | [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); } | ||||
217 | |||||
218 | // -- Lookups -------------------------------------------------------------- | ||||
219 | |||||
220 | // Does the map contain a key/value matching |aLookup|? | ||||
221 | bool has(const Lookup& aLookup) const { | ||||
222 | return mImpl.lookup(aLookup).found(); | ||||
223 | } | ||||
224 | |||||
225 | // Return a Ptr indicating whether a key/value matching |aLookup| is | ||||
226 | // present in the map. E.g.: | ||||
227 | // | ||||
228 | // using HM = HashMap<int,char>; | ||||
229 | // HM h; | ||||
230 | // if (HM::Ptr p = h.lookup(3)) { | ||||
231 | // assert(p->key() == 3); | ||||
232 | // char val = p->value(); | ||||
233 | // } | ||||
234 | // | ||||
235 | using Ptr = typename Impl::Ptr; | ||||
236 | MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const { | ||||
237 | return mImpl.lookup(aLookup); | ||||
238 | } | ||||
239 | |||||
240 | // Like lookup(), but does not assert if two threads call it at the same | ||||
241 | // time. Only use this method when none of the threads will modify the map. | ||||
242 | MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const { | ||||
243 | return mImpl.readonlyThreadsafeLookup(aLookup); | ||||
244 | } | ||||
245 | |||||
246 | // -- Insertions ----------------------------------------------------------- | ||||
247 | |||||
248 | // Overwrite existing value with |aValue|, or add it if not present. Returns | ||||
249 | // false on OOM. | ||||
250 | template <typename KeyInput, typename ValueInput> | ||||
251 | [[nodiscard]] bool put(KeyInput&& aKey, ValueInput&& aValue) { | ||||
252 | return put(aKey, std::forward<KeyInput>(aKey), | ||||
253 | std::forward<ValueInput>(aValue)); | ||||
254 | } | ||||
255 | |||||
256 | template <typename KeyInput, typename ValueInput> | ||||
257 | [[nodiscard]] bool put(const Lookup& aLookup, KeyInput&& aKey, | ||||
258 | ValueInput&& aValue) { | ||||
259 | AddPtr p = lookupForAdd(aLookup); | ||||
260 | if (p) { | ||||
261 | p->value() = std::forward<ValueInput>(aValue); | ||||
262 | return true; | ||||
263 | } | ||||
264 | return add(p, std::forward<KeyInput>(aKey), | ||||
265 | std::forward<ValueInput>(aValue)); | ||||
266 | } | ||||
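// A put() usage sketch (hypothetical map |nicknames|):
//
//   HashMap<int, const char*> nicknames;
//   if (!nicknames.put(1, "one")) {
//     return false;  // OOM
//   }
//   if (!nicknames.put(1, "uno")) {  // same key: overwrites the value
//     return false;
//   }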
267 | |||||
268 | // Like put(), but slightly faster. Must only be used when the given key is | ||||
269 | // not already present. (In debug builds, assertions check this.) | ||||
270 | template <typename KeyInput, typename ValueInput> | ||||
271 | [[nodiscard]] bool putNew(KeyInput&& aKey, ValueInput&& aValue) { | ||||
272 | return mImpl.putNew(aKey, std::forward<KeyInput>(aKey), | ||||
273 | std::forward<ValueInput>(aValue)); | ||||
274 | } | ||||
275 | |||||
276 | template <typename KeyInput, typename ValueInput> | ||||
277 | [[nodiscard]] bool putNew(const Lookup& aLookup, KeyInput&& aKey, | ||||
278 | ValueInput&& aValue) { | ||||
279 | return mImpl.putNew(aLookup, std::forward<KeyInput>(aKey), | ||||
280 | std::forward<ValueInput>(aValue)); | ||||
281 | } | ||||
282 | |||||
283 | // Like putNew(), but should be only used when the table is known to be big | ||||
284 | // enough for the insertion, and hashing cannot fail. Typically this is used | ||||
285 | // to populate an empty map with known-unique keys after reserving space with | ||||
286 | // reserve(), e.g. | ||||
287 | // | ||||
288 | // using HM = HashMap<int,char>; | ||||
289 | // HM h; | ||||
290 | // if (!h.reserve(3)) { | ||||
291 | // MOZ_CRASH("OOM"); | ||||
292 | // } | ||||
293 | // h.putNewInfallible(1, 'a'); // unique key | ||||
294 | // h.putNewInfallible(2, 'b'); // unique key | ||||
295 | // h.putNewInfallible(3, 'c'); // unique key | ||||
296 | // | ||||
297 | template <typename KeyInput, typename ValueInput> | ||||
298 | void putNewInfallible(KeyInput&& aKey, ValueInput&& aValue) { | ||||
299 | mImpl.putNewInfallible(aKey, std::forward<KeyInput>(aKey), | ||||
300 | std::forward<ValueInput>(aValue)); | ||||
301 | } | ||||
302 | |||||
303 | // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient | ||||
304 | // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using | ||||
305 | // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new key/value. E.g.: | ||||
306 | // | ||||
307 | // using HM = HashMap<int,char>; | ||||
308 | // HM h; | ||||
309 | // HM::AddPtr p = h.lookupForAdd(3); | ||||
310 | // if (!p) { | ||||
311 | // if (!h.add(p, 3, 'a')) { | ||||
312 | // return false; | ||||
313 | // } | ||||
314 | // } | ||||
315 | // assert(p->key() == 3); | ||||
316 | // char val = p->value(); | ||||
317 | // | ||||
318 | // N.B. The caller must ensure that no mutating hash table operations occur | ||||
319 | // between a pair of lookupForAdd() and add() calls. To avoid looking up the | ||||
320 | // key a second time, the caller may use the more efficient relookupOrAdd() | ||||
321 | // method. This method reuses part of the hashing computation to more | ||||
322 | // efficiently insert the key if it has not been added. For example, a | ||||
323 | // mutation-handling version of the previous example: | ||||
324 | // | ||||
325 | // HM::AddPtr p = h.lookupForAdd(3); | ||||
326 | // if (!p) { | ||||
327 | // call_that_may_mutate_h(); | ||||
328 | // if (!h.relookupOrAdd(p, 3, 'a')) { | ||||
329 | // return false; | ||||
330 | // } | ||||
331 | // } | ||||
332 | // assert(p->key() == 3); | ||||
333 | // char val = p->value(); | ||||
334 | // | ||||
335 | using AddPtr = typename Impl::AddPtr; | ||||
336 | MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) { | ||||
337 | return mImpl.lookupForAdd(aLookup); | ||||
338 | } | ||||
339 | |||||
340 | // Add a key/value. Returns false on OOM. | ||||
341 | template <typename KeyInput, typename ValueInput> | ||||
342 | [[nodiscard]] bool add(AddPtr& aPtr, KeyInput&& aKey, ValueInput&& aValue) { | ||||
343 | return mImpl.add(aPtr, std::forward<KeyInput>(aKey), | ||||
344 | std::forward<ValueInput>(aValue)); | ||||
345 | } | ||||
346 | |||||
347 | // See the comment above lookupForAdd() for details. | ||||
348 | template <typename KeyInput, typename ValueInput> | ||||
349 | [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, KeyInput&& aKey, | ||||
350 | ValueInput&& aValue) { | ||||
351 | return mImpl.relookupOrAdd(aPtr, aKey, std::forward<KeyInput>(aKey), | ||||
352 | std::forward<ValueInput>(aValue)); | ||||
353 | } | ||||
354 | |||||
355 | // -- Removal -------------------------------------------------------------- | ||||
356 | |||||
357 | // Lookup and remove the key/value matching |aLookup|, if present. | ||||
358 | void remove(const Lookup& aLookup) { | ||||
359 | if (Ptr p = lookup(aLookup)) { | ||||
360 | remove(p); | ||||
361 | } | ||||
362 | } | ||||
363 | |||||
364 | // Remove a previously found key/value (assuming aPtr.found()). The map must | ||||
365 | // not have been mutated in the interim. | ||||
366 | void remove(Ptr aPtr) { mImpl.remove(aPtr); } | ||||
367 | |||||
368 | // Remove all keys/values without changing the capacity. | ||||
369 | void clear() { mImpl.clear(); } | ||||
370 | |||||
371 | // Like clear() followed by compact(). | ||||
372 | void clearAndCompact() { mImpl.clearAndCompact(); } | ||||
373 | |||||
374 | // -- Rekeying ------------------------------------------------------------- | ||||
375 | |||||
376 | // Infallibly rekey one entry, if necessary. Requires that template | ||||
377 | // parameters Key and HashPolicy::Lookup are the same type. | ||||
378 | void rekeyIfMoved(const Key& aOldKey, const Key& aNewKey) { | ||||
379 | if (aOldKey != aNewKey) { | ||||
380 | rekeyAs(aOldKey, aNewKey, aNewKey); | ||||
381 | } | ||||
382 | } | ||||
383 | |||||
384 | // Infallibly rekey one entry if present, and return whether that happened. | ||||
385 | bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup, | ||||
386 | const Key& aNewKey) { | ||||
387 | if (Ptr p = lookup(aOldLookup)) { | ||||
388 | mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewKey); | ||||
389 | return true; | ||||
390 | } | ||||
391 | return false; | ||||
392 | } | ||||
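// A rekeying sketch (hypothetical: a pointer key that a moving GC may have
// relocated from |oldObj| to |newObj|):
//
//   HashMap<JSObject*, uint32_t> h;
//   // ... after being told that |oldObj| moved to |newObj|:
//   h.rekeyIfMoved(oldObj, newObj);  // no-op when the key did not change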
393 | |||||
394 | // -- Iteration ------------------------------------------------------------ | ||||
395 | |||||
396 | // |iter()| returns an Iterator: | ||||
397 | // | ||||
398 | // HashMap<int, char> h; | ||||
399 | // for (auto iter = h.iter(); !iter.done(); iter.next()) { | ||||
400 | // char c = iter.get().value(); | ||||
401 | // } | ||||
402 | // | ||||
403 | using Iterator = typename Impl::Iterator; | ||||
404 | Iterator iter() const { return mImpl.iter(); } | ||||
405 | |||||
406 | // |modIter()| returns a ModIterator: | ||||
407 | // | ||||
408 | // HashMap<int, char> h; | ||||
409 | // for (auto iter = h.modIter(); !iter.done(); iter.next()) { | ||||
410 | // if (iter.get().value() == 'l') { | ||||
411 | // iter.remove(); | ||||
412 | // } | ||||
413 | // } | ||||
414 | // | ||||
415 | // Table resize may occur in ModIterator's destructor. | ||||
416 | using ModIterator = typename Impl::ModIterator; | ||||
417 | ModIterator modIter() { return mImpl.modIter(); } | ||||
418 | |||||
419 | // These are similar to Iterator/ModIterator/iter(), but use different | ||||
420 | // terminology. | ||||
421 | using Range = typename Impl::Range; | ||||
422 | using Enum = typename Impl::Enum; | ||||
423 | Range all() const { return mImpl.all(); } | ||||
424 | }; | ||||
425 | |||||
426 | //--------------------------------------------------------------------------- | ||||
427 | // HashSet | ||||
428 | //--------------------------------------------------------------------------- | ||||
429 | |||||
430 | // HashSet is a fast hash-based set of values. | ||||
431 | // | ||||
432 | // Template parameter requirements: | ||||
433 | // - T: movable, destructible, assignable. | ||||
434 | // - HashPolicy: see the "Hash Policy" section below. | ||||
435 | // - AllocPolicy: see AllocPolicy.h | ||||
436 | // | ||||
437 | // Note: | ||||
438 | // - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by | ||||
439 | // HashSet must not call back into the same HashSet object. | ||||
440 | // | ||||
441 | template <class T, class HashPolicy = DefaultHasher<T>, | ||||
442 | class AllocPolicy = MallocAllocPolicy> | ||||
443 | class HashSet { | ||||
444 | // -- Implementation details ----------------------------------------------- | ||||
445 | |||||
446 | // HashSet is not copyable or assignable. | ||||
447 | HashSet(const HashSet& hs) = delete; | ||||
448 | HashSet& operator=(const HashSet& hs) = delete; | ||||
449 | |||||
450 | struct SetHashPolicy : HashPolicy { | ||||
451 | using Base = HashPolicy; | ||||
452 | using KeyType = T; | ||||
453 | |||||
454 | static const KeyType& getKey(const T& aT) { return aT; } | ||||
455 | |||||
456 | static void setKey(T& aT, KeyType& aKey) { HashPolicy::rekey(aT, aKey); } | ||||
457 | }; | ||||
458 | |||||
459 | using Impl = detail::HashTable<const T, SetHashPolicy, AllocPolicy>; | ||||
460 | Impl mImpl; | ||||
461 | |||||
462 | friend class Impl::Enum; | ||||
463 | |||||
464 | public: | ||||
465 | using Lookup = typename HashPolicy::Lookup; | ||||
466 | using Entry = T; | ||||
467 | |||||
468 | // -- Initialization ------------------------------------------------------- | ||||
469 | |||||
470 | explicit HashSet(AllocPolicy aAllocPolicy = AllocPolicy(), | ||||
471 | uint32_t aLen = Impl::sDefaultLen) | ||||
472 | : mImpl(std::move(aAllocPolicy), aLen) {} | ||||
473 | |||||
474 | explicit HashSet(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {} | ||||
475 | |||||
476 | // HashSet is movable. | ||||
477 | HashSet(HashSet&& aRhs) = default; | ||||
478 | HashSet& operator=(HashSet&& aRhs) = default; | ||||
479 | |||||
480 | // -- Status and sizing ---------------------------------------------------- | ||||
481 | |||||
482 | // The set's current generation. | ||||
483 | Generation generation() const { return mImpl.generation(); } | ||||
484 | |||||
485 | // Is the set empty? | ||||
486 | bool empty() const { return mImpl.empty(); } | ||||
487 | |||||
488 | // Number of elements in the set. | ||||
489 | uint32_t count() const { return mImpl.count(); } | ||||
490 | |||||
491 | // Number of element slots in the set. Note: resize will happen well before | ||||
492 | // count() == capacity(). | ||||
493 | uint32_t capacity() const { return mImpl.capacity(); } | ||||
494 | |||||
495 | // The size of the set's entry storage, in bytes. If the elements contain | ||||
496 | // pointers to other heap blocks, you must iterate over the set and measure | ||||
497 | // them separately; hence the "shallow" prefix. | ||||
498 | size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { | ||||
499 | return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf); | ||||
500 | } | ||||
501 | size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { | ||||
502 | return aMallocSizeOf(this) + | ||||
503 | mImpl.shallowSizeOfExcludingThis(aMallocSizeOf); | ||||
504 | } | ||||
505 | |||||
506 | // Attempt to minimize the capacity(). If the table is empty, this will free | ||||
507 | // the empty storage and upon regrowth it will be given the minimum capacity. | ||||
508 | void compact() { mImpl.compact(); } | ||||
509 | |||||
510 | // Attempt to reserve enough space to fit at least |aLen| elements. This is | ||||
511 | // total capacity, including elements already present. Does nothing if the | ||||
512 | // set already has sufficient capacity. | ||||
513 | [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); } | ||||
514 | |||||
515 | // -- Lookups -------------------------------------------------------------- | ||||
516 | |||||
517 | // Does the set contain an element matching |aLookup|? | ||||
518 | bool has(const Lookup& aLookup) const { | ||||
519 | return mImpl.lookup(aLookup).found(); | ||||
520 | } | ||||
521 | |||||
522 | // Return a Ptr indicating whether an element matching |aLookup| is present | ||||
523 | // in the set. E.g.: | ||||
524 | // | ||||
525 | // using HS = HashSet<int>; | ||||
526 | // HS h; | ||||
527 | // if (HS::Ptr p = h.lookup(3)) { | ||||
528 | // assert(*p == 3); // p acts like a pointer to int | ||||
529 | // } | ||||
530 | // | ||||
531 | using Ptr = typename Impl::Ptr; | ||||
532 | MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const { | ||||
533 | return mImpl.lookup(aLookup); | ||||
534 | } | ||||
535 | |||||
536 | // Like lookup(), but does not assert if two threads call it at the same | ||||
537 | // time. Only use this method when none of the threads will modify the set. | ||||
538 | MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const { | ||||
539 | return mImpl.readonlyThreadsafeLookup(aLookup); | ||||
540 | } | ||||
541 | |||||
542 | // -- Insertions ----------------------------------------------------------- | ||||
543 | |||||
544 | // Add |aU| if it is not present already. Returns false on OOM. | ||||
545 | template <typename U> | ||||
546 | [[nodiscard]] bool put(U&& aU) { | ||||
547 | AddPtr p = lookupForAdd(aU); | ||||
548 | return p ? true : add(p, std::forward<U>(aU)); | ||||
549 | } | ||||
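// A put() usage sketch (hypothetical set |seen|):
//
//   HashSet<int> seen;
//   if (!seen.put(42)) {
//     return false;  // OOM
//   }
//   MOZ_ASSERT(seen.has(42));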
550 | |||||
551 | // Like put(), but slightly faster. Must only be used when the given element | ||||
552 | // is not already present. (In debug builds, assertions check this.) | ||||
553 | template <typename U> | ||||
554 | [[nodiscard]] bool putNew(U&& aU) { | ||||
555 | return mImpl.putNew(aU, std::forward<U>(aU)); | ||||
556 | } | ||||
557 | |||||
558 | // Like the other putNew(), but for when |Lookup| is different from |T|. | ||||
559 | template <typename U> | ||||
560 | [[nodiscard]] bool putNew(const Lookup& aLookup, U&& aU) { | ||||
561 | return mImpl.putNew(aLookup, std::forward<U>(aU)); | ||||
562 | } | ||||
563 | |||||
564 | // Like putNew(), but should be only used when the table is known to be big | ||||
565 | // enough for the insertion, and hashing cannot fail. Typically this is used | ||||
566 | // to populate an empty set with known-unique elements after reserving space | ||||
567 | // with reserve(), e.g. | ||||
568 | // | ||||
569 | //   using HS = HashSet<int>; | ||||
570 | // HS h; | ||||
571 | // if (!h.reserve(3)) { | ||||
572 | // MOZ_CRASH("OOM"); | ||||
573 | // } | ||||
574 | // h.putNewInfallible(1); // unique element | ||||
575 | // h.putNewInfallible(2); // unique element | ||||
576 | // h.putNewInfallible(3); // unique element | ||||
577 | // | ||||
578 | template <typename U> | ||||
579 | void putNewInfallible(const Lookup& aLookup, U&& aU) { | ||||
580 | mImpl.putNewInfallible(aLookup, std::forward<U>(aU)); | ||||
581 | } | ||||
582 | |||||
583 | // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient | ||||
584 | // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using | ||||
585 | // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.: | ||||
586 | // | ||||
587 | // using HS = HashSet<int>; | ||||
588 | // HS h; | ||||
589 | // HS::AddPtr p = h.lookupForAdd(3); | ||||
590 | // if (!p) { | ||||
591 | // if (!h.add(p, 3)) { | ||||
592 | // return false; | ||||
593 | // } | ||||
594 | // } | ||||
595 | // assert(*p == 3); // p acts like a pointer to int | ||||
596 | // | ||||
597 | // N.B. The caller must ensure that no mutating hash table operations occur | ||||
598 | // between a pair of lookupForAdd() and add() calls. To avoid looking up the | ||||
599 | // key a second time, the caller may use the more efficient relookupOrAdd() | ||||
600 | // method. This method reuses part of the hashing computation to more | ||||
601 | // efficiently insert the key if it has not been added. For example, a | ||||
602 | // mutation-handling version of the previous example: | ||||
603 | // | ||||
604 | // HS::AddPtr p = h.lookupForAdd(3); | ||||
605 | // if (!p) { | ||||
606 | // call_that_may_mutate_h(); | ||||
607 | // if (!h.relookupOrAdd(p, 3, 3)) { | ||||
608 | // return false; | ||||
609 | // } | ||||
610 | // } | ||||
611 | // assert(*p == 3); | ||||
612 | // | ||||
613 | // Note that relookupOrAdd(p,l,t) performs a lookup using |l| and adds the | ||||
614 | // entry |t|; the caller must ensure that match(l,t) holds. | ||||
615 | using AddPtr = typename Impl::AddPtr; | ||||
616 | MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) { | ||||
617 | return mImpl.lookupForAdd(aLookup); | ||||
618 | } | ||||
619 | |||||
620 | // Add an element. Returns false on OOM. | ||||
621 | template <typename U> | ||||
622 | [[nodiscard]] bool add(AddPtr& aPtr, U&& aU) { | ||||
623 | return mImpl.add(aPtr, std::forward<U>(aU)); | ||||
624 | } | ||||
625 | |||||
626 | // See the comment above lookupForAdd() for details. | ||||
627 | template <typename U> | ||||
628 | [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup, | ||||
629 | U&& aU) { | ||||
630 | return mImpl.relookupOrAdd(aPtr, aLookup, std::forward<U>(aU)); | ||||
631 | } | ||||
632 | |||||
633 | // -- Removal -------------------------------------------------------------- | ||||
634 | |||||
635 | // Lookup and remove the element matching |aLookup|, if present. | ||||
636 | void remove(const Lookup& aLookup) { | ||||
637 | if (Ptr p = lookup(aLookup)) { | ||||
638 | remove(p); | ||||
639 | } | ||||
640 | } | ||||
641 | |||||
642 | // Remove a previously found element (assuming aPtr.found()). The set must | ||||
643 | // not have been mutated in the interim. | ||||
644 | void remove(Ptr aPtr) { mImpl.remove(aPtr); } | ||||
645 | |||||
646 | // Remove all keys/values without changing the capacity. | ||||
647 | void clear() { mImpl.clear(); } | ||||
648 | |||||
649 | // Like clear() followed by compact(). | ||||
650 | void clearAndCompact() { mImpl.clearAndCompact(); } | ||||
651 | |||||
652 | // -- Rekeying ------------------------------------------------------------- | ||||
653 | |||||
654 | // Infallibly rekey one entry, if present. Requires that template parameters | ||||
655 | // T and HashPolicy::Lookup are the same type. | ||||
656 | void rekeyIfMoved(const Lookup& aOldValue, const T& aNewValue) { | ||||
657 | if (aOldValue != aNewValue) { | ||||
658 | rekeyAs(aOldValue, aNewValue, aNewValue); | ||||
659 | } | ||||
660 | } | ||||
661 | |||||
662 | // Infallibly rekey one entry if present, and return whether that happened. | ||||
663 | bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup, | ||||
664 | const T& aNewValue) { | ||||
665 | if (Ptr p = lookup(aOldLookup)) { | ||||
666 | mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewValue); | ||||
667 | return true; | ||||
668 | } | ||||
669 | return false; | ||||
670 | } | ||||
671 | |||||
672 | // Infallibly replace the current key at |aPtr| with an equivalent key. | ||||
673 | // Specifically, both HashPolicy::hash and HashPolicy::match must return | ||||
674 | // identical results for the new and old key when applied against all | ||||
675 | // possible matching values. | ||||
676 | void replaceKey(Ptr aPtr, const Lookup& aLookup, const T& aNewValue) { | ||||
677 | MOZ_ASSERT(aPtr.found()); | ||||
678 | MOZ_ASSERT(*aPtr != aNewValue); | ||||
679 | MOZ_ASSERT(HashPolicy::match(*aPtr, aLookup)); | ||||
680 | MOZ_ASSERT(HashPolicy::match(aNewValue, aLookup)); | ||||
681 | const_cast<T&>(*aPtr) = aNewValue; | ||||
682 | MOZ_ASSERT(*lookup(aLookup) == aNewValue); | ||||
683 | } | ||||
684 | void replaceKey(Ptr aPtr, const T& aNewValue) { | ||||
685 | replaceKey(aPtr, aNewValue, aNewValue); | ||||
686 | } | ||||
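// A replaceKey() sketch (hypothetical: |strings|, |lookupValue| and
// |internedCopy| stand for a set keyed on string pointers, where the interned
// copy hashes and matches identically to the current key):
//
//   if (auto p = strings.lookup(lookupValue)) {
//     strings.replaceKey(p, lookupValue, internedCopy);
//   }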
687 | |||||
688 | // -- Iteration ------------------------------------------------------------ | ||||
689 | |||||
690 | // |iter()| returns an Iterator: | ||||
691 | // | ||||
692 | // HashSet<int> h; | ||||
693 | // for (auto iter = h.iter(); !iter.done(); iter.next()) { | ||||
694 | // int i = iter.get(); | ||||
695 | // } | ||||
696 | // | ||||
697 | using Iterator = typename Impl::Iterator; | ||||
698 | Iterator iter() const { return mImpl.iter(); } | ||||
699 | |||||
700 | // |modIter()| returns a ModIterator: | ||||
701 | // | ||||
702 | // HashSet<int> h; | ||||
703 | // for (auto iter = h.modIter(); !iter.done(); iter.next()) { | ||||
704 | // if (iter.get() == 42) { | ||||
705 | // iter.remove(); | ||||
706 | // } | ||||
707 | // } | ||||
708 | // | ||||
709 | // Table resize may occur in ModIterator's destructor. | ||||
710 | using ModIterator = typename Impl::ModIterator; | ||||
711 | ModIterator modIter() { return mImpl.modIter(); } | ||||
712 | |||||
713 | // These are similar to Iterator/ModIterator/iter(), but use different | ||||
714 | // terminology. | ||||
715 | using Range = typename Impl::Range; | ||||
716 | using Enum = typename Impl::Enum; | ||||
717 | Range all() const { return mImpl.all(); } | ||||
718 | }; | ||||
719 | |||||
720 | //--------------------------------------------------------------------------- | ||||
721 | // Hash Policy | ||||
722 | //--------------------------------------------------------------------------- | ||||
723 | |||||
724 | // A hash policy |HP| for a hash table with key-type |Key| must provide: | ||||
725 | // | ||||
726 | // - a type |HP::Lookup| to use to lookup table entries; | ||||
727 | // | ||||
728 | // - a static member function |HP::hash| that hashes lookup values: | ||||
729 | // | ||||
730 | // static mozilla::HashNumber hash(const Lookup&); | ||||
731 | // | ||||
732 | // - a static member function |HP::match| that tests equality of key and | ||||
733 | // lookup values: | ||||
734 | // | ||||
735 | // static bool match(const Key& aKey, const Lookup& aLookup); | ||||
736 | // | ||||
737 | //   |aKey| and |aLookup| can have different hash numbers; this happens only | ||||
738 | //   when |prepareHash| produces a collision, which is infrequent. | ||||
739 | //   |HP::match| therefore must not assume the two hash numbers are equal, | ||||
740 | //   even though they almost always are. | ||||
741 | // | ||||
742 | // Normally, Lookup = Key. In general, though, different values and types of | ||||
743 | // values can be used to lookup and store. If a Lookup value |l| is not equal | ||||
744 | // to the added Key value |k|, the user must ensure that |HP::match(k,l)| is | ||||
745 | // true. E.g.: | ||||
746 | // | ||||
747 | //   mozilla::HashSet<Key, HP>::AddPtr p = h.lookupForAdd(l); | ||||
748 | // if (!p) { | ||||
749 | // assert(HP::match(k, l)); // must hold | ||||
750 | // h.add(p, k); | ||||
751 | // } | ||||
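//
// A sketch of a custom policy for a hypothetical |Point| key, built on
// HashGeneric() from mozilla/HashFunctions.h:
//
//   struct PointHasher {
//     using Lookup = Point;
//     static mozilla::HashNumber hash(const Lookup& aLookup) {
//       return mozilla::HashGeneric(aLookup.x, aLookup.y);
//     }
//     static bool match(const Point& aKey, const Lookup& aLookup) {
//       return aKey.x == aLookup.x && aKey.y == aLookup.y;
//     }
//   };
//
//   mozilla::HashSet<Point, PointHasher> points;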
752 | |||||
753 | // A pointer hashing policy that uses HashGeneric() to create good hashes for | ||||
754 | // pointers. Note that we don't shift out the lowest k bits because we don't | ||||
755 | // want to assume anything about the alignment of the pointers. | ||||
756 | template <typename Key> | ||||
757 | struct PointerHasher { | ||||
758 | using Lookup = Key; | ||||
759 | |||||
760 | static HashNumber hash(const Lookup& aLookup) { return HashGeneric(aLookup); } | ||||
761 | |||||
762 | static bool match(const Key& aKey, const Lookup& aLookup) { | ||||
763 | return aKey == aLookup; | ||||
764 | } | ||||
765 | |||||
766 | static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; } | ||||
767 | }; | ||||
768 | |||||
769 | // The default hash policy, which only works with integers. | ||||
770 | template <class Key, typename> | ||||
771 | struct DefaultHasher { | ||||
772 | using Lookup = Key; | ||||
773 | |||||
774 | static HashNumber hash(const Lookup& aLookup) { | ||||
775 | // Just convert the integer to a HashNumber and use that as is. (This | ||||
776 | // discards the high 32-bits of 64-bit integers!) ScrambleHashCode() is | ||||
777 | // subsequently called on the value to improve the distribution. | ||||
778 | return aLookup; | ||||
779 | } | ||||
780 | |||||
781 | static bool match(const Key& aKey, const Lookup& aLookup) { | ||||
782 | // Use builtin or overloaded operator==. | ||||
783 | return aKey == aLookup; | ||||
784 | } | ||||
785 | |||||
786 | static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; } | ||||
787 | }; | ||||
788 | |||||
789 | // A DefaultHasher specialization for enums. | ||||
790 | template <class T> | ||||
791 | struct DefaultHasher<T, std::enable_if_t<std::is_enum_v<T>>> { | ||||
792 | using Key = T; | ||||
793 | using Lookup = Key; | ||||
794 | |||||
795 | static HashNumber hash(const Lookup& aLookup) { return HashGeneric(aLookup); } | ||||
796 | |||||
797 | static bool match(const Key& aKey, const Lookup& aLookup) { | ||||
798 | // Use builtin or overloaded operator==. | ||||
799 | return aKey == static_cast<Key>(aLookup); | ||||
800 | } | ||||
801 | |||||
802 | static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; } | ||||
803 | }; | ||||
804 | |||||
805 | // A DefaultHasher specialization for pointers. | ||||
806 | template <class T> | ||||
807 | struct DefaultHasher<T*> : PointerHasher<T*> {}; | ||||
808 | |||||
809 | // A DefaultHasher specialization for mozilla::UniquePtr. | ||||
810 | template <class T, class D> | ||||
811 | struct DefaultHasher<UniquePtr<T, D>> { | ||||
812 | using Key = UniquePtr<T, D>; | ||||
813 | using Lookup = Key; | ||||
814 | using PtrHasher = PointerHasher<T*>; | ||||
815 | |||||
816 | static HashNumber hash(const Lookup& aLookup) { | ||||
817 | return PtrHasher::hash(aLookup.get()); | ||||
818 | } | ||||
819 | |||||
820 | static bool match(const Key& aKey, const Lookup& aLookup) { | ||||
821 | return PtrHasher::match(aKey.get(), aLookup.get()); | ||||
822 | } | ||||
823 | |||||
824 | static void rekey(UniquePtr<T, D>& aKey, UniquePtr<T, D>&& aNewKey) { | ||||
825 | aKey = std::move(aNewKey); | ||||
826 | } | ||||
827 | }; | ||||
828 | |||||
829 | // A DefaultHasher specialization for doubles. | ||||
830 | template <> | ||||
831 | struct DefaultHasher<double> { | ||||
832 | using Key = double; | ||||
833 | using Lookup = Key; | ||||
834 | |||||
835 | static HashNumber hash(const Lookup& aLookup) { | ||||
836 | // Just xor the high bits with the low bits, and then treat the bits of the | ||||
837 | // result as a uint32_t. | ||||
838 | static_assert(sizeof(HashNumber) == 4, | ||||
839 | "subsequent code assumes a four-byte hash"); | ||||
840 | uint64_t u = BitwiseCast<uint64_t>(aLookup); | ||||
841 | return HashNumber(u ^ (u >> 32)); | ||||
842 | } | ||||
843 | |||||
844 | static bool match(const Key& aKey, const Lookup& aLookup) { | ||||
845 | return BitwiseCast<uint64_t>(aKey) == BitwiseCast<uint64_t>(aLookup); | ||||
846 | } | ||||
847 | }; | ||||
848 | |||||
849 | // A DefaultHasher specialization for floats. | ||||
850 | template <> | ||||
851 | struct DefaultHasher<float> { | ||||
852 | using Key = float; | ||||
853 | using Lookup = Key; | ||||
854 | |||||
855 | static HashNumber hash(const Lookup& aLookup) { | ||||
856 | // Just use the value as if its bits form an integer. ScrambleHashCode() is | ||||
857 | // subsequently called on the value to improve the distribution. | ||||
858 | static_assert(sizeof(HashNumber) == 4, | ||||
859 | "subsequent code assumes a four-byte hash"); | ||||
860 | return HashNumber(BitwiseCast<uint32_t>(aLookup)); | ||||
861 | } | ||||
862 | |||||
863 | static bool match(const Key& aKey, const Lookup& aLookup) { | ||||
864 | return BitwiseCast<uint32_t>(aKey) == BitwiseCast<uint32_t>(aLookup); | ||||
865 | } | ||||
866 | }; | ||||
867 | |||||
868 | // A hash policy for C strings. | ||||
869 | struct CStringHasher { | ||||
870 | using Key = const char*; | ||||
871 | using Lookup = const char*; | ||||
872 | |||||
873 | static HashNumber hash(const Lookup& aLookup) { return HashString(aLookup); } | ||||
874 | |||||
875 | static bool match(const Key& aKey, const Lookup& aLookup) { | ||||
876 | return strcmp(aKey, aLookup) == 0; | ||||
877 | } | ||||
878 | }; | ||||
879 | |||||
880 | //--------------------------------------------------------------------------- | ||||
881 | // Fallible Hashing Interface | ||||
882 | //--------------------------------------------------------------------------- | ||||
883 | |||||
884 | // Most of the time generating a hash code is infallible, but sometimes it is | ||||
885 | // necessary to generate hash codes on demand in a way that can fail. Specialize | ||||
886 | // this class for your own hash policy to provide fallible hashing. | ||||
887 | // | ||||
888 | // This is used by MovableCellHasher to handle the fact that generating a unique | ||||
889 | // ID for a cell pointer may fail due to OOM. | ||||
890 | // | ||||
891 | // The default implementations of these methods delegate to the usual HashPolicy | ||||
892 | // implementation and always succeed. | ||||
893 | template <typename HashPolicy> | ||||
894 | struct FallibleHashMethods { | ||||
895 | // Returns true if a hash code is already available for its argument, and | ||||
896 | // sets |aHashOut|. Once this succeeds for a specific argument, it must | ||||
897 | // continue to do so. | ||||
898 | // | ||||
899 | // Returns false if a hash code is not already available. In that case any | ||||
900 | // lookup with this argument must miss, because a hash code would have been | ||||
901 | // created successfully when the entry was inserted. | ||||
902 | template <typename Lookup> | ||||
903 | static bool maybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) { | ||||
904 | *aHashOut = HashPolicy::hash(aLookup); | ||||
905 | return true; | ||||
906 | } | ||||
907 | |||||
908 | // Fallible method that ensures a hash code exists for its argument, creating | ||||
909 | // one if necessary. Sets |aHashOut| to the hash code and returns true on | ||||
910 | // success. Returns false on error, e.g. out of memory. | ||||
911 | template <typename Lookup> | ||||
912 | static bool ensureHash(Lookup&& aLookup, HashNumber* aHashOut) { | ||||
913 | *aHashOut = HashPolicy::hash(aLookup); | ||||
914 | return true; | ||||
915 | } | ||||
916 | }; | ||||
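// A sketch of a fallible specialization for a hypothetical |MyPolicy|;
// lookupExistingId() and getOrCreateId() are stand-ins for the policy's own
// ID bookkeeping (MovableCellHasher is the real-world user of this hook):
//
//   template <>
//   struct FallibleHashMethods<MyPolicy> {
//     template <typename Lookup>
//     static bool maybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) {
//       return lookupExistingId(aLookup, aHashOut);  // never allocates
//     }
//     template <typename Lookup>
//     static bool ensureHash(Lookup&& aLookup, HashNumber* aHashOut) {
//       return getOrCreateId(aLookup, aHashOut);  // may fail on OOM
//     }
//   };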
917 | |||||
918 | template <typename HashPolicy, typename Lookup> | ||||
919 | static bool MaybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) { | ||||
920 | return FallibleHashMethods<typename HashPolicy::Base>::maybeGetHash( | ||||
921 | std::forward<Lookup>(aLookup), aHashOut); | ||||
922 | } | ||||
923 | |||||
924 | template <typename HashPolicy, typename Lookup> | ||||
925 | static bool EnsureHash(Lookup&& aLookup, HashNumber* aHashOut) { | ||||
926 | return FallibleHashMethods<typename HashPolicy::Base>::ensureHash( | ||||
927 | std::forward<Lookup>(aLookup), aHashOut); | ||||
928 | } | ||||
929 | |||||
930 | //--------------------------------------------------------------------------- | ||||
931 | // Implementation Details (HashMapEntry, HashTableEntry, HashTable) | ||||
932 | //--------------------------------------------------------------------------- | ||||
933 | |||||
934 | // Both HashMap and HashSet are implemented by a single HashTable that is even | ||||
935 | // more heavily parameterized than the other two. This leaves HashTable gnarly | ||||
936 | // and extremely coupled to HashMap and HashSet; thus code should not use | ||||
937 | // HashTable directly. | ||||
938 | |||||
939 | template <class Key, class Value> | ||||
940 | class HashMapEntry { | ||||
941 | Key key_; | ||||
942 | Value value_; | ||||
943 | |||||
944 | template <class, class, class> | ||||
945 | friend class detail::HashTable; | ||||
946 | template <class> | ||||
947 | friend class detail::HashTableEntry; | ||||
948 | template <class, class, class, class> | ||||
949 | friend class HashMap; | ||||
950 | |||||
951 | public: | ||||
952 | template <typename KeyInput, typename ValueInput> | ||||
953 | HashMapEntry(KeyInput&& aKey, ValueInput&& aValue) | ||||
954 | : key_(std::forward<KeyInput>(aKey)), | ||||
955 | value_(std::forward<ValueInput>(aValue)) {} | ||||
956 | |||||
957 | HashMapEntry(HashMapEntry&& aRhs) = default; | ||||
958 | HashMapEntry& operator=(HashMapEntry&& aRhs) = default; | ||||
959 | |||||
960 | using KeyType = Key; | ||||
961 | using ValueType = Value; | ||||
962 | |||||
963 | const Key& key() const { return key_; } | ||||
964 | |||||
965 | // Use this method with caution! If the key is changed such that its hash | ||||
966 | // value also changes, the map will be left in an invalid state. | ||||
967 | Key& mutableKey() { return key_; } | ||||
968 | |||||
969 | const Value& value() const { return value_; } | ||||
970 | Value& value() { return value_; } | ||||
971 | |||||
972 | private: | ||||
973 | HashMapEntry(const HashMapEntry&) = delete; | ||||
974 | void operator=(const HashMapEntry&) = delete; | ||||
975 | }; | ||||
976 | |||||
977 | namespace detail { | ||||
978 | |||||
979 | template <class T, class HashPolicy, class AllocPolicy> | ||||
980 | class HashTable; | ||||
981 | |||||
982 | template <typename T> | ||||
983 | class EntrySlot; | ||||
984 | |||||
985 | template <typename T> | ||||
986 | class HashTableEntry { | ||||
987 | private: | ||||
988 | using NonConstT = std::remove_const_t<T>; | ||||
989 | |||||
990 | // Instead of having a hash table entry store that looks like this: | ||||
991 | // | ||||
992 | // +--------+--------+--------+--------+ | ||||
993 | // | entry0 | entry1 | .... | entryN | | ||||
994 | // +--------+--------+--------+--------+ | ||||
995 | // | ||||
996 | // where the entries contained their cached hash code, we're going to lay out | ||||
997 | // the entry store thusly: | ||||
998 | // | ||||
999 | // +-------+-------+-------+-------+--------+--------+--------+--------+ | ||||
1000 | // | hash0 | hash1 | ... | hashN | entry0 | entry1 | .... | entryN | | ||||
1001 | // +-------+-------+-------+-------+--------+--------+--------+--------+ | ||||
1002 | // | ||||
1003 | // with all the cached hashes prior to the actual entries themselves. | ||||
1004 | // | ||||
1005 | // We do this because implementing the first strategy requires us to make | ||||
1006 | // HashTableEntry look roughly like: | ||||
1007 | // | ||||
1008 | // template <typename T> | ||||
1009 | // class HashTableEntry { | ||||
1010 | // HashNumber mKeyHash; | ||||
1011 | // T mValue; | ||||
1012 | // }; | ||||
1013 | // | ||||
1014 | // The problem with this setup is that, depending on the layout of `T`, there | ||||
1015 | // may be platform ABI-mandated padding between `mKeyHash` and the first | ||||
1016 | // member of `T`. This ABI-mandated padding is wasted space, and can be | ||||
1017 | // surprisingly common, e.g. when `T` is a single pointer on 64-bit platforms. | ||||
1018 | // In such cases, we're throwing away a quarter of our entry store on padding, | ||||
1019 | // which is undesirable. | ||||
1020 | // | ||||
1021 | // The second layout above, namely: | ||||
1022 | // | ||||
1023 | // +-------+-------+-------+-------+--------+--------+--------+--------+ | ||||
1024 | // | hash0 | hash1 | ... | hashN | entry0 | entry1 | .... | entryN | | ||||
1025 | // +-------+-------+-------+-------+--------+--------+--------+--------+ | ||||
1026 | // | ||||
1027 | // means there is no wasted space between the hashes themselves, and no wasted | ||||
1028 | // space between the entries themselves. However, we would also like there to | ||||
1029 | // be no gap between the last hash and the first entry. The memory allocator | ||||
1030 | // guarantees the alignment of the start of the hashes. The use of a | ||||
1031 | // power-of-two capacity of at least 4 guarantees that the alignment of the | ||||
1032 | // *end* of the hash array is no less than the alignment of the start. | ||||
1033 | // Finally, the static_asserts here guarantee that the entries themselves | ||||
1034 | // don't need to be any more aligned than the alignment of the entry store | ||||
1035 | // itself. | ||||
1036 | // | ||||
1037 | // This assertion is safe for 32-bit builds because on both Windows and Linux | ||||
1038 | // (including Android), the minimum alignment for allocations larger than 8 | ||||
1039 | // bytes is 8 bytes, and the actual data for entries in our entry store is | ||||
1040 | // guaranteed to have that alignment as well, thanks to the power-of-two | ||||
1041 | // number of cached hash values stored prior to the entry data. | ||||
1042 | |||||
1043 | // The allocation policy must allocate a table with at least this much | ||||
1044 | // alignment. | ||||
1045 | static constexpr size_t kMinimumAlignment = 8; | ||||
1046 | |||||
1047 | static_assert(alignof(HashNumber) <= kMinimumAlignment, | ||||
1048 | "[N*2 hashes, N*2 T values] allocation's alignment must be " | ||||
1049 | "enough to align each hash"); | ||||
1050 | static_assert(alignof(NonConstT) <= 2 * sizeof(HashNumber), | ||||
1051 | "subsequent N*2 T values must not require more than an even " | ||||
1052 | "number of HashNumbers provides"); | ||||
1053 | |||||
1054 | static const HashNumber sFreeKey = 0; | ||||
1055 | static const HashNumber sRemovedKey = 1; | ||||
1056 | static const HashNumber sCollisionBit = 1; | ||||
1057 | |||||
1058 | alignas(NonConstT) unsigned char mValueData[sizeof(NonConstT)]; | ||||
1059 | |||||
1060 | private: | ||||
1061 | template <class, class, class> | ||||
1062 | friend class HashTable; | ||||
1063 | template <typename> | ||||
1064 | friend class EntrySlot; | ||||
1065 | |||||
1066 | // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a | ||||
1067 | // -Werror compile error) to reinterpret_cast<> |mValueData| to |T*|, even | ||||
1068 | // through |void*|. Placing the latter cast in these separate functions | ||||
1069 | // breaks the chain such that affected GCC versions no longer warn/error. | ||||
1070 | void* rawValuePtr() { return mValueData; } | ||||
1071 | |||||
1072 | static bool isLiveHash(HashNumber hash) { return hash > sRemovedKey; } | ||||
1073 | |||||
1074 | HashTableEntry(const HashTableEntry&) = delete; | ||||
1075 | void operator=(const HashTableEntry&) = delete; | ||||
1076 | |||||
1077 | NonConstT* valuePtr() { return reinterpret_cast<NonConstT*>(rawValuePtr()); } | ||||
1078 | |||||
1079 | void destroyStoredT() { | ||||
1080 | NonConstT* ptr = valuePtr(); | ||||
1081 | ptr->~T(); | ||||
1082 | MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr)); | ||||
1083 | } | ||||
1084 | |||||
1085 | public: | ||||
1086 | HashTableEntry() = default; | ||||
1087 | |||||
1088 | ~HashTableEntry() { MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this)); } | ||||
1089 | |||||
1090 | void destroy() { destroyStoredT(); } | ||||
1091 | |||||
1092 | void swap(HashTableEntry* aOther, bool aIsLive) { | ||||
1093 | // This allows types to use Argument-Dependent-Lookup, and thus use a custom | ||||
1094 | // std::swap, which is needed by types like JS::Heap and such. | ||||
1095 | using std::swap; | ||||
1096 | |||||
1097 | if (this == aOther) { | ||||
1098 | return; | ||||
1099 | } | ||||
1100 | if (aIsLive) { | ||||
1101 | swap(*valuePtr(), *aOther->valuePtr()); | ||||
1102 | } else { | ||||
1103 | *aOther->valuePtr() = std::move(*valuePtr()); | ||||
1104 | destroy(); | ||||
1105 | } | ||||
1106 | } | ||||
1107 | |||||
1108 | T& get() { return *valuePtr(); } | ||||
1109 | |||||
1110 | NonConstT& getMutable() { return *valuePtr(); } | ||||
1111 | }; | ||||
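The comments above describe the entry store as a single allocation holding all cached hashes followed by all entries. The following standalone sketch of that offset arithmetic (illustrative only; describeLayout is not part of the header) shows why a power-of-two capacity of at least 4 keeps the entry region at least 8-byte aligned:

#include <cstddef>
#include <cstdint>
#include <cstdio>

using HashNumber = uint32_t;

// Hypothetical helper: prints where the entry region would start inside a
// single [capacity hashes][capacity entries] allocation.
template <typename T>
void describeLayout(uint32_t capacity) {
  size_t hashBytes = sizeof(HashNumber) * capacity;  // cached hashes come first
  size_t entryOffset = hashBytes;                    // entries start right after
  size_t totalBytes = hashBytes + sizeof(T) * capacity;
  std::printf("capacity=%u: entries start at byte %zu (alignof(T)=%zu), total %zu bytes\n",
              capacity, entryOffset, alignof(T), totalBytes);
}

int main() {
  describeLayout<uint64_t>(4);   // entries start at byte 16, a multiple of 8
  describeLayout<uint64_t>(32);  // entries start at byte 128
  return 0;
}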
1112 | |||||
1113 | // A slot represents a cached hash value and its associated entry stored | ||||
1114 | // in the hash table. These two things are not stored in contiguous memory. | ||||
1115 | template <class T> | ||||
1116 | class EntrySlot { | ||||
1117 | using NonConstT = std::remove_const_t<T>; | ||||
1118 | |||||
1119 | using Entry = HashTableEntry<T>; | ||||
1120 | |||||
1121 | Entry* mEntry; | ||||
1122 | HashNumber* mKeyHash; | ||||
1123 | |||||
1124 | template <class, class, class> | ||||
1125 | friend class HashTable; | ||||
1126 | |||||
1127 | EntrySlot(Entry* aEntry, HashNumber* aKeyHash) | ||||
1128 | : mEntry(aEntry), mKeyHash(aKeyHash) {} | ||||
1129 | |||||
1130 | public: | ||||
1131 | static bool isLiveHash(HashNumber hash) { return hash > Entry::sRemovedKey; } | ||||
1132 | |||||
1133 | EntrySlot(const EntrySlot&) = default; | ||||
1134 | EntrySlot(EntrySlot&& aOther) = default; | ||||
1135 | |||||
1136 | EntrySlot& operator=(const EntrySlot&) = default; | ||||
1137 | EntrySlot& operator=(EntrySlot&&) = default; | ||||
1138 | |||||
1139 | bool operator==(const EntrySlot& aRhs) const { return mEntry == aRhs.mEntry; } | ||||
1140 | |||||
1141 | bool operator<(const EntrySlot& aRhs) const { return mEntry < aRhs.mEntry; } | ||||
1142 | |||||
1143 | EntrySlot& operator++() { | ||||
1144 | ++mEntry; | ||||
1145 | ++mKeyHash; | ||||
1146 | return *this; | ||||
1147 | } | ||||
1148 | |||||
1149 | void destroy() { mEntry->destroy(); } | ||||
1150 | |||||
1151 | void swap(EntrySlot& aOther) { | ||||
1152 | mEntry->swap(aOther.mEntry, aOther.isLive()); | ||||
1153 | std::swap(*mKeyHash, *aOther.mKeyHash); | ||||
1154 | } | ||||
1155 | |||||
1156 | T& get() const { return mEntry->get(); } | ||||
1157 | |||||
1158 | NonConstT& getMutable() { return mEntry->getMutable(); } | ||||
1159 | |||||
1160 | bool isFree() const { return *mKeyHash == Entry::sFreeKey; } | ||||
1161 | |||||
1162 | void clearLive() { | ||||
1163 | MOZ_ASSERT(isLive()); | ||||
1164 | *mKeyHash = Entry::sFreeKey; | ||||
1165 | mEntry->destroyStoredT(); | ||||
1166 | } | ||||
1167 | |||||
1168 | void clear() { | ||||
1169 | if (isLive()) { | ||||
1170 | mEntry->destroyStoredT(); | ||||
1171 | } | ||||
1172 | MOZ_MAKE_MEM_UNDEFINED(mEntry, sizeof(*mEntry)); | ||||
1173 | *mKeyHash = Entry::sFreeKey; | ||||
1174 | } | ||||
1175 | |||||
1176 | bool isRemoved() const { return *mKeyHash == Entry::sRemovedKey; } | ||||
1177 | |||||
1178 | void removeLive() { | ||||
1179 | MOZ_ASSERT(isLive()); | ||||
1180 | *mKeyHash = Entry::sRemovedKey; | ||||
1181 | mEntry->destroyStoredT(); | ||||
1182 | } | ||||
1183 | |||||
1184 | bool isLive() const { return isLiveHash(*mKeyHash); } | ||||
1185 | |||||
1186 | void setCollision() { | ||||
1187 | MOZ_ASSERT(isLive()); | ||||
1188 | *mKeyHash |= Entry::sCollisionBit; | ||||
1189 | } | ||||
1190 | void unsetCollision() { *mKeyHash &= ~Entry::sCollisionBit; } | ||||
1191 | bool hasCollision() const { return *mKeyHash & Entry::sCollisionBit; } | ||||
1192 | bool matchHash(HashNumber hn) { | ||||
1193 | return (*mKeyHash & ~Entry::sCollisionBit) == hn; | ||||
1194 | } | ||||
1195 | HashNumber getKeyHash() const { return *mKeyHash & ~Entry::sCollisionBit; } | ||||
1196 | |||||
1197 | template <typename... Args> | ||||
1198 | void setLive(HashNumber aHashNumber, Args&&... aArgs) { | ||||
1199 | MOZ_ASSERT(!isLive()); | ||||
1200 | *mKeyHash = aHashNumber; | ||||
1201 | new (KnownNotNull, mEntry->valuePtr()) T(std::forward<Args>(aArgs)...); | ||||
1202 | MOZ_ASSERT(isLive()); | ||||
1203 | } | ||||
1204 | |||||
1205 | Entry* toEntry() const { return mEntry; } | ||||
1206 | }; | ||||
1207 | |||||
1208 | template <class T, class HashPolicy, class AllocPolicy> | ||||
1209 | class HashTable : private AllocPolicy { | ||||
1210 | friend class mozilla::ReentrancyGuard; | ||||
1211 | |||||
1212 | using NonConstT = std::remove_const_t<T>; | ||||
1213 | using Key = typename HashPolicy::KeyType; | ||||
1214 | using Lookup = typename HashPolicy::Lookup; | ||||
1215 | |||||
1216 | public: | ||||
1217 | using Entry = HashTableEntry<T>; | ||||
1218 | using Slot = EntrySlot<T>; | ||||
1219 | |||||
1220 | template <typename F> | ||||
1221 | static void forEachSlot(char* aTable, uint32_t aCapacity, F&& f) { | ||||
1222 | auto hashes = reinterpret_cast<HashNumber*>(aTable); | ||||
1223 | auto entries = reinterpret_cast<Entry*>(&hashes[aCapacity]); | ||||
1224 | Slot slot(entries, hashes); | ||||
1225 | for (size_t i = 0; i < size_t(aCapacity); ++i) { | ||||
1226 | f(slot); | ||||
1227 | ++slot; | ||||
1228 | } | ||||
1229 | } | ||||
1230 | |||||
1231 | // A nullable pointer to a hash table element. A Ptr |p| can be tested | ||||
1232 | // either explicitly |if (p.found()) p->...| or using boolean conversion | ||||
1233 | // |if (p) p->...|. Ptr objects must not be used after any mutating hash | ||||
1234 | // table operations unless |generation()| is tested. | ||||
1235 | class Ptr { | ||||
1236 | friend class HashTable; | ||||
1237 | |||||
1238 | Slot mSlot; | ||||
1239 | #ifdef DEBUG | ||||
1240 | const HashTable* mTable; | ||||
1241 | Generation mGeneration; | ||||
1242 | #endif | ||||
1243 | |||||
1244 | protected: | ||||
1245 | Ptr(Slot aSlot, const HashTable& aTable) | ||||
1246 | : mSlot(aSlot) | ||||
1247 | #ifdef DEBUG | ||||
1248 | , | ||||
1249 | mTable(&aTable), | ||||
1250 | mGeneration(aTable.generation()) | ||||
1251 | #endif | ||||
1252 | { | ||||
1253 | } | ||||
1254 | |||||
1255 | // This constructor is used only by AddPtr() within lookupForAdd(). | ||||
1256 | explicit Ptr(const HashTable& aTable) | ||||
1257 | : mSlot(nullptr, nullptr) | ||||
1258 | #ifdef DEBUG | ||||
1259 | , | ||||
1260 | mTable(&aTable), | ||||
1261 | mGeneration(aTable.generation()) | ||||
1262 | #endif | ||||
1263 | { | ||||
1264 | } | ||||
1265 | |||||
1266 | bool isValid() const { return !!mSlot.toEntry(); } | ||||
1267 | |||||
1268 | public: | ||||
1269 | Ptr() | ||||
1270 | : mSlot(nullptr, nullptr) | ||||
1271 | #ifdef DEBUG | ||||
1272 | , | ||||
1273 | mTable(nullptr), | ||||
1274 | mGeneration(0) | ||||
1275 | #endif | ||||
1276 | { | ||||
1277 | } | ||||
1278 | |||||
1279 | bool found() const { | ||||
1280 | if (!isValid()) { | ||||
1281 | return false; | ||||
1282 | } | ||||
1283 | #ifdef DEBUG | ||||
1284 | MOZ_ASSERT(mGeneration == mTable->generation()); | ||||
1285 | #endif | ||||
1286 | return mSlot.isLive(); | ||||
1287 | } | ||||
1288 | |||||
1289 | explicit operator bool() const { return found(); } | ||||
1290 | |||||
1291 | bool operator==(const Ptr& aRhs) const { | ||||
1292 | MOZ_ASSERT(found() && aRhs.found()); | ||||
1293 | return mSlot == aRhs.mSlot; | ||||
1294 | } | ||||
1295 | |||||
1296 | bool operator!=(const Ptr& aRhs) const { | ||||
1297 | #ifdef DEBUG | ||||
1298 | MOZ_ASSERT(mGeneration == mTable->generation()); | ||||
1299 | #endif | ||||
1300 | return !(*this == aRhs); | ||||
1301 | } | ||||
1302 | |||||
1303 | T& operator*() const { | ||||
1304 | #ifdef DEBUG | ||||
1305 | MOZ_ASSERT(found()); | ||||
1306 | MOZ_ASSERT(mGeneration == mTable->generation()); | ||||
1307 | #endif | ||||
1308 | return mSlot.get(); | ||||
1309 | } | ||||
1310 | |||||
1311 | T* operator->() const { | ||||
1312 | #ifdef DEBUG | ||||
1313 | MOZ_ASSERT(found()); | ||||
1314 | MOZ_ASSERT(mGeneration == mTable->generation()); | ||||
1315 | #endif | ||||
1316 | return &mSlot.get(); | ||||
1317 | } | ||||
1318 | }; | ||||
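As the comment above notes, a Ptr is tested with found() or by boolean conversion and must not be held across a mutating table operation. A minimal usage sketch against the public mozilla::HashSet wrapper built on this table (assumes the Gecko headers and documented wrapper API; hasId is an invented helper):

#include "mozilla/HashTable.h"  // mozilla::HashSet

// Returns true if |aId| is present. The Ptr returned by lookup() converts to
// bool; in DEBUG builds the generation check above asserts if the table mutated.
static bool hasId(const mozilla::HashSet<uint32_t>& aSet, uint32_t aId) {
  mozilla::HashSet<uint32_t>::Ptr p = aSet.lookup(aId);
  return p.found();
}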
1319 | |||||
1320 | // A Ptr that can be used to add a key after a failed lookup. | ||||
1321 | class AddPtr : public Ptr { | ||||
1322 | friend class HashTable; | ||||
1323 | |||||
1324 | HashNumber mKeyHash; | ||||
1325 | #ifdef DEBUG | ||||
1326 | uint64_t mMutationCount; | ||||
1327 | #endif | ||||
1328 | |||||
1329 | AddPtr(Slot aSlot, const HashTable& aTable, HashNumber aHashNumber) | ||||
1330 | : Ptr(aSlot, aTable), | ||||
1331 | mKeyHash(aHashNumber) | ||||
1332 | #ifdef DEBUG | ||||
1333 | , | ||||
1334 | mMutationCount(aTable.mMutationCount) | ||||
1335 | #endif | ||||
1336 | { | ||||
1337 | } | ||||
1338 | |||||
1339 | // This constructor is used when lookupForAdd() is performed on a table | ||||
1340 | // lacking entry storage; it leaves mSlot null but initializes everything | ||||
1341 | // else. | ||||
1342 | AddPtr(const HashTable& aTable, HashNumber aHashNumber) | ||||
1343 | : Ptr(aTable), | ||||
1344 | mKeyHash(aHashNumber) | ||||
1345 | #ifdef DEBUG | ||||
1346 | , | ||||
1347 | mMutationCount(aTable.mMutationCount) | ||||
1348 | #endif | ||||
1349 | { | ||||
1350 | MOZ_ASSERT(isLive()); | ||||
1351 | } | ||||
1352 | |||||
1353 | bool isLive() const { return isLiveHash(mKeyHash); } | ||||
1354 | |||||
1355 | public: | ||||
1356 | AddPtr() : mKeyHash(0) {} | ||||
1357 | }; | ||||
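An AddPtr carries the key hash from a failed lookup so the subsequent insertion can reuse it. A hedged sketch of the usual lookupForAdd()/add() pattern on the public mozilla::HashMap wrapper (assumes the documented Gecko API; countKey is an invented example):

#include "mozilla/HashTable.h"  // mozilla::HashMap

// Increment a per-key counter, inserting the key on first sight.
// add() may fail (e.g. on OOM) and reports that through its bool result.
static bool countKey(mozilla::HashMap<uint32_t, uint32_t>& aCounts,
                     uint32_t aKey) {
  auto p = aCounts.lookupForAdd(aKey);
  if (p) {
    p->value()++;  // already present: AddPtr behaves like a found Ptr
    return true;
  }
  return aCounts.add(p, aKey, 1);  // not present: insert using the cached hash
}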
1358 | |||||
1359 | // A hash table iterator that (mostly) doesn't allow table modifications. | ||||
1360 | // As with Ptr/AddPtr, Iterator objects must not be used after any mutating | ||||
1361 | // hash table operation unless the |generation()| is tested. | ||||
1362 | class Iterator { | ||||
1363 | void moveToNextLiveEntry() { | ||||
1364 | while (++mCur < mEnd && !mCur.isLive()) { | ||||
1365 | continue; | ||||
1366 | } | ||||
1367 | } | ||||
1368 | |||||
1369 | protected: | ||||
1370 | friend class HashTable; | ||||
1371 | |||||
1372 | explicit Iterator(const HashTable& aTable) | ||||
1373 | : mCur(aTable.slotForIndex(0)), | ||||
1374 | mEnd(aTable.slotForIndex(aTable.capacity())) | ||||
1375 | #ifdef DEBUG | ||||
1376 | , | ||||
1377 | mTable(aTable), | ||||
1378 | mMutationCount(aTable.mMutationCount), | ||||
1379 | mGeneration(aTable.generation()), | ||||
1380 | mValidEntry(true) | ||||
1381 | #endif | ||||
1382 | { | ||||
1383 | if (!done() && !mCur.isLive()) { | ||||
1384 | moveToNextLiveEntry(); | ||||
1385 | } | ||||
1386 | } | ||||
1387 | |||||
1388 | Slot mCur; | ||||
1389 | Slot mEnd; | ||||
1390 | #ifdef DEBUG | ||||
1391 | const HashTable& mTable; | ||||
1392 | uint64_t mMutationCount; | ||||
1393 | Generation mGeneration; | ||||
1394 | bool mValidEntry; | ||||
1395 | #endif | ||||
1396 | |||||
1397 | public: | ||||
1398 | bool done() const { | ||||
1399 | MOZ_ASSERT(mGeneration == mTable.generation()); | ||||
1400 | MOZ_ASSERT(mMutationCount == mTable.mMutationCount); | ||||
1401 | return mCur == mEnd; | ||||
1402 | } | ||||
1403 | |||||
1404 | T& get() const { | ||||
1405 | MOZ_ASSERT(!done()); | ||||
1406 | MOZ_ASSERT(mValidEntry); | ||||
1407 | MOZ_ASSERT(mGeneration == mTable.generation()); | ||||
1408 | MOZ_ASSERT(mMutationCount == mTable.mMutationCount); | ||||
1409 | return mCur.get(); | ||||
1410 | } | ||||
1411 | |||||
1412 | void next() { | ||||
1413 | MOZ_ASSERT(!done()); | ||||
1414 | MOZ_ASSERT(mGeneration == mTable.generation()); | ||||
1415 | MOZ_ASSERT(mMutationCount == mTable.mMutationCount); | ||||
1416 | moveToNextLiveEntry(); | ||||
1417 | #ifdef DEBUG | ||||
1418 | mValidEntry = true; | ||||
1419 | #endif | ||||
1420 | } | ||||
1421 | }; | ||||
1422 | |||||
1423 | // A hash table iterator that permits modification, removal and rekeying. | ||||
1424 | // Since rehashing when elements were removed during enumeration would be | ||||
1425 | // bad, it is postponed until the ModIterator is destructed. Since the | ||||
1426 | // ModIterator's destructor touches the hash table, the user must ensure | ||||
1427 | // that the hash table is still alive when the destructor runs. | ||||
1428 | class ModIterator : public Iterator { | ||||
1429 | friend class HashTable; | ||||
1430 | |||||
1431 | HashTable& mTable; | ||||
1432 | bool mRekeyed; | ||||
1433 | bool mRemoved; | ||||
1434 | |||||
1435 | // ModIterator is movable but not copyable. | ||||
1436 | ModIterator(const ModIterator&) = delete; | ||||
1437 | void operator=(const ModIterator&) = delete; | ||||
1438 | |||||
1439 | protected: | ||||
1440 | explicit ModIterator(HashTable& aTable) | ||||
1441 | : Iterator(aTable), mTable(aTable), mRekeyed(false), mRemoved(false) {} | ||||
1442 | |||||
1443 | public: | ||||
1444 | MOZ_IMPLICIT ModIterator(ModIterator&& aOther) | ||||
1445 | : Iterator(aOther), | ||||
1446 | mTable(aOther.mTable), | ||||
1447 | mRekeyed(aOther.mRekeyed), | ||||
1448 | mRemoved(aOther.mRemoved) { | ||||
1449 | aOther.mRekeyed = false; | ||||
1450 | aOther.mRemoved = false; | ||||
1451 | } | ||||
1452 | |||||
1453 | // Removes the current element from the table, leaving |get()| | ||||
1454 | // invalid until the next call to |next()|. | ||||
1455 | void remove() { | ||||
1456 | mTable.remove(this->mCur); | ||||
1457 | mRemoved = true; | ||||
1458 | #ifdef DEBUG | ||||
1459 | this->mValidEntry = false; | ||||
1460 | this->mMutationCount = mTable.mMutationCount; | ||||
1461 | #endif | ||||
1462 | } | ||||
1463 | |||||
1464 | NonConstT& getMutable() { | ||||
1465 | MOZ_ASSERT(!this->done()); | ||||
1466 | MOZ_ASSERT(this->mValidEntry); | ||||
1467 | MOZ_ASSERT(this->mGeneration == this->Iterator::mTable.generation()); | ||||
1468 | MOZ_ASSERT(this->mMutationCount == this->Iterator::mTable.mMutationCount); | ||||
1469 | return this->mCur.getMutable(); | ||||
1470 | } | ||||
1471 | |||||
1472 | // Removes the current element and re-inserts it into the table with | ||||
1473 | // a new key at the new Lookup position. |get()| is invalid after | ||||
1474 | // this operation until the next call to |next()|. | ||||
1475 | void rekey(const Lookup& l, const Key& k) { | ||||
1476 | MOZ_ASSERT(&k != &HashPolicy::getKey(this->mCur.get())); | ||||
1477 | Ptr p(this->mCur, mTable); | ||||
1478 | mTable.rekeyWithoutRehash(p, l, k); | ||||
1479 | mRekeyed = true; | ||||
1480 | #ifdef DEBUG | ||||
1481 | this->mValidEntry = false; | ||||
1482 | this->mMutationCount = mTable.mMutationCount; | ||||
1483 | #endif | ||||
1484 | } | ||||
1485 | |||||
1486 | void rekey(const Key& k) { rekey(k, k); } | ||||
1487 | |||||
1488 | // Potentially rehashes the table. | ||||
1489 | ~ModIterator() { | ||||
1490 | if (mRekeyed) { | ||||
1491 | mTable.mGen++; | ||||
1492 | mTable.infallibleRehashIfOverloaded(); | ||||
1493 | } | ||||
1494 | |||||
1495 | if (mRemoved) { | ||||
1496 | mTable.compact(); | ||||
1497 | } | ||||
1498 | } | ||||
1499 | }; | ||||
1500 | |||||
1501 | // Range is similar to Iterator, but uses different terminology. | ||||
1502 | class Range { | ||||
1503 | friend class HashTable; | ||||
1504 | |||||
1505 | Iterator mIter; | ||||
1506 | |||||
1507 | protected: | ||||
1508 | explicit Range(const HashTable& table) : mIter(table) {} | ||||
1509 | |||||
1510 | public: | ||||
1511 | bool empty() const { return mIter.done(); } | ||||
1512 | |||||
1513 | T& front() const { return mIter.get(); } | ||||
1514 | |||||
1515 | void popFront() { return mIter.next(); } | ||||
1516 | }; | ||||
1517 | |||||
1518 | // Enum is similar to ModIterator, but uses different terminology. | ||||
1519 | class Enum { | ||||
1520 | ModIterator mIter; | ||||
1521 | |||||
1522 | // Enum is movable but not copyable. | ||||
1523 | Enum(const Enum&) = delete; | ||||
1524 | void operator=(const Enum&) = delete; | ||||
1525 | |||||
1526 | public: | ||||
1527 | template <class Map> | ||||
1528 | explicit Enum(Map& map) : mIter(map.mImpl) {} | ||||
1529 | |||||
1530 | MOZ_IMPLICIT Enum(Enum&& other) : mIter(std::move(other.mIter)) {} | ||||
1531 | |||||
1532 | bool empty() const { return mIter.done(); } | ||||
1533 | |||||
1534 | T& front() const { return mIter.get(); } | ||||
1535 | |||||
1536 | void popFront() { return mIter.next(); } | ||||
1537 | |||||
1538 | void removeFront() { mIter.remove(); } | ||||
1539 | |||||
1540 | NonConstT& mutableFront() { return mIter.getMutable(); } | ||||
1541 | |||||
1542 | void rekeyFront(const Lookup& aLookup, const Key& aKey) { | ||||
1543 | mIter.rekey(aLookup, aKey); | ||||
1544 | } | ||||
1545 | |||||
1546 | void rekeyFront(const Key& aKey) { mIter.rekey(aKey); } | ||||
1547 | }; | ||||
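Range/Enum and Iterator/ModIterator expose the same two iteration modes through the public wrappers: read-only traversal, and traversal that may remove or rekey entries and tidies the table when it finishes. A hedged sketch using the documented iter()/modIter() API on mozilla::HashMap (sumValues and dropZeros are invented examples):

#include "mozilla/HashTable.h"  // mozilla::HashMap

static uint64_t sumValues(const mozilla::HashMap<uint32_t, uint32_t>& aMap) {
  uint64_t sum = 0;
  for (auto it = aMap.iter(); !it.done(); it.next()) {
    sum += it.get().value();  // read-only Iterator: no mutation allowed
  }
  return sum;
}

static void dropZeros(mozilla::HashMap<uint32_t, uint32_t>& aMap) {
  for (auto it = aMap.modIter(); !it.done(); it.next()) {
    if (it.get().value() == 0) {
      it.remove();  // table is compacted when |it| is destroyed
    }
  }
}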
1548 | |||||
1549 | // HashTable is movable | ||||
1550 | HashTable(HashTable&& aRhs) : AllocPolicy(std::move(aRhs)) { moveFrom(aRhs); } | ||||
1551 | HashTable& operator=(HashTable&& aRhs) { | ||||
1552 | MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited"); | ||||
1553 | if (mTable) { | ||||
1554 | destroyTable(*this, mTable, capacity()); | ||||
1555 | } | ||||
1556 | AllocPolicy::operator=(std::move(aRhs)); | ||||
1557 | moveFrom(aRhs); | ||||
1558 | return *this; | ||||
1559 | } | ||||
1560 | |||||
1561 | private: | ||||
1562 | void moveFrom(HashTable& aRhs) { | ||||
1563 | mGen = aRhs.mGen; | ||||
1564 | mHashShift = aRhs.mHashShift; | ||||
1565 | mTable = aRhs.mTable; | ||||
1566 | mEntryCount = aRhs.mEntryCount; | ||||
1567 | mRemovedCount = aRhs.mRemovedCount; | ||||
1568 | #ifdef DEBUG | ||||
1569 | mMutationCount = aRhs.mMutationCount; | ||||
1570 | mEntered = aRhs.mEntered; | ||||
1571 | #endif | ||||
1572 | aRhs.mTable = nullptr; | ||||
1573 | aRhs.clearAndCompact(); | ||||
1574 | } | ||||
1575 | |||||
1576 | // HashTable is not copyable or assignable | ||||
1577 | HashTable(const HashTable&) = delete; | ||||
1578 | void operator=(const HashTable&) = delete; | ||||
1579 | |||||
1580 | static const uint32_t CAP_BITS = 30; | ||||
1581 | |||||
1582 | public: | ||||
1583 | uint64_t mGen : 56; // entry storage generation number | ||||
1584 | uint64_t mHashShift : 8; // multiplicative hash shift | ||||
1585 | char* mTable; // entry storage | ||||
1586 | uint32_t mEntryCount; // number of entries in mTable | ||||
1587 | uint32_t mRemovedCount; // removed entry sentinels in mTable | ||||
1588 | |||||
1589 | #ifdef DEBUG | ||||
1590 | uint64_t mMutationCount; | ||||
1591 | mutable bool mEntered; | ||||
1592 | #endif | ||||
1593 | |||||
1594 | // The default initial capacity is 32 (enough to hold 16 elements), but it | ||||
1595 | // can be as low as 4. | ||||
1596 | static const uint32_t sDefaultLen = 16; | ||||
1597 | static const uint32_t sMinCapacity = 4; | ||||
1598 | // See the comments in HashTableEntry about this value. | ||||
1599 | static_assert(sMinCapacity >= 4, "too-small sMinCapacity breaks assumptions"); | ||||
1600 | static const uint32_t sMaxInit = 1u << (CAP_BITS - 1); | ||||
1601 | static const uint32_t sMaxCapacity = 1u << CAP_BITS; | ||||
1602 | |||||
1603 | // Hash-table alpha is conceptually a fraction, but to avoid floating-point | ||||
1604 | // math we implement it as a ratio of integers. | ||||
1605 | static const uint8_t sAlphaDenominator = 4; | ||||
1606 | static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4 | ||||
1607 | static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4 | ||||
1608 | |||||
1609 | static const HashNumber sFreeKey = Entry::sFreeKey; | ||||
1610 | static const HashNumber sRemovedKey = Entry::sRemovedKey; | ||||
1611 | static const HashNumber sCollisionBit = Entry::sCollisionBit; | ||||
1612 | |||||
1613 | static uint32_t bestCapacity(uint32_t aLen) { | ||||
1614 | static_assert( | ||||
1615 | (sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit, | ||||
1616 | "multiplication in numerator below could overflow"); | ||||
1617 | static_assert( | ||||
1618 | sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator, | ||||
1619 | "numerator calculation below could potentially overflow"); | ||||
1620 | |||||
1621 | // Callers should ensure this is true. | ||||
1622 | MOZ_ASSERT(aLen <= sMaxInit); | ||||
1623 | |||||
1624 | // Compute the smallest capacity allowing |aLen| elements to be | ||||
1625 | // inserted without rehashing: ceil(aLen / max-alpha). (Ceiling | ||||
1626 | // integral division: <http://stackoverflow.com/a/2745086>.) | ||||
1627 | uint32_t capacity = (aLen * sAlphaDenominator + sMaxAlphaNumerator - 1) / | ||||
1628 | sMaxAlphaNumerator; | ||||
1629 | capacity = (capacity < sMinCapacity) ? sMinCapacity : RoundUpPow2(capacity); | ||||
1630 | |||||
1631 | MOZ_ASSERT(capacity >= aLen); | ||||
1632 | MOZ_ASSERT(capacity <= sMaxCapacity); | ||||
1633 | |||||
1634 | return capacity; | ||||
1635 | } | ||||
1636 | |||||
1637 | static uint32_t hashShift(uint32_t aLen) { | ||||
1638 | // Reject all lengths whose initial computed capacity would exceed | ||||
1639 | // sMaxCapacity. Round that maximum aLen down to the nearest power of two | ||||
1640 | // for speedier code. | ||||
1641 | if (MOZ_UNLIKELY(aLen > sMaxInit)) { | ||||
1642 | MOZ_CRASH("initial length is too large"); | ||||
1643 | } | ||||
1644 | |||||
1645 | return kHashNumberBits - mozilla::CeilingLog2(bestCapacity(aLen)); | ||||
1646 | } | ||||
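A worked example of the arithmetic in bestCapacity() and hashShift(): with max alpha 3/4, a requested length of 16 needs ceil(16 * 4 / 3) = 22 slots, which rounds up to the power of two 32, giving a hash shift of 32 - 5 = 27. A standalone sketch of that calculation (roundUpPow2 is a local stand-in for mozilla::RoundUpPow2):

#include <cassert>
#include <cstdint>

// Local stand-in for mozilla::RoundUpPow2, for illustration only.
static uint32_t roundUpPow2(uint32_t v) {
  uint32_t p = 1;
  while (p < v) {
    p <<= 1;
  }
  return p;
}

int main() {
  const uint32_t alphaDenominator = 4, maxAlphaNumerator = 3, minCapacity = 4;
  uint32_t aLen = 16;
  // ceil(aLen / (3/4)) == ceil(aLen * 4 / 3)
  uint32_t capacity =
      (aLen * alphaDenominator + maxAlphaNumerator - 1) / maxAlphaNumerator;  // 22
  capacity = capacity < minCapacity ? minCapacity : roundUpPow2(capacity);    // 32
  assert(capacity == 32);
  uint32_t hashShift = 32 - 5;  // kHashNumberBits - CeilingLog2(32)
  assert(hashShift == 27);
  return 0;
}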
1647 | |||||
1648 | static bool isLiveHash(HashNumber aHash) { return Entry::isLiveHash(aHash); } | ||||
1649 | |||||
1650 | static HashNumber prepareHash(HashNumber aInputHash) { | ||||
1651 | HashNumber keyHash = ScrambleHashCode(aInputHash); | ||||
1652 | |||||
1653 | // Avoid reserved hash codes. | ||||
1654 | if (!isLiveHash(keyHash)) { | ||||
1655 | keyHash -= (sRemovedKey + 1); | ||||
1656 | } | ||||
1657 | return keyHash & ~sCollisionBit; | ||||
1658 | } | ||||
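prepareHash() must keep stored hashes away from the reserved codes: 0 means free, 1 means removed, and bit 0 doubles as the collision flag. A standalone sketch of that adjustment (the ScrambleHashCode step is replaced by a hard-coded value purely for illustration):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t sFreeKey = 0, sRemovedKey = 1, sCollisionBit = 1;
  (void)sFreeKey;
  // Pretend the scrambled input hash happened to be 1 (a reserved code).
  uint32_t keyHash = 1;
  if (keyHash <= sRemovedKey) {    // i.e. !isLiveHash(keyHash)
    keyHash -= (sRemovedKey + 1);  // wraps around to 0xFFFFFFFF
  }
  keyHash &= ~sCollisionBit;       // bit 0 is kept free for the collision flag
  assert(keyHash > sRemovedKey && (keyHash & sCollisionBit) == 0);
  return 0;
}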
1659 | |||||
1660 | enum FailureBehavior { DontReportFailure = false, ReportFailure = true }; | ||||
1661 | |||||
1662 | // Fake a struct that we're going to alloc. See the comments in | ||||
1663 | // HashTableEntry about how the table is laid out, and why it's safe. | ||||
1664 | struct FakeSlot { | ||||
1665 | unsigned char c[sizeof(HashNumber) + sizeof(typename Entry::NonConstT)]; | ||||
1666 | }; | ||||
1667 | |||||
1668 | static char* createTable(AllocPolicy& aAllocPolicy, uint32_t aCapacity, | ||||
1669 | FailureBehavior aReportFailure = ReportFailure) { | ||||
1670 | FakeSlot* fake = | ||||
1671 | aReportFailure | ||||
1672 | ? aAllocPolicy.template pod_malloc<FakeSlot>(aCapacity) | ||||
1673 | : aAllocPolicy.template maybe_pod_malloc<FakeSlot>(aCapacity); | ||||
1674 | |||||
1675 | MOZ_ASSERT((reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment) == | ||||
1676 | 0); | ||||
1677 | |||||
1678 | char* table = reinterpret_cast<char*>(fake); | ||||
1679 | if (table) { | ||||
1680 | forEachSlot(table, aCapacity, [&](Slot& slot) { | ||||
1681 | *slot.mKeyHash = sFreeKey; | ||||
1682 | new (KnownNotNull, slot.toEntry()) Entry(); | ||||
1683 | }); | ||||
1684 | } | ||||
1685 | return table; | ||||
1686 | } | ||||
1687 | |||||
1688 | static void destroyTable(AllocPolicy& aAllocPolicy, char* aOldTable, | ||||
1689 | uint32_t aCapacity) { | ||||
1690 | forEachSlot(aOldTable, aCapacity, [&](const Slot& slot) { | ||||
1691 | if (slot.isLive()) { | ||||
1692 | slot.toEntry()->destroyStoredT(); | ||||
1693 | } | ||||
1694 | }); | ||||
1695 | freeTable(aAllocPolicy, aOldTable, aCapacity); | ||||
1696 | } | ||||
1697 | |||||
1698 | static void freeTable(AllocPolicy& aAllocPolicy, char* aOldTable, | ||||
1699 | uint32_t aCapacity) { | ||||
1700 | FakeSlot* fake = reinterpret_cast<FakeSlot*>(aOldTable); | ||||
1701 | aAllocPolicy.free_(fake, aCapacity); | ||||
1702 | } | ||||
1703 | |||||
1704 | public: | ||||
1705 | HashTable(AllocPolicy aAllocPolicy, uint32_t aLen) | ||||
1706 | : AllocPolicy(std::move(aAllocPolicy)), | ||||
1707 | mGen(0), | ||||
1708 | mHashShift(hashShift(aLen)), | ||||
1709 | mTable(nullptr), | ||||
1710 | mEntryCount(0), | ||||
1711 | mRemovedCount(0) | ||||
1712 | #ifdef DEBUG | ||||
1713 | , | ||||
1714 | mMutationCount(0), | ||||
1715 | mEntered(false) | ||||
1716 | #endif | ||||
1717 | { | ||||
1718 | } | ||||
1719 | |||||
1720 | explicit HashTable(AllocPolicy aAllocPolicy) | ||||
1721 | : HashTable(aAllocPolicy, sDefaultLen) {} | ||||
1722 | |||||
1723 | ~HashTable() { | ||||
1724 | if (mTable) { | ||||
1725 | destroyTable(*this, mTable, capacity()); | ||||
1726 | } | ||||
1727 | } | ||||
1728 | |||||
1729 | private: | ||||
1730 | HashNumber hash1(HashNumber aHash0) const { return aHash0 >> mHashShift; } | ||||
1731 | |||||
1732 | struct DoubleHash { | ||||
1733 | HashNumber mHash2; | ||||
1734 | HashNumber mSizeMask; | ||||
1735 | }; | ||||
1736 | |||||
1737 | DoubleHash hash2(HashNumber aCurKeyHash) const { | ||||
1738 | uint32_t sizeLog2 = kHashNumberBits - mHashShift; | ||||
1739 | DoubleHash dh = {((aCurKeyHash << sizeLog2) >> mHashShift) | 1, | ||||
1740 | (HashNumber(1) << sizeLog2) - 1}; | ||||
1741 | return dh; | ||||
1742 | } | ||||
1743 | |||||
1744 | static HashNumber applyDoubleHash(HashNumber aHash1, | ||||
1745 | const DoubleHash& aDoubleHash) { | ||||
1746 | return WrappingSubtract(aHash1, aDoubleHash.mHash2) & aDoubleHash.mSizeMask; | ||||
1747 | } | ||||
1748 | |||||
1749 | static MOZ_ALWAYS_INLINE bool match(T& aEntry, const Lookup& aLookup) { | ||||
1750 | return HashPolicy::match(HashPolicy::getKey(aEntry), aLookup); | ||||
1751 | } | ||||
1752 | |||||
1753 | enum LookupReason { ForNonAdd, ForAdd }; | ||||
1754 | |||||
1755 | Slot slotForIndex(HashNumber aIndex) const { | ||||
1756 | auto hashes = reinterpret_cast<HashNumber*>(mTable); | ||||
1757 | auto entries = reinterpret_cast<Entry*>(&hashes[capacity()]); | ||||
1758 | return Slot(&entries[aIndex], &hashes[aIndex]); | ||||
1759 | } | ||||
1760 | |||||
1761 | // Warning: in order for readonlyThreadsafeLookup() to be safe this | ||||
1762 | // function must not modify the table in any way when Reason==ForNonAdd. | ||||
1763 | template <LookupReason Reason> | ||||
1764 | MOZ_ALWAYS_INLINE Slot lookup(const Lookup& aLookup, | ||||
1765 | HashNumber aKeyHash) const { | ||||
1766 | MOZ_ASSERT(isLiveHash(aKeyHash)); | ||||
1767 | MOZ_ASSERT(!(aKeyHash & sCollisionBit)); | ||||
1768 | MOZ_ASSERT(mTable); | ||||
1769 | |||||
1770 | // Compute the primary hash address. | ||||
1771 | HashNumber h1 = hash1(aKeyHash); | ||||
1772 | Slot slot = slotForIndex(h1); | ||||
1773 | |||||
1774 | // Miss: return space for a new entry. | ||||
1775 | if (slot.isFree()) { | ||||
1776 | return slot; | ||||
1777 | } | ||||
1778 | |||||
1779 | // Hit: return entry. | ||||
1780 | if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) { | ||||
1781 | return slot; | ||||
1782 | } | ||||
1783 | |||||
1784 | // Collision: double hash. | ||||
1785 | DoubleHash dh = hash2(aKeyHash); | ||||
1786 | |||||
1787 | // Save the first removed entry pointer so we can recycle later. | ||||
1788 | Maybe<Slot> firstRemoved; | ||||
1789 | |||||
1790 | while (true) { | ||||
1791 | if (Reason == ForAdd && !firstRemoved) { | ||||
1792 | if (MOZ_UNLIKELY(slot.isRemoved())) { | ||||
1793 | firstRemoved.emplace(slot); | ||||
1794 | } else { | ||||
1795 | slot.setCollision(); | ||||
1796 | } | ||||
1797 | } | ||||
1798 | |||||
1799 | h1 = applyDoubleHash(h1, dh); | ||||
1800 | |||||
1801 | slot = slotForIndex(h1); | ||||
1802 | if (slot.isFree()) { | ||||
1803 | return firstRemoved.refOr(slot); | ||||
1804 | } | ||||
1805 | |||||
1806 | if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) { | ||||
1807 | return slot; | ||||
1808 | } | ||||
1809 | } | ||||
1810 | } | ||||
1811 | |||||
1812 | // This is a copy of lookup() hardcoded to the assumptions: | ||||
1813 | // 1. the lookup is for an add; | ||||
1814 | // 2. the key, whose |keyHash| has been passed, is not in the table. | ||||
1815 | Slot findNonLiveSlot(HashNumber aKeyHash) { | ||||
1816 | MOZ_ASSERT(!(aKeyHash & sCollisionBit)); | ||||
1817 | MOZ_ASSERT(mTable); | ||||
1818 | |||||
1819 | // We assume 'aKeyHash' has already been distributed. | ||||
1820 | |||||
1821 | // Compute the primary hash address. | ||||
1822 | HashNumber h1 = hash1(aKeyHash); | ||||
1823 | Slot slot = slotForIndex(h1); | ||||
1824 | |||||
1825 | // Miss: return space for a new entry. | ||||
1826 | if (!slot.isLive()) { | ||||
1827 | return slot; | ||||
1828 | } | ||||
1829 | |||||
1830 | // Collision: double hash. | ||||
1831 | DoubleHash dh = hash2(aKeyHash); | ||||
1832 | |||||
1833 | while (true) { | ||||
1834 | slot.setCollision(); | ||||
1835 | |||||
1836 | h1 = applyDoubleHash(h1, dh); | ||||
1837 | |||||
1838 | slot = slotForIndex(h1); | ||||
1839 | if (!slot.isLive()) { | ||||
1840 | return slot; | ||||
1841 | } | ||||
1842 | } | ||||
1843 | } | ||||
1844 | |||||
1845 | enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed }; | ||||
1846 | |||||
1847 | RebuildStatus changeTableSize( | ||||
1848 | uint32_t newCapacity, FailureBehavior aReportFailure = ReportFailure) { | ||||
1849 | MOZ_ASSERT(IsPowerOfTwo(newCapacity)); | ||||
1850 | MOZ_ASSERT(!!mTable == !!capacity()); | ||||
1851 | |||||
1852 | // Look, but don't touch, until we succeed in getting new entry store. | ||||
1853 | char* oldTable = mTable; | ||||
1854 | uint32_t oldCapacity = capacity(); | ||||
1855 | uint32_t newLog2 = mozilla::CeilingLog2(newCapacity); | ||||
1856 | |||||
1857 | if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) { | ||||
1858 | if (aReportFailure) { | ||||
1859 | this->reportAllocOverflow(); | ||||
1860 | } | ||||
1861 | return RehashFailed; | ||||
1862 | } | ||||
1863 | |||||
1864 | char* newTable = createTable(*this, newCapacity, aReportFailure); | ||||
1865 | if (!newTable) { | ||||
1866 | return RehashFailed; | ||||
1867 | } | ||||
1868 | |||||
1869 | // We can't fail from here on, so update table parameters. | ||||
1870 | mHashShift = kHashNumberBits - newLog2; | ||||
1871 | mRemovedCount = 0; | ||||
1872 | mGen++; | ||||
1873 | mTable = newTable; | ||||
1874 | |||||
1875 | // Copy only live entries, leaving removed ones behind. | ||||
1876 | forEachSlot(oldTable, oldCapacity, [&](Slot& slot) { | ||||
1877 | if (slot.isLive()) { | ||||
1878 | HashNumber hn = slot.getKeyHash(); | ||||
1879 | findNonLiveSlot(hn).setLive( | ||||
1880 | hn, std::move(const_cast<typename Entry::NonConstT&>(slot.get()))); | ||||
1881 | } | ||||
1882 | |||||
1883 | slot.clear(); | ||||
1884 | }); | ||||
1885 | |||||
1886 | // All entries have been destroyed, no need to destroyTable. | ||||
1887 | freeTable(*this, oldTable, oldCapacity); | ||||
1888 | return Rehashed; | ||||
1889 | } | ||||
1890 | |||||
1891 | RebuildStatus rehashIfOverloaded( | ||||
1892 | FailureBehavior aReportFailure = ReportFailure) { | ||||
1893 | static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator, | ||||
1894 | "multiplication below could overflow"); | ||||
1895 | |||||
1896 | // Note: if capacity() is zero, this will always succeed, which is | ||||
1897 | // what we want. | ||||
1898 | bool overloaded = mEntryCount + mRemovedCount >= | ||||
1899 | capacity() * sMaxAlphaNumerator / sAlphaDenominator; | ||||
1900 | |||||
1901 | if (!overloaded) { | ||||
1902 | return NotOverloaded; | ||||
1903 | } | ||||
1904 | |||||
1905 | // Succeed if a quarter or more of all entries are removed. Note that this | ||||
1906 | // always succeeds if capacity() == 0 (i.e. entry storage has not been | ||||
1907 | // allocated), which is what we want, because it means changeTableSize() | ||||
1908 | // will allocate the requested capacity rather than doubling it. | ||||
1909 | bool manyRemoved = mRemovedCount >= (capacity() >> 2); | ||||
1910 | uint32_t newCapacity = manyRemoved ? capacity() : capacity() * 2; | ||||
1911 | return changeTableSize(newCapacity, aReportFailure); | ||||
1912 | } | ||||
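The alpha ratios above translate into concrete thresholds. For example, at capacity 32 the table counts as overloaded once live plus removed entries reach 32 * 3 / 4 = 24, while shrinkIfUnderloaded() (below) halves the capacity once live entries drop to 32 * 1 / 4 = 8 or fewer. A small standalone check of that arithmetic:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t capacity = 32;
  const uint32_t alphaDenominator = 4;
  const uint32_t maxAlphaNumerator = 3;  // overloaded at >= 3/4 full
  const uint32_t minAlphaNumerator = 1;  // underloaded at <= 1/4 full

  assert(capacity * maxAlphaNumerator / alphaDenominator == 24);
  assert(capacity * minAlphaNumerator / alphaDenominator == 8);
  return 0;
}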
1913 | |||||
1914 | void infallibleRehashIfOverloaded() { | ||||
1915 | if (rehashIfOverloaded(DontReportFailure) == RehashFailed) { | ||||
1916 | rehashTableInPlace(); | ||||
1917 | } | ||||
1918 | } | ||||
1919 | |||||
1920 | void remove(Slot& aSlot) { | ||||
1921 | MOZ_ASSERT(mTable); | ||||
1922 | |||||
1923 | if (aSlot.hasCollision()) { | ||||
1924 | aSlot.removeLive(); | ||||
1925 | mRemovedCount++; | ||||
1926 | } else { | ||||
1927 | aSlot.clearLive(); | ||||
1928 | } | ||||
1929 | mEntryCount--; | ||||
1930 | #ifdef DEBUG | ||||
1931 | mMutationCount++; | ||||
1932 | #endif | ||||
1933 | } | ||||
1934 | |||||
1935 | void shrinkIfUnderloaded() { | ||||
1936 | static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator, | ||||
1937 | "multiplication below could overflow"); | ||||
1938 | bool underloaded = | ||||
1939 | capacity() > sMinCapacity && | ||||
1940 | mEntryCount <= capacity() * sMinAlphaNumerator / sAlphaDenominator; | ||||
1941 | |||||
1942 | if (underloaded) { | ||||
1943 | (void)changeTableSize(capacity() / 2, DontReportFailure); | ||||
1944 | } | ||||
1945 | } | ||||
1946 | |||||
1947 | // This is identical to changeTableSize(currentSize), but without requiring | ||||
1948 | // a second table. We do this by recycling the collision bits to tell us if | ||||
1949 | // the element is already inserted or still waiting to be inserted. Since | ||||
1950 | // already-inserted elements win any conflicts, we get the same table as we | ||||
1951 | // would have gotten through random insertion order. | ||||
1952 | void rehashTableInPlace() { | ||||
1953 | mRemovedCount = 0; | ||||
1954 | mGen++; | ||||
1955 | forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.unsetCollision(); }); | ||||
1956 | for (uint32_t i = 0; i < capacity();) { | ||||
1957 | Slot src = slotForIndex(i); | ||||
1958 | |||||
1959 | if (!src.isLive() || src.hasCollision()) { | ||||
1960 | ++i; | ||||
1961 | continue; | ||||
1962 | } | ||||
1963 | |||||
1964 | HashNumber keyHash = src.getKeyHash(); | ||||
1965 | HashNumber h1 = hash1(keyHash); | ||||
1966 | DoubleHash dh = hash2(keyHash); | ||||
1967 | Slot tgt = slotForIndex(h1); | ||||
1968 | while (true) { | ||||
1969 | if (!tgt.hasCollision()) { | ||||
1970 | src.swap(tgt); | ||||
1971 | tgt.setCollision(); | ||||
1972 | break; | ||||
1973 | } | ||||
1974 | |||||
1975 | h1 = applyDoubleHash(h1, dh); | ||||
1976 | tgt = slotForIndex(h1); | ||||
1977 | } | ||||
1978 | } | ||||
1979 | |||||
1980 | // TODO: this algorithm leaves collision bits on *all* elements, even if | ||||
1981 | // they are on no collision path. We have the option of setting the | ||||
1982 | // collision bits correctly on a subsequent pass or skipping the rehash | ||||
1983 | // unless we are totally filled with tombstones: benchmark to find out | ||||
1984 | // which approach is best. | ||||
1985 | } | ||||
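
In other words, rehashTableInPlace() clears every collision bit and then reuses it as an "already re-inserted" mark: each unmarked live element is swapped along its probe chain until it lands in an unmarked slot, displacing that slot's previous occupant, which is then processed from the same source slot. The toy version below shows the same marking trick on a simplified linear-probing table; it is a sketch with hypothetical names, not the header's double-hashing code.

#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

// Toy in-place rehash. The |placed| flag plays the role of the recycled
// collision bit above: placed slots are final, and an unplaced element is
// swapped along its probe chain until it reaches an unplaced slot.
struct ToySlot {
  std::optional<uint32_t> key;  // nullopt == empty slot
  bool placed = false;
};

static void toyRehashInPlace(std::vector<ToySlot>& aSlots) {
  const std::size_t mask = aSlots.size() - 1;  // capacity assumed a power of two
  for (ToySlot& s : aSlots) {
    s.placed = false;
  }
  for (std::size_t i = 0; i < aSlots.size();) {
    ToySlot& src = aSlots[i];
    if (!src.key || src.placed) {
      ++i;  // empty or already re-inserted: move on
      continue;
    }
    std::size_t h = *src.key & mask;  // home slot for this key
    while (true) {
      ToySlot& tgt = aSlots[h];
      if (!tgt.placed) {
        std::swap(src.key, tgt.key);  // displace whatever was in the target
        tgt.placed = true;            // the target now holds a finished element
        break;  // re-examine slot i, which may now hold the displaced key
      }
      h = (h + 1) & mask;             // keep probing past finished slots
    }
  }
}
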
1986 | |||||
1987 | // Prefer to use putNewInfallible; this function does not check | ||||
1988 | // invariants. | ||||
1989 | template <typename... Args> | ||||
1990 | void putNewInfallibleInternal(HashNumber aKeyHash, Args&&... aArgs) { | ||||
1991 | MOZ_ASSERT(mTable); | ||||
1992 | |||||
1993 | Slot slot = findNonLiveSlot(aKeyHash); | ||||
1994 | |||||
1995 | if (slot.isRemoved()) { | ||||
1996 | mRemovedCount--; | ||||
1997 | aKeyHash |= sCollisionBit; | ||||
1998 | } | ||||
1999 | |||||
2000 | slot.setLive(aKeyHash, std::forward<Args>(aArgs)...); | ||||
2001 | mEntryCount++; | ||||
2002 | #ifdef DEBUG | ||||
2003 | mMutationCount++; | ||||
2004 | #endif | ||||
2005 | } | ||||
2006 | |||||
2007 | public: | ||||
2008 | void clear() { | ||||
2009 | forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.clear(); }); | ||||
2010 | mRemovedCount = 0; | ||||
2011 | mEntryCount = 0; | ||||
2012 | #ifdef DEBUG | ||||
2013 | mMutationCount++; | ||||
2014 | #endif | ||||
2015 | } | ||||
2016 | |||||
2017 | // Resize the table down to the smallest capacity that doesn't overload the | ||||
2018 | // table. Since we call shrinkIfUnderloaded() on every remove, you only need | ||||
2019 | // to call this after a bulk removal of items done without calling remove(). | ||||
2020 | void compact() { | ||||
2021 | if (empty()) { | ||||
2022 | // Free the entry storage. | ||||
2023 | freeTable(*this, mTable, capacity()); | ||||
2024 | mGen++; | ||||
2025 | mHashShift = hashShift(0); // gives minimum capacity on regrowth | ||||
2026 | mTable = nullptr; | ||||
2027 | mRemovedCount = 0; | ||||
2028 | return; | ||||
2029 | } | ||||
2030 | |||||
2031 | uint32_t bestCapacity = this->bestCapacity(mEntryCount); | ||||
2032 | MOZ_ASSERT(bestCapacity <= capacity()); | ||||
2033 | |||||
2034 | if (bestCapacity < capacity()) { | ||||
2035 | (void)changeTableSize(bestCapacity, DontReportFailure); | ||||
2036 | } | ||||
2037 | } | ||||
2038 | |||||
2039 | void clearAndCompact() { | ||||
2040 | clear(); | ||||
2041 | compact(); | ||||
2042 | } | ||||
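
A quick note on choosing between the two calls above: clear() destroys the entries but keeps the allocated slot storage for reuse, while clearAndCompact() also releases that storage (compact() frees the table entirely when it is empty). The sketch below is a hypothetical usage example (resetCache is not from this header), assuming the public mozilla::HashSet wrapper declared earlier in this header.

#include <cstdint>
#include "mozilla/HashTable.h"

// Hypothetical helper: reset a cache, keeping or releasing its storage.
void resetCache(mozilla::HashSet<uint32_t>& aCache, bool aWillRefill) {
  if (aWillRefill) {
    aCache.clear();            // entries destroyed, slot storage kept for reuse
  } else {
    aCache.clearAndCompact();  // entries destroyed and the storage freed
  }
}
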
2043 | |||||
2044 | [[nodiscard]] bool reserve(uint32_t aLen) { | ||||
2045 | if (aLen == 0) { | ||||
2046 | return true; | ||||
2047 | } | ||||
2048 | |||||
2049 | if (MOZ_UNLIKELY(aLen > sMaxInit)) { | ||||
2050 | this->reportAllocOverflow(); | ||||
2051 | return false; | ||||
2052 | } | ||||
2053 | |||||
2054 | uint32_t bestCapacity = this->bestCapacity(aLen); | ||||
2055 | if (bestCapacity <= capacity()) { | ||||
2056 | return true; // Capacity is already sufficient. | ||||
2057 | } | ||||
2058 | |||||
2059 | RebuildStatus status = changeTableSize(bestCapacity, ReportFailure); | ||||
2060 | MOZ_ASSERT(status != NotOverloaded); | ||||
2061 | return status != RehashFailed; | ||||
2062 | } | ||||
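
reserve() only ever grows the entry storage, so reserving the final size up front pairs naturally with the infallible insertion path: once reserve() has succeeded, no later insertion needs to allocate. The sketch below is a hypothetical usage example (indexAll and its parameters are not from this header), assuming the public mozilla::HashMap wrapper and its reserve()/putNewInfallible() methods.

#include <cstdint>
#include "mozilla/HashTable.h"

// Hypothetical helper: build an id -> position map for ids known to be
// distinct. One fallible reserve() up front, then only infallible insertions.
bool indexAll(const uint32_t* aIds, uint32_t aCount,
              mozilla::HashMap<uint32_t, uint32_t>& aIndex) {
  if (!aIndex.reserve(aCount)) {
    return false;  // the only point at which allocation can fail
  }
  for (uint32_t i = 0; i < aCount; i++) {
    // putNewInfallible asserts the key is not already present and, thanks to
    // reserve(), never needs to allocate.
    aIndex.putNewInfallible(aIds[i], i);
  }
  return true;
}
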
2063 | |||||
2064 | Iterator iter() const { return Iterator(*this); } | ||||
2065 | |||||
2066 | ModIterator modIter() { return ModIterator(*this); } | ||||
2067 | |||||
2068 | Range all() const { return Range(*this); } | ||||
2069 | |||||
2070 | bool empty() const { return mEntryCount == 0; } | ||||
2071 | |||||
2072 | uint32_t count() const { return mEntryCount; } | ||||
2073 | |||||
2074 | uint32_t rawCapacity() const { return 1u << (kHashNumberBits - mHashShift); } | ||||
2075 | |||||
2076 | uint32_t capacity() const { return mTable ? rawCapacity() : 0; } | ||||
2077 | |||||
2078 | Generation generation() const { return Generation(mGen); } | ||||
2079 | |||||
2080 | size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { | ||||
2081 | return aMallocSizeOf(mTable); | ||||
2082 | } | ||||
2083 | |||||
2084 | size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { | ||||
2085 | return aMallocSizeOf(this) + shallowSizeOfExcludingThis(aMallocSizeOf); | ||||
2086 | } | ||||
2087 | |||||
2088 | MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const { | ||||
2089 | if (empty()) { | ||||
2090 | return Ptr(); | ||||
2091 | } | ||||
2092 | |||||
2093 | HashNumber inputHash; | ||||
2094 | if (!MaybeGetHash<HashPolicy>(aLookup, &inputHash)) { | ||||
2095 | return Ptr(); | ||||
2096 | } | ||||
2097 | |||||
2098 | HashNumber keyHash = prepareHash(inputHash); | ||||
2099 | return Ptr(lookup<ForNonAdd>(aLookup, keyHash), *this); | ||||
2100 | } | ||||
2101 | |||||
2102 | MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const { | ||||
2103 | ReentrancyGuard g(*this); | ||||
2104 | return readonlyThreadsafeLookup(aLookup); | ||||
2105 | } | ||||
2106 | |||||
2107 | MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) { | ||||
2108 | ReentrancyGuard g(*this); | ||||
2109 | |||||
2110 | HashNumber inputHash; | ||||
2111 | if (!EnsureHash<HashPolicy>(aLookup, &inputHash)) { | ||||
2112 | return AddPtr(); | ||||
2113 | } | ||||
2114 | |||||
2115 | HashNumber keyHash = prepareHash(inputHash); | ||||
2116 | |||||
2117 | if (!mTable) { | ||||
2118 | return AddPtr(*this, keyHash); | ||||
2119 | } | ||||
2120 | |||||
2121 | // Directly call the constructor in the return statement to avoid | ||||
2122 | // excess copying when building with Visual Studio 2017. | ||||
2123 | // See bug 1385181. | ||||
2124 | return AddPtr(lookup<ForAdd>(aLookup, keyHash), *this, keyHash); | ||||
2125 | } | ||||
2126 | |||||
2127 | template <typename... Args> | ||||
2128 | [[nodiscard]] bool add(AddPtr& aPtr, Args&&... aArgs) { | ||||
2129 | ReentrancyGuard g(*this); | ||||
2130 | MOZ_ASSERT_IF(aPtr.isValid(), mTable); | ||||
2131 | MOZ_ASSERT_IF(aPtr.isValid(), aPtr.mTable == this); | ||||
2132 | MOZ_ASSERT(!aPtr.found()); | ||||
2133 | MOZ_ASSERT(!(aPtr.mKeyHash & sCollisionBit)); | ||||
2134 | |||||
2135 | // Check for error from ensureHash() here. | ||||
2136 | if (!aPtr.isLive()) { | ||||
2137 | return false; | ||||
2138 | } | ||||
2139 | |||||
2140 | MOZ_ASSERT(aPtr.mGeneration == generation()); | ||||
2141 | #ifdef DEBUG | ||||
2142 | MOZ_ASSERT(aPtr.mMutationCount == mMutationCount); | ||||
2143 | #endif | ||||
2144 | |||||
2145 | if (!aPtr.isValid()) { | ||||
2146 | MOZ_ASSERT(!mTable && mEntryCount == 0); | ||||
2147 | uint32_t newCapacity = rawCapacity(); | ||||
2148 | RebuildStatus status = changeTableSize(newCapacity, ReportFailure); | ||||
2149 | MOZ_ASSERT(status != NotOverloaded); | ||||
2150 | if (status == RehashFailed) { | ||||
2151 | return false; | ||||
2152 | } | ||||
2153 | aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash); | ||||
2154 | |||||
2155 | } else if (aPtr.mSlot.isRemoved()) { | ||||
2156 | // Changing an entry from removed to live does not affect whether we are | ||||
2157 | // overloaded and can be handled separately. | ||||
2158 | if (!this->checkSimulatedOOM()) { | ||||
2159 | return false; | ||||
2160 | } | ||||
2161 | mRemovedCount--; | ||||
2162 | aPtr.mKeyHash |= sCollisionBit; | ||||
2163 | |||||
2164 | } else { | ||||
2165 | // Preserve the validity of |aPtr.mSlot|. | ||||
2166 | RebuildStatus status = rehashIfOverloaded(); | ||||
2167 | if (status == RehashFailed) { | ||||
2168 | return false; | ||||
2169 | } | ||||
2170 | if (status == NotOverloaded && !this->checkSimulatedOOM()) { | ||||
2171 | return false; | ||||
2172 | } | ||||
2173 | if (status == Rehashed) { | ||||
2174 | aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash); | ||||
2175 | } | ||||
2176 | } | ||||
2177 | |||||
2178 | aPtr.mSlot.setLive(aPtr.mKeyHash, std::forward<Args>(aArgs)...); | ||||
2179 | mEntryCount++; | ||||
2180 | #ifdef DEBUG | ||||
2181 | mMutationCount++; | ||||
2182 | aPtr.mGeneration = generation(); | ||||
2183 | aPtr.mMutationCount = mMutationCount; | ||||
2184 | #endif | ||||
2185 | return true; | ||||
2186 | } | ||||
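
lookupForAdd() and add() together implement "find or insert" with a single hash computation: the AddPtr either points at the existing entry or records the key hash and slot where a new one should go, and add() finishes the insertion, re-running findNonLiveSlot() if it had to grow or rehash the table. A hypothetical usage sketch with the public mozilla::HashMap wrapper (countId is not from this header):

#include <cstdint>
#include "mozilla/HashTable.h"

// Count how many times each id has been seen; returns false only on OOM.
bool countId(mozilla::HashMap<uint32_t, uint32_t>& aCounts, uint32_t aId) {
  auto p = aCounts.lookupForAdd(aId);  // hashes aId exactly once
  if (p) {
    p->value()++;                      // found: bump the existing entry
    return true;
  }
  return aCounts.add(p, aId, 1);       // not found: insert via the remembered slot
}
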
2187 | |||||
2188 | // Note: |aLookup| may reference pieces of arguments in |aArgs|, so this | ||||
2189 | // function must take care not to use |aLookup| after moving |aArgs|. | ||||
2190 | template <typename... Args> | ||||
2191 | void putNewInfallible(const Lookup& aLookup, Args&&... aArgs) { | ||||
2192 | MOZ_ASSERT(!lookup(aLookup).found()); | ||||
2193 | ReentrancyGuard g(*this); | ||||
2194 | HashNumber keyHash = prepareHash(HashPolicy::hash(aLookup)); | ||||
2195 | putNewInfallibleInternal(keyHash, std::forward<Args>(aArgs)...); | ||||
2196 | } | ||||
2197 | |||||
2198 | // Note: |aLookup| may alias arguments in |aArgs|, so this function must take | ||||
2199 | // care not to use |aLookup| after moving |aArgs|. | ||||
2200 | template <typename... Args> | ||||
2201 | [[nodiscard]] bool putNew(const Lookup& aLookup, Args&&... aArgs) { | ||||
2202 | MOZ_ASSERT(!lookup(aLookup).found()); | ||||
2203 | ReentrancyGuard g(*this); | ||||
2204 | if (!this->checkSimulatedOOM()) { | ||||
2205 | return false; | ||||
2206 | } | ||||
2207 | HashNumber inputHash; | ||||
2208 | if (!EnsureHash<HashPolicy>(aLookup, &inputHash)) { | ||||
2209 | return false; | ||||
2210 | } | ||||
2211 | HashNumber keyHash = prepareHash(inputHash); | ||||
2212 | if (rehashIfOverloaded() == RehashFailed) { | ||||
2213 | return false; | ||||
2214 | } | ||||
2215 | putNewInfallibleInternal(keyHash, std::forward<Args>(aArgs)...); | ||||
2216 | return true; | ||||
2217 | } | ||||
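
The aliasing note above matters in practice because the lookup argument is often the very object being moved into the table. The sketch below is hypothetical (CStringHasher and addName are not from this header) and assumes the public mozilla::HashSet wrapper plus Gecko's nsCString and mozilla::HashString(); putNew() hashes |aLookup| before forwarding the moved argument into the new entry, which is what makes the pattern safe.

#include <utility>
#include "mozilla/HashFunctions.h"  // mozilla::HashString
#include "mozilla/HashTable.h"
#include "nsString.h"

// Hypothetical hash policy for nsCString elements (illustrative only).
struct CStringHasher {
  using Key = nsCString;
  using Lookup = nsCString;
  static mozilla::HashNumber hash(const Lookup& aLookup) {
    return mozilla::HashString(aLookup.get());
  }
  static bool match(const Key& aKey, const Lookup& aLookup) {
    return aKey == aLookup;
  }
};

bool addName(mozilla::HashSet<nsCString, CStringHasher>& aNames,
             nsCString&& aName) {
  // |aName| is both the Lookup and the argument that gets moved into the new
  // entry; putNew() hashes it first and only then forwards it.
  return aNames.putNew(aName, std::move(aName));
}
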
2218 | |||||
2219 | // Note: |aLookup| may reference pieces of arguments in |aArgs|, so this | ||||
2220 | // function must take care not to use |aLookup| after moving |aArgs|. | ||||
2221 | template <typename... Args> | ||||
2222 | [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup, | ||||
2223 | Args&&... aArgs) { | ||||
2224 | // Check for error from ensureHash() here. | ||||
2225 | if (!aPtr.isLive()) { | ||||
2226 | return false; | ||||
2227 | } | ||||
2228 | #ifdef DEBUG | ||||
2229 | aPtr.mGeneration = generation(); | ||||
2230 | aPtr.mMutationCount = mMutationCount; | ||||
2231 | #endif | ||||
2232 | if (mTable) { | ||||
2233 | ReentrancyGuard g(*this); | ||||
2234 | // Check that aLookup has not been destroyed. | ||||
2235 | MOZ_ASSERT(prepareHash(HashPolicy::hash(aLookup)) == aPtr.mKeyHash); | ||||
2236 | aPtr.mSlot = lookup<ForAdd>(aLookup, aPtr.mKeyHash); | ||||
2237 | if (aPtr.found()) { | ||||
2238 | return true; | ||||
2239 | } | ||||
2240 | } else { | ||||
2241 | // Clear aPtr so it's invalid; add() will allocate storage and redo the | ||||
2242 | // lookup. | ||||
2243 | aPtr.mSlot = Slot(nullptr, nullptr); | ||||
2244 | } | ||||
2245 | return add(aPtr, std::forward<Args>(aArgs)...); | ||||
2246 | } | ||||
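
relookupOrAdd() exists for the case where fallible work runs between lookupForAdd() and the insertion; that work may add entries or resize the table, leaving the AddPtr stale, so the entry is looked up again with the cached key hash before adding. A hypothetical sketch (ValueMap, makeValue and getOrCreate are not from this header) using the public mozilla::HashMap wrapper:

#include <cstdint>
#include "mozilla/HashTable.h"
#include "mozilla/UniquePtr.h"

using ValueMap = mozilla::HashMap<uint32_t, mozilla::UniquePtr<int>>;

// Stand-in for fallible work that may also insert other entries into |aMap|.
static mozilla::UniquePtr<int> makeValue(uint32_t aKey, ValueMap& aMap) {
  return mozilla::MakeUnique<int>(static_cast<int>(aKey));
}

static int* getOrCreate(ValueMap& aMap, uint32_t aKey) {
  auto p = aMap.lookupForAdd(aKey);
  if (p) {
    return p->value().get();
  }
  mozilla::UniquePtr<int> value = makeValue(aKey, aMap);  // may mutate aMap
  if (!value || !aMap.relookupOrAdd(p, aKey, std::move(value))) {
    return nullptr;  // OOM (or makeValue failed)
  }
  return p->value().get();  // p was refreshed by relookupOrAdd
}
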
2247 | |||||
2248 | void remove(Ptr aPtr) { | ||||
2249 | MOZ_ASSERT(mTable); | ||||
2250 | ReentrancyGuard g(*this); | ||||
2251 | MOZ_ASSERT(aPtr.found()); | ||||
2252 | MOZ_ASSERT(aPtr.mGeneration == generation()); | ||||
2253 | remove(aPtr.mSlot); | ||||
2254 | shrinkIfUnderloaded(); | ||||
2255 | } | ||||
2256 | |||||
2257 | void rekeyWithoutRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) { | ||||
2258 | MOZ_ASSERT(mTable); | ||||
2259 | ReentrancyGuard g(*this); | ||||
2260 | MOZ_ASSERT(aPtr.found()); | ||||
2261 | MOZ_ASSERT(aPtr.mGeneration == generation()); | ||||
2262 | typename HashTableEntry<T>::NonConstT t(std::move(*aPtr)); | ||||
2263 | HashPolicy::setKey(t, const_cast<Key&>(aKey)); | ||||
2264 | remove(aPtr.mSlot); | ||||
2265 | HashNumber keyHash = prepareHash(HashPolicy::hash(aLookup)); | ||||
2266 | putNewInfallibleInternal(keyHash, std::move(t)); | ||||
2267 | } | ||||
2268 | |||||
2269 | void rekeyAndMaybeRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) { | ||||
2270 | rekeyWithoutRehash(aPtr, aLookup, aKey); | ||||
2271 | infallibleRehashIfOverloaded(); | ||||
2272 | } | ||||
2273 | }; | ||||
2274 | |||||
2275 | } // namespace detail | ||||
2276 | } // namespace mozilla | ||||
2277 | |||||
2278 | #endif /* mozilla_HashTable_h */ |