File: | var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h |
Warning: | line 273, column 7 Excessive padding in 'class js::gc::GCRuntime' (78 padding bytes, where 6 is optimal). Optimal fields order: rt, numActiveZoneIters, queuePos, lastLastDitchTime, systemZone, sharedAtomsZone_, delayedMarkingList, markLaterArenas, helperThreadRatio, maxHelperThreads, helperThreadCount, maxMarkingThreads, markingThreadCount, maxParallelThreads, dispatchedParallelTasks, createBudgetCallback, permanentAtoms, permanentWellKnownSymbols, nextCellUniqueId_, verifyPreData, lastGCStartTime_, lastGCEndTime_, initialized, minorGCNumber, majorGCNumber, number, sliceNumber, reservedMarkingThreads, largeBuffersToFreeAfterMinorGC, initialMinorGCNumber, sweepGroups, currentSweepGroup, sweepActions, sweepZone, foregroundFinalizedZone, zonesCompacted, relocatedArenasToRelease, markingValidator, defaultTimeBudgetMS_, maybeMarkStackLimit, inPageLoadCount, lastAllocRateUpdateTime, collectorTimeSinceAllocRateUpdate, emptyChunks_, availableChunks_, fullChunks_, backgroundSweepZones, zonesToMaybeCompact, gcCallback, gcDoCycleCollectionCallback, tenuredCallback, hostCleanupFinalizationRegistryCallback, grayRootTracer, stringStats, heapSize, queuedParallelTasks, weakCachesToSweep, markers, sweepingTracer, rootsHash, buffersToFreeAfterMinorGC, cellsToAssertNotGray, atomMarking, testMarkQueue, mainThreadContext, zones_, selectedForMarking, lock, storeBufferLock, delayedMarkingLock, bufferAllocatorLock, maybeAtomsToSweep, sweepTask, freeTask, decommitTask, stringBuffersToReleaseAfterMinorGC, finalizeCallbacks, updateWeakPointerZonesCallbacks, updateWeakPointerCompartmentCallbacks, nurseryCollectionCallbacks, blackRootTracers, lifoBlocksToFree, lifoBlocksToFreeAfterFullMinorGC, lifoBlocksToFreeAfterNextMinorGC, allocTask, markTask, unmarkTask, tunables, storeBuffer_, foregroundFinalizedArenas, nursery_, stats_, schedulingState, majorGCTriggerReason, heapState_, minEmptyChunkCount_, initialReason, incrementalState, initialState, sweepGroupIndex, sweepMarkResult, 
zealModeBits, zealFrequency, nextScheduled, zealSliceBudget, gcCallbackDepth, maybeGcOptions, delayedMarkingWorkAdded, fullGCRequested, incrementalGCEnabled, perZoneGCEnabled, cleanUpEverything, grayBitsValid, isIncremental, isFull, isCompacting, useParallelMarking, useZeal, lastMarkSlice, safeToYield, markOnBackgroundThreadDuringSweeping, useBackgroundThreads, haveDiscardedJITCodeThisSlice, hadShutdownGC, requestSliceAfterBackgroundTask, sweepAllocKind, abortSweepAfterCurrentGroup, foregroundFinalizedAllocKind, queueMarkColor, startedCompacting, compactingEnabled, nurseryEnabled, parallelMarkingEnabled, rootsRemoved, deterministicOnly, fullCompartmentChecks, alwaysPreserveCode, lowMemoryState, consider reordering the fields or adding explicit padding members |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- |
2 | * vim: set ts=8 sts=2 et sw=2 tw=80: |
3 | * This Source Code Form is subject to the terms of the Mozilla Public |
4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | |
7 | #ifndef gc_GCRuntime_h |
8 | #define gc_GCRuntime_h |
9 | |
10 | #include "mozilla/Atomics.h" |
11 | #include "mozilla/DoublyLinkedList.h" |
12 | #include "mozilla/EnumSet.h" |
13 | #include "mozilla/Maybe.h" |
14 | #include "mozilla/TimeStamp.h" |
15 | |
16 | #include "gc/ArenaList.h" |
17 | #include "gc/AtomMarking.h" |
18 | #include "gc/GCContext.h" |
19 | #include "gc/GCMarker.h" |
20 | #include "gc/GCParallelTask.h" |
21 | #include "gc/IteratorUtils.h" |
22 | #include "gc/Memory.h" |
23 | #include "gc/Nursery.h" |
24 | #include "gc/Scheduling.h" |
25 | #include "gc/Statistics.h" |
26 | #include "gc/StoreBuffer.h" |
27 | #include "js/friend/PerformanceHint.h" |
28 | #include "js/GCAnnotations.h" |
29 | #include "js/UniquePtr.h" |
30 | #include "vm/AtomsTable.h" |
31 | |
32 | namespace js { |
33 | |
34 | class AutoLockGC; |
35 | class AutoLockGCBgAlloc; |
36 | class AutoLockHelperThreadState; |
37 | class FinalizationRegistryObject; |
38 | class FinalizationRecordObject; |
39 | class FinalizationQueueObject; |
40 | class GlobalObject; |
41 | class VerifyPreTracer; |
42 | class WeakRefObject; |
43 | |
44 | namespace gc { |
45 | |
// Vector of tenured cells recorded while checking black-to-gray edges
// (the alias name reflects its use; see callers for details).
using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>;

// Vector of zones with a small inline capacity of four entries.
using ZoneVector = Vector<JS::Zone*, 4, SystemAllocPolicy>;
48 | |
49 | class AutoCallGCCallbacks; |
50 | class AutoGCSession; |
51 | class AutoHeapSession; |
52 | class AutoTraceSession; |
53 | class BufferAllocator; |
54 | struct FinalizePhase; |
55 | class MarkingValidator; |
56 | struct MovingTracer; |
57 | class ParallelMarkTask; |
58 | enum class ShouldCheckThresholds; |
59 | class SweepGroupsIter; |
60 | |
// Interface to a sweep action: a unit of incremental sweep work that is run
// repeatedly against a slice budget until it reports completion.
struct SweepAction {
  // The arguments passed to each action.
  struct Args {
    GCRuntime* gc;            // The collector the action operates on.
    JS::GCContext* gcx;       // Context for the thread running the action.
    JS::SliceBudget& budget;  // Budget limiting how much work may be done.
  };

  virtual ~SweepAction() = default;

  // Perform (more of) the action; the returned IncrementalProgress reports
  // whether the action completed within the budget.
  virtual IncrementalProgress run(Args& state) = 0;

  // Debug hook: assert the action has run to completion.
  virtual void assertFinished() const = 0;

  // Whether the action may be skipped entirely. Defaults to false.
  virtual bool shouldSkip() { return false; }
};
75 | |
// A pool of ArenaChunks kept as a singly linked list with an element count.
// The pool is movable but not copyable, and must be emptied before it is
// destroyed.
class ChunkPool {
  ArenaChunk* head_;  // First chunk in the list, or nullptr when empty.
  size_t count_;      // Number of chunks currently in the pool.

 public:
  ChunkPool() : head_(nullptr), count_(0) {}
  ChunkPool(const ChunkPool& other) = delete;
  ChunkPool(ChunkPool&& other) { *this = std::move(other); }

  ~ChunkPool() {
    // The pool must be drained before destruction; it does not free chunks.
    MOZ_ASSERT(!head_);
    MOZ_ASSERT(count_ == 0);
  }

  ChunkPool& operator=(const ChunkPool& other) = delete;
  // Move-assignment transfers the whole list and leaves |other| empty.
  ChunkPool& operator=(ChunkPool&& other) {
    head_ = other.head_;
    other.head_ = nullptr;
    count_ = other.count_;
    other.count_ = 0;
    return *this;
  }

  bool empty() const { return !head_; }
  size_t count() const { return count_; }

  // Return the first chunk. The pool must not be empty.
  ArenaChunk* head() {
    MOZ_ASSERT(head_);
    return head_;
  }
  ArenaChunk* pop();
  void push(ArenaChunk* chunk);
  ArenaChunk* remove(ArenaChunk* chunk);

  // Sort the list (implemented out of line using mergeSort below).
  void sort();

 private:
  ArenaChunk* mergeSort(ArenaChunk* list, size_t count);
  bool isSorted() const;

#ifdef DEBUG
 public:
  bool contains(ArenaChunk* chunk) const;
  bool verify() const;
  void verifyChunks() const;
#endif

 public:
  // Pool mutation does not invalidate an Iter unless the mutation
  // is of the ArenaChunk currently being visited by the Iter.
  class Iter {
   public:
    explicit Iter(ChunkPool& pool) : current_(pool.head_) {}
    bool done() const { return !current_; }
    void next();
    ArenaChunk* get() const { return current_; }
    operator ArenaChunk*() const { return get(); }
    ArenaChunk* operator->() const { return get(); }

   private:
    ArenaChunk* current_;  // Chunk the iterator currently points at.
  };
};
139 | |
// Parallel task that performs marking on a helper thread, limited by the
// slice budget supplied via setBudget().
class BackgroundMarkTask : public GCParallelTask {
 public:
  explicit BackgroundMarkTask(GCRuntime* gc);
  // Set the budget used when the task next runs.
  void setBudget(const JS::SliceBudget& budget) { this->budget = budget; }
  void run(AutoLockHelperThreadState& lock) override;

 private:
  JS::SliceBudget budget;  // Budget consulted by run().
};
149 | |
// Parallel task that unmarks a recorded set of zones on a helper thread
// (see the out-of-line unmark() for the actual work).
class BackgroundUnmarkTask : public GCParallelTask {
 public:
  explicit BackgroundUnmarkTask(GCRuntime* gc);
  // Record the zones to be processed before the task is started.
  void initZones();
  void run(AutoLockHelperThreadState& lock) override;

 private:
  void unmark();

  ZoneVector zones;  // Zones captured by initZones() for this run.
};
161 | |
// Parallel task that performs background sweeping on a helper thread.
class BackgroundSweepTask : public GCParallelTask {
 public:
  explicit BackgroundSweepTask(GCRuntime* gc);
  void run(AutoLockHelperThreadState& lock) override;
};
167 | |
// Parallel task that frees queued memory blocks on a helper thread.
class BackgroundFreeTask : public GCParallelTask {
 public:
  explicit BackgroundFreeTask(GCRuntime* gc);
  void run(AutoLockHelperThreadState& lock) override;
};
173 | |
// Performs extra allocation off thread so that when memory is required on the
// main thread it will already be available and waiting.
class BackgroundAllocTask : public GCParallelTask {
  // Guarded by the GC lock.
  GCLockData<ChunkPool&> chunkPool_;

  // Whether background allocation is enabled; fixed at construction.
  const bool enabled_;

 public:
  BackgroundAllocTask(GCRuntime* gc, ChunkPool& pool);
  bool enabled() const { return enabled_; }

  void run(AutoLockHelperThreadState& lock) override;
};
188 | |
// Search the provided chunks for free arenas and decommit them.
class BackgroundDecommitTask : public GCParallelTask {
 public:
  explicit BackgroundDecommitTask(GCRuntime* gc);
  void run(AutoLockHelperThreadState& lock) override;
};
195 | |
// A callback function of type F paired with the opaque |data| pointer that
// callers registered alongside it. Default-constructed callbacks hold null
// for both members.
template <typename F>
struct Callback {
  F op;
  void* data;

  Callback() : op(nullptr), data(nullptr) {}
  Callback(F opArg, void* dataArg) : op(opArg), data(dataArg) {}
};
204 | |
// A vector of registered (callback, data) pairs with small inline capacity.
template <typename F>
using CallbackVector = Vector<Callback<F>, 4, SystemAllocPolicy>;

// Map from the address of a rooted Value to the name supplied when the root
// was added.
using RootedValueMap =
    HashMap<Value*, const char*, DefaultHasher<Value*>, SystemAllocPolicy>;

// A set of AllocKinds represented as a bitset over a 64-bit word.
using AllocKinds = mozilla::EnumSet<AllocKind, uint64_t>;
212 | |
// A singly linked list of zones.
class ZoneList {
  static Zone* const End;  // Sentinel value (defined out of line).

  Zone* head;
  Zone* tail;

 public:
  ZoneList();
  ~ZoneList();

  bool isEmpty() const;
  Zone* front() const;

  // Add a single zone at the front or back of the list.
  void prepend(Zone* zone);
  void append(Zone* zone);
  // Splice another list onto the front or back (consumes |other|).
  void prependList(ZoneList&& other);
  void appendList(ZoneList&& other);
  // Remove and return the first zone.
  Zone* removeFront();
  void clear();

 private:
  explicit ZoneList(Zone* singleZone);
  // Check internal invariants (defined out of line).
  void check() const;

  // Lists are not copyable; use the move-based splice operations above.
  ZoneList(const ZoneList& other) = delete;
  ZoneList& operator=(const ZoneList& other) = delete;
};
241 | |
// Identifies a single weak cache due for sweeping together with the zone it
// belongs to.
struct WeakCacheToSweep {
  JS::detail::WeakCacheBase* cache;
  JS::Zone* zone;
};
246 | |
// Iterator that visits the weak caches of the zones in a sweep group,
// yielding a WeakCacheToSweep for each one.
class WeakCacheSweepIterator {
  using WeakCacheBase = JS::detail::WeakCacheBase;

  JS::Zone* sweepZone;        // Zone currently being visited.
  WeakCacheBase* sweepCache;  // Cache currently being visited in that zone.

 public:
  explicit WeakCacheSweepIterator(JS::Zone* sweepGroup);

  bool done() const;
  WeakCacheToSweep get() const;
  void next();

 private:
  // Advance to the next valid (zone, cache) position; defined out of line.
  void settle();
};
263 | |
// Tracer used during sweeping. GenericTracerImpl dispatches each traced edge
// to the private onEdge() template (defined out of line).
struct SweepingTracer final : public GenericTracerImpl<SweepingTracer> {
  explicit SweepingTracer(JSRuntime* rt);

 private:
  // Called for each edge of type T encountered while tracing.
  template <typename T>
  void onEdge(T** thingp, const char* name);
  friend class GenericTracerImpl<SweepingTracer>;
};
272 | |
273 | class GCRuntime { |
Excessive padding in 'class js::gc::GCRuntime' (78 padding bytes, where 6 is optimal). Optimal fields order: rt, numActiveZoneIters, queuePos, lastLastDitchTime, systemZone, sharedAtomsZone_, delayedMarkingList, markLaterArenas, helperThreadRatio, maxHelperThreads, helperThreadCount, maxMarkingThreads, markingThreadCount, maxParallelThreads, dispatchedParallelTasks, createBudgetCallback, permanentAtoms, permanentWellKnownSymbols, nextCellUniqueId_, verifyPreData, lastGCStartTime_, lastGCEndTime_, initialized, minorGCNumber, majorGCNumber, number, sliceNumber, reservedMarkingThreads, largeBuffersToFreeAfterMinorGC, initialMinorGCNumber, sweepGroups, currentSweepGroup, sweepActions, sweepZone, foregroundFinalizedZone, zonesCompacted, relocatedArenasToRelease, markingValidator, defaultTimeBudgetMS_, maybeMarkStackLimit, inPageLoadCount, lastAllocRateUpdateTime, collectorTimeSinceAllocRateUpdate, emptyChunks_, availableChunks_, fullChunks_, backgroundSweepZones, zonesToMaybeCompact, gcCallback, gcDoCycleCollectionCallback, tenuredCallback, hostCleanupFinalizationRegistryCallback, grayRootTracer, stringStats, heapSize, queuedParallelTasks, weakCachesToSweep, markers, sweepingTracer, rootsHash, buffersToFreeAfterMinorGC, cellsToAssertNotGray, atomMarking, testMarkQueue, mainThreadContext, zones_, selectedForMarking, lock, storeBufferLock, delayedMarkingLock, bufferAllocatorLock, maybeAtomsToSweep, sweepTask, freeTask, decommitTask, stringBuffersToReleaseAfterMinorGC, finalizeCallbacks, updateWeakPointerZonesCallbacks, updateWeakPointerCompartmentCallbacks, nurseryCollectionCallbacks, blackRootTracers, lifoBlocksToFree, lifoBlocksToFreeAfterFullMinorGC, lifoBlocksToFreeAfterNextMinorGC, allocTask, markTask, unmarkTask, tunables, storeBuffer_, foregroundFinalizedArenas, nursery_, stats_, schedulingState, majorGCTriggerReason, heapState_, minEmptyChunkCount_, initialReason, incrementalState, initialState, sweepGroupIndex, sweepMarkResult, zealModeBits, zealFrequency, 
nextScheduled, zealSliceBudget, gcCallbackDepth, maybeGcOptions, delayedMarkingWorkAdded, fullGCRequested, incrementalGCEnabled, perZoneGCEnabled, cleanUpEverything, grayBitsValid, isIncremental, isFull, isCompacting, useParallelMarking, useZeal, lastMarkSlice, safeToYield, markOnBackgroundThreadDuringSweeping, useBackgroundThreads, haveDiscardedJITCodeThisSlice, hadShutdownGC, requestSliceAfterBackgroundTask, sweepAllocKind, abortSweepAfterCurrentGroup, foregroundFinalizedAllocKind, queueMarkColor, startedCompacting, compactingEnabled, nurseryEnabled, parallelMarkingEnabled, rootsRemoved, deterministicOnly, fullCompartmentChecks, alwaysPreserveCode, lowMemoryState, consider reordering the fields or adding explicit padding members | |
274 | public: |
275 | explicit GCRuntime(JSRuntime* rt); |
276 | [[nodiscard]] bool init(uint32_t maxbytes); |
277 | bool wasInitialized() const { return initialized; } |
278 | void finishRoots(); |
279 | void finish(); |
280 | |
281 | Zone* atomsZone() { |
282 | Zone* zone = zones()[0]; |
283 | MOZ_ASSERT(JS::shadow::Zone::from(zone)->isAtomsZone())do { static_assert( mozilla::detail::AssertionConditionType< decltype(JS::shadow::Zone::from(zone)->isAtomsZone())>:: isValid, "invalid assertion condition"); if ((__builtin_expect (!!(!(!!(JS::shadow::Zone::from(zone)->isAtomsZone()))), 0 ))) { do { } while (false); MOZ_ReportAssertionFailure("JS::shadow::Zone::from(zone)->isAtomsZone()" , "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 283); AnnotateMozCrashReason("MOZ_ASSERT" "(" "JS::shadow::Zone::from(zone)->isAtomsZone()" ")"); do { *((volatile int*)__null) = 283; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); |
284 | return zone; |
285 | } |
286 | Zone* maybeSharedAtomsZone() { return sharedAtomsZone_; } |
287 | |
288 | [[nodiscard]] bool freezeSharedAtomsZone(); |
289 | void restoreSharedAtomsZone(); |
290 | |
291 | JS::HeapState heapState() const { return heapState_; } |
292 | |
293 | bool hasZealMode(ZealMode mode) const; |
294 | bool hasAnyZealModeOf(mozilla::EnumSet<ZealMode> mode) const; |
295 | void clearZealMode(ZealMode mode); |
296 | bool needZealousGC(); |
297 | bool zealModeControlsYieldPoint() const; |
298 | |
299 | [[nodiscard]] bool addRoot(Value* vp, const char* name); |
300 | void removeRoot(Value* vp); |
301 | |
302 | [[nodiscard]] bool setParameter(JSContext* cx, JSGCParamKey key, |
303 | uint32_t value); |
304 | void resetParameter(JSContext* cx, JSGCParamKey key); |
305 | uint32_t getParameter(JSGCParamKey key); |
306 | |
307 | void setPerformanceHint(PerformanceHint hint); |
308 | bool isInPageLoad() const { return inPageLoadCount != 0; } |
309 | |
310 | [[nodiscard]] bool triggerGC(JS::GCReason reason); |
311 | // Check whether to trigger a zone GC after allocating GC cells. |
312 | void maybeTriggerGCAfterAlloc(Zone* zone); |
313 | // Check whether to trigger a zone GC after malloc memory. |
314 | void maybeTriggerGCAfterMalloc(Zone* zone); |
315 | bool maybeTriggerGCAfterMalloc(Zone* zone, const HeapSize& heap, |
316 | const HeapThreshold& threshold, |
317 | JS::GCReason reason); |
318 | // The return value indicates if we were able to do the GC. |
319 | bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes, |
320 | size_t thresholdBytes); |
321 | |
322 | void maybeGC(); |
323 | |
324 | // Return whether we want to run a major GC. If eagerOk is true, include eager |
325 | // triggers (eg EAGER_ALLOC_TRIGGER) in this determination, and schedule all |
326 | // zones that exceed the eager thresholds. |
327 | JS::GCReason wantMajorGC(bool eagerOk); |
328 | bool checkEagerAllocTrigger(const HeapSize& size, |
329 | const HeapThreshold& threshold); |
330 | |
331 | // Do a minor GC if requested, followed by a major GC if requested. The return |
332 | // value indicates whether a major GC was performed. |
333 | bool gcIfRequested() { return gcIfRequestedImpl(false); } |
334 | |
335 | // Internal function to do a GC if previously requested. But if not and |
336 | // eagerOk, do an eager GC for all Zones that have exceeded the eager |
337 | // thresholds. |
338 | // |
339 | // Return whether a major GC was performed or started. |
340 | bool gcIfRequestedImpl(bool eagerOk); |
341 | |
342 | void gc(JS::GCOptions options, JS::GCReason reason); |
343 | void startGC(JS::GCOptions options, JS::GCReason reason, |
344 | const JS::SliceBudget& budget); |
345 | void gcSlice(JS::GCReason reason, const JS::SliceBudget& budget); |
346 | void finishGC(JS::GCReason reason); |
347 | void abortGC(); |
348 | void startDebugGC(JS::GCOptions options, const JS::SliceBudget& budget); |
349 | void debugGCSlice(const JS::SliceBudget& budget); |
350 | |
351 | void runDebugGC(); |
352 | void notifyRootsRemoved(); |
353 | |
354 | enum TraceOrMarkRuntime { TraceRuntime, MarkRuntime }; |
355 | void traceRuntime(JSTracer* trc, AutoTraceSession& session); |
356 | void traceRuntimeForMinorGC(JSTracer* trc, AutoGCSession& session); |
357 | |
358 | void purgeRuntimeForMinorGC(); |
359 | |
360 | void shrinkBuffers(); |
361 | void onOutOfMallocMemory(); |
362 | void onOutOfMallocMemory(const AutoLockGC& lock); |
363 | |
364 | Nursery& nursery() { return nursery_.ref(); } |
365 | gc::StoreBuffer& storeBuffer() { return storeBuffer_.ref(); } |
366 | |
367 | void minorGC(JS::GCReason reason, |
368 | gcstats::PhaseKind phase = gcstats::PhaseKind::MINOR_GC) |
369 | JS_HAZ_GC_CALL; |
370 | void evictNursery(JS::GCReason reason = JS::GCReason::EVICT_NURSERY) { |
371 | minorGC(reason, gcstats::PhaseKind::EVICT_NURSERY); |
372 | } |
373 | |
374 | void* addressOfNurseryPosition() { |
375 | return nursery_.refNoCheck().addressOfPosition(); |
376 | } |
377 | |
378 | const void* addressOfLastBufferedWholeCell() { |
379 | return storeBuffer_.refNoCheck().addressOfLastBufferedWholeCell(); |
380 | } |
381 | |
382 | #ifdef JS_GC_ZEAL1 |
383 | const uint32_t* addressOfZealModeBits() { return &zealModeBits.refNoCheck(); } |
384 | void getZealBits(uint32_t* zealBits, uint32_t* frequency, |
385 | uint32_t* nextScheduled); |
386 | void setZeal(uint8_t zeal, uint32_t frequency); |
387 | void unsetZeal(uint8_t zeal); |
388 | bool parseAndSetZeal(const char* str); |
389 | void setNextScheduled(uint32_t count); |
390 | void verifyPreBarriers(); |
391 | void maybeVerifyPreBarriers(bool always); |
392 | bool selectForMarking(JSObject* object); |
393 | void clearSelectedForMarking(); |
394 | void setDeterministic(bool enable); |
395 | void setMarkStackLimit(size_t limit, AutoLockGC& lock); |
396 | #endif |
397 | |
398 | uint64_t nextCellUniqueId() { |
399 | MOZ_ASSERT(nextCellUniqueId_ > 0)do { static_assert( mozilla::detail::AssertionConditionType< decltype(nextCellUniqueId_ > 0)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(nextCellUniqueId_ > 0))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("nextCellUniqueId_ > 0" , "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 399); AnnotateMozCrashReason("MOZ_ASSERT" "(" "nextCellUniqueId_ > 0" ")"); do { *((volatile int*)__null) = 399; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); |
400 | uint64_t uid = ++nextCellUniqueId_; |
401 | return uid; |
402 | } |
403 | |
404 | void setLowMemoryState(bool newState) { lowMemoryState = newState; } |
405 | bool systemHasLowMemory() const { return lowMemoryState; } |
406 | |
407 | public: |
408 | // Internal public interface |
409 | ZoneVector& zones() { return zones_.ref(); } |
410 | gcstats::Statistics& stats() { return stats_.ref(); } |
411 | const gcstats::Statistics& stats() const { return stats_.ref(); } |
412 | State state() const { return incrementalState; } |
413 | bool isHeapCompacting() const { return state() == State::Compact; } |
414 | bool isForegroundSweeping() const { return state() == State::Sweep; } |
415 | bool isBackgroundSweeping() const { return sweepTask.wasStarted(); } |
416 | bool isBackgroundMarking() const { return markTask.wasStarted(); } |
417 | bool isBackgroundDecommitting() const { return decommitTask.wasStarted(); } |
418 | void waitBackgroundSweepEnd(); |
419 | void waitBackgroundDecommitEnd(); |
420 | void waitBackgroundAllocEnd() { allocTask.cancelAndWait(); } |
421 | void waitBackgroundFreeEnd(); |
422 | void waitForBackgroundTasks(); |
423 | bool isWaitingOnBackgroundTask() const; |
424 | |
425 | void lockGC() { lock.lock(); } |
426 | void unlockGC() { lock.unlock(); } |
427 | |
428 | void lockStoreBuffer() { storeBufferLock.lock(); } |
429 | void unlockStoreBuffer() { storeBufferLock.unlock(); } |
430 | |
431 | #ifdef DEBUG1 |
432 | void assertCurrentThreadHasLockedGC() const { |
433 | lock.assertOwnedByCurrentThread(); |
434 | } |
435 | void assertCurrentThreadHasLockedStoreBuffer() const { |
436 | storeBufferLock.assertOwnedByCurrentThread(); |
437 | } |
438 | #endif // DEBUG |
439 | |
440 | void setAlwaysPreserveCode() { alwaysPreserveCode = true; } |
441 | |
442 | void setIncrementalGCEnabled(bool enabled); |
443 | void setNurseryEnabled(bool enabled); |
444 | |
445 | bool isIncrementalGCEnabled() const { return incrementalGCEnabled; } |
446 | bool isPerZoneGCEnabled() const { return perZoneGCEnabled; } |
447 | bool isCompactingGCEnabled() const; |
448 | bool isParallelMarkingEnabled() const { return parallelMarkingEnabled; } |
449 | |
450 | bool isIncrementalGCInProgress() const { |
451 | return state() != State::NotActive && !isVerifyPreBarriersEnabled(); |
452 | } |
453 | |
454 | bool hasForegroundWork() const; |
455 | |
456 | bool isShrinkingGC() const { return gcOptions() == JS::GCOptions::Shrink; } |
457 | |
458 | bool isShutdownGC() const { return gcOptions() == JS::GCOptions::Shutdown; } |
459 | |
460 | #ifdef DEBUG1 |
461 | bool isShuttingDown() const { return hadShutdownGC; } |
462 | #endif |
463 | |
464 | bool initSweepActions(); |
465 | |
466 | void setGrayRootsTracer(JSGrayRootsTracer traceOp, void* data); |
467 | [[nodiscard]] bool addBlackRootsTracer(JSTraceDataOp traceOp, void* data); |
468 | void removeBlackRootsTracer(JSTraceDataOp traceOp, void* data); |
469 | void clearBlackAndGrayRootTracers(); |
470 | |
471 | void setGCCallback(JSGCCallback callback, void* data); |
472 | void callGCCallback(JSGCStatus status, JS::GCReason reason) const; |
473 | void setObjectsTenuredCallback(JSObjectsTenuredCallback callback, void* data); |
474 | void callObjectsTenuredCallback(); |
475 | [[nodiscard]] bool addFinalizeCallback(JSFinalizeCallback callback, |
476 | void* data); |
477 | void removeFinalizeCallback(JSFinalizeCallback callback); |
478 | void setHostCleanupFinalizationRegistryCallback( |
479 | JSHostCleanupFinalizationRegistryCallback callback, void* data); |
480 | void callHostCleanupFinalizationRegistryCallback(JSFunction* doCleanup, |
481 | JSObject* hostDefinedData); |
482 | [[nodiscard]] bool addWeakPointerZonesCallback( |
483 | JSWeakPointerZonesCallback callback, void* data); |
484 | void removeWeakPointerZonesCallback(JSWeakPointerZonesCallback callback); |
485 | [[nodiscard]] bool addWeakPointerCompartmentCallback( |
486 | JSWeakPointerCompartmentCallback callback, void* data); |
487 | void removeWeakPointerCompartmentCallback( |
488 | JSWeakPointerCompartmentCallback callback); |
489 | JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback); |
490 | bool addNurseryCollectionCallback(JS::GCNurseryCollectionCallback callback, |
491 | void* data); |
492 | void removeNurseryCollectionCallback(JS::GCNurseryCollectionCallback callback, |
493 | void* data); |
494 | JS::DoCycleCollectionCallback setDoCycleCollectionCallback( |
495 | JS::DoCycleCollectionCallback callback); |
496 | void callNurseryCollectionCallbacks(JS::GCNurseryProgress progress, |
497 | JS::GCReason reason); |
498 | |
499 | bool addFinalizationRegistry(JSContext* cx, |
500 | Handle<FinalizationRegistryObject*> registry); |
501 | bool registerWithFinalizationRegistry(JSContext* cx, HandleObject target, |
502 | HandleObject record); |
503 | void queueFinalizationRegistryForCleanup(FinalizationQueueObject* queue); |
504 | |
505 | void nukeFinalizationRecordWrapper(JSObject* wrapper, |
506 | FinalizationRecordObject* record); |
507 | void nukeWeakRefWrapper(JSObject* wrapper, WeakRefObject* weakRef); |
508 | |
509 | void setFullCompartmentChecks(bool enable); |
510 | |
511 | // Get the main marking tracer. |
512 | GCMarker& marker() { return *markers[0]; } |
513 | |
514 | JS::Zone* getCurrentSweepGroup() { return currentSweepGroup; } |
515 | unsigned getCurrentSweepGroupIndex() { |
516 | MOZ_ASSERT_IF(unsigned(state()) < unsigned(State::Sweep),do { if (unsigned(state()) < unsigned(State::Sweep)) { do { static_assert( mozilla::detail::AssertionConditionType<decltype (sweepGroupIndex == 0)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(sweepGroupIndex == 0))), 0)) ) { do { } while (false); MOZ_ReportAssertionFailure("sweepGroupIndex == 0" , "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 517); AnnotateMozCrashReason("MOZ_ASSERT" "(" "sweepGroupIndex == 0" ")"); do { *((volatile int*)__null) = 517; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); } } while ( false) |
517 | sweepGroupIndex == 0)do { if (unsigned(state()) < unsigned(State::Sweep)) { do { static_assert( mozilla::detail::AssertionConditionType<decltype (sweepGroupIndex == 0)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(sweepGroupIndex == 0))), 0)) ) { do { } while (false); MOZ_ReportAssertionFailure("sweepGroupIndex == 0" , "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 517); AnnotateMozCrashReason("MOZ_ASSERT" "(" "sweepGroupIndex == 0" ")"); do { *((volatile int*)__null) = 517; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); } } while ( false); |
518 | return sweepGroupIndex; |
519 | } |
520 | |
521 | uint64_t gcNumber() const { return number; } |
522 | void incGcNumber() { ++number; } |
523 | |
524 | uint64_t minorGCCount() const { return minorGCNumber; } |
525 | void incMinorGcNumber() { ++minorGCNumber; } |
526 | |
527 | uint64_t majorGCCount() const { return majorGCNumber; } |
528 | void incMajorGcNumber() { ++majorGCNumber; } |
529 | |
530 | uint64_t gcSliceCount() const { return sliceNumber; } |
531 | void incGcSliceNumber() { ++sliceNumber; } |
532 | |
533 | int64_t defaultSliceBudgetMS() const { return defaultTimeBudgetMS_; } |
534 | |
535 | bool isIncrementalGc() const { return isIncremental; } |
536 | bool isFullGc() const { return isFull; } |
537 | bool isCompactingGc() const { return isCompacting; } |
538 | bool didCompactZones() const { return isCompacting && zonesCompacted; } |
539 | |
540 | bool areGrayBitsValid() const { return grayBitsValid; } |
541 | void setGrayBitsInvalid() { grayBitsValid = false; } |
542 | |
543 | mozilla::TimeStamp lastGCStartTime() const { return lastGCStartTime_; } |
544 | mozilla::TimeStamp lastGCEndTime() const { return lastGCEndTime_; } |
545 | |
546 | bool majorGCRequested() const { |
547 | return majorGCTriggerReason != JS::GCReason::NO_REASON; |
548 | } |
549 | |
550 | double computeHeapGrowthFactor(size_t lastBytes); |
551 | size_t computeTriggerBytes(double growthFactor, size_t lastBytes); |
552 | |
553 | ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_.ref(); } |
554 | ChunkPool& availableChunks(const AutoLockGC& lock) { |
555 | return availableChunks_.ref(); |
556 | } |
557 | ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_.ref(); } |
558 | const ChunkPool& fullChunks(const AutoLockGC& lock) const { |
559 | return fullChunks_.ref(); |
560 | } |
561 | const ChunkPool& availableChunks(const AutoLockGC& lock) const { |
562 | return availableChunks_.ref(); |
563 | } |
564 | const ChunkPool& emptyChunks(const AutoLockGC& lock) const { |
565 | return emptyChunks_.ref(); |
566 | } |
567 | using NonEmptyChunksIter = ChainedIterator<ChunkPool::Iter, 2>; |
568 | NonEmptyChunksIter allNonEmptyChunks(const AutoLockGC& lock) { |
569 | return NonEmptyChunksIter(availableChunks(lock), fullChunks(lock)); |
570 | } |
571 | uint32_t minEmptyChunkCount(const AutoLockGC& lock) const { |
572 | return minEmptyChunkCount_; |
573 | } |
574 | #ifdef DEBUG1 |
575 | void verifyAllChunks(); |
576 | #endif |
577 | |
578 | // Get a free chunk or allocate one if needed. The chunk is left in the empty |
579 | // chunks pool. |
580 | ArenaChunk* getOrAllocChunk(StallAndRetry stallAndRetry, |
581 | AutoLockGCBgAlloc& lock); |
582 | |
583 | // Get or allocate a free chunk, removing it from the empty chunks pool. |
584 | ArenaChunk* takeOrAllocChunk(StallAndRetry stallAndRetry, |
585 | AutoLockGCBgAlloc& lock); |
586 | |
587 | void recycleChunk(ArenaChunk* chunk, const AutoLockGC& lock); |
588 | |
589 | #ifdef JS_GC_ZEAL1 |
590 | void startVerifyPreBarriers(); |
591 | void endVerifyPreBarriers(); |
592 | void finishVerifier(); |
593 | bool isVerifyPreBarriersEnabled() const { return verifyPreData.refNoCheck(); } |
594 | bool shouldYieldForZeal(ZealMode mode); |
595 | #else |
596 | bool isVerifyPreBarriersEnabled() const { return false; } |
597 | #endif |
598 | |
599 | #ifdef JSGC_HASH_TABLE_CHECKS |
600 | void checkHashTablesAfterMovingGC(); |
601 | #endif |
602 | |
603 | // Crawl the heap to check whether an arbitrary pointer is within a cell of |
604 | // the given kind. (TraceKind::Null means to ignore the kind.) |
605 | bool isPointerWithinTenuredCell( |
606 | void* ptr, JS::TraceKind traceKind = JS::TraceKind::Null); |
607 | // Crawl the heap to check whether an arbitary pointer is within a buffer. |
608 | bool isPointerWithinBufferAlloc(void* ptr); |
609 | |
610 | #ifdef DEBUG1 |
611 | bool hasZone(Zone* target); |
612 | #endif |
613 | |
614 | // Queue memory memory to be freed on a background thread if possible. |
615 | void queueUnusedLifoBlocksForFree(LifoAlloc* lifo); |
616 | void queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo); |
617 | void queueBuffersForFreeAfterMinorGC( |
618 | Nursery::BufferSet& buffers, Nursery::StringBufferVector& stringBuffers, |
619 | Nursery::LargeAllocList& largeAllocs); |
620 | |
621 | // Public here for ReleaseArenaLists and FinalizeTypedArenas. |
622 | void releaseArena(Arena* arena, const AutoLockGC& lock); |
623 | void releaseArenas(Arena* arena, const AutoLockGC& lock); |
624 | void releaseArenaList(ArenaList& arenaList, const AutoLockGC& lock); |
625 | |
626 | // Allocator internals. |
627 | static void* refillFreeListInGC(Zone* zone, AllocKind thingKind); |
628 | |
// Delayed marking: cells whose children could not be pushed onto the mark
// stack are processed later via these entry points.
void delayMarkingChildren(gc::Cell* cell, MarkColor color);
bool hasDelayedMarking() const;
void markAllDelayedChildren(ShouldReportMarkTime reportTime);

// If we have yielded to the mutator while foreground finalizing arenas from
// zone |zone| with kind |kind| then return a list of the arenas finalized so
// far. These will have been removed from the main arena lists at this
// point. Otherwise return nullptr.
SortedArenaList* maybeGetForegroundFinalizedArenas(Zone* zone,
                                                   AllocKind kind);

/*
 * Concurrent sweep infrastructure.
 */
void startTask(GCParallelTask& task, AutoLockHelperThreadState& lock);
void joinTask(GCParallelTask& task, AutoLockHelperThreadState& lock);
void updateHelperThreadCount();
size_t parallelWorkerCount() const;
void maybeRequestGCAfterBackgroundTask(const AutoLockHelperThreadState& lock);

// GC parallel task dispatch infrastructure.
size_t getMaxParallelThreads() const;
void dispatchOrQueueParallelTask(GCParallelTask* task,
                                 const AutoLockHelperThreadState& lock);
void maybeDispatchParallelTasks(const AutoLockHelperThreadState& lock);
void onParallelTaskEnd(bool wasDispatched,
                       const AutoLockHelperThreadState& lock);

// Parallel marking.
bool setParallelMarkingEnabled(bool enabled);
bool initOrDisableParallelMarking();
[[nodiscard]] bool updateMarkersVector();
size_t markingWorkerCount() const;

// WeakRefs
bool registerWeakRef(HandleObject target, HandleObject weakRef);
void traceKeptObjects(JSTracer* trc);

// The reason the current (or most recent) collection was started with.
JS::GCReason lastStartReason() const { return initialReason; }

void updateAllocationRates();

// Allocator internals
static void* refillFreeList(JS::Zone* zone, AllocKind thingKind);
void attemptLastDitchGC();
675 | |
// Test mark queue (testing-only; see testMarkQueue member).
// NOTE(review): "#ifdef DEBUG1" restored to "#ifdef DEBUG" (garbled macro).
#ifdef DEBUG
const GCVector<HeapPtr<JS::Value>, 0, SystemAllocPolicy>& getTestMarkQueue()
    const;
[[nodiscard]] bool appendTestMarkQueue(const JS::Value& value);
void clearTestMarkQueue();
size_t testMarkQueuePos() const;
#endif
684 | |
private:
enum IncrementalResult { ResetIncremental = 0, Ok };

// Whether any queued LIFO blocks or nursery buffers are waiting to be freed
// on a background thread.
bool hasBuffersForBackgroundFree() const {
  return !lifoBlocksToFree.ref().isEmpty() ||
         !buffersToFreeAfterMinorGC.ref().empty() ||
         !stringBuffersToReleaseAfterMinorGC.ref().empty() ||
         !largeBuffersToFreeAfterMinorGC.ref().isEmpty();
}

// Returns false on failure without raising an exception.
[[nodiscard]] bool setParameter(JSGCParamKey key, uint32_t value,
                                AutoLockGC& lock);
void resetParameter(JSGCParamKey key, AutoLockGC& lock);
uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);
// Returns false on failure without raising an exception.
bool setThreadParameter(JSGCParamKey key, uint32_t value, AutoLockGC& lock);
void resetThreadParameter(JSGCParamKey key, AutoLockGC& lock);
void updateThreadDataStructures(AutoLockGC& lock);

// Options of the in-progress collection; only valid while maybeGcOptions is
// engaged.
JS::GCOptions gcOptions() const { return maybeGcOptions.ref().ref(); }

TriggerResult checkHeapThreshold(Zone* zone, const HeapSize& heapSize,
                                 const HeapThreshold& heapThreshold);

void updateSchedulingStateOnGCStart();
void updateSchedulingStateOnGCEnd(mozilla::TimeStamp currentTime);
void updateAllGCStartThresholds();
713 | |
// For ArenaLists::allocateFromArena()
friend class ArenaLists;
ArenaChunk* pickChunk(StallAndRetry stallAndRetry, AutoLockGCBgAlloc& lock);
Arena* allocateArena(ArenaChunk* chunk, Zone* zone, AllocKind kind,
                     ShouldCheckThresholds checkThresholds,
                     const AutoLockGC& lock);

/*
 * Return the list of chunks that can be released outside the GC lock.
 * Must be called either during the GC or with the GC lock taken.
 */
friend class BackgroundDecommitTask;
bool tooManyEmptyChunks(const AutoLockGC& lock);
ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
void freeEmptyChunks(const AutoLockGC& lock);
void prepareToFreeChunk(ArenaChunkInfo& info);
void setMinEmptyChunkCount(uint32_t value, const AutoLockGC& lock);

// Background chunk allocation.
friend class BackgroundAllocTask;
bool wantBackgroundAllocation(const AutoLockGC& lock) const;
void startBackgroundAllocTaskIfIdle();
735 | |
// Major GC triggering and slice budgeting.
void requestMajorGC(JS::GCReason reason);
JS::SliceBudget defaultBudget(JS::GCReason reason, int64_t millis);
bool maybeIncreaseSliceBudget(JS::SliceBudget& budget,
                              mozilla::TimeStamp sliceStartTime,
                              mozilla::TimeStamp gcStartTime);
bool maybeIncreaseSliceBudgetForLongCollections(
    JS::SliceBudget& budget, mozilla::TimeStamp sliceStartTime,
    mozilla::TimeStamp gcStartTime);
bool maybeIncreaseSliceBudgetForUrgentCollections(JS::SliceBudget& budget);
IncrementalResult budgetIncrementalGC(bool nonincrementalByAPI,
                                      JS::GCReason reason,
                                      JS::SliceBudget& budget);
void checkZoneIsScheduled(Zone* zone, JS::GCReason reason,
                          const char* trigger);
IncrementalResult resetIncrementalGC(GCAbortReason reason);

// Assert if the system state is such that we should never
// receive a request to do GC work.
void checkCanCallAPI();

// Check if the system state is such that GC has been suppressed
// or otherwise delayed.
[[nodiscard]] bool checkIfGCAllowedInCurrentState(JS::GCReason reason);

gcstats::ZoneGCStats scanZonesBeforeGC();

void setGCOptions(JS::GCOptions options);

void collect(bool nonincrementalByAPI, const JS::SliceBudget& budget,
             JS::GCReason reason) JS_HAZ_GC_CALL;

/*
 * Run one GC "cycle" (either a slice of incremental GC or an entire
 * non-incremental GC).
 *
 * Returns:
 *  * ResetIncremental if we "reset" an existing incremental GC, which would
 *    force us to run another cycle or
 *  * Ok otherwise.
 */
[[nodiscard]] IncrementalResult gcCycle(bool nonincrementalByAPI,
                                        const JS::SliceBudget& budgetArg,
                                        JS::GCReason reason);
bool shouldRepeatForDeadZone(JS::GCReason reason);
780 | |
void incrementalSlice(JS::SliceBudget& budget, JS::GCReason reason,
                      bool budgetWasIncreased);

bool mightSweepInThisSlice(bool nonIncremental);
void collectNurseryFromMajorGC(JS::GCReason reason);
void collectNursery(JS::GCOptions options, JS::GCReason reason,
                    gcstats::PhaseKind phase);

friend class AutoCallGCCallbacks;
void maybeCallGCCallback(JSGCStatus status, JS::GCReason reason);

void startCollection(JS::GCReason reason);

// Preparation and root-marking phases of a major GC.
void purgeRuntime();
[[nodiscard]] bool beginPreparePhase(JS::GCReason reason,
                                     AutoGCSession& session);
bool prepareZonesForCollection(JS::GCReason reason, bool* isFullOut);
void unmarkWeakMaps();
void endPreparePhase(JS::GCReason reason);
void beginMarkPhase(AutoGCSession& session);
bool shouldPreserveJITCode(JS::Realm* realm,
                           const mozilla::TimeStamp& currentTime,
                           JS::GCReason reason, bool canAllocateMoreCode,
                           bool isActiveCompartment);
void discardJITCodeForGC();
void startBackgroundFreeAfterMinorGC();
void relazifyFunctionsForShrinkingGC();
void purgePropMapTablesForShrinkingGC();
void purgeSourceURLsForShrinkingGC();
void traceRuntimeForMajorGC(JSTracer* trc, AutoGCSession& session);
void traceRuntimeAtoms(JSTracer* trc);
void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark);
void traceEmbeddingBlackRoots(JSTracer* trc);
void traceEmbeddingGrayRoots(JSTracer* trc);
IncrementalProgress traceEmbeddingGrayRoots(JSTracer* trc,
                                            JS::SliceBudget& budget);
void checkNoRuntimeRoots(AutoGCSession& session);
void maybeDoCycleCollection();
void findDeadCompartments();

friend class BackgroundMarkTask;
enum ParallelMarking : bool {
  SingleThreadedMarking = false,
  AllowParallelMarking = true
};
IncrementalProgress markUntilBudgetExhausted(
    JS::SliceBudget& sliceBudget,
    ParallelMarking allowParallelMarking = SingleThreadedMarking,
    ShouldReportMarkTime reportTime = ReportMarkTime);
bool canMarkInParallel() const;
bool initParallelMarking();
void finishParallelMarkers();

bool reserveMarkingThreads(size_t count);
void releaseMarkingThreads();

bool hasMarkingWork(MarkColor color) const;

void drainMarkStack();
840 | |
841 | #ifdef DEBUG1 |
842 | void assertNoMarkingWork() const; |
843 | #else |
844 | void assertNoMarkingWork() const {} |
845 | #endif |
846 | |
// Delayed-marking list maintenance.
void markDelayedChildren(gc::Arena* arena, MarkColor color);
void processDelayedMarkingList(gc::MarkColor color);
void rebuildDelayedMarkingList();
void appendToDelayedMarkingList(gc::Arena** listTail, gc::Arena* arena);
void resetDelayedMarking();
template <typename F>
void forEachDelayedMarkingArena(F&& f);

// Weak reference and gray marking.
template <class ZoneIterT>
IncrementalProgress markWeakReferences(JS::SliceBudget& budget);
IncrementalProgress markWeakReferencesInCurrentGroup(JS::SliceBudget& budget);
IncrementalProgress markGrayRoots(JS::SliceBudget& budget,
                                  gcstats::PhaseKind phase);
void markBufferedGrayRoots(JS::Zone* zone);
IncrementalProgress markAllWeakReferences();
void markAllGrayReferences(gcstats::PhaseKind phase);

// The mark queue is a testing-only feature for controlling mark ordering and
// yield timing.
enum MarkQueueProgress {
  QueueYielded,   // End this incremental GC slice, if possible
  QueueComplete,  // Done with the queue
  QueueSuspended  // Continue the GC without ending the slice
};
MarkQueueProgress processTestMarkQueue();
872 | |
// GC Sweeping. Implemented in Sweeping.cpp.
void beginSweepPhase(JS::GCReason reason, AutoGCSession& session);
void dropStringWrappers();
void groupZonesForSweeping(JS::GCReason reason);
[[nodiscard]] bool findSweepGroupEdges();
[[nodiscard]] bool addEdgesForMarkQueue();
void moveToNextSweepGroup();
void resetGrayList(Compartment* comp);
IncrementalProgress beginMarkingSweepGroup(JS::GCContext* gcx,
                                           JS::SliceBudget& budget);
IncrementalProgress markGrayRootsInCurrentGroup(JS::GCContext* gcx,
                                                JS::SliceBudget& budget);
IncrementalProgress markGray(JS::GCContext* gcx, JS::SliceBudget& budget);
IncrementalProgress endMarkingSweepGroup(JS::GCContext* gcx,
                                         JS::SliceBudget& budget);
void markIncomingGrayCrossCompartmentPointers();
IncrementalProgress beginSweepingSweepGroup(JS::GCContext* gcx,
                                            JS::SliceBudget& budget);
void initBackgroundSweep(Zone* zone, JS::GCContext* gcx,
                         const FinalizePhase& phase);
IncrementalProgress markDuringSweeping(JS::GCContext* gcx,
                                       JS::SliceBudget& budget);
// Per-subsystem sweeping steps.
void updateAtomsBitmap();
void sweepCCWrappers();
void sweepRealmGlobals();
void sweepEmbeddingWeakPointers(JS::GCContext* gcx);
void sweepMisc();
void sweepCompressionTasks();
void sweepWeakMaps();
void sweepUniqueIds();
void sweepObjectsWithWeakPointers();
void sweepDebuggerOnMainThread(JS::GCContext* gcx);
void sweepJitDataOnMainThread(JS::GCContext* gcx);
void sweepFinalizationObserversOnMainThread();
void traceWeakFinalizationObserverEdges(JSTracer* trc, Zone* zone);
void sweepWeakRefs();
IncrementalProgress endSweepingSweepGroup(JS::GCContext* gcx,
                                          JS::SliceBudget& budget);
IncrementalProgress performSweepActions(JS::SliceBudget& sliceBudget);
void startSweepingAtomsTable();
IncrementalProgress sweepAtomsTable(JS::GCContext* gcx,
                                    JS::SliceBudget& budget);
IncrementalProgress sweepWeakCaches(JS::GCContext* gcx,
                                    JS::SliceBudget& budget);
IncrementalProgress finalizeAllocKind(JS::GCContext* gcx,
                                      JS::SliceBudget& budget);
bool foregroundFinalize(JS::GCContext* gcx, Zone* zone, AllocKind thingKind,
                        JS::SliceBudget& sliceBudget,
                        SortedArenaList& sweepList);
IncrementalProgress sweepPropMapTree(JS::GCContext* gcx,
                                     JS::SliceBudget& budget);
void endSweepPhase(bool destroyingRuntime);
void queueZonesAndStartBackgroundSweep(ZoneList&& zones);
void sweepFromBackgroundThread(AutoLockHelperThreadState& lock);
void startBackgroundFree();
void freeFromBackgroundThread(AutoLockHelperThreadState& lock);
void sweepBackgroundThings(ZoneList& zones);
void backgroundFinalize(JS::GCContext* gcx, Zone* zone, AllocKind kind,
                        Arena** empty);
void prepareForSweepSlice(JS::GCReason reason);
void assertBackgroundSweepingFinished();
// NOTE(review): "#ifdef DEBUG1" restored to "#ifdef DEBUG" (garbled macro).
#ifdef DEBUG
bool zoneInCurrentSweepGroup(Zone* zone) const;
#endif
937 | |
bool allCCVisibleZonesWereCollected();
void sweepZones(JS::GCContext* gcx, bool destroyingRuntime);
// Decommitting free memory back to the OS.
bool shouldDecommit() const;
void startDecommit();
void decommitEmptyChunks(const bool& cancel, AutoLockGC& lock);
void decommitFreeArenas(const bool& cancel, AutoLockGC& lock);
void decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock);

// Compacting GC. Implemented in Compacting.cpp.
bool shouldCompact();
void beginCompactPhase();
IncrementalProgress compactPhase(JS::GCReason reason,
                                 JS::SliceBudget& sliceBudget,
                                 AutoGCSession& session);
void endCompactPhase();
void sweepZoneAfterCompacting(MovingTracer* trc, Zone* zone);
bool canRelocateZone(Zone* zone) const;
[[nodiscard]] bool relocateArenas(Zone* zone, JS::GCReason reason,
                                  Arena*& relocatedListOut,
                                  JS::SliceBudget& sliceBudget);
void updateCellPointers(Zone* zone, AllocKinds kinds);
void updateAllCellPointers(MovingTracer* trc, Zone* zone);
void updateZonePointersToRelocatedCells(Zone* zone);
void updateRuntimePointersToRelocatedCells(AutoGCSession& session);
void clearRelocatedArenas(Arena* arenaList, JS::GCReason reason);
void clearRelocatedArenasWithoutUnlocking(Arena* arenaList,
                                          JS::GCReason reason,
                                          const AutoLockGC& lock);
void releaseRelocatedArenas(Arena* arenaList);
void releaseRelocatedArenasWithoutUnlocking(Arena* arenaList,
                                            const AutoLockGC& lock);
// NOTE(review): "#ifdef DEBUG1" restored to "#ifdef DEBUG" (garbled macro).
#ifdef DEBUG
void protectOrReleaseRelocatedArenas(Arena* arenaList, JS::GCReason reason);
void protectAndHoldArenas(Arena* arenaList);
void unprotectHeldRelocatedArenas(const AutoLockGC& lock);
void releaseHeldRelocatedArenas();
void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock);
#endif
976 | |
// Wait for a background task, optionally yielding within the given budget.
IncrementalProgress waitForBackgroundTask(GCParallelTask& task,
                                          const JS::SliceBudget& budget,
                                          bool shouldPauseMutator);

void cancelRequestedGCAfterBackgroundTask();
void finishCollection(JS::GCReason reason);
void maybeStopPretenuring();
void checkGCStateNotInUse();
IncrementalProgress joinBackgroundMarkTask();
986 | |
// NOTE(review): garbled macro names restored ("JS_GC_ZEAL1" -> "JS_GC_ZEAL",
// "DEBUG1" -> "DEBUG").
#ifdef JS_GC_ZEAL
// Incremental marking validation: redo marking non-incrementally and compare.
void computeNonIncrementalMarkingForValidation(AutoGCSession& session);
void validateIncrementalMarking();
void finishMarkingValidation();
#endif

#ifdef DEBUG
void checkForCompartmentMismatches();
#endif
996 | |
// Invoke embedder callbacks registered with the runtime.
void callFinalizeCallbacks(JS::GCContext* gcx, JSFinalizeStatus status) const;
void callWeakPointerZonesCallbacks(JSTracer* trc) const;
void callWeakPointerCompartmentCallbacks(JSTracer* trc,
                                         JS::Compartment* comp) const;
void callDoCycleCollectionCallback(JSContext* cx);
1002 | |
1003 | public: |
1004 | JSRuntime* const rt; |
1005 | |
1006 | // Embedders can use this zone however they wish. |
1007 | MainThreadData<JS::Zone*> systemZone; |
1008 | |
1009 | MainThreadData<JS::GCContext> mainThreadContext; |
1010 | |
1011 | private: |
1012 | // For parent runtimes, a zone containing atoms that is shared by child |
1013 | // runtimes. |
1014 | MainThreadData<Zone*> sharedAtomsZone_; |
1015 | |
1016 | // All zones in the runtime. The first element is always the atoms zone. |
1017 | MainThreadOrGCTaskData<ZoneVector> zones_; |
1018 | |
1019 | // Any activity affecting the heap. |
1020 | MainThreadOrGCTaskData<JS::HeapState> heapState_; |
1021 | friend class AutoHeapSession; |
1022 | friend class JS::AutoEnterCycleCollection; |
1023 | |
1024 | UnprotectedData<gcstats::Statistics> stats_; |
1025 | |
1026 | public: |
1027 | js::StringStats stringStats; |
1028 | |
1029 | Vector<UniquePtr<GCMarker>, 1, SystemAllocPolicy> markers; |
1030 | |
1031 | // Delayed marking support in case we OOM pushing work onto the mark stack. |
1032 | MainThreadOrGCTaskData<js::gc::Arena*> delayedMarkingList; |
1033 | MainThreadOrGCTaskData<bool> delayedMarkingWorkAdded; |
1034 | #ifdef DEBUG1 |
1035 | /* Count of arenas that are currently in the stack. */ |
1036 | MainThreadOrGCTaskData<size_t> markLaterArenas; |
1037 | #endif |
1038 | |
SweepingTracer sweepingTracer;

/* Track total GC heap size for this runtime. */
HeapSize heapSize;

/* GC scheduling state and parameters. */
GCSchedulingTunables tunables;
GCSchedulingState schedulingState;
MainThreadData<bool> fullGCRequested;

// Helper thread configuration.
MainThreadData<double> helperThreadRatio;
MainThreadData<size_t> maxHelperThreads;
MainThreadOrGCTaskData<size_t> helperThreadCount;
MainThreadData<size_t> maxMarkingThreads;
MainThreadData<size_t> markingThreadCount;

// Per-runtime helper thread task queue. Can be accessed from helper threads
// in maybeDispatchParallelTasks().
HelperThreadLockData<size_t> maxParallelThreads;
HelperThreadLockData<size_t> dispatchedParallelTasks;
HelperThreadLockData<GCParallelTaskList> queuedParallelTasks;

// State used for managing atom mark bitmaps in each zone.
AtomMarkingRuntime atomMarking;

/*
 * Pointer to a callback that, if set, will be used to create a
 * budget for internally-triggered GCs.
 */
MainThreadData<JS::CreateSliceBudgetCallback> createBudgetCallback;
1070 | |
private:
// Arenas used for permanent things created at startup and shared by child
// runtimes.
MainThreadData<ArenaList> permanentAtoms;
MainThreadData<ArenaList> permanentWellKnownSymbols;

// When chunks are empty, they reside in the emptyChunks pool and are
// re-used as needed or eventually expired if not re-used. The emptyChunks
// pool gets refilled from the background allocation task heuristically so
// that empty chunks should always be available for immediate allocation
// without syscalls.
GCLockData<ChunkPool> emptyChunks_;

// Chunks which have had some, but not all, of their arenas allocated live
// in the available chunk lists. When all available arenas in a chunk have
// been allocated, the chunk is removed from the available list and moved
// to the fullChunks pool. During a GC, if all arenas are free, the chunk
// is moved back to the emptyChunks pool and scheduled for eventual
// release.
GCLockData<ChunkPool> availableChunks_;

// When all arenas in a chunk are used, it is moved to the fullChunks pool
// so as to reduce the cost of operations on the available lists.
GCLockData<ChunkPool> fullChunks_;

/*
 * JSGC_MIN_EMPTY_CHUNK_COUNT
 *
 * Controls the number of empty chunks reserved for future allocation.
 *
 * They can be read off main thread by the background allocation task and the
 * background decommit task.
 */
GCLockData<uint32_t> minEmptyChunkCount_;

MainThreadData<RootedValueMap> rootsHash;

// An incrementing id used to assign unique ids to cells that require one.
MainThreadData<uint64_t> nextCellUniqueId_;

MainThreadData<VerifyPreTracer*> verifyPreData;

// Timing of the previous major collection.
MainThreadData<mozilla::TimeStamp> lastGCStartTime_;
MainThreadData<mozilla::TimeStamp> lastGCEndTime_;

WriteOnceData<bool> initialized;
MainThreadData<bool> incrementalGCEnabled;
MainThreadData<bool> perZoneGCEnabled;
1119 | |
mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters;

/* During shutdown, the GC needs to clean up every possible object. */
MainThreadData<bool> cleanUpEverything;

/*
 * The gray bits can become invalid if UnmarkGray overflows the stack. A
 * full GC will reset this bit, since it fills in all the gray bits.
 */
UnprotectedData<bool> grayBitsValid;

mozilla::Atomic<JS::GCReason, mozilla::ReleaseAcquire> majorGCTriggerReason;

/* Incremented at the start of every minor GC. */
MainThreadData<uint64_t> minorGCNumber;

/* Incremented at the start of every major GC. */
MainThreadData<uint64_t> majorGCNumber;

/* Incremented on every GC slice or minor collection. */
MainThreadData<uint64_t> number;

/* Incremented on every GC slice. */
MainThreadData<uint64_t> sliceNumber;

/*
 * This runtime's current contribution to the global number of helper threads
 * 'reserved' for parallel marking. Does not affect other uses of helper
 * threads.
 */
MainThreadData<size_t> reservedMarkingThreads;
1151 | |
1152 | /* Whether the currently running GC can finish in multiple slices. */ |
1153 | MainThreadOrGCTaskData<bool> isIncremental; |
1154 | |
1155 | /* Whether all zones are being collected in first GC slice. */ |
1156 | MainThreadData<bool> isFull; |
1157 | |
1158 | /* Whether the heap will be compacted at the end of GC. */ |
1159 | MainThreadData<bool> isCompacting; |
1160 | |
1161 | /* Whether to use parallel marking. */ |
1162 | MainThreadData<ParallelMarking> useParallelMarking; |
1163 | |
1164 | /* The invocation kind of the current GC, set at the start of collection. */ |
1165 | MainThreadOrGCTaskData<mozilla::Maybe<JS::GCOptions>> maybeGcOptions; |
1166 | |
1167 | /* The initial GC reason, taken from the first slice. */ |
1168 | MainThreadData<JS::GCReason> initialReason; |
1169 | |
1170 | /* |
1171 | * The current incremental GC phase. This is also used internally in |
1172 | * non-incremental GC. |
1173 | */ |
1174 | MainThreadOrGCTaskData<State> incrementalState; |
1175 | |
1176 | /* The incremental state at the start of this slice. */ |
1177 | MainThreadOrGCTaskData<State> initialState; |
1178 | |
1179 | /* Whether to pay attention the zeal settings in this incremental slice. */ |
1180 | #ifdef JS_GC_ZEAL1 |
1181 | MainThreadData<bool> useZeal; |
1182 | #else |
1183 | const bool useZeal; |
1184 | #endif |
1185 | |
1186 | /* Indicates that the last incremental slice exhausted the mark stack. */ |
1187 | MainThreadData<bool> lastMarkSlice; |
1188 | |
1189 | // Whether it's currently safe to yield to the mutator in an incremental GC. |
1190 | MainThreadData<bool> safeToYield; |
1191 | |
1192 | // Whether to do any marking caused by barriers on a background thread during |
1193 | // incremental sweeping, while also sweeping zones which have finished |
1194 | // marking. |
1195 | MainThreadData<bool> markOnBackgroundThreadDuringSweeping; |
1196 | |
1197 | // Whether any sweeping and decommitting will run on a separate GC helper |
1198 | // thread. |
1199 | MainThreadData<bool> useBackgroundThreads; |
1200 | |
1201 | // Whether we have already discarded JIT code for all collected zones in this |
1202 | // slice. |
1203 | MainThreadData<bool> haveDiscardedJITCodeThisSlice; |
1204 | |
1205 | #ifdef DEBUG1 |
1206 | /* Shutdown has started. Further collections must be shutdown collections. */ |
1207 | MainThreadData<bool> hadShutdownGC; |
1208 | #endif |
1209 | |
/* Singly linked list of zones to be swept in the background. */
HelperThreadLockData<ZoneList> backgroundSweepZones;

/*
 * Whether to trigger a GC slice after a background task is complete, so that
 * the collector can continue or finish collecting. This is only used for the
 * tasks that run concurrently with the mutator, which are background
 * finalization and background decommit.
 */
HelperThreadLockData<bool> requestSliceAfterBackgroundTask;

/*
 * Free LIFO blocks are transferred to these allocators before being freed on
 * a background thread.
 */
HelperThreadLockData<LifoAlloc> lifoBlocksToFree;
MainThreadData<LifoAlloc> lifoBlocksToFreeAfterFullMinorGC;
MainThreadData<LifoAlloc> lifoBlocksToFreeAfterNextMinorGC;
HelperThreadLockData<Nursery::BufferSet> buffersToFreeAfterMinorGC;
HelperThreadLockData<Nursery::StringBufferVector>
    stringBuffersToReleaseAfterMinorGC;
HelperThreadLockData<SlimLinkedList<LargeBuffer>>
    largeBuffersToFreeAfterMinorGC;

/* The number of the minor GC performed at the start of major GC. */
MainThreadData<uint64_t> initialMinorGCNumber;

/* Index of current sweep group (for stats). */
MainThreadData<unsigned> sweepGroupIndex;

/*
 * Incremental sweep state.
 */
MainThreadData<JS::Zone*> sweepGroups;
MainThreadOrGCTaskData<JS::Zone*> currentSweepGroup;
MainThreadData<UniquePtr<SweepAction>> sweepActions;
MainThreadOrGCTaskData<JS::Zone*> sweepZone;
MainThreadOrGCTaskData<AllocKind> sweepAllocKind;
MainThreadData<mozilla::Maybe<AtomsTable::SweepIterator>> maybeAtomsToSweep;
MainThreadOrGCTaskData<mozilla::Maybe<WeakCacheSweepIterator>>
    weakCachesToSweep;
MainThreadData<bool> abortSweepAfterCurrentGroup;
MainThreadOrGCTaskData<IncrementalProgress> sweepMarkResult;
1253 | |
1254 | /* |
1255 | * During incremental foreground finalization, we may have a list of arenas of |
1256 | * the current AllocKind and Zone whose contents have been finalized but which |
1257 | * have not yet been merged back into the main arena lists. |
1258 | */ |
1259 | MainThreadOrGCTaskData<JS::Zone*> foregroundFinalizedZone; |
1260 | MainThreadOrGCTaskData<AllocKind> foregroundFinalizedAllocKind; |
1261 | MainThreadData<mozilla::Maybe<SortedArenaList>> foregroundFinalizedArenas; |
1262 | |
1263 | #ifdef DEBUG1 |
1264 | /* |
1265 | * List of objects to mark at the beginning of a GC for testing purposes. May |
1266 | * also contain string directives to change mark color or wait until different |
1267 | * phases of the GC. |
1268 | * |
1269 | * This is a WeakCache because not everything in this list is guaranteed to |
1270 | * end up marked (eg if you insert an object from an already-processed sweep |
1271 | * group in the middle of an incremental GC). Also, the mark queue is not |
1272 | * used during shutdown GCs. In either case, unmarked objects may need to be |
1273 | * discarded. |
1274 | */ |
1275 | JS::WeakCache<GCVector<HeapPtr<JS::Value>, 0, SystemAllocPolicy>> |
1276 | testMarkQueue; |
1277 | |
1278 | /* Position within the test mark queue. */ |
1279 | size_t queuePos = 0; |
1280 | |
1281 | /* The test marking queue might want to be marking a particular color. */ |
1282 | mozilla::Maybe<js::gc::MarkColor> queueMarkColor; |
1283 | |
1284 | // During gray marking, delay AssertCellIsNotGray checks by |
1285 | // recording the cell pointers here and checking after marking has |
1286 | // finished. |
1287 | MainThreadData<Vector<const Cell*, 0, SystemAllocPolicy>> |
1288 | cellsToAssertNotGray; |
1289 | friend void js::gc::detail::AssertCellIsNotGray(const Cell*); |
1290 | #endif |
1291 | |
1292 | friend class SweepGroupsIter; |
1293 | |
1294 | /* |
1295 | * Incremental compacting state. |
1296 | */ |
1297 | MainThreadData<bool> startedCompacting; |
1298 | MainThreadData<ZoneList> zonesToMaybeCompact; |
1299 | MainThreadData<size_t> zonesCompacted; |
1300 | #ifdef DEBUG1 |
1301 | GCLockData<Arena*> relocatedArenasToRelease; |
1302 | #endif |
1303 | |
1304 | #ifdef JS_GC_ZEAL1 |
1305 | MainThreadData<MarkingValidator*> markingValidator; |
1306 | #endif |
1307 | |
/*
 * Default budget for incremental GC slice. See js/SliceBudget.h.
 *
 * JSGC_SLICE_TIME_BUDGET_MS
 * pref: javascript.options.mem.gc_incremental_slice_ms,
 */
MainThreadData<int64_t> defaultTimeBudgetMS_;

/*
 * Whether compacting GC is enabled globally.
 *
 * JSGC_COMPACTING_ENABLED
 * pref: javascript.options.mem.gc_compacting
 */
MainThreadData<bool> compactingEnabled;

/*
 * Whether generational GC is enabled globally.
 *
 * JSGC_NURSERY_ENABLED
 * pref: javascript.options.mem.gc_generational
 */
MainThreadData<bool> nurseryEnabled;

/*
 * Whether parallel marking is enabled globally.
 *
 * JSGC_PARALLEL_MARKING_ENABLED
 * pref: javascript.options.mem.gc_parallel_marking
 */
MainThreadData<bool> parallelMarkingEnabled;

MainThreadData<bool> rootsRemoved;
1341 | |
1342 | /* |
1343 | * These options control the zealousness of the GC. At every allocation, |
1344 | * nextScheduled is decremented. When it reaches zero we do a full GC. |
1345 | * |
1346 | * At this point, if zeal_ is one of the types that trigger periodic |
1347 | * collection, then nextScheduled is reset to the value of zealFrequency. |
1348 | * Otherwise, no additional GCs take place. |
1349 | * |
1350 | * You can control these values in several ways: |
1351 | * - Set the JS_GC_ZEAL environment variable |
1352 | * - Call gczeal() or schedulegc() from inside shell-executed JS code |
1353 | * (see the help for details) |
1354 | * |
1355 | * If gcZeal_ == 1 then we perform GCs in select places (during MaybeGC and |
1356 | * whenever we are notified that GC roots have been removed). This option is |
1357 | * mainly useful to embedders. |
1358 | * |
1359 | * We use zeal_ == 4 to enable write barrier verification. See the comment |
1360 | * in gc/Verifier.cpp for more information about this. |
1361 | * |
1362 | * zeal_ values from 8 to 10 periodically run different types of |
1363 | * incremental GC. |
1364 | * |
1365 | * zeal_ value 14 performs periodic shrinking collections. |
1366 | */ |
1367 | #ifdef JS_GC_ZEAL1 |
1368 | static_assert(size_t(ZealMode::Count) <= 32, |
1369 | "Too many zeal modes to store in a uint32_t"); |
1370 | MainThreadData<uint32_t> zealModeBits; |
1371 | MainThreadData<int> zealFrequency; |
1372 | MainThreadData<int> nextScheduled; |
1373 | MainThreadData<bool> deterministicOnly; |
1374 | MainThreadData<int> zealSliceBudget; |
1375 | MainThreadData<size_t> maybeMarkStackLimit; |
1376 | |
1377 | MainThreadData<PersistentRooted<GCVector<JSObject*, 0, SystemAllocPolicy>>> |
1378 | selectedForMarking; |
1379 | #endif |
1380 | |
1381 | MainThreadData<bool> fullCompartmentChecks; |
1382 | |
1383 | MainThreadData<uint32_t> gcCallbackDepth; |
1384 | |
1385 | MainThreadData<Callback<JSGCCallback>> gcCallback; |
1386 | MainThreadData<Callback<JS::DoCycleCollectionCallback>> |
1387 | gcDoCycleCollectionCallback; |
1388 | MainThreadData<Callback<JSObjectsTenuredCallback>> tenuredCallback; |
1389 | MainThreadData<CallbackVector<JSFinalizeCallback>> finalizeCallbacks; |
1390 | MainThreadOrGCTaskData<Callback<JSHostCleanupFinalizationRegistryCallback>> |
1391 | hostCleanupFinalizationRegistryCallback; |
1392 | MainThreadData<CallbackVector<JSWeakPointerZonesCallback>> |
1393 | updateWeakPointerZonesCallbacks; |
1394 | MainThreadData<CallbackVector<JSWeakPointerCompartmentCallback>> |
1395 | updateWeakPointerCompartmentCallbacks; |
1396 | MainThreadData<CallbackVector<JS::GCNurseryCollectionCallback>> |
1397 | nurseryCollectionCallbacks; |
1398 | |
1399 | /* |
1400 | * The trace operations to trace embedding-specific GC roots. One is for |
1401 | * tracing through black roots and the other is for tracing through gray |
1402 | * roots. The black/gray distinction is only relevant to the cycle |
1403 | * collector. |
1404 | */ |
1405 | MainThreadData<CallbackVector<JSTraceDataOp>> blackRootTracers; |
1406 | MainThreadOrGCTaskData<Callback<JSGrayRootsTracer>> grayRootTracer; |
1407 | |
1408 | /* Always preserve JIT code during GCs, for testing. */ |
1409 | MainThreadData<bool> alwaysPreserveCode; |
1410 | |
1411 | /* Count of the number of zones that are currently in page load. */ |
1412 | MainThreadData<size_t> inPageLoadCount; |
1413 | |
1414 | MainThreadData<bool> lowMemoryState; |
1415 | |
1416 | /* |
1417 | * General purpose GC lock, used for synchronising operations on |
1418 | * arenas and during parallel marking. |
1419 | */ |
1420 | friend class js::AutoLockGC; |
1421 | friend class js::AutoLockGCBgAlloc; |
1422 | Mutex lock MOZ_UNANNOTATED; |
1423 | |
1424 | /* |
1425 | * Lock used to synchronise access to the store buffer during parallel |
1426 | * sweeping. |
1427 | */ |
1428 | Mutex storeBufferLock MOZ_UNANNOTATED; |
1429 | |
1430 | /* Lock used to synchronise access to delayed marking state. */ |
1431 | Mutex delayedMarkingLock MOZ_UNANNOTATED; |
1432 | |
1433 | /* |
1434 | * Lock used by buffer allocators to synchronise data passed back to the main |
1435 | * thread by background sweeping. |
1436 | */ |
1437 | Mutex bufferAllocatorLock MOZ_UNANNOTATED; |
1438 | friend class BufferAllocator; |
1439 | friend class AutoLock; |
1440 | |
1441 | friend class BackgroundSweepTask; |
1442 | friend class BackgroundFreeTask; |
1443 | |
1444 | BackgroundAllocTask allocTask; |
1445 | BackgroundUnmarkTask unmarkTask; |
1446 | BackgroundMarkTask markTask; |
1447 | BackgroundSweepTask sweepTask; |
1448 | BackgroundFreeTask freeTask; |
1449 | BackgroundDecommitTask decommitTask; |
1450 | |
1451 | MainThreadData<Nursery> nursery_; |
1452 | |
1453 | // The store buffer used to track tenured to nursery edges for generational |
1454 | // GC. This is accessed off main thread when sweeping WeakCaches. |
1455 | MainThreadOrGCTaskData<gc::StoreBuffer> storeBuffer_; |
1456 | |
1457 | mozilla::TimeStamp lastLastDitchTime; |
1458 | |
1459 | // The last time per-zone allocation rates were updated. |
1460 | MainThreadData<mozilla::TimeStamp> lastAllocRateUpdateTime; |
1461 | |
1462 | // Total collector time since per-zone allocation rates were last updated. |
1463 | MainThreadData<mozilla::TimeDuration> collectorTimeSinceAllocRateUpdate; |
1464 | |
1465 | friend class MarkingValidator; |
1466 | friend class AutoEnterIteration; |
1467 | }; |
1468 | |
1469 | #ifndef JS_GC_ZEAL1 // NOTE(review): macro name looks garbled in this listing — upstream SpiderMonkey uses JS_GC_ZEAL; confirm |
1470 | inline bool GCRuntime::hasZealMode(ZealMode mode) const { return false; } // no zeal modes exist in non-zeal builds |
1471 | inline void GCRuntime::clearZealMode(ZealMode mode) {} // nothing to clear when zeal is compiled out |
1472 | inline bool GCRuntime::needZealousGC() { return false; } // zealous collections are never triggered |
1473 | inline bool GCRuntime::zealModeControlsYieldPoint() const { return false; } // incremental yield points are never zeal-driven |
1474 | #endif |
1475 | |
1476 | /* Prevent compartments and zones from being collected during iteration. */ |
1477 | class MOZ_RAII AutoEnterIteration { // RAII guard: holds a count in GCRuntime::numActiveZoneIters for its lifetime |
1478 | GCRuntime* gc; // non-owning; must outlive this guard |
1479 | |
1480 | public: |
1481 | explicit AutoEnterIteration(GCRuntime* gc_) : gc(gc_) { // entering iteration: bump the active-iterator count |
1482 | ++gc->numActiveZoneIters; |
1483 | } |
1484 | |
1485 | ~AutoEnterIteration() { // leaving iteration: release the count taken in the constructor |
1486 | MOZ_ASSERT(gc->numActiveZoneIters)do { static_assert( mozilla::detail::AssertionConditionType< decltype(gc->numActiveZoneIters)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(gc->numActiveZoneIters))) , 0))) { do { } while (false); MOZ_ReportAssertionFailure("gc->numActiveZoneIters" , "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 1486); AnnotateMozCrashReason("MOZ_ASSERT" "(" "gc->numActiveZoneIters" ")"); do { *((volatile int*)__null) = 1486; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); // preprocessor-expanded MOZ_ASSERT(gc->numActiveZoneIters): count must be non-zero before the decrement below |
1487 | --gc->numActiveZoneIters; |
1488 | } |
1489 | }; |
1490 | |
1491 | bool IsCurrentlyAnimating(const mozilla::TimeStamp& lastAnimationTime, |
1492 | const mozilla::TimeStamp& currentTime); |
1493 | |
1494 | } /* namespace gc */ |
1495 | } /* namespace js */ |
1496 | |
1497 | #endif |