File: | var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h |
Warning: | line 268, column 7 Excessive padding in 'class js::gc::GCRuntime' (78 padding bytes, where 6 is optimal). Optimal fields order: rt, numActiveZoneIters, queuePos, lastLastDitchTime, systemZone, sharedAtomsZone_, delayedMarkingList, markLaterArenas, helperThreadRatio, maxHelperThreads, helperThreadCount, maxMarkingThreads, markingThreadCount, maxParallelThreads, dispatchedParallelTasks, createBudgetCallback, nextCellUniqueId_, verifyPreData, lastGCStartTime_, lastGCEndTime_, initialized, minorGCNumber, majorGCNumber, number, sliceNumber, reservedMarkingThreads, sweepGroups, currentSweepGroup, sweepActions, sweepZone, foregroundFinalizedZone, zonesCompacted, relocatedArenasToRelease, markingValidator, defaultTimeBudgetMS_, maybeMarkStackLimit, inPageLoadCount, lastAllocRateUpdateTime, collectorTimeSinceAllocRateUpdate, permanentAtoms, permanentWellKnownSymbols, emptyChunks_, availableChunks_, fullChunks_, backgroundSweepZones, zonesToMaybeCompact, gcCallback, gcDoCycleCollectionCallback, tenuredCallback, hostCleanupFinalizationRegistryCallback, grayRootTracer, stringStats, heapSize, queuedParallelTasks, weakCachesToSweep, markers, sweepingTracer, rootsHash, buffersToFreeAfterMinorGC, cellsToAssertNotGray, atomMarking, testMarkQueue, mainThreadContext, zones_, selectedForMarking, lock, storeBufferLock, delayedMarkingLock, maybeAtomsToSweep, sweepTask, freeTask, decommitTask, stringBuffersToReleaseAfterMinorGC, finalizeCallbacks, updateWeakPointerZonesCallbacks, updateWeakPointerCompartmentCallbacks, nurseryCollectionCallbacks, blackRootTracers, lifoBlocksToFree, lifoBlocksToFreeAfterFullMinorGC, lifoBlocksToFreeAfterNextMinorGC, allocTask, markTask, unmarkTask, tunables, storeBuffer_, foregroundFinalizedArenas, nursery_, stats_, schedulingState, majorGCTriggerReason, heapState_, minEmptyChunkCount_, initialReason, incrementalState, initialState, sweepGroupIndex, sweepMarkResult, zealModeBits, zealFrequency, nextScheduled, zealSliceBudget, gcCallbackDepth, 
maybeGcOptions, delayedMarkingWorkAdded, fullGCRequested, incrementalGCEnabled, perZoneGCEnabled, cleanUpEverything, grayBitsValid, isIncremental, isFull, isCompacting, useParallelMarking, useZeal, lastMarkSlice, safeToYield, markOnBackgroundThreadDuringSweeping, useBackgroundThreads, haveDiscardedJITCodeThisSlice, hadShutdownGC, requestSliceAfterBackgroundTask, sweepAllocKind, abortSweepAfterCurrentGroup, foregroundFinalizedAllocKind, queueMarkColor, startedCompacting, compactingEnabled, nurseryEnabled, parallelMarkingEnabled, rootsRemoved, deterministicOnly, fullCompartmentChecks, alwaysPreserveCode, lowMemoryState, consider reordering the fields or adding explicit padding members |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- |
2 | * vim: set ts=8 sts=2 et sw=2 tw=80: |
3 | * This Source Code Form is subject to the terms of the Mozilla Public |
4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | |
7 | #ifndef gc_GCRuntime_h |
8 | #define gc_GCRuntime_h |
9 | |
10 | #include "mozilla/Atomics.h" |
11 | #include "mozilla/DoublyLinkedList.h" |
12 | #include "mozilla/EnumSet.h" |
13 | #include "mozilla/Maybe.h" |
14 | #include "mozilla/TimeStamp.h" |
15 | |
16 | #include "gc/ArenaList.h" |
17 | #include "gc/AtomMarking.h" |
18 | #include "gc/GCContext.h" |
19 | #include "gc/GCMarker.h" |
20 | #include "gc/GCParallelTask.h" |
21 | #include "gc/IteratorUtils.h" |
22 | #include "gc/Nursery.h" |
23 | #include "gc/Scheduling.h" |
24 | #include "gc/Statistics.h" |
25 | #include "gc/StoreBuffer.h" |
26 | #include "js/friend/PerformanceHint.h" |
27 | #include "js/GCAnnotations.h" |
28 | #include "js/UniquePtr.h" |
29 | #include "vm/AtomsTable.h" |
30 | |
31 | namespace js { |
32 | |
33 | class AutoLockGC; |
34 | class AutoLockGCBgAlloc; |
35 | class AutoLockHelperThreadState; |
36 | class FinalizationRegistryObject; |
37 | class FinalizationRecordObject; |
38 | class FinalizationQueueObject; |
39 | class GlobalObject; |
40 | class VerifyPreTracer; |
41 | class WeakRefObject; |
42 | |
43 | namespace gc { |
44 | |
45 | using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>; |
46 | using ZoneVector = Vector<JS::Zone*, 4, SystemAllocPolicy>; |
47 | |
48 | class AutoCallGCCallbacks; |
49 | class AutoGCSession; |
50 | class AutoHeapSession; |
51 | class AutoTraceSession; |
52 | struct FinalizePhase; |
53 | class MarkingValidator; |
54 | struct MovingTracer; |
55 | class ParallelMarkTask; |
56 | enum class ShouldCheckThresholds; |
57 | class SweepGroupsIter; |
58 | |
59 | // Interface to a sweep action. |
60 | struct SweepAction { |
61 | // The arguments passed to each action. |
62 | struct Args { |
63 | GCRuntime* gc; |
64 | JS::GCContext* gcx; |
65 | JS::SliceBudget& budget; |
66 | }; |
67 | |
68 | virtual ~SweepAction() = default; |
69 | virtual IncrementalProgress run(Args& state) = 0; |
70 | virtual void assertFinished() const = 0; |
71 | virtual bool shouldSkip() { return false; } |
72 | }; |
73 | |
74 | class ChunkPool { |
75 | ArenaChunk* head_; |
76 | size_t count_; |
77 | |
78 | public: |
79 | ChunkPool() : head_(nullptr), count_(0) {} |
80 | ChunkPool(const ChunkPool& other) = delete; |
81 | ChunkPool(ChunkPool&& other) { *this = std::move(other); } |
82 | |
83 | ~ChunkPool() { |
84 | MOZ_ASSERT(!head_)do { static_assert( mozilla::detail::AssertionConditionType< decltype(!head_)>::isValid, "invalid assertion condition") ; if ((__builtin_expect(!!(!(!!(!head_))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("!head_", "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 84); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!head_" ")"); do { *((volatile int*)__null) = 84; __attribute__((nomerge)) ::abort(); } while (false); } } while (false); |
85 | MOZ_ASSERT(count_ == 0)do { static_assert( mozilla::detail::AssertionConditionType< decltype(count_ == 0)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(count_ == 0))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("count_ == 0", "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 85); AnnotateMozCrashReason("MOZ_ASSERT" "(" "count_ == 0" ")" ); do { *((volatile int*)__null) = 85; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); |
86 | } |
87 | |
88 | ChunkPool& operator=(const ChunkPool& other) = delete; |
89 | ChunkPool& operator=(ChunkPool&& other) { |
90 | head_ = other.head_; |
91 | other.head_ = nullptr; |
92 | count_ = other.count_; |
93 | other.count_ = 0; |
94 | return *this; |
95 | } |
96 | |
97 | bool empty() const { return !head_; } |
98 | size_t count() const { return count_; } |
99 | |
100 | ArenaChunk* head() { |
101 | MOZ_ASSERT(head_)do { static_assert( mozilla::detail::AssertionConditionType< decltype(head_)>::isValid, "invalid assertion condition"); if ((__builtin_expect(!!(!(!!(head_))), 0))) { do { } while ( false); MOZ_ReportAssertionFailure("head_", "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 101); AnnotateMozCrashReason("MOZ_ASSERT" "(" "head_" ")"); do { *((volatile int*)__null) = 101; __attribute__((nomerge) ) ::abort(); } while (false); } } while (false); |
102 | return head_; |
103 | } |
104 | ArenaChunk* pop(); |
105 | void push(ArenaChunk* chunk); |
106 | ArenaChunk* remove(ArenaChunk* chunk); |
107 | |
108 | void sort(); |
109 | |
110 | private: |
111 | ArenaChunk* mergeSort(ArenaChunk* list, size_t count); |
112 | bool isSorted() const; |
113 | |
114 | #ifdef DEBUG1 |
115 | public: |
116 | bool contains(ArenaChunk* chunk) const; |
117 | bool verify() const; |
118 | void verifyChunks() const; |
119 | #endif |
120 | |
121 | public: |
122 | // Pool mutation does not invalidate an Iter unless the mutation |
123 | // is of the ArenaChunk currently being visited by the Iter. |
124 | class Iter { |
125 | public: |
126 | explicit Iter(ChunkPool& pool) : current_(pool.head_) {} |
127 | bool done() const { return !current_; } |
128 | void next(); |
129 | ArenaChunk* get() const { return current_; } |
130 | operator ArenaChunk*() const { return get(); } |
131 | ArenaChunk* operator->() const { return get(); } |
132 | |
133 | private: |
134 | ArenaChunk* current_; |
135 | }; |
136 | }; |
137 | |
138 | class BackgroundMarkTask : public GCParallelTask { |
139 | public: |
140 | explicit BackgroundMarkTask(GCRuntime* gc); |
141 | void setBudget(const JS::SliceBudget& budget) { this->budget = budget; } |
142 | void run(AutoLockHelperThreadState& lock) override; |
143 | |
144 | private: |
145 | JS::SliceBudget budget; |
146 | }; |
147 | |
148 | class BackgroundUnmarkTask : public GCParallelTask { |
149 | public: |
150 | explicit BackgroundUnmarkTask(GCRuntime* gc); |
151 | void initZones(); |
152 | void run(AutoLockHelperThreadState& lock) override; |
153 | |
154 | ZoneVector zones; |
155 | }; |
156 | |
157 | class BackgroundSweepTask : public GCParallelTask { |
158 | public: |
159 | explicit BackgroundSweepTask(GCRuntime* gc); |
160 | void run(AutoLockHelperThreadState& lock) override; |
161 | }; |
162 | |
163 | class BackgroundFreeTask : public GCParallelTask { |
164 | public: |
165 | explicit BackgroundFreeTask(GCRuntime* gc); |
166 | void run(AutoLockHelperThreadState& lock) override; |
167 | }; |
168 | |
169 | // Performs extra allocation off thread so that when memory is required on the |
170 | // main thread it will already be available and waiting. |
171 | class BackgroundAllocTask : public GCParallelTask { |
172 | // Guarded by the GC lock. |
173 | GCLockData<ChunkPool&> chunkPool_; |
174 | |
175 | const bool enabled_; |
176 | |
177 | public: |
178 | BackgroundAllocTask(GCRuntime* gc, ChunkPool& pool); |
179 | bool enabled() const { return enabled_; } |
180 | |
181 | void run(AutoLockHelperThreadState& lock) override; |
182 | }; |
183 | |
184 | // Search the provided chunks for free arenas and decommit them. |
185 | class BackgroundDecommitTask : public GCParallelTask { |
186 | public: |
187 | explicit BackgroundDecommitTask(GCRuntime* gc); |
188 | void run(AutoLockHelperThreadState& lock) override; |
189 | }; |
190 | |
// A callback function of type F paired with its opaque user data pointer.
// Default-constructed callbacks are null (both fields nullptr).
template <typename F>
struct Callback {
  F op = nullptr;
  void* data = nullptr;

  Callback() = default;
  Callback(F op, void* data) : op(op), data(data) {}
};
199 | |
200 | template <typename F> |
201 | using CallbackVector = Vector<Callback<F>, 4, SystemAllocPolicy>; |
202 | |
203 | using RootedValueMap = |
204 | HashMap<Value*, const char*, DefaultHasher<Value*>, SystemAllocPolicy>; |
205 | |
206 | using AllocKinds = mozilla::EnumSet<AllocKind, uint64_t>; |
207 | |
208 | // A singly linked list of zones. |
209 | class ZoneList { |
210 | static Zone* const End; |
211 | |
212 | Zone* head; |
213 | Zone* tail; |
214 | |
215 | public: |
216 | ZoneList(); |
217 | ~ZoneList(); |
218 | |
219 | bool isEmpty() const; |
220 | Zone* front() const; |
221 | |
222 | void prepend(Zone* zone); |
223 | void append(Zone* zone); |
224 | void prependList(ZoneList&& other); |
225 | void appendList(ZoneList&& other); |
226 | Zone* removeFront(); |
227 | void clear(); |
228 | |
229 | private: |
230 | explicit ZoneList(Zone* singleZone); |
231 | void check() const; |
232 | |
233 | ZoneList(const ZoneList& other) = delete; |
234 | ZoneList& operator=(const ZoneList& other) = delete; |
235 | }; |
236 | |
237 | struct WeakCacheToSweep { |
238 | JS::detail::WeakCacheBase* cache; |
239 | JS::Zone* zone; |
240 | }; |
241 | |
242 | class WeakCacheSweepIterator { |
243 | using WeakCacheBase = JS::detail::WeakCacheBase; |
244 | |
245 | JS::Zone* sweepZone; |
246 | WeakCacheBase* sweepCache; |
247 | |
248 | public: |
249 | explicit WeakCacheSweepIterator(JS::Zone* sweepGroup); |
250 | |
251 | bool done() const; |
252 | WeakCacheToSweep get() const; |
253 | void next(); |
254 | |
255 | private: |
256 | void settle(); |
257 | }; |
258 | |
259 | struct SweepingTracer final : public GenericTracerImpl<SweepingTracer> { |
260 | explicit SweepingTracer(JSRuntime* rt); |
261 | |
262 | private: |
263 | template <typename T> |
264 | void onEdge(T** thingp, const char* name); |
265 | friend class GenericTracerImpl<SweepingTracer>; |
266 | }; |
267 | |
268 | class GCRuntime { |
Excessive padding in 'class js::gc::GCRuntime' (78 padding bytes, where 6 is optimal). Optimal fields order: rt, numActiveZoneIters, queuePos, lastLastDitchTime, systemZone, sharedAtomsZone_, delayedMarkingList, markLaterArenas, helperThreadRatio, maxHelperThreads, helperThreadCount, maxMarkingThreads, markingThreadCount, maxParallelThreads, dispatchedParallelTasks, createBudgetCallback, nextCellUniqueId_, verifyPreData, lastGCStartTime_, lastGCEndTime_, initialized, minorGCNumber, majorGCNumber, number, sliceNumber, reservedMarkingThreads, sweepGroups, currentSweepGroup, sweepActions, sweepZone, foregroundFinalizedZone, zonesCompacted, relocatedArenasToRelease, markingValidator, defaultTimeBudgetMS_, maybeMarkStackLimit, inPageLoadCount, lastAllocRateUpdateTime, collectorTimeSinceAllocRateUpdate, permanentAtoms, permanentWellKnownSymbols, emptyChunks_, availableChunks_, fullChunks_, backgroundSweepZones, zonesToMaybeCompact, gcCallback, gcDoCycleCollectionCallback, tenuredCallback, hostCleanupFinalizationRegistryCallback, grayRootTracer, stringStats, heapSize, queuedParallelTasks, weakCachesToSweep, markers, sweepingTracer, rootsHash, buffersToFreeAfterMinorGC, cellsToAssertNotGray, atomMarking, testMarkQueue, mainThreadContext, zones_, selectedForMarking, lock, storeBufferLock, delayedMarkingLock, maybeAtomsToSweep, sweepTask, freeTask, decommitTask, stringBuffersToReleaseAfterMinorGC, finalizeCallbacks, updateWeakPointerZonesCallbacks, updateWeakPointerCompartmentCallbacks, nurseryCollectionCallbacks, blackRootTracers, lifoBlocksToFree, lifoBlocksToFreeAfterFullMinorGC, lifoBlocksToFreeAfterNextMinorGC, allocTask, markTask, unmarkTask, tunables, storeBuffer_, foregroundFinalizedArenas, nursery_, stats_, schedulingState, majorGCTriggerReason, heapState_, minEmptyChunkCount_, initialReason, incrementalState, initialState, sweepGroupIndex, sweepMarkResult, zealModeBits, zealFrequency, nextScheduled, zealSliceBudget, gcCallbackDepth, maybeGcOptions, 
delayedMarkingWorkAdded, fullGCRequested, incrementalGCEnabled, perZoneGCEnabled, cleanUpEverything, grayBitsValid, isIncremental, isFull, isCompacting, useParallelMarking, useZeal, lastMarkSlice, safeToYield, markOnBackgroundThreadDuringSweeping, useBackgroundThreads, haveDiscardedJITCodeThisSlice, hadShutdownGC, requestSliceAfterBackgroundTask, sweepAllocKind, abortSweepAfterCurrentGroup, foregroundFinalizedAllocKind, queueMarkColor, startedCompacting, compactingEnabled, nurseryEnabled, parallelMarkingEnabled, rootsRemoved, deterministicOnly, fullCompartmentChecks, alwaysPreserveCode, lowMemoryState, consider reordering the fields or adding explicit padding members | |
269 | public: |
270 | explicit GCRuntime(JSRuntime* rt); |
271 | [[nodiscard]] bool init(uint32_t maxbytes); |
272 | bool wasInitialized() const { return initialized; } |
273 | void finishRoots(); |
274 | void finish(); |
275 | |
276 | Zone* atomsZone() { |
277 | Zone* zone = zones()[0]; |
278 | MOZ_ASSERT(JS::shadow::Zone::from(zone)->isAtomsZone())do { static_assert( mozilla::detail::AssertionConditionType< decltype(JS::shadow::Zone::from(zone)->isAtomsZone())>:: isValid, "invalid assertion condition"); if ((__builtin_expect (!!(!(!!(JS::shadow::Zone::from(zone)->isAtomsZone()))), 0 ))) { do { } while (false); MOZ_ReportAssertionFailure("JS::shadow::Zone::from(zone)->isAtomsZone()" , "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 278); AnnotateMozCrashReason("MOZ_ASSERT" "(" "JS::shadow::Zone::from(zone)->isAtomsZone()" ")"); do { *((volatile int*)__null) = 278; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); |
279 | return zone; |
280 | } |
281 | Zone* maybeSharedAtomsZone() { return sharedAtomsZone_; } |
282 | |
283 | [[nodiscard]] bool freezeSharedAtomsZone(); |
284 | void restoreSharedAtomsZone(); |
285 | |
286 | JS::HeapState heapState() const { return heapState_; } |
287 | |
288 | bool hasZealMode(ZealMode mode) const; |
289 | bool hasAnyZealModeOf(mozilla::EnumSet<ZealMode> mode) const; |
290 | void clearZealMode(ZealMode mode); |
291 | bool needZealousGC(); |
292 | bool zealModeControlsYieldPoint() const; |
293 | |
294 | [[nodiscard]] bool addRoot(Value* vp, const char* name); |
295 | void removeRoot(Value* vp); |
296 | |
297 | [[nodiscard]] bool setParameter(JSContext* cx, JSGCParamKey key, |
298 | uint32_t value); |
299 | void resetParameter(JSContext* cx, JSGCParamKey key); |
300 | uint32_t getParameter(JSGCParamKey key); |
301 | |
302 | void setPerformanceHint(PerformanceHint hint); |
303 | bool isInPageLoad() const { return inPageLoadCount != 0; } |
304 | |
305 | [[nodiscard]] bool triggerGC(JS::GCReason reason); |
306 | // Check whether to trigger a zone GC after allocating GC cells. |
307 | void maybeTriggerGCAfterAlloc(Zone* zone); |
308 | // Check whether to trigger a zone GC after malloc memory. |
309 | void maybeTriggerGCAfterMalloc(Zone* zone); |
310 | bool maybeTriggerGCAfterMalloc(Zone* zone, const HeapSize& heap, |
311 | const HeapThreshold& threshold, |
312 | JS::GCReason reason); |
313 | // The return value indicates if we were able to do the GC. |
314 | bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes, |
315 | size_t thresholdBytes); |
316 | |
317 | void maybeGC(); |
318 | |
319 | // Return whether we want to run a major GC. If eagerOk is true, include eager |
320 | // triggers (eg EAGER_ALLOC_TRIGGER) in this determination, and schedule all |
321 | // zones that exceed the eager thresholds. |
322 | JS::GCReason wantMajorGC(bool eagerOk); |
323 | bool checkEagerAllocTrigger(const HeapSize& size, |
324 | const HeapThreshold& threshold); |
325 | |
326 | // Do a minor GC if requested, followed by a major GC if requested. The return |
327 | // value indicates whether a major GC was performed. |
328 | bool gcIfRequested() { return gcIfRequestedImpl(false); } |
329 | |
330 | // Internal function to do a GC if previously requested. But if not and |
331 | // eagerOk, do an eager GC for all Zones that have exceeded the eager |
332 | // thresholds. |
333 | // |
334 | // Return whether a major GC was performed or started. |
335 | bool gcIfRequestedImpl(bool eagerOk); |
336 | |
337 | void gc(JS::GCOptions options, JS::GCReason reason); |
338 | void startGC(JS::GCOptions options, JS::GCReason reason, |
339 | const JS::SliceBudget& budget); |
340 | void gcSlice(JS::GCReason reason, const JS::SliceBudget& budget); |
341 | void finishGC(JS::GCReason reason); |
342 | void abortGC(); |
343 | void startDebugGC(JS::GCOptions options, const JS::SliceBudget& budget); |
344 | void debugGCSlice(const JS::SliceBudget& budget); |
345 | |
346 | void runDebugGC(); |
347 | void notifyRootsRemoved(); |
348 | |
349 | enum TraceOrMarkRuntime { TraceRuntime, MarkRuntime }; |
350 | void traceRuntime(JSTracer* trc, AutoTraceSession& session); |
351 | void traceRuntimeForMinorGC(JSTracer* trc, AutoGCSession& session); |
352 | |
353 | void purgeRuntimeForMinorGC(); |
354 | |
355 | void shrinkBuffers(); |
356 | void onOutOfMallocMemory(); |
357 | void onOutOfMallocMemory(const AutoLockGC& lock); |
358 | |
359 | Nursery& nursery() { return nursery_.ref(); } |
360 | gc::StoreBuffer& storeBuffer() { return storeBuffer_.ref(); } |
361 | |
362 | void minorGC(JS::GCReason reason, |
363 | gcstats::PhaseKind phase = gcstats::PhaseKind::MINOR_GC) |
364 | JS_HAZ_GC_CALL; |
365 | void evictNursery(JS::GCReason reason = JS::GCReason::EVICT_NURSERY) { |
366 | minorGC(reason, gcstats::PhaseKind::EVICT_NURSERY); |
367 | } |
368 | |
369 | void* addressOfNurseryPosition() { |
370 | return nursery_.refNoCheck().addressOfPosition(); |
371 | } |
372 | |
373 | const void* addressOfLastBufferedWholeCell() { |
374 | return storeBuffer_.refNoCheck().addressOfLastBufferedWholeCell(); |
375 | } |
376 | |
377 | #ifdef JS_GC_ZEAL1 |
378 | const uint32_t* addressOfZealModeBits() { return &zealModeBits.refNoCheck(); } |
379 | void getZealBits(uint32_t* zealBits, uint32_t* frequency, |
380 | uint32_t* nextScheduled); |
381 | void setZeal(uint8_t zeal, uint32_t frequency); |
382 | void unsetZeal(uint8_t zeal); |
383 | bool parseAndSetZeal(const char* str); |
384 | void setNextScheduled(uint32_t count); |
385 | void verifyPreBarriers(); |
386 | void maybeVerifyPreBarriers(bool always); |
387 | bool selectForMarking(JSObject* object); |
388 | void clearSelectedForMarking(); |
389 | void setDeterministic(bool enable); |
390 | void setMarkStackLimit(size_t limit, AutoLockGC& lock); |
391 | #endif |
392 | |
393 | uint64_t nextCellUniqueId() { |
394 | MOZ_ASSERT(nextCellUniqueId_ > 0)do { static_assert( mozilla::detail::AssertionConditionType< decltype(nextCellUniqueId_ > 0)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(nextCellUniqueId_ > 0))), 0))) { do { } while (false); MOZ_ReportAssertionFailure("nextCellUniqueId_ > 0" , "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 394); AnnotateMozCrashReason("MOZ_ASSERT" "(" "nextCellUniqueId_ > 0" ")"); do { *((volatile int*)__null) = 394; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); |
395 | uint64_t uid = ++nextCellUniqueId_; |
396 | return uid; |
397 | } |
398 | |
399 | void setLowMemoryState(bool newState) { lowMemoryState = newState; } |
400 | bool systemHasLowMemory() const { return lowMemoryState; } |
401 | |
402 | public: |
403 | // Internal public interface |
404 | ZoneVector& zones() { return zones_.ref(); } |
405 | gcstats::Statistics& stats() { return stats_.ref(); } |
406 | const gcstats::Statistics& stats() const { return stats_.ref(); } |
407 | State state() const { return incrementalState; } |
408 | bool isHeapCompacting() const { return state() == State::Compact; } |
409 | bool isForegroundSweeping() const { return state() == State::Sweep; } |
410 | bool isBackgroundSweeping() const { return sweepTask.wasStarted(); } |
411 | bool isBackgroundMarking() const { return markTask.wasStarted(); } |
412 | bool isBackgroundDecommitting() const { return decommitTask.wasStarted(); } |
413 | void waitBackgroundSweepEnd(); |
414 | void waitBackgroundDecommitEnd(); |
415 | void waitBackgroundAllocEnd() { allocTask.cancelAndWait(); } |
416 | void waitBackgroundFreeEnd(); |
417 | void waitForBackgroundTasks(); |
418 | bool isWaitingOnBackgroundTask() const; |
419 | |
420 | void lockGC() { lock.lock(); } |
421 | void unlockGC() { lock.unlock(); } |
422 | |
423 | void lockStoreBuffer() { storeBufferLock.lock(); } |
424 | void unlockStoreBuffer() { storeBufferLock.unlock(); } |
425 | |
426 | #ifdef DEBUG1 |
427 | void assertCurrentThreadHasLockedGC() const { |
428 | lock.assertOwnedByCurrentThread(); |
429 | } |
430 | void assertCurrentThreadHasLockedStoreBuffer() const { |
431 | storeBufferLock.assertOwnedByCurrentThread(); |
432 | } |
433 | #endif // DEBUG |
434 | |
435 | void setAlwaysPreserveCode() { alwaysPreserveCode = true; } |
436 | |
437 | void setIncrementalGCEnabled(bool enabled); |
438 | void setNurseryEnabled(bool enabled); |
439 | |
440 | bool isIncrementalGCEnabled() const { return incrementalGCEnabled; } |
441 | bool isPerZoneGCEnabled() const { return perZoneGCEnabled; } |
442 | bool isCompactingGCEnabled() const; |
443 | bool isParallelMarkingEnabled() const { return parallelMarkingEnabled; } |
444 | |
445 | bool isIncrementalGCInProgress() const { |
446 | return state() != State::NotActive && !isVerifyPreBarriersEnabled(); |
447 | } |
448 | |
449 | bool hasForegroundWork() const; |
450 | |
451 | bool isShrinkingGC() const { return gcOptions() == JS::GCOptions::Shrink; } |
452 | |
453 | bool isShutdownGC() const { return gcOptions() == JS::GCOptions::Shutdown; } |
454 | |
455 | #ifdef DEBUG1 |
456 | bool isShuttingDown() const { return hadShutdownGC; } |
457 | #endif |
458 | |
459 | bool initSweepActions(); |
460 | |
461 | void setGrayRootsTracer(JSGrayRootsTracer traceOp, void* data); |
462 | [[nodiscard]] bool addBlackRootsTracer(JSTraceDataOp traceOp, void* data); |
463 | void removeBlackRootsTracer(JSTraceDataOp traceOp, void* data); |
464 | void clearBlackAndGrayRootTracers(); |
465 | |
466 | void setGCCallback(JSGCCallback callback, void* data); |
467 | void callGCCallback(JSGCStatus status, JS::GCReason reason) const; |
468 | void setObjectsTenuredCallback(JSObjectsTenuredCallback callback, void* data); |
469 | void callObjectsTenuredCallback(); |
470 | [[nodiscard]] bool addFinalizeCallback(JSFinalizeCallback callback, |
471 | void* data); |
472 | void removeFinalizeCallback(JSFinalizeCallback callback); |
473 | void setHostCleanupFinalizationRegistryCallback( |
474 | JSHostCleanupFinalizationRegistryCallback callback, void* data); |
475 | void callHostCleanupFinalizationRegistryCallback( |
476 | JSFunction* doCleanup, GlobalObject* incumbentGlobal); |
477 | [[nodiscard]] bool addWeakPointerZonesCallback( |
478 | JSWeakPointerZonesCallback callback, void* data); |
479 | void removeWeakPointerZonesCallback(JSWeakPointerZonesCallback callback); |
480 | [[nodiscard]] bool addWeakPointerCompartmentCallback( |
481 | JSWeakPointerCompartmentCallback callback, void* data); |
482 | void removeWeakPointerCompartmentCallback( |
483 | JSWeakPointerCompartmentCallback callback); |
484 | JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback); |
485 | bool addNurseryCollectionCallback(JS::GCNurseryCollectionCallback callback, |
486 | void* data); |
487 | void removeNurseryCollectionCallback(JS::GCNurseryCollectionCallback callback, |
488 | void* data); |
489 | JS::DoCycleCollectionCallback setDoCycleCollectionCallback( |
490 | JS::DoCycleCollectionCallback callback); |
491 | void callNurseryCollectionCallbacks(JS::GCNurseryProgress progress, |
492 | JS::GCReason reason); |
493 | |
494 | bool addFinalizationRegistry(JSContext* cx, |
495 | Handle<FinalizationRegistryObject*> registry); |
496 | bool registerWithFinalizationRegistry(JSContext* cx, HandleObject target, |
497 | HandleObject record); |
498 | void queueFinalizationRegistryForCleanup(FinalizationQueueObject* queue); |
499 | |
500 | void nukeFinalizationRecordWrapper(JSObject* wrapper, |
501 | FinalizationRecordObject* record); |
502 | void nukeWeakRefWrapper(JSObject* wrapper, WeakRefObject* weakRef); |
503 | |
504 | void setFullCompartmentChecks(bool enable); |
505 | |
506 | // Get the main marking tracer. |
507 | GCMarker& marker() { return *markers[0]; } |
508 | |
509 | JS::Zone* getCurrentSweepGroup() { return currentSweepGroup; } |
510 | unsigned getCurrentSweepGroupIndex() { |
511 | MOZ_ASSERT_IF(unsigned(state()) < unsigned(State::Sweep),do { if (unsigned(state()) < unsigned(State::Sweep)) { do { static_assert( mozilla::detail::AssertionConditionType<decltype (sweepGroupIndex == 0)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(sweepGroupIndex == 0))), 0)) ) { do { } while (false); MOZ_ReportAssertionFailure("sweepGroupIndex == 0" , "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 512); AnnotateMozCrashReason("MOZ_ASSERT" "(" "sweepGroupIndex == 0" ")"); do { *((volatile int*)__null) = 512; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); } } while ( false) |
512 | sweepGroupIndex == 0)do { if (unsigned(state()) < unsigned(State::Sweep)) { do { static_assert( mozilla::detail::AssertionConditionType<decltype (sweepGroupIndex == 0)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(sweepGroupIndex == 0))), 0)) ) { do { } while (false); MOZ_ReportAssertionFailure("sweepGroupIndex == 0" , "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 512); AnnotateMozCrashReason("MOZ_ASSERT" "(" "sweepGroupIndex == 0" ")"); do { *((volatile int*)__null) = 512; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); } } while ( false); |
513 | return sweepGroupIndex; |
514 | } |
515 | |
516 | uint64_t gcNumber() const { return number; } |
517 | void incGcNumber() { ++number; } |
518 | |
519 | uint64_t minorGCCount() const { return minorGCNumber; } |
520 | void incMinorGcNumber() { ++minorGCNumber; } |
521 | |
522 | uint64_t majorGCCount() const { return majorGCNumber; } |
523 | void incMajorGcNumber() { ++majorGCNumber; } |
524 | |
525 | uint64_t gcSliceCount() const { return sliceNumber; } |
526 | void incGcSliceNumber() { ++sliceNumber; } |
527 | |
528 | int64_t defaultSliceBudgetMS() const { return defaultTimeBudgetMS_; } |
529 | |
530 | bool isIncrementalGc() const { return isIncremental; } |
531 | bool isFullGc() const { return isFull; } |
532 | bool isCompactingGc() const { return isCompacting; } |
533 | bool didCompactZones() const { return isCompacting && zonesCompacted; } |
534 | |
535 | bool areGrayBitsValid() const { return grayBitsValid; } |
536 | void setGrayBitsInvalid() { grayBitsValid = false; } |
537 | |
538 | mozilla::TimeStamp lastGCStartTime() const { return lastGCStartTime_; } |
539 | mozilla::TimeStamp lastGCEndTime() const { return lastGCEndTime_; } |
540 | |
541 | bool majorGCRequested() const { |
542 | return majorGCTriggerReason != JS::GCReason::NO_REASON; |
543 | } |
544 | |
545 | double computeHeapGrowthFactor(size_t lastBytes); |
546 | size_t computeTriggerBytes(double growthFactor, size_t lastBytes); |
547 | |
548 | ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_.ref(); } |
549 | ChunkPool& availableChunks(const AutoLockGC& lock) { |
550 | return availableChunks_.ref(); |
551 | } |
552 | ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_.ref(); } |
553 | const ChunkPool& fullChunks(const AutoLockGC& lock) const { |
554 | return fullChunks_.ref(); |
555 | } |
556 | const ChunkPool& availableChunks(const AutoLockGC& lock) const { |
557 | return availableChunks_.ref(); |
558 | } |
559 | const ChunkPool& emptyChunks(const AutoLockGC& lock) const { |
560 | return emptyChunks_.ref(); |
561 | } |
562 | using NonEmptyChunksIter = ChainedIterator<ChunkPool::Iter, 2>; |
563 | NonEmptyChunksIter allNonEmptyChunks(const AutoLockGC& lock) { |
564 | return NonEmptyChunksIter(availableChunks(lock), fullChunks(lock)); |
565 | } |
566 | uint32_t minEmptyChunkCount(const AutoLockGC& lock) const { |
567 | return minEmptyChunkCount_; |
568 | } |
569 | #ifdef DEBUG1 |
570 | void verifyAllChunks(); |
571 | #endif |
572 | |
573 | ArenaChunk* getOrAllocChunk(AutoLockGCBgAlloc& lock); |
574 | void recycleChunk(ArenaChunk* chunk, const AutoLockGC& lock); |
575 | |
576 | #ifdef JS_GC_ZEAL1 |
577 | void startVerifyPreBarriers(); |
578 | void endVerifyPreBarriers(); |
579 | void finishVerifier(); |
580 | bool isVerifyPreBarriersEnabled() const { return verifyPreData.refNoCheck(); } |
581 | bool shouldYieldForZeal(ZealMode mode); |
582 | #else |
583 | bool isVerifyPreBarriersEnabled() const { return false; } |
584 | #endif |
585 | |
586 | #ifdef JSGC_HASH_TABLE_CHECKS |
587 | void checkHashTablesAfterMovingGC(); |
588 | #endif |
589 | |
590 | // Crawl the heap to check whether an arbitary pointer is within a cell of |
591 | // the given kind. (TraceKind::Null means to ignore the kind.) |
592 | bool isPointerWithinTenuredCell( |
593 | void* ptr, JS::TraceKind traceKind = JS::TraceKind::Null); |
594 | |
595 | #ifdef DEBUG1 |
596 | bool hasZone(Zone* target); |
597 | #endif |
598 | |
599 | // Queue memory memory to be freed on a background thread if possible. |
600 | void queueUnusedLifoBlocksForFree(LifoAlloc* lifo); |
601 | void queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo); |
602 | void queueBuffersForFreeAfterMinorGC( |
603 | Nursery::BufferSet& buffers, Nursery::StringBufferVector& stringBuffers); |
604 | |
605 | // Public here for ReleaseArenaLists and FinalizeTypedArenas. |
606 | void releaseArena(Arena* arena, const AutoLockGC& lock); |
607 | void releaseArenas(Arena* arena, const AutoLockGC& lock); |
608 | void releaseArenaList(ArenaList& arenaList, const AutoLockGC& lock); |
609 | |
610 | // Allocator internals. |
611 | static void* refillFreeListInGC(Zone* zone, AllocKind thingKind); |
612 | |
613 | // Delayed marking. |
614 | void delayMarkingChildren(gc::Cell* cell, MarkColor color); |
615 | bool hasDelayedMarking() const; |
616 | void markAllDelayedChildren(ShouldReportMarkTime reportTime); |
617 | |
618 | // If we have yielded to the mutator while foreground finalizing arenas from |
619 | // zone |zone| with kind |kind| then return a list of the arenas finalized so |
620 | // far. These will have been removed from the main arena lists at this |
621 | // point. Otherwise return nullptr. |
622 | SortedArenaList* maybeGetForegroundFinalizedArenas(Zone* zone, |
623 | AllocKind kind); |
624 | |
625 | /* |
626 | * Concurrent sweep infrastructure. |
627 | */ |
628 | void startTask(GCParallelTask& task, AutoLockHelperThreadState& lock); |
629 | void joinTask(GCParallelTask& task, AutoLockHelperThreadState& lock); |
630 | void updateHelperThreadCount(); |
631 | size_t parallelWorkerCount() const; |
632 | |
633 | // GC parallel task dispatch infrastructure. |
634 | size_t getMaxParallelThreads() const; |
635 | void dispatchOrQueueParallelTask(GCParallelTask* task, |
636 | const AutoLockHelperThreadState& lock); |
637 | void maybeDispatchParallelTasks(const AutoLockHelperThreadState& lock); |
638 | void onParallelTaskEnd(bool wasDispatched, |
639 | const AutoLockHelperThreadState& lock); |
640 | |
641 | // Parallel marking. |
642 | bool setParallelMarkingEnabled(bool enabled); |
643 | bool initOrDisableParallelMarking(); |
644 | [[nodiscard]] bool updateMarkersVector(); |
645 | size_t markingWorkerCount() const; |
646 | |
647 | // WeakRefs |
648 | bool registerWeakRef(HandleObject target, HandleObject weakRef); |
649 | void traceKeptObjects(JSTracer* trc); |
650 | |
651 | JS::GCReason lastStartReason() const { return initialReason; } |
652 | |
653 | void updateAllocationRates(); |
654 | |
655 | // Allocator internals |
656 | static void* refillFreeList(JS::Zone* zone, AllocKind thingKind); |
657 | void attemptLastDitchGC(); |
658 | |
659 | // Test mark queue. |
660 | #ifdef DEBUG1 |
661 | const GCVector<HeapPtr<JS::Value>, 0, SystemAllocPolicy>& getTestMarkQueue() |
662 | const; |
663 | [[nodiscard]] bool appendTestMarkQueue(const JS::Value& value); |
664 | void clearTestMarkQueue(); |
665 | size_t testMarkQueuePos() const; |
666 | #endif |
667 | |
668 | private: |
669 | enum IncrementalResult { ResetIncremental = 0, Ok }; |
670 | |
671 | bool hasBuffersForBackgroundFree() const { |
672 | return !lifoBlocksToFree.ref().isEmpty() || |
673 | !buffersToFreeAfterMinorGC.ref().empty() || |
674 | !stringBuffersToReleaseAfterMinorGC.ref().empty(); |
675 | } |
676 | |
677 | [[nodiscard]] bool setParameter(JSGCParamKey key, uint32_t value, |
678 | AutoLockGC& lock); |
679 | void resetParameter(JSGCParamKey key, AutoLockGC& lock); |
680 | uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock); |
681 | bool setThreadParameter(JSGCParamKey key, uint32_t value, AutoLockGC& lock); |
682 | void resetThreadParameter(JSGCParamKey key, AutoLockGC& lock); |
683 | void updateThreadDataStructures(AutoLockGC& lock); |
684 | |
685 | JS::GCOptions gcOptions() const { return maybeGcOptions.ref().ref(); } |
686 | |
687 | TriggerResult checkHeapThreshold(Zone* zone, const HeapSize& heapSize, |
688 | const HeapThreshold& heapThreshold); |
689 | |
690 | void updateSchedulingStateOnGCStart(); |
691 | void updateSchedulingStateOnGCEnd(mozilla::TimeStamp currentTime); |
692 | void updateAllGCStartThresholds(); |
693 | |
694 | // For ArenaLists::allocateFromArena() |
695 | friend class ArenaLists; |
696 | ArenaChunk* pickChunk(AutoLockGCBgAlloc& lock); |
697 | Arena* allocateArena(ArenaChunk* chunk, Zone* zone, AllocKind kind, |
698 | ShouldCheckThresholds checkThresholds, |
699 | const AutoLockGC& lock); |
700 | |
701 | /* |
702 | * Return the list of chunks that can be released outside the GC lock. |
703 | * Must be called either during the GC or with the GC lock taken. |
704 | */ |
705 | friend class BackgroundDecommitTask; |
706 | bool tooManyEmptyChunks(const AutoLockGC& lock); |
707 | ChunkPool expireEmptyChunkPool(const AutoLockGC& lock); |
708 | void freeEmptyChunks(const AutoLockGC& lock); |
709 | void prepareToFreeChunk(ArenaChunkInfo& info); |
710 | void setMinEmptyChunkCount(uint32_t value, const AutoLockGC& lock); |
711 | |
712 | friend class BackgroundAllocTask; |
713 | bool wantBackgroundAllocation(const AutoLockGC& lock) const; |
714 | void startBackgroundAllocTaskIfIdle(); |
715 | |
716 | void requestMajorGC(JS::GCReason reason); |
717 | JS::SliceBudget defaultBudget(JS::GCReason reason, int64_t millis); |
718 | bool maybeIncreaseSliceBudget(JS::SliceBudget& budget, |
719 | mozilla::TimeStamp sliceStartTime, |
720 | mozilla::TimeStamp gcStartTime); |
721 | bool maybeIncreaseSliceBudgetForLongCollections( |
722 | JS::SliceBudget& budget, mozilla::TimeStamp sliceStartTime, |
723 | mozilla::TimeStamp gcStartTime); |
724 | bool maybeIncreaseSliceBudgetForUrgentCollections(JS::SliceBudget& budget); |
725 | IncrementalResult budgetIncrementalGC(bool nonincrementalByAPI, |
726 | JS::GCReason reason, |
727 | JS::SliceBudget& budget); |
728 | void checkZoneIsScheduled(Zone* zone, JS::GCReason reason, |
729 | const char* trigger); |
730 | IncrementalResult resetIncrementalGC(GCAbortReason reason); |
731 | |
732 | // Assert if the system state is such that we should never |
733 | // receive a request to do GC work. |
734 | void checkCanCallAPI(); |
735 | |
  // Check if the system state is such that GC has been suppressed
  // or otherwise delayed.
  [[nodiscard]] bool checkIfGCAllowedInCurrentState(JS::GCReason reason);
739 | |
740 | gcstats::ZoneGCStats scanZonesBeforeGC(); |
741 | |
742 | void setGCOptions(JS::GCOptions options); |
743 | |
744 | void collect(bool nonincrementalByAPI, const JS::SliceBudget& budget, |
745 | JS::GCReason reason) JS_HAZ_GC_CALL; |
746 | |
747 | /* |
748 | * Run one GC "cycle" (either a slice of incremental GC or an entire |
749 | * non-incremental GC). |
750 | * |
751 | * Returns: |
752 | * * ResetIncremental if we "reset" an existing incremental GC, which would |
753 | * force us to run another cycle or |
754 | * * Ok otherwise. |
755 | */ |
756 | [[nodiscard]] IncrementalResult gcCycle(bool nonincrementalByAPI, |
757 | const JS::SliceBudget& budgetArg, |
758 | JS::GCReason reason); |
759 | bool shouldRepeatForDeadZone(JS::GCReason reason); |
760 | |
761 | void incrementalSlice(JS::SliceBudget& budget, JS::GCReason reason, |
762 | bool budgetWasIncreased); |
763 | |
764 | bool mightSweepInThisSlice(bool nonIncremental); |
765 | void collectNurseryFromMajorGC(JS::GCReason reason); |
766 | void collectNursery(JS::GCOptions options, JS::GCReason reason, |
767 | gcstats::PhaseKind phase); |
768 | |
769 | friend class AutoCallGCCallbacks; |
770 | void maybeCallGCCallback(JSGCStatus status, JS::GCReason reason); |
771 | |
772 | void startCollection(JS::GCReason reason); |
773 | |
774 | void purgeRuntime(); |
775 | [[nodiscard]] bool beginPreparePhase(JS::GCReason reason, |
776 | AutoGCSession& session); |
777 | bool prepareZonesForCollection(JS::GCReason reason, bool* isFullOut); |
778 | void unmarkWeakMaps(); |
779 | void endPreparePhase(JS::GCReason reason); |
780 | void beginMarkPhase(AutoGCSession& session); |
781 | bool shouldPreserveJITCode(JS::Realm* realm, |
782 | const mozilla::TimeStamp& currentTime, |
783 | JS::GCReason reason, bool canAllocateMoreCode, |
784 | bool isActiveCompartment); |
785 | void discardJITCodeForGC(); |
786 | void startBackgroundFreeAfterMinorGC(); |
787 | void relazifyFunctionsForShrinkingGC(); |
788 | void purgePropMapTablesForShrinkingGC(); |
789 | void purgeSourceURLsForShrinkingGC(); |
790 | void traceRuntimeForMajorGC(JSTracer* trc, AutoGCSession& session); |
791 | void traceRuntimeAtoms(JSTracer* trc); |
792 | void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark); |
793 | void traceEmbeddingBlackRoots(JSTracer* trc); |
794 | void traceEmbeddingGrayRoots(JSTracer* trc); |
795 | IncrementalProgress traceEmbeddingGrayRoots(JSTracer* trc, |
796 | JS::SliceBudget& budget); |
797 | void checkNoRuntimeRoots(AutoGCSession& session); |
798 | void maybeDoCycleCollection(); |
799 | void findDeadCompartments(); |
800 | |
801 | friend class BackgroundMarkTask; |
802 | enum ParallelMarking : bool { |
803 | SingleThreadedMarking = false, |
804 | AllowParallelMarking = true |
805 | }; |
806 | IncrementalProgress markUntilBudgetExhausted( |
807 | JS::SliceBudget& sliceBudget, |
808 | ParallelMarking allowParallelMarking = SingleThreadedMarking, |
809 | ShouldReportMarkTime reportTime = ReportMarkTime); |
810 | bool canMarkInParallel() const; |
811 | bool initParallelMarking(); |
812 | void finishParallelMarkers(); |
813 | |
814 | bool reserveMarkingThreads(size_t count); |
815 | void releaseMarkingThreads(); |
816 | |
817 | bool hasMarkingWork(MarkColor color) const; |
818 | |
819 | void drainMarkStack(); |
820 | |
821 | #ifdef DEBUG1 |
822 | void assertNoMarkingWork() const; |
823 | #else |
824 | void assertNoMarkingWork() const {} |
825 | #endif |
826 | |
827 | void markDelayedChildren(gc::Arena* arena, MarkColor color); |
828 | void processDelayedMarkingList(gc::MarkColor color); |
829 | void rebuildDelayedMarkingList(); |
830 | void appendToDelayedMarkingList(gc::Arena** listTail, gc::Arena* arena); |
831 | void resetDelayedMarking(); |
832 | template <typename F> |
833 | void forEachDelayedMarkingArena(F&& f); |
834 | |
835 | template <class ZoneIterT> |
836 | IncrementalProgress markWeakReferences(JS::SliceBudget& budget); |
837 | IncrementalProgress markWeakReferencesInCurrentGroup(JS::SliceBudget& budget); |
838 | IncrementalProgress markGrayRoots(JS::SliceBudget& budget, |
839 | gcstats::PhaseKind phase); |
840 | void markBufferedGrayRoots(JS::Zone* zone); |
841 | IncrementalProgress markAllWeakReferences(); |
842 | void markAllGrayReferences(gcstats::PhaseKind phase); |
843 | |
844 | // The mark queue is a testing-only feature for controlling mark ordering and |
845 | // yield timing. |
846 | enum MarkQueueProgress { |
847 | QueueYielded, // End this incremental GC slice, if possible |
848 | QueueComplete, // Done with the queue |
849 | QueueSuspended // Continue the GC without ending the slice |
850 | }; |
851 | MarkQueueProgress processTestMarkQueue(); |
852 | |
853 | // GC Sweeping. Implemented in Sweeping.cpp. |
854 | void beginSweepPhase(JS::GCReason reason, AutoGCSession& session); |
855 | void dropStringWrappers(); |
856 | void groupZonesForSweeping(JS::GCReason reason); |
857 | [[nodiscard]] bool findSweepGroupEdges(); |
858 | [[nodiscard]] bool addEdgesForMarkQueue(); |
859 | void moveToNextSweepGroup(); |
860 | void resetGrayList(Compartment* comp); |
861 | IncrementalProgress beginMarkingSweepGroup(JS::GCContext* gcx, |
862 | JS::SliceBudget& budget); |
863 | IncrementalProgress markGrayRootsInCurrentGroup(JS::GCContext* gcx, |
864 | JS::SliceBudget& budget); |
865 | IncrementalProgress markGray(JS::GCContext* gcx, JS::SliceBudget& budget); |
866 | IncrementalProgress endMarkingSweepGroup(JS::GCContext* gcx, |
867 | JS::SliceBudget& budget); |
868 | void markIncomingGrayCrossCompartmentPointers(); |
869 | IncrementalProgress beginSweepingSweepGroup(JS::GCContext* gcx, |
870 | JS::SliceBudget& budget); |
871 | void initBackgroundSweep(Zone* zone, JS::GCContext* gcx, |
872 | const FinalizePhase& phase); |
873 | IncrementalProgress markDuringSweeping(JS::GCContext* gcx, |
874 | JS::SliceBudget& budget); |
875 | void updateAtomsBitmap(); |
876 | void sweepCCWrappers(); |
877 | void sweepRealmGlobals(); |
878 | void sweepEmbeddingWeakPointers(JS::GCContext* gcx); |
879 | void sweepMisc(); |
880 | void sweepCompressionTasks(); |
881 | void sweepWeakMaps(); |
882 | void sweepUniqueIds(); |
883 | void sweepObjectsWithWeakPointers(); |
884 | void sweepDebuggerOnMainThread(JS::GCContext* gcx); |
885 | void sweepJitDataOnMainThread(JS::GCContext* gcx); |
886 | void sweepFinalizationObserversOnMainThread(); |
887 | void traceWeakFinalizationObserverEdges(JSTracer* trc, Zone* zone); |
888 | void sweepWeakRefs(); |
889 | IncrementalProgress endSweepingSweepGroup(JS::GCContext* gcx, |
890 | JS::SliceBudget& budget); |
891 | IncrementalProgress performSweepActions(JS::SliceBudget& sliceBudget); |
892 | void startSweepingAtomsTable(); |
893 | IncrementalProgress sweepAtomsTable(JS::GCContext* gcx, |
894 | JS::SliceBudget& budget); |
895 | IncrementalProgress sweepWeakCaches(JS::GCContext* gcx, |
896 | JS::SliceBudget& budget); |
897 | IncrementalProgress finalizeAllocKind(JS::GCContext* gcx, |
898 | JS::SliceBudget& budget); |
899 | bool foregroundFinalize(JS::GCContext* gcx, Zone* zone, AllocKind thingKind, |
900 | JS::SliceBudget& sliceBudget, |
901 | SortedArenaList& sweepList); |
902 | IncrementalProgress sweepPropMapTree(JS::GCContext* gcx, |
903 | JS::SliceBudget& budget); |
904 | void endSweepPhase(bool destroyingRuntime); |
905 | void queueZonesAndStartBackgroundSweep(ZoneList&& zones); |
906 | void sweepFromBackgroundThread(AutoLockHelperThreadState& lock); |
907 | void startBackgroundFree(); |
908 | void freeFromBackgroundThread(AutoLockHelperThreadState& lock); |
909 | void sweepBackgroundThings(ZoneList& zones); |
910 | void backgroundFinalize(JS::GCContext* gcx, Zone* zone, AllocKind kind, |
911 | Arena** empty); |
912 | void prepareForSweepSlice(JS::GCReason reason); |
913 | void assertBackgroundSweepingFinished(); |
// NOTE(review): restored "#ifdef DEBUG" — the extracted source read "DEBUG1".
#ifdef DEBUG
  // Debug-only: whether |zone| belongs to the sweep group currently being
  // swept.
  bool zoneInCurrentSweepGroup(Zone* zone) const;
#endif
917 | |
918 | bool allCCVisibleZonesWereCollected(); |
919 | void sweepZones(JS::GCContext* gcx, bool destroyingRuntime); |
920 | bool shouldDecommit() const; |
921 | void startDecommit(); |
922 | void decommitEmptyChunks(const bool& cancel, AutoLockGC& lock); |
923 | void decommitFreeArenas(const bool& cancel, AutoLockGC& lock); |
924 | void decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock); |
925 | |
926 | // Compacting GC. Implemented in Compacting.cpp. |
927 | bool shouldCompact(); |
928 | void beginCompactPhase(); |
929 | IncrementalProgress compactPhase(JS::GCReason reason, |
930 | JS::SliceBudget& sliceBudget, |
931 | AutoGCSession& session); |
932 | void endCompactPhase(); |
933 | void sweepZoneAfterCompacting(MovingTracer* trc, Zone* zone); |
934 | bool canRelocateZone(Zone* zone) const; |
935 | [[nodiscard]] bool relocateArenas(Zone* zone, JS::GCReason reason, |
936 | Arena*& relocatedListOut, |
937 | JS::SliceBudget& sliceBudget); |
938 | void updateCellPointers(Zone* zone, AllocKinds kinds); |
939 | void updateAllCellPointers(MovingTracer* trc, Zone* zone); |
940 | void updateZonePointersToRelocatedCells(Zone* zone); |
941 | void updateRuntimePointersToRelocatedCells(AutoGCSession& session); |
942 | void clearRelocatedArenas(Arena* arenaList, JS::GCReason reason); |
943 | void clearRelocatedArenasWithoutUnlocking(Arena* arenaList, |
944 | JS::GCReason reason, |
945 | const AutoLockGC& lock); |
946 | void releaseRelocatedArenas(Arena* arenaList); |
947 | void releaseRelocatedArenasWithoutUnlocking(Arena* arenaList, |
948 | const AutoLockGC& lock); |
949 | #ifdef DEBUG1 |
950 | void protectOrReleaseRelocatedArenas(Arena* arenaList, JS::GCReason reason); |
951 | void protectAndHoldArenas(Arena* arenaList); |
952 | void unprotectHeldRelocatedArenas(const AutoLockGC& lock); |
953 | void releaseHeldRelocatedArenas(); |
954 | void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock); |
955 | #endif |
956 | |
957 | /* |
958 | * Whether to immediately trigger a slice after a background task |
959 | * finishes. This may not happen at a convenient time, so the consideration is |
960 | * whether the slice will run quickly or may take a long time. |
961 | */ |
962 | enum ShouldTriggerSliceWhenFinished : bool { |
963 | DontTriggerSliceWhenFinished = false, |
964 | TriggerSliceWhenFinished = true |
965 | }; |
966 | |
967 | IncrementalProgress waitForBackgroundTask( |
968 | GCParallelTask& task, const JS::SliceBudget& budget, |
969 | bool shouldPauseMutator, ShouldTriggerSliceWhenFinished triggerSlice); |
970 | |
971 | void maybeRequestGCAfterBackgroundTask(const AutoLockHelperThreadState& lock); |
972 | void cancelRequestedGCAfterBackgroundTask(); |
973 | void finishCollection(JS::GCReason reason); |
974 | void maybeStopPretenuring(); |
975 | void checkGCStateNotInUse(); |
976 | IncrementalProgress joinBackgroundMarkTask(); |
977 | |
978 | #ifdef JS_GC_ZEAL1 |
979 | void computeNonIncrementalMarkingForValidation(AutoGCSession& session); |
980 | void validateIncrementalMarking(); |
981 | void finishMarkingValidation(); |
982 | #endif |
983 | |
// NOTE(review): restored "#ifdef DEBUG" — the extracted source read "DEBUG1".
#ifdef DEBUG
  void checkForCompartmentMismatches();
#endif
987 | |
988 | void callFinalizeCallbacks(JS::GCContext* gcx, JSFinalizeStatus status) const; |
989 | void callWeakPointerZonesCallbacks(JSTracer* trc) const; |
990 | void callWeakPointerCompartmentCallbacks(JSTracer* trc, |
991 | JS::Compartment* comp) const; |
992 | void callDoCycleCollectionCallback(JSContext* cx); |
993 | |
994 | public: |
995 | JSRuntime* const rt; |
996 | |
997 | // Embedders can use this zone however they wish. |
998 | MainThreadData<JS::Zone*> systemZone; |
999 | |
1000 | MainThreadData<JS::GCContext> mainThreadContext; |
1001 | |
1002 | private: |
1003 | // For parent runtimes, a zone containing atoms that is shared by child |
1004 | // runtimes. |
1005 | MainThreadData<Zone*> sharedAtomsZone_; |
1006 | |
1007 | // All zones in the runtime. The first element is always the atoms zone. |
1008 | MainThreadOrGCTaskData<ZoneVector> zones_; |
1009 | |
1010 | // Any activity affecting the heap. |
1011 | MainThreadOrGCTaskData<JS::HeapState> heapState_; |
1012 | friend class AutoHeapSession; |
1013 | friend class JS::AutoEnterCycleCollection; |
1014 | |
1015 | UnprotectedData<gcstats::Statistics> stats_; |
1016 | |
1017 | public: |
1018 | js::StringStats stringStats; |
1019 | |
1020 | Vector<UniquePtr<GCMarker>, 1, SystemAllocPolicy> markers; |
1021 | |
1022 | // Delayed marking support in case we OOM pushing work onto the mark stack. |
1023 | MainThreadOrGCTaskData<js::gc::Arena*> delayedMarkingList; |
1024 | MainThreadOrGCTaskData<bool> delayedMarkingWorkAdded; |
// NOTE(review): restored "#ifdef DEBUG" — the extracted source read "DEBUG1".
#ifdef DEBUG
  /* Count of arenas that are currently in the stack. */
  MainThreadOrGCTaskData<size_t> markLaterArenas;
#endif
1029 | |
1030 | SweepingTracer sweepingTracer; |
1031 | |
1032 | /* Track total GC heap size for this runtime. */ |
1033 | HeapSize heapSize; |
1034 | |
1035 | /* GC scheduling state and parameters. */ |
1036 | GCSchedulingTunables tunables; |
1037 | GCSchedulingState schedulingState; |
1038 | MainThreadData<bool> fullGCRequested; |
1039 | |
1040 | // Helper thread configuration. |
1041 | MainThreadData<double> helperThreadRatio; |
1042 | MainThreadData<size_t> maxHelperThreads; |
1043 | MainThreadOrGCTaskData<size_t> helperThreadCount; |
1044 | MainThreadData<size_t> maxMarkingThreads; |
1045 | MainThreadData<size_t> markingThreadCount; |
1046 | |
1047 | // Per-runtime helper thread task queue. Can be accessed from helper threads |
1048 | // in maybeDispatchParallelTasks(). |
1049 | HelperThreadLockData<size_t> maxParallelThreads; |
1050 | HelperThreadLockData<size_t> dispatchedParallelTasks; |
1051 | HelperThreadLockData<GCParallelTaskList> queuedParallelTasks; |
1052 | |
1053 | // State used for managing atom mark bitmaps in each zone. |
1054 | AtomMarkingRuntime atomMarking; |
1055 | |
1056 | /* |
1057 | * Pointer to a callback that, if set, will be used to create a |
1058 | * budget for internally-triggered GCs. |
1059 | */ |
1060 | MainThreadData<JS::CreateSliceBudgetCallback> createBudgetCallback; |
1061 | |
1062 | private: |
1063 | // Arenas used for permanent things created at startup and shared by child |
1064 | // runtimes. |
1065 | MainThreadData<ArenaList> permanentAtoms; |
1066 | MainThreadData<ArenaList> permanentWellKnownSymbols; |
1067 | |
1068 | // When chunks are empty, they reside in the emptyChunks pool and are |
1069 | // re-used as needed or eventually expired if not re-used. The emptyChunks |
1070 | // pool gets refilled from the background allocation task heuristically so |
1071 | // that empty chunks should always be available for immediate allocation |
1072 | // without syscalls. |
1073 | GCLockData<ChunkPool> emptyChunks_; |
1074 | |
1075 | // Chunks which have had some, but not all, of their arenas allocated live |
1076 | // in the available chunk lists. When all available arenas in a chunk have |
1077 | // been allocated, the chunk is removed from the available list and moved |
1078 | // to the fullChunks pool. During a GC, if all arenas are free, the chunk |
1079 | // is moved back to the emptyChunks pool and scheduled for eventual |
1080 | // release. |
1081 | GCLockData<ChunkPool> availableChunks_; |
1082 | |
1083 | // When all arenas in a chunk are used, it is moved to the fullChunks pool |
1084 | // so as to reduce the cost of operations on the available lists. |
1085 | GCLockData<ChunkPool> fullChunks_; |
1086 | |
1087 | /* |
1088 | * JSGC_MIN_EMPTY_CHUNK_COUNT |
1089 | * |
1090 | * Controls the number of empty chunks reserved for future allocation. |
1091 | * |
1092 | * They can be read off main thread by the background allocation task and the |
1093 | * background decommit task. |
1094 | */ |
1095 | GCLockData<uint32_t> minEmptyChunkCount_; |
1096 | |
1097 | MainThreadData<RootedValueMap> rootsHash; |
1098 | |
1099 | // An incrementing id used to assign unique ids to cells that require one. |
1100 | MainThreadData<uint64_t> nextCellUniqueId_; |
1101 | |
1102 | MainThreadData<VerifyPreTracer*> verifyPreData; |
1103 | |
1104 | MainThreadData<mozilla::TimeStamp> lastGCStartTime_; |
1105 | MainThreadData<mozilla::TimeStamp> lastGCEndTime_; |
1106 | |
1107 | WriteOnceData<bool> initialized; |
1108 | MainThreadData<bool> incrementalGCEnabled; |
1109 | MainThreadData<bool> perZoneGCEnabled; |
1110 | |
1111 | mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters; |
1112 | |
1113 | /* During shutdown, the GC needs to clean up every possible object. */ |
1114 | MainThreadData<bool> cleanUpEverything; |
1115 | |
1116 | /* |
1117 | * The gray bits can become invalid if UnmarkGray overflows the stack. A |
1118 | * full GC will reset this bit, since it fills in all the gray bits. |
1119 | */ |
1120 | UnprotectedData<bool> grayBitsValid; |
1121 | |
1122 | mozilla::Atomic<JS::GCReason, mozilla::ReleaseAcquire> majorGCTriggerReason; |
1123 | |
1124 | /* Incremented at the start of every minor GC. */ |
1125 | MainThreadData<uint64_t> minorGCNumber; |
1126 | |
1127 | /* Incremented at the start of every major GC. */ |
1128 | MainThreadData<uint64_t> majorGCNumber; |
1129 | |
1130 | /* Incremented on every GC slice or minor collection. */ |
1131 | MainThreadData<uint64_t> number; |
1132 | |
1133 | /* Incremented on every GC slice. */ |
1134 | MainThreadData<uint64_t> sliceNumber; |
1135 | |
1136 | /* |
1137 | * This runtime's current contribution to the global number of helper threads |
1138 | * 'reserved' for parallel marking. Does not affect other uses of helper |
1139 | * threads. |
1140 | */ |
1141 | MainThreadData<size_t> reservedMarkingThreads; |
1142 | |
1143 | /* Whether the currently running GC can finish in multiple slices. */ |
1144 | MainThreadOrGCTaskData<bool> isIncremental; |
1145 | |
1146 | /* Whether all zones are being collected in first GC slice. */ |
1147 | MainThreadData<bool> isFull; |
1148 | |
1149 | /* Whether the heap will be compacted at the end of GC. */ |
1150 | MainThreadData<bool> isCompacting; |
1151 | |
1152 | /* Whether to use parallel marking. */ |
1153 | MainThreadData<ParallelMarking> useParallelMarking; |
1154 | |
1155 | /* The invocation kind of the current GC, set at the start of collection. */ |
1156 | MainThreadOrGCTaskData<mozilla::Maybe<JS::GCOptions>> maybeGcOptions; |
1157 | |
1158 | /* The initial GC reason, taken from the first slice. */ |
1159 | MainThreadData<JS::GCReason> initialReason; |
1160 | |
1161 | /* |
1162 | * The current incremental GC phase. This is also used internally in |
1163 | * non-incremental GC. |
1164 | */ |
1165 | MainThreadOrGCTaskData<State> incrementalState; |
1166 | |
1167 | /* The incremental state at the start of this slice. */ |
1168 | MainThreadOrGCTaskData<State> initialState; |
1169 | |
1170 | /* Whether to pay attention the zeal settings in this incremental slice. */ |
1171 | #ifdef JS_GC_ZEAL1 |
1172 | MainThreadData<bool> useZeal; |
1173 | #else |
1174 | const bool useZeal; |
1175 | #endif |
1176 | |
1177 | /* Indicates that the last incremental slice exhausted the mark stack. */ |
1178 | MainThreadData<bool> lastMarkSlice; |
1179 | |
1180 | // Whether it's currently safe to yield to the mutator in an incremental GC. |
1181 | MainThreadData<bool> safeToYield; |
1182 | |
1183 | // Whether to do any marking caused by barriers on a background thread during |
1184 | // incremental sweeping, while also sweeping zones which have finished |
1185 | // marking. |
1186 | MainThreadData<bool> markOnBackgroundThreadDuringSweeping; |
1187 | |
1188 | // Whether any sweeping and decommitting will run on a separate GC helper |
1189 | // thread. |
1190 | MainThreadData<bool> useBackgroundThreads; |
1191 | |
1192 | // Whether we have already discarded JIT code for all collected zones in this |
1193 | // slice. |
1194 | MainThreadData<bool> haveDiscardedJITCodeThisSlice; |
1195 | |
// NOTE(review): restored "#ifdef DEBUG" — the extracted source read "DEBUG1".
#ifdef DEBUG
  /* Shutdown has started. Further collections must be shutdown collections. */
  MainThreadData<bool> hadShutdownGC;
#endif
1200 | |
1201 | /* Singly linked list of zones to be swept in the background. */ |
1202 | HelperThreadLockData<ZoneList> backgroundSweepZones; |
1203 | |
  /*
   * Whether to trigger a GC slice after a background task is complete, so that
   * the collector can continue or finish collecting. This is only used for the
   * tasks that run concurrently with the mutator, which are background
   * finalization and background decommit.
   */
  HelperThreadLockData<bool> requestSliceAfterBackgroundTask;
1211 | |
1212 | /* |
1213 | * Free LIFO blocks are transferred to these allocators before being freed on |
1214 | * a background thread. |
1215 | */ |
1216 | HelperThreadLockData<LifoAlloc> lifoBlocksToFree; |
1217 | MainThreadData<LifoAlloc> lifoBlocksToFreeAfterFullMinorGC; |
1218 | MainThreadData<LifoAlloc> lifoBlocksToFreeAfterNextMinorGC; |
1219 | HelperThreadLockData<Nursery::BufferSet> buffersToFreeAfterMinorGC; |
1220 | HelperThreadLockData<Nursery::StringBufferVector> |
1221 | stringBuffersToReleaseAfterMinorGC; |
1222 | |
1223 | /* Index of current sweep group (for stats). */ |
1224 | MainThreadData<unsigned> sweepGroupIndex; |
1225 | |
1226 | /* |
1227 | * Incremental sweep state. |
1228 | */ |
1229 | MainThreadData<JS::Zone*> sweepGroups; |
1230 | MainThreadOrGCTaskData<JS::Zone*> currentSweepGroup; |
1231 | MainThreadData<UniquePtr<SweepAction>> sweepActions; |
1232 | MainThreadOrGCTaskData<JS::Zone*> sweepZone; |
1233 | MainThreadOrGCTaskData<AllocKind> sweepAllocKind; |
1234 | MainThreadData<mozilla::Maybe<AtomsTable::SweepIterator>> maybeAtomsToSweep; |
1235 | MainThreadOrGCTaskData<mozilla::Maybe<WeakCacheSweepIterator>> |
1236 | weakCachesToSweep; |
1237 | MainThreadData<bool> abortSweepAfterCurrentGroup; |
1238 | MainThreadOrGCTaskData<IncrementalProgress> sweepMarkResult; |
1239 | |
1240 | /* |
1241 | * During incremental foreground finalization, we may have a list of arenas of |
1242 | * the current AllocKind and Zone whose contents have been finalized but which |
1243 | * have not yet been merged back into the main arena lists. |
1244 | */ |
1245 | MainThreadOrGCTaskData<JS::Zone*> foregroundFinalizedZone; |
1246 | MainThreadOrGCTaskData<AllocKind> foregroundFinalizedAllocKind; |
1247 | MainThreadData<mozilla::Maybe<SortedArenaList>> foregroundFinalizedArenas; |
1248 | |
// NOTE(review): restored "#ifdef DEBUG" — the extracted source read "DEBUG1".
#ifdef DEBUG
  /*
   * List of objects to mark at the beginning of a GC for testing purposes. May
   * also contain string directives to change mark color or wait until different
   * phases of the GC.
   *
   * This is a WeakCache because not everything in this list is guaranteed to
   * end up marked (eg if you insert an object from an already-processed sweep
   * group in the middle of an incremental GC). Also, the mark queue is not
   * used during shutdown GCs. In either case, unmarked objects may need to be
   * discarded.
   */
  JS::WeakCache<GCVector<HeapPtr<JS::Value>, 0, SystemAllocPolicy>>
      testMarkQueue;

  /* Position within the test mark queue. */
  size_t queuePos = 0;

  /* The test marking queue might want to be marking a particular color. */
  mozilla::Maybe<js::gc::MarkColor> queueMarkColor;

  // During gray marking, delay AssertCellIsNotGray checks by
  // recording the cell pointers here and checking after marking has
  // finished.
  MainThreadData<Vector<const Cell*, 0, SystemAllocPolicy>>
      cellsToAssertNotGray;
  friend void js::gc::detail::AssertCellIsNotGray(const Cell*);
#endif
1277 | |
1278 | friend class SweepGroupsIter; |
1279 | |
1280 | /* |
1281 | * Incremental compacting state. |
1282 | */ |
1283 | MainThreadData<bool> startedCompacting; |
1284 | MainThreadData<ZoneList> zonesToMaybeCompact; |
1285 | MainThreadData<size_t> zonesCompacted; |
// NOTE(review): restored "#ifdef DEBUG" — the extracted source read "DEBUG1".
#ifdef DEBUG
  // Relocated arenas held (possibly mprotected) until the next GC, for
  // catching stale pointers into moved cells.
  GCLockData<Arena*> relocatedArenasToRelease;
#endif
1289 | |
// NOTE(review): restored "#ifdef JS_GC_ZEAL" — the extracted source read
// "JS_GC_ZEAL1".
#ifdef JS_GC_ZEAL
  MainThreadData<MarkingValidator*> markingValidator;
#endif
1293 | |
1294 | /* |
1295 | * Default budget for incremental GC slice. See js/SliceBudget.h. |
1296 | * |
1297 | * JSGC_SLICE_TIME_BUDGET_MS |
1298 | * pref: javascript.options.mem.gc_incremental_slice_ms, |
1299 | */ |
1300 | MainThreadData<int64_t> defaultTimeBudgetMS_; |
1301 | |
1302 | /* |
1303 | * Whether compacting GC is enabled globally. |
1304 | * |
1305 | * JSGC_COMPACTING_ENABLED |
1306 | * pref: javascript.options.mem.gc_compacting |
1307 | */ |
1308 | MainThreadData<bool> compactingEnabled; |
1309 | |
1310 | /* |
1311 | * Whether generational GC is enabled globally. |
1312 | * |
1313 | * JSGC_NURSERY_ENABLED |
1314 | * pref: javascript.options.mem.gc_generational |
1315 | */ |
1316 | MainThreadData<bool> nurseryEnabled; |
1317 | |
1318 | /* |
1319 | * Whether parallel marking is enabled globally. |
1320 | * |
1321 | * JSGC_PARALLEL_MARKING_ENABLED |
1322 | * pref: javascript.options.mem.gc_parallel_marking |
1323 | */ |
1324 | MainThreadData<bool> parallelMarkingEnabled; |
1325 | |
1326 | MainThreadData<bool> rootsRemoved; |
1327 | |
1328 | /* |
1329 | * These options control the zealousness of the GC. At every allocation, |
1330 | * nextScheduled is decremented. When it reaches zero we do a full GC. |
1331 | * |
1332 | * At this point, if zeal_ is one of the types that trigger periodic |
1333 | * collection, then nextScheduled is reset to the value of zealFrequency. |
1334 | * Otherwise, no additional GCs take place. |
1335 | * |
1336 | * You can control these values in several ways: |
1337 | * - Set the JS_GC_ZEAL environment variable |
1338 | * - Call gczeal() or schedulegc() from inside shell-executed JS code |
1339 | * (see the help for details) |
1340 | * |
1341 | * If gcZeal_ == 1 then we perform GCs in select places (during MaybeGC and |
1342 | * whenever we are notified that GC roots have been removed). This option is |
1343 | * mainly useful to embedders. |
1344 | * |
1345 | * We use zeal_ == 4 to enable write barrier verification. See the comment |
1346 | * in gc/Verifier.cpp for more information about this. |
1347 | * |
1348 | * zeal_ values from 8 to 10 periodically run different types of |
1349 | * incremental GC. |
1350 | * |
1351 | * zeal_ value 14 performs periodic shrinking collections. |
1352 | */ |
1353 | #ifdef JS_GC_ZEAL1 |
1354 | static_assert(size_t(ZealMode::Count) <= 32, |
1355 | "Too many zeal modes to store in a uint32_t"); |
1356 | MainThreadData<uint32_t> zealModeBits; |
1357 | MainThreadData<int> zealFrequency; |
1358 | MainThreadData<int> nextScheduled; |
1359 | MainThreadData<bool> deterministicOnly; |
1360 | MainThreadData<int> zealSliceBudget; |
1361 | MainThreadData<size_t> maybeMarkStackLimit; |
1362 | |
1363 | MainThreadData<PersistentRooted<GCVector<JSObject*, 0, SystemAllocPolicy>>> |
1364 | selectedForMarking; |
1365 | #endif |
1366 | |
1367 | MainThreadData<bool> fullCompartmentChecks; |
1368 | |
1369 | MainThreadData<uint32_t> gcCallbackDepth; |
1370 | |
1371 | MainThreadData<Callback<JSGCCallback>> gcCallback; |
1372 | MainThreadData<Callback<JS::DoCycleCollectionCallback>> |
1373 | gcDoCycleCollectionCallback; |
1374 | MainThreadData<Callback<JSObjectsTenuredCallback>> tenuredCallback; |
1375 | MainThreadData<CallbackVector<JSFinalizeCallback>> finalizeCallbacks; |
1376 | MainThreadOrGCTaskData<Callback<JSHostCleanupFinalizationRegistryCallback>> |
1377 | hostCleanupFinalizationRegistryCallback; |
1378 | MainThreadData<CallbackVector<JSWeakPointerZonesCallback>> |
1379 | updateWeakPointerZonesCallbacks; |
1380 | MainThreadData<CallbackVector<JSWeakPointerCompartmentCallback>> |
1381 | updateWeakPointerCompartmentCallbacks; |
1382 | MainThreadData<CallbackVector<JS::GCNurseryCollectionCallback>> |
1383 | nurseryCollectionCallbacks; |
1384 | |
1385 | /* |
1386 | * The trace operations to trace embedding-specific GC roots. One is for |
1387 | * tracing through black roots and the other is for tracing through gray |
1388 | * roots. The black/gray distinction is only relevant to the cycle |
1389 | * collector. |
1390 | */ |
1391 | MainThreadData<CallbackVector<JSTraceDataOp>> blackRootTracers; |
1392 | MainThreadOrGCTaskData<Callback<JSGrayRootsTracer>> grayRootTracer; |
1393 | |
1394 | /* Always preserve JIT code during GCs, for testing. */ |
1395 | MainThreadData<bool> alwaysPreserveCode; |
1396 | |
1397 | /* Count of the number of zones that are currently in page load. */ |
1398 | MainThreadData<size_t> inPageLoadCount; |
1399 | |
1400 | MainThreadData<bool> lowMemoryState; |
1401 | |
1402 | /* |
1403 | * General purpose GC lock, used for synchronising operations on |
1404 | * arenas and during parallel marking. |
1405 | */ |
1406 | friend class js::AutoLockGC; |
1407 | friend class js::AutoLockGCBgAlloc; |
1408 | Mutex lock MOZ_UNANNOTATED; |
1409 | |
1410 | /* |
1411 | * Lock used to synchronise access to the store buffer during parallel |
1412 | * sweeping. |
1413 | */ |
1414 | Mutex storeBufferLock MOZ_UNANNOTATED; |
1415 | |
1416 | /* Lock used to synchronise access to delayed marking state. */ |
1417 | Mutex delayedMarkingLock MOZ_UNANNOTATED; |
1418 | |
1419 | friend class BackgroundSweepTask; |
1420 | friend class BackgroundFreeTask; |
1421 | |
1422 | BackgroundAllocTask allocTask; |
1423 | BackgroundUnmarkTask unmarkTask; |
1424 | BackgroundMarkTask markTask; |
1425 | BackgroundSweepTask sweepTask; |
1426 | BackgroundFreeTask freeTask; |
1427 | BackgroundDecommitTask decommitTask; |
1428 | |
1429 | MainThreadData<Nursery> nursery_; |
1430 | |
1431 | // The store buffer used to track tenured to nursery edges for generational |
1432 | // GC. This is accessed off main thread when sweeping WeakCaches. |
1433 | MainThreadOrGCTaskData<gc::StoreBuffer> storeBuffer_; |
1434 | |
1435 | mozilla::TimeStamp lastLastDitchTime; |
1436 | |
1437 | // The last time per-zone allocation rates were updated. |
1438 | MainThreadData<mozilla::TimeStamp> lastAllocRateUpdateTime; |
1439 | |
1440 | // Total collector time since per-zone allocation rates were last updated. |
1441 | MainThreadData<mozilla::TimeDuration> collectorTimeSinceAllocRateUpdate; |
1442 | |
1443 | friend class MarkingValidator; |
1444 | friend class AutoEnterIteration; |
1445 | }; |
1446 | |
1447 | #ifndef JS_GC_ZEAL1 |
1448 | inline bool GCRuntime::hasZealMode(ZealMode mode) const { return false; } |
1449 | inline void GCRuntime::clearZealMode(ZealMode mode) {} |
1450 | inline bool GCRuntime::needZealousGC() { return false; } |
1451 | inline bool GCRuntime::zealModeControlsYieldPoint() const { return false; } |
1452 | #endif |
1453 | |
1454 | /* Prevent compartments and zones from being collected during iteration. */ |
1455 | class MOZ_RAII AutoEnterIteration { |
1456 | GCRuntime* gc; |
1457 | |
1458 | public: |
1459 | explicit AutoEnterIteration(GCRuntime* gc_) : gc(gc_) { |
1460 | ++gc->numActiveZoneIters; |
1461 | } |
1462 | |
1463 | ~AutoEnterIteration() { |
1464 | MOZ_ASSERT(gc->numActiveZoneIters)do { static_assert( mozilla::detail::AssertionConditionType< decltype(gc->numActiveZoneIters)>::isValid, "invalid assertion condition" ); if ((__builtin_expect(!!(!(!!(gc->numActiveZoneIters))) , 0))) { do { } while (false); MOZ_ReportAssertionFailure("gc->numActiveZoneIters" , "/var/lib/jenkins/workspace/firefox-scan-build/js/src/gc/GCRuntime.h" , 1464); AnnotateMozCrashReason("MOZ_ASSERT" "(" "gc->numActiveZoneIters" ")"); do { *((volatile int*)__null) = 1464; __attribute__((nomerge )) ::abort(); } while (false); } } while (false); |
1465 | --gc->numActiveZoneIters; |
1466 | } |
1467 | }; |
1468 | |
// Declaration only — definition not visible in this chunk. Presumably returns
// true when currentTime is within some animation-activity window after
// lastAnimationTime (used by GC scheduling heuristics) — TODO confirm against
// the definition.
1469 | bool IsCurrentlyAnimating(const mozilla::TimeStamp& lastAnimationTime, |
1470 |                           const mozilla::TimeStamp& currentTime); |
1471 | |
1472 | } /* namespace gc */ |
1473 | } /* namespace js */ |
1474 | |
1475 | #endif |