Bug Summary

File: /root/firefox-clang/memory/build/PHC.cpp
Warning: line 656, column 7
Excessive padding in 'class PHC' (80 padding bytes, where 16 is optimal). Optimal fields order: mAvgFirstAllocDelay, mPhcState, mNow, mRNG, mFreePageListHead, mFreePageListTail, mMutex, mAllocPages, mAvgAllocDelay, mAvgPageReuseDelay, consider reordering the fields or adding explicit padding members
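
A field-reordering change along the lines the checker suggests normally removes the padding. The sketch below is a hypothetical illustration of the underlying issue only (the types and names are not PHC's actual members): interleaving small fields with 8-byte-aligned fields forces the compiler to insert padding, while declaring fields in decreasing alignment order avoids it.

#include <cstdint>

// Hypothetical illustration only; these are not PHC's fields.
struct Padded {   // 1 + 7 (pad) + 8 + 1 + 7 (tail pad) = 24 bytes on x86_64
  uint8_t mSmallA;
  uint64_t mBig;
  uint8_t mSmallB;
};

struct Packed {   // 8 + 1 + 1 + 6 (tail pad) = 16 bytes on x86_64
  uint64_t mBig;
  uint8_t mSmallA;
  uint8_t mSmallB;
};

static_assert(sizeof(Padded) > sizeof(Packed),
              "declaring fields in decreasing alignment order shrinks the struct");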

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name Unified_cpp_memory_build0.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -ffp-contract=off -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/root/firefox-clang/obj-x86_64-pc-linux-gnu/memory/build -fcoverage-compilation-dir=/root/firefox-clang/obj-x86_64-pc-linux-gnu/memory/build -resource-dir /usr/lib/llvm-21/lib/clang/21 -include /root/firefox-clang/config/gcc_hidden.h -include /root/firefox-clang/obj-x86_64-pc-linux-gnu/mozilla-config.h -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/system_wrappers -U _FORTIFY_SOURCE -D _FORTIFY_SOURCE=2 -D _GLIBCXX_ASSERTIONS -D DEBUG=1 -D MOZ_MEMORY_IMPL -D MOZ_SUPPORT_LEAKCHECKING -D MOZ_PHC -D MOZ_REPLACE_MALLOC_STATIC -D NON_RANDOM_ARENA_IDS -D MOZJEMALLOC_PROFILING_CALLBACKS -D MOZ_HAS_MOZGLUE -D IMPL_MFBT -I /root/firefox-clang/memory/build -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/memory/build -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include/nspr -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include/nss -D MOZILLA_CLIENT -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/x86_64-linux-gnu/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14/backward -internal-isystem /usr/lib/llvm-21/lib/clang/21/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-error=pessimizing-move -Wno-error=large-by-value-copy=128 -Wno-error=implicit-int-float-conversion -Wno-error=thread-safety-analysis -Wno-error=tautological-type-limit-compare -Wno-invalid-offsetof -Wno-range-loop-analysis -Wno-deprecated-anon-enum-enum-conversion -Wno-deprecated-enum-enum-conversion -Wno-deprecated-this-capture -Wno-inline-new-delete -Wno-error=deprecated-declarations -Wno-error=array-bounds -Wno-error=free-nonheap-object -Wno-error=atomic-alignment -Wno-error=deprecated-builtins -Wno-psabi -Wno-error=builtin-macro-redefined -Wno-vla-cxx-extension -Wno-unknown-warning-option -fdeprecated-macro -ferror-limit 19 -fstrict-flex-arrays=1 -stack-protector 2 -fstack-clash-protection -ftrivial-auto-var-init=pattern -fno-rtti -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -fno-sized-deallocation -fno-aligned-allocation -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig 
-D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2025-06-27-100320-3286336-1 -x c++ Unified_cpp_memory_build0.cpp
1/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2/* vim: set ts=8 sts=2 et sw=2 tw=80: */
3/* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7// PHC is a probabilistic heap checker. A tiny fraction of randomly chosen heap
8// allocations are subject to some expensive checking via the use of OS page
9// access protection. A failed check triggers a crash, whereupon useful
10// information about the failure is put into the crash report. The cost and
11// coverage for each user is minimal, but spread over the entire user base the
12// coverage becomes significant.
13//
14// The idea comes from Chromium, where it is called GWP-ASAN. (Firefox uses PHC
15// as the name because GWP-ASAN is long, awkward, and doesn't have any
16// particular meaning.)
17//
18// In the current implementation up to 64 allocations per process can become
19// PHC allocations. These allocations must be page-sized or smaller. Each PHC
20// allocation gets its own page, and when the allocation is freed its page is
21// marked inaccessible until the page is reused for another allocation. This
22// means that a use-after-free defect (which includes double-frees) will be
23// caught if the use occurs before the page is reused for another allocation.
24// The crash report will contain stack traces for the allocation site, the free
25// site, and the use-after-free site, which is often enough to diagnose the
26// defect.
27//
28// Also, each PHC allocation is followed by a guard page. The PHC allocation is
29// positioned so that its end abuts the guard page (or as close as possible,
30// given alignment constraints). This means that a bounds violation at the end
31// of the allocation (overflow) will be caught. The crash report will contain
32// stack traces for the allocation site and the bounds violation use site,
33// which is often enough to diagnose the defect.
34//
35// (A bounds violation at the start of the allocation (underflow) will not be
36// caught, unless it is sufficiently large to hit the preceding allocation's
37// guard page, which is not that likely. It would be possible to look more
38// assiduously for underflow by randomly placing some allocations at the end of
39// the page and some at the start of the page, and GWP-ASAN does this. PHC does
40// not, however, because overflow is likely to be much more common than
41// underflow in practice.)
42//
43// We use a simple heuristic to categorize a guard page access as overflow or
44// underflow: if the address falls in the lower half of the guard page, we
45// assume it is overflow, otherwise we assume it is underflow. More
46// sophisticated heuristics are possible, but this one is very simple, and it is
47// likely that most overflows/underflows in practice are very close to the page
48// boundary.
49//
50// The design space for the randomization strategy is large. The current
51// implementation has a large random delay before it starts operating, and a
52// small random delay between each PHC allocation attempt. Each freed PHC
53// allocation is quarantined for a medium random delay before being reused, in
54// order to increase the chance of catching UAFs.
55//
56// The basic cost of PHC's operation is as follows.
57//
58// - The physical memory cost is 64 pages plus some metadata (including stack
59// traces) for each page. This amounts to 256 KiB per process on
60// architectures with 4 KiB pages and 1024 KiB on macOS/AArch64 which uses
61// 16 KiB pages.
62//
63// - The virtual memory cost is the physical memory cost plus the guard pages:
64// another 64 pages. This amounts to another 256 KiB per process on
65// architectures with 4 KiB pages and 1024 KiB on macOS/AArch64 which uses
66// 16 KiB pages. PHC is currently only enabled on 64-bit platforms so the
67// impact of the virtual memory usage is negligible.
68//
69// - Every allocation requires a size check and a decrement-and-check of an
70// atomic counter. When the counter reaches zero a PHC allocation can occur,
71// which involves marking a page as accessible and getting a stack trace for
72// the allocation site. Otherwise, mozjemalloc performs the allocation.
73//
74// - Every deallocation requires a range check on the pointer to see if it
75// involves a PHC allocation. (The choice to only do PHC allocations that are
76// a page or smaller enables this range check, because the 64 pages are
77// contiguous. Allowing larger allocations would make this more complicated,
78// and we definitely don't want something as slow as a hash table lookup on
79// every deallocation.) PHC deallocations involve marking a page as
80// inaccessible and getting a stack trace for the deallocation site.
81//
82// Note that calls to realloc(), free(), and malloc_usable_size() will
83// immediately crash if the given pointer falls within a page allocation's
84// page, but does not point to the start of the allocation itself.
85//
86// void* p = malloc(64);
87// free(p + 1); // p+1 doesn't point to the allocation start; crash
88//
89// Such crashes will not have the PHC fields in the crash report.
90//
91// PHC-specific tests can be run with the following commands:
92// - gtests: `./mach gtest '*PHC*'`
93// - xpcshell-tests: `./mach test toolkit/crashreporter/test/unit`
94// - This runs some non-PHC tests as well.
95
96#include "PHC.h"
97
98#include <stdlib.h>
99#include <time.h>
100
101#include <algorithm>
102
103#ifdef XP_WIN
104# include <process.h>
105#else
106# include <sys/mman.h>
107# include <sys/types.h>
108# include <pthread.h>
109# include <unistd.h>
110#endif
111
112#include "mozjemalloc.h"
113#include "FdPrintf.h"
114#include "Mutex.h"
115#include "mozilla/Array.h"
116#include "mozilla/Assertions.h"
117#include "mozilla/Atomics.h"
118#include "mozilla/Attributes.h"
119#include "mozilla/CheckedInt.h"
120#include "mozilla/Maybe.h"
121#include "mozilla/StackWalk.h"
122#include "mozilla/ThreadLocal.h"
123#include "mozilla/XorShift128PlusRNG.h"
124
125using namespace mozilla;
126
127//---------------------------------------------------------------------------
128// Utilities
129//---------------------------------------------------------------------------
130
131#ifdef ANDROID
132// Android doesn't have pthread_atfork defined in pthread.h.
133extern "C" MOZ_EXPORT__attribute__((visibility("default"))) int pthread_atfork(void (*)(void), void (*)(void),
134 void (*)(void));
135#endif
136
137#ifndef DISALLOW_COPY_AND_ASSIGN
138# define DISALLOW_COPY_AND_ASSIGN(T) \
139 T(const T&); \
140 void operator=(const T&)
141#endif
142
143// This class provides infallible operations for the small number of heap
144// allocations that PHC does for itself. It would be nice if we could use the
145// InfallibleAllocPolicy from mozalloc, but PHC cannot use mozalloc.
146class InfallibleAllocPolicy {
147 public:
148 static void AbortOnFailure(const void* aP) {
149 if (!aP) {
150 MOZ_CRASH("PHC failed to allocate")do { do { } while (false); MOZ_ReportCrash("" "PHC failed to allocate"
, "/root/firefox-clang/memory/build/PHC.cpp", 150); AnnotateMozCrashReason
("MOZ_CRASH(" "PHC failed to allocate" ")"); do { MOZ_CrashSequence
(__null, 150); __attribute__((nomerge)) ::abort(); } while (false
); } while (false)
;
151 }
152 }
153
154 template <class T>
155 static T* new_() {
156 void* p = MozJemalloc::malloc(sizeof(T));
157 AbortOnFailure(p);
158 return new (p) T;
159 }
160};
161
162//---------------------------------------------------------------------------
163// Stack traces
164//---------------------------------------------------------------------------
165
166// This code is similar to the equivalent code within DMD.
167
168class StackTrace : public phc::StackTrace {
169 public:
170 StackTrace() = default;
171
172 void Clear() { mLength = 0; }
173
174 void Fill();
175
176 private:
177 static void StackWalkCallback(uint32_t aFrameNumber, void* aPc, void* aSp,
178 void* aClosure) {
179 StackTrace* st = (StackTrace*)aClosure;
180 MOZ_ASSERT(st->mLength < kMaxFrames);
181 st->mPcs[st->mLength] = aPc;
182 st->mLength++;
183 MOZ_ASSERT(st->mLength == aFrameNumber);
184 }
185};
186
187// WARNING WARNING WARNING: this function must only be called when PHC::mMutex
188// is *not* locked, otherwise we might get deadlocks.
189//
190// How? On Windows, MozStackWalk() can lock a mutex, M, from the shared library
191// loader. Another thread might call malloc() while holding M locked (when
192// loading a shared library) and try to lock PHC::mMutex, causing a deadlock.
193// So PHC::mMutex can't be locked during the call to MozStackWalk(). (For
194// details, see https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8. On
195// Linux, something similar can happen; see bug 824340. So we just disallow it
196// on all platforms.)
197//
198// In DMD, to avoid this problem we temporarily unlock the equivalent mutex for
199// the MozStackWalk() call. But that's grotty, and things are a bit different
200// here, so we just require that stack traces be obtained before locking
201// PHC::mMutex.
202//
203// Unfortunately, there is no reliable way at compile-time or run-time to ensure
204// this pre-condition. Hence this large comment.
205//
206void StackTrace::Fill() {
207 mLength = 0;
208
209// These ifdefs should be kept in sync with the conditions in
210// phc_implies_frame_pointers in build/moz.configure/memory.configure
211#if defined(XP_WIN) && defined(_M_IX86)
212 // This avoids MozStackWalk(), which causes unusably slow startup on Win32
213 // when it is called during static initialization (see bug 1241684).
214 //
215 // This code is cribbed from the Gecko Profiler, which also uses
216 // FramePointerStackWalk() on Win32: Registers::SyncPopulate() for the
217 // frame pointer, and GetStackTop() for the stack end.
218 CONTEXT context;
219 RtlCaptureContext(&context);
220 void** fp = reinterpret_cast<void**>(context.Ebp);
221
222 PNT_TIB pTib = reinterpret_cast<PNT_TIB>(NtCurrentTeb());
223 void* stackEnd = static_cast<void*>(pTib->StackBase);
224 FramePointerStackWalk(StackWalkCallback, kMaxFrames, this, fp, stackEnd);
225#elif defined(XP_DARWIN)
226 // This avoids MozStackWalk(), which has become unusably slow on Mac due to
227 // changes in libunwind.
228 //
229 // This code is cribbed from the Gecko Profiler, which also uses
230 // FramePointerStackWalk() on Mac: Registers::SyncPopulate() for the frame
231 // pointer, and GetStackTop() for the stack end.
232# pragma GCC diagnostic push
233# pragma GCC diagnostic ignored "-Wframe-address"
234 void** fp = reinterpret_cast<void**>(__builtin_frame_address(1));
235# pragma GCC diagnostic pop
236 void* stackEnd = pthread_get_stackaddr_np(pthread_self());
237 FramePointerStackWalk(StackWalkCallback, kMaxFrames, this, fp, stackEnd);
238#else
239 MozStackWalk(StackWalkCallback, nullptr, kMaxFrames, this);
240#endif
241}
242
243//---------------------------------------------------------------------------
244// Logging
245//---------------------------------------------------------------------------
246
247// Change this to 1 to enable some PHC logging. Useful for debugging.
248#define PHC_LOGGING 0
249
250static void Log(const char* fmt, ...);
251
252//---------------------------------------------------------------------------
253// Global state
254//---------------------------------------------------------------------------
255
256// Throughout this entire file time is measured as the number of sub-page
257// allocations performed (by PHC and mozjemalloc combined). `Time` is 64-bit
258// because we could have more than 2**32 allocations in a long-running session.
259// `Delay` is 32-bit because the delays used within PHC are always much smaller
260// than 2**32. Delay must be unsigned so that IsPowerOfTwo() can work on some
261// Delay values.
262using Time = uint64_t; // A moment in time.
263using Delay = uint32_t; // A time duration.
264static constexpr Delay DELAY_MAX = UINT32_MAX / 2;
265
266// PHC only runs if the page size is 4 KiB; anything more is uncommon and would
267// use too much memory. So we hardwire this size for all platforms but macOS
268// on ARM processors. For the latter we make an exception because the minimum
269// page size supported is 16KiB so there's no way to go below that.
270static const size_t kPageSize =
271#if defined(XP_DARWIN) && defined(__aarch64__)
272 16384
273#else
274 4096
275#endif
276 ;
277
278// We align the PHC area to a multiple of the jemalloc and JS GC chunk size
279// (both use 1MB aligned chunks) so that their address computations don't lead
280// from non-PHC memory into PHC memory causing misleading PHC stacks to be
281// attached to a crash report.
282static const size_t kPhcAlign = 1024 * 1024;
283
284static_assert(IsPowerOfTwo(kPhcAlign));
285static_assert((kPhcAlign % kPageSize) == 0);
286
287// There are two kinds of page.
288// - Allocation pages, from which allocations are made.
289// - Guard pages, which are never touched by PHC.
290//
291// These page kinds are interleaved; each allocation page has a guard page on
292// either side.
293#ifdef EARLY_BETA_OR_EARLIER
294static const size_t kNumAllocPages = kPageSize == 4096 ? 4096 : 1024;
295#else
296// This will use between 82KiB and 1.1MiB per process (depending on how many
297// objects are currently allocated). We will tune this in the future.
298static const size_t kNumAllocPages = kPageSize == 4096 ? 256 : 64;
299#endif
300static const size_t kNumAllPages = kNumAllocPages * 2 + 1;
301
302// The total size of the allocation pages and guard pages.
303static const size_t kAllPagesSize = kNumAllPages * kPageSize;
304
305// jemalloc adds a guard page to the end of our allocation, see the comment in
306// AllocVirtualAddresses() for more information.
307static const size_t kAllPagesJemallocSize = kAllPagesSize - kPageSize;
308
309// The amount to decrement from the shared allocation delay each time a thread's
310// local allocation delay reaches zero.
311static const Delay kDelayDecrementAmount = 256;
312
313// When PHC is disabled on the current thread wait this many allocations before
314// accessing sAllocDelay once more.
315static const Delay kDelayBackoffAmount = 64;
316
317// When PHC is disabled globally reset the shared delay by this many allocations
318// to keep code running on the fast path.
319static const Delay kDelayResetWhenDisabled = 64 * 1024;
320
321// The default state for PHC. Either Enabled or OnlyFree.
322#define DEFAULT_STATE mozilla::phc::OnlyFree
323
324// The maximum time.
325static const Time kMaxTime = ~(Time(0));
326
327// Truncate aRnd to the range (1 .. aAvgDelay*2). If aRnd is random, this
328// results in an average value of aAvgDelay + 0.5, which is close enough to
329// aAvgDelay. aAvgDelay must be a power-of-two for speed.
330constexpr Delay Rnd64ToDelay(Delay aAvgDelay, uint64_t aRnd) {
331 MOZ_ASSERT(IsPowerOfTwo(aAvgDelay), "must be a power of two");
332
333 return (aRnd & (uint64_t(aAvgDelay) * 2 - 1)) + 1;
334}
335
336static Delay CheckProbability(int64_t aProb) {
337 // Limit delays calculated from prefs to 0x80000000, this is the largest
338 // power-of-two that fits in a Delay since it is a uint32_t.
339 // The minimum is 2 that way not every allocation goes straight to PHC.
340 return RoundUpPow2(std::clamp(aProb, int64_t(2), int64_t(0x80000000)));
341}
342
343// Maps a pointer to a PHC-specific structure:
344// - Nothing
345// - A guard page (it is unspecified which one)
346// - An allocation page (with an index < kNumAllocPages)
347//
348// The standard way of handling a PtrKind is to check IsNothing(), and if that
349// fails, to check IsGuardPage(), and if that fails, to call AllocPage().
350class PtrKind {
351 private:
352 enum class Tag : uint8_t {
353 Nothing,
354 GuardPage,
355 AllocPage,
356 };
357
358 Tag mTag;
359 uintptr_t mIndex; // Only used if mTag == Tag::AllocPage.
360
361 public:
362 // Detect what a pointer points to. This constructor must be fast because it
363 // is called for every call to free(), realloc(), malloc_usable_size(), and
364 // jemalloc_ptr_info().
365 PtrKind(const void* aPtr, const uint8_t* aPagesStart,
366 const uint8_t* aPagesLimit) {
367 if (!(aPagesStart <= aPtr && aPtr < aPagesLimit)) {
368 mTag = Tag::Nothing;
369 } else {
370 uintptr_t offset = static_cast<const uint8_t*>(aPtr) - aPagesStart;
371 uintptr_t allPageIndex = offset / kPageSize;
372 MOZ_ASSERT(allPageIndex < kNumAllPages);
373 if (allPageIndex & 1) {
374 // Odd-indexed pages are allocation pages.
375 uintptr_t allocPageIndex = allPageIndex / 2;
376 MOZ_ASSERT(allocPageIndex < kNumAllocPages);
377 mTag = Tag::AllocPage;
378 mIndex = allocPageIndex;
379 } else {
380 // Even-numbered pages are guard pages.
381 mTag = Tag::GuardPage;
382 }
383 }
384 }
385
386 bool IsNothing() const { return mTag == Tag::Nothing; }
387 bool IsGuardPage() const { return mTag == Tag::GuardPage; }
388
389 // This should only be called after IsNothing() and IsGuardPage() have been
390 // checked and failed.
391 uintptr_t AllocPageIndex() const {
392 MOZ_RELEASE_ASSERT(mTag == Tag::AllocPage);
393 return mIndex;
394 }
395};
396
397// On MacOS, the first __thread/thread_local access calls malloc, which leads
398// to an infinite loop. So we use pthread-based TLS instead, which somehow
399// doesn't have this problem.
400#if !defined(XP_DARWIN)
401# define PHC_THREAD_LOCAL(T) MOZ_THREAD_LOCAL(T)
402#else
403# define PHC_THREAD_LOCAL(T) \
404 detail::ThreadLocal<T, detail::ThreadLocalKeyStorage>
405#endif
406
407enum class AllocPageState {
408 NeverAllocated = 0,
409 InUse = 1,
410 Freed = 2,
411};
412
413// Metadata for each allocation page.
414class AllocPageInfo {
415 public:
416 AllocPageInfo()
417 : mState(AllocPageState::NeverAllocated),
418 mBaseAddr(nullptr),
419 mReuseTime(0) {}
420
421 // The current allocation page state.
422 AllocPageState mState;
423
424 // The arena that the allocation is nominally from. This isn't meaningful
425 // within PHC, which has no arenas. But it is necessary for reallocation of
426 // page allocations as normal allocations, such as in this code:
427 //
428 // p = moz_arena_malloc(arenaId, 4096);
429 // realloc(p, 8192);
430 //
431 // The realloc is more than one page, and thus too large for PHC to handle.
432 // Therefore, if PHC handles the first allocation, it must ask mozjemalloc
433 // to allocate the 8192 bytes in the correct arena, and to do that, it must
434 // call MozJemalloc::moz_arena_malloc with the correct arenaId under the
435 // covers. Therefore it must record that arenaId.
436 //
437 // This field is also needed for jemalloc_ptr_info() to work, because it
438 // also returns the arena ID (but only in debug builds).
439 //
440 // - NeverAllocated: must be 0.
441 // - InUse | Freed: can be any valid arena ID value.
442 Maybe<arena_id_t> mArenaId;
443
444 // The starting address of the allocation. Will not be the same as the page
445 // address unless the allocation is a full page.
446 // - NeverAllocated: must be 0.
447 // - InUse | Freed: must be within the allocation page.
448 uint8_t* mBaseAddr;
449
450 // Usable size is computed as the number of bytes between the pointer and
451 // the end of the allocation page. This might be bigger than the requested
452 // size, especially if an outsized alignment is requested.
453 size_t UsableSize() const {
454 return mState == AllocPageState::NeverAllocated
455 ? 0
456 : kPageSize -
457 (reinterpret_cast<uintptr_t>(mBaseAddr) & (kPageSize - 1));
458 }
459
460 // The internal fragmentation for this allocation.
461 size_t FragmentationBytes() const {
462 MOZ_ASSERT(kPageSize >= UsableSize());
463 return mState == AllocPageState::InUse ? kPageSize - UsableSize() : 0;
464 }
465
466 // The allocation stack.
467 // - NeverAllocated: Nothing.
468 // - InUse | Freed: Some.
469 Maybe<StackTrace> mAllocStack;
470
471 // The free stack.
472 // - NeverAllocated | InUse: Nothing.
473 // - Freed: Some.
474 Maybe<StackTrace> mFreeStack;
475
476 // The time at which the page is available for reuse, as measured against
477 // mNow. When the page is in use this value will be kMaxTime.
478 // - NeverAllocated: must be 0.
479 // - InUse: must be kMaxTime.
480 // - Freed: must be > 0 and < kMaxTime.
481 Time mReuseTime;
482
483#if PHC_LOGGING
484 Time mFreeTime;
485#endif
486
487 // The next index for a free list of pages.
488 Maybe<uintptr_t> mNextPage;
489
490 void AssertInUse() const {
491 MOZ_ASSERT(mState == AllocPageState::InUse);
492 // There is nothing to assert about aPage.mArenaId.
493 MOZ_ASSERT(mBaseAddr);
494 MOZ_ASSERT(UsableSize() > 0);
495 MOZ_ASSERT(mAllocStack.isSome());
496 MOZ_ASSERT(mFreeStack.isNothing());
497 MOZ_ASSERT(mReuseTime == kMaxTime);
498 MOZ_ASSERT(!mNextPage);
499 }
500
501 void AssertNotInUse() const {
502 // We can assert a lot about `NeverAllocated` pages, but not much about
503 // `Freed` pages.
504#ifdef DEBUG
505 bool isFresh = mState == AllocPageState::NeverAllocated;
506 MOZ_ASSERT(isFresh || mState == AllocPageState::Freed);
507 MOZ_ASSERT_IF(isFresh, mArenaId == Nothing());
508 MOZ_ASSERT(isFresh == (mBaseAddr == nullptr));
509 MOZ_ASSERT(isFresh == (mAllocStack.isNothing()));
510 MOZ_ASSERT(isFresh == (mFreeStack.isNothing()));
511 MOZ_ASSERT(mReuseTime != kMaxTime);
512#endif
513 }
514
515 bool IsPageInUse() const { return mState == AllocPageState::InUse; }
516 bool IsPageFreed() const { return mState == AllocPageState::Freed; }
517
518 bool IsPageAllocatable(Time aNow) const {
519 return !IsPageInUse() && aNow >= mReuseTime;
520 }
521
522 void SetInUse(const Maybe<arena_id_t>& aArenaId, uint8_t* aBaseAddr,
523 const StackTrace& aAllocStack) {
524 AssertNotInUse();
525 mState = AllocPageState::InUse;
526 mArenaId = aArenaId;
527 mBaseAddr = aBaseAddr;
528 mAllocStack = Some(aAllocStack);
529 mFreeStack = Nothing();
530 mReuseTime = kMaxTime;
531
532 MOZ_ASSERT(!mNextPage);
533 }
534
535 void ResizeInUse(const Maybe<arena_id_t>& aArenaId, uint8_t* aNewBaseAddr,
536 const StackTrace& aAllocStack) {
537 AssertInUse();
538
539 // page.mState is not changed.
540 if (aArenaId.isSome()) {
541 // Crash if the arenas don't match.
542 MOZ_RELEASE_ASSERT(mArenaId == aArenaId);
543 }
544 mBaseAddr = aNewBaseAddr;
545 // We could just keep the original alloc stack, but the realloc stack is
546 // more recent and therefore seems more useful.
547 mAllocStack = Some(aAllocStack);
548 // mFreeStack is not changed.
549 // mReuseTime is not changed.
550 // mNextPage is not changed.
551 }
552
553 void SetPageFreed(const Maybe<arena_id_t>& aArenaId,
554 const StackTrace& aFreeStack, Delay aReuseDelay,
555 Time aNow) {
556 AssertInUse();
557
558 mState = AllocPageState::Freed;
559
560 // page.mArenaId is left unchanged, for jemalloc_ptr_info() calls that
561 // occur after freeing (e.g. in the PtrInfo test in TestJemalloc.cpp).
562 if (aArenaId.isSome()) {
563 // Crash if the arenas don't match.
564 MOZ_RELEASE_ASSERT(mArenaId == aArenaId);
565 }
566
567 // page.musableSize is left unchanged, for reporting on UAF, and for
568 // jemalloc_ptr_info() calls that occur after freeing (e.g. in the PtrInfo
569 // test in TestJemalloc.cpp).
570
571 // page.mAllocStack is left unchanged, for reporting on UAF.
572
573 mFreeStack = Some(aFreeStack);
574#if PHC_LOGGING
575 mFreeTime = aNow;
576#endif
577 mReuseTime = aNow + aReuseDelay;
578 }
579};
580
581// The virtual address space reserved by PHC. It is shared, immutable global
582// state. Initialized by phc_init() and never changed after that. phc_init()
583// runs early enough that no synchronization is needed.
584class PHCRegion {
585 private:
586 // The bounds of PHC's virtual address space. These are only ever set once
587 // before any threads are spawned, after that they're read only, and therefore
588 // can be accessed without a lock.
589 uint8_t* mPagesStart = nullptr;
590 uint8_t* mPagesLimit = nullptr;
591
592 public:
593 // Allocates the allocation pages and the guard pages, contiguously.
594 bool AllocVirtualAddresses() {
595 MOZ_ASSERT(!mPagesStart || !mPagesLimit);
596
597 // The memory allocated here is never freed, because it would happen at
598 // process termination when it would be of little use.
599
600 // We can rely on jemalloc's behaviour that when it allocates memory aligned
601 // with its own chunk size it will over-allocate and guarantee that the
602 // memory after the end of our allocation, but before the next chunk, is
603 // decommitted and inaccessible. Elsewhere in PHC we assume that we own
604 // that page (so that memory errors in it get caught by PHC) but here we
605 // use kAllPagesJemallocSize which subtracts jemalloc's guard page.
606 void* pages = MozJemalloc::memalign(kPhcAlign, kAllPagesJemallocSize);
607 if (!pages) {
608 return false;
609 }
610
611 // Make the pages inaccessible.
612#ifdef XP_WIN
613 if (!VirtualFree(pages, kAllPagesJemallocSize, MEM_DECOMMIT)) {
614 return false;
615 }
616#else
617 if (mmap(pages, kAllPagesJemallocSize, PROT_NONE,
618 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == MAP_FAILED) {
619 return false;
620 }
621#endif
622
623 mPagesStart = static_cast<uint8_t*>(pages);
624 mPagesLimit = mPagesStart + kAllPagesSize;
625 Log("AllocVirtualAddresses at %p..%p\n", mPagesStart, mPagesLimit);
626 return true;
627 }
628
629 constexpr PHCRegion() {}
630
631 class PtrKind PtrKind(const void* aPtr) {
632 MOZ_ASSERT(mPagesStart != nullptr && mPagesLimit != nullptr);
633 class PtrKind pk(aPtr, mPagesStart, mPagesLimit);
634 return pk;
635 }
636
637 bool IsInFirstGuardPage(const void* aPtr) {
638 MOZ_ASSERT(mPagesStart != nullptr && mPagesLimit != nullptr);
639 return mPagesStart <= aPtr && aPtr < mPagesStart + kPageSize;
640 }
641
642 // Get the address of the allocation page referred to via an index. Used when
643 // marking the page as accessible/inaccessible.
644 uint8_t* AllocPagePtr(uintptr_t aIndex) {
645 MOZ_ASSERT(mPagesStart != nullptr && mPagesLimit != nullptr);
646 MOZ_ASSERT(aIndex < kNumAllocPages);
647 // Multiply by two and add one to account for allocation pages *and* guard
648 // pages.
649 return mPagesStart + (2 * aIndex + 1) * kPageSize;
650 }
651};
652
653// Shared, mutable global state. Many fields are protected by mMutex; functions
654// that access those fields should take a PHCLock as proof that mMutex is held.
655// Other fields are TLS or Atomic and don't need the lock.
656class PHC {
Excessive padding in 'class PHC' (80 padding bytes, where 16 is optimal). Optimal fields order: mAvgFirstAllocDelay, mPhcState, mNow, mRNG, mFreePageListHead, mFreePageListTail, mMutex, mAllocPages, mAvgAllocDelay, mAvgPageReuseDelay, consider reordering the fields or adding explicit padding members
657 public:
658 // The RNG seeds here are poor, but non-reentrant since this can be called
659 // from malloc(). SetState() will reset the RNG later.
660 PHC() : mRNG(RandomSeed<1>(), RandomSeed<2>()) {
661 mMutex.Init();
662 if (!tlsIsDisabled.init()) {
663 MOZ_CRASH();
664 }
665 if (!tlsAllocDelay.init()) {
666 MOZ_CRASH();
667 }
668 if (!tlsLastDelay.init()) {
669 MOZ_CRASH();
670 }
671
672 // This constructor is part of PHC's very early initialisation,
673 // see phc_init(), and if PHC is default-on it'll start marking allocations
674 // and we must set up the delay. However, once XPCOM starts it'll call
675 // SetState() which will re-initialise the RNG and allocation delay.
676 MutexAutoLock lock(mMutex);
677
678 ForceSetNewAllocDelay(Rnd64ToDelay(mAvgFirstAllocDelay, Random64()));
679
680 for (uintptr_t i = 0; i < kNumAllocPages; i++) {
681 AppendPageToFreeList(i);
682 }
683 }
684
685 uint64_t Random64() MOZ_REQUIRES(mMutex) { return mRNG.next(); }
686
687 // Get the address of the allocation page referred to via an index. Used
688 // when checking pointers against page boundaries.
689 uint8_t* AllocPageBaseAddr(uintptr_t aIndex) MOZ_REQUIRES(mMutex) {
690 return mAllocPages[aIndex].mBaseAddr;
691 }
692
693 Maybe<arena_id_t> PageArena(uintptr_t aIndex) MOZ_REQUIRES(mMutex) {
694 const AllocPageInfo& page = mAllocPages[aIndex];
695 page.AssertInUse();
696
697 return page.mArenaId;
698 }
699
700 size_t PageUsableSize(uintptr_t aIndex) MOZ_REQUIRES(mMutex) {
701 const AllocPageInfo& page = mAllocPages[aIndex];
702 page.AssertInUse();
703
704 return page.UsableSize();
705 }
706
707 // The total fragmentation in PHC
708 size_t FragmentationBytes() MOZ_EXCLUDES(mMutex) {
709 MutexAutoLock lock(mMutex);
710
711 size_t sum = 0;
712 for (const auto& page : mAllocPages) {
713 sum += page.FragmentationBytes();
714 }
715 return sum;
716 }
717
718 // Used by the memory reporter to count usable space of in-use allocations.
719 size_t AllocatedBytes() MOZ_EXCLUDES(mMutex) {
720 MutexAutoLock lock(mMutex);
721
722 size_t allocated = 0;
723 for (const auto& page : mAllocPages) {
724 if (page.IsPageInUse()) {
725 allocated += page.UsableSize();
726 }
727 }
728 return allocated;
729 }
730
731 void SetPageInUse(uintptr_t aIndex, const Maybe<arena_id_t>& aArenaId,
732 uint8_t* aBaseAddr, const StackTrace& aAllocStack)
733 MOZ_REQUIRES(mMutex) {
734 mAllocPages[aIndex].SetInUse(aArenaId, aBaseAddr, aAllocStack);
735 }
736
737#if PHC_LOGGING
738 Time GetFreeTime(uintptr_t aIndex) const MOZ_REQUIRES(mMutex) {
739 return mAllocPages[aIndex].mFreeTime;
740 }
741#endif
742
743 void ResizePageInUse(uintptr_t aIndex, const Maybe<arena_id_t>& aArenaId,
744 uint8_t* aNewBaseAddr, const StackTrace& aAllocStack)
745 MOZ_REQUIRES(mMutex) {
746 mAllocPages[aIndex].ResizeInUse(aArenaId, aNewBaseAddr, aAllocStack);
747 };
748
749 void SetPageFreed(uintptr_t aIndex, const Maybe<arena_id_t>& aArenaId,
750 const StackTrace& aFreeStack, Delay aReuseDelay)
751 MOZ_REQUIRES(mMutex) {
752 AllocPageInfo& page = mAllocPages[aIndex];
753
754 page.SetPageFreed(aArenaId, aFreeStack, aReuseDelay, Now());
755
756 MOZ_ASSERT(!page.mNextPage);
757 AppendPageToFreeList(aIndex);
758 }
759
760 static void CrashOnGuardPage(void* aPtr) {
761 // An operation on a guard page? This is a bounds violation. Deliberately
762 // touch the page in question to cause a crash that triggers the usual PHC
763 // machinery.
764 Log("CrashOnGuardPage(%p), bounds violation\n", aPtr);
765 *static_cast<uint8_t*>(aPtr) = 0;
766 MOZ_CRASH("unreachable")do { do { } while (false); MOZ_ReportCrash("" "unreachable", "/root/firefox-clang/memory/build/PHC.cpp"
, 766); AnnotateMozCrashReason("MOZ_CRASH(" "unreachable" ")"
); do { MOZ_CrashSequence(__null, 766); __attribute__((nomerge
)) ::abort(); } while (false); } while (false)
;
767 }
768
769 void EnsureValidAndInUse(void* aPtr, uintptr_t aIndex) MOZ_REQUIRES(mMutex) {
770 const AllocPageInfo& page = mAllocPages[aIndex];
771
772 // The pointer must point to the start of the allocation.
773 MOZ_RELEASE_ASSERT(page.mBaseAddr == aPtr);
774
775 if (page.mState == AllocPageState::Freed) {
776 Log("EnsureValidAndInUse(%p), use-after-free\n", aPtr);
777 // An operation on a freed page? This is a particular kind of
778 // use-after-free. Deliberately touch the page in question, in order to
779 // cause a crash that triggers the usual PHC machinery. But unlock mMutex
780 // first, because that self-same PHC machinery needs to re-lock it, and
781 // the crash causes non-local control flow so mMutex won't be unlocked
782 // the normal way in the caller.
783 mMutex.Unlock();
784 *static_cast<uint8_t*>(aPtr) = 0;
785 MOZ_CRASH("unreachable")do { do { } while (false); MOZ_ReportCrash("" "unreachable", "/root/firefox-clang/memory/build/PHC.cpp"
, 785); AnnotateMozCrashReason("MOZ_CRASH(" "unreachable" ")"
); do { MOZ_CrashSequence(__null, 785); __attribute__((nomerge
)) ::abort(); } while (false); } while (false)
;
786 }
787 }
788
789 // This expects sPHC::mMutex to be locked but can't check it with a parameter
790 // since we try-lock it.
791 void FillAddrInfo(uintptr_t aIndex, const void* aBaseAddr, bool isGuardPage,
792 phc::AddrInfo& aOut) MOZ_REQUIRES(mMutex) {
793 const AllocPageInfo& page = mAllocPages[aIndex];
794 if (isGuardPage) {
795 aOut.mKind = phc::AddrInfo::Kind::GuardPage;
796 } else {
797 switch (page.mState) {
798 case AllocPageState::NeverAllocated:
799 aOut.mKind = phc::AddrInfo::Kind::NeverAllocatedPage;
800 break;
801
802 case AllocPageState::InUse:
803 aOut.mKind = phc::AddrInfo::Kind::InUsePage;
804 break;
805
806 case AllocPageState::Freed:
807 aOut.mKind = phc::AddrInfo::Kind::FreedPage;
808 break;
809
810 default:
811 MOZ_CRASH();
812 }
813 }
814 aOut.mBaseAddr = page.mBaseAddr;
815 aOut.mUsableSize = page.UsableSize();
816 aOut.mAllocStack = page.mAllocStack;
817 aOut.mFreeStack = page.mFreeStack;
818 }
819
820 void FillJemallocPtrInfo(const void* aPtr, uintptr_t aIndex,
821 jemalloc_ptr_info_t* aInfo) MOZ_REQUIRES(mMutex) {
822 const AllocPageInfo& page = mAllocPages[aIndex];
823 switch (page.mState) {
824 case AllocPageState::NeverAllocated:
825 break;
826
827 case AllocPageState::InUse: {
828 // Only return TagLiveAlloc if the pointer is within the bounds of the
829 // allocation's usable size.
830 uint8_t* base = page.mBaseAddr;
831 uint8_t* limit = base + page.UsableSize();
832 if (base <= aPtr && aPtr < limit) {
833 *aInfo = {TagLiveAlloc, page.mBaseAddr, page.UsableSize(),
834 page.mArenaId.valueOr(0)};
835 return;
836 }
837 break;
838 }
839
840 case AllocPageState::Freed: {
841 // Only return TagFreedAlloc if the pointer is within the bounds of the
842 // former allocation's usable size.
843 uint8_t* base = page.mBaseAddr;
844 uint8_t* limit = base + page.UsableSize();
845 if (base <= aPtr && aPtr < limit) {
846 *aInfo = {TagFreedAlloc, page.mBaseAddr, page.UsableSize(),
847 page.mArenaId.valueOr(0)};
848 return;
849 }
850 break;
851 }
852
853 default:
854 MOZ_CRASH();
855 }
856
857 // Pointers into guard pages will end up here, as will pointers into
858 // allocation pages that aren't within the allocation's bounds.
859 *aInfo = {TagUnknown, nullptr, 0, 0};
860 }
861
862#ifndef XP_WIN
863 static void prefork() MOZ_NO_THREAD_SAFETY_ANALYSIS {
864 PHC::sPHC->mMutex.Lock();
865 }
866 static void postfork_parent() MOZ_NO_THREAD_SAFETY_ANALYSIS {
867 PHC::sPHC->mMutex.Unlock();
868 }
869 static void postfork_child() { PHC::sPHC->mMutex.Init(); }
870#endif
871
872 void IncPageAllocHits() MOZ_REQUIRES(mMutex) {
873#if PHC_LOGGING
874 mPageAllocHits++;
875#endif
876 }
877 void IncPageAllocMisses() MOZ_REQUIRES(mMutex) {
878#if PHC_LOGGING
879 mPageAllocMisses++;
880#endif
881 }
882
883 phc::PHCStats GetPageStatsLocked() MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
884 phc::PHCStats stats;
885
886 for (const auto& page : mAllocPages) {
887 stats.mSlotsAllocated += page.IsPageInUse() ? 1 : 0;
888 stats.mSlotsFreed += page.IsPageFreed() ? 1 : 0;
889 }
890 stats.mSlotsUnused =
891 kNumAllocPages - stats.mSlotsAllocated - stats.mSlotsFreed;
892
893 return stats;
894 }
895
896 phc::PHCStats GetPageStats() MOZ_EXCLUDES(mMutex)__attribute__((locks_excluded(mMutex))) {
897 MutexAutoLock lock(mMutex);
898 return GetPageStatsLocked();
899 }
900
901#if PHC_LOGGING0
902 size_t PageAllocHits() MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) { return mPageAllocHits; }
903 size_t PageAllocAttempts() MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
904 return mPageAllocHits + mPageAllocMisses;
905 }
906
907 // This is an integer because FdPrintf only supports integer printing.
908 size_t PageAllocHitRate() MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
909 return mPageAllocHits * 100 / (mPageAllocHits + mPageAllocMisses);
910 }
911#endif
912
913 void LogNoAlloc(size_t aReqSize, size_t aAlignment, Delay newAllocDelay);
914
915 // Should we make new PHC allocations?
916 bool ShouldMakeNewAllocations() const {
917 return mPhcState == mozilla::phc::Enabled;
918 }
919
920 using PHCState = mozilla::phc::PHCState;
921 void SetState(PHCState aState) {
922 if (mPhcState != PHCState::Enabled && aState == PHCState::Enabled) {
923 MutexAutoLock lock(mMutex);
924 // Reset the RNG at this point with a better seed.
925 ResetRNG();
926 ForceSetNewAllocDelay(Rnd64ToDelay(mAvgFirstAllocDelay, Random64()));
927 }
928
929 mPhcState = aState;
930 }
931
932 void ResetRNG() MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
933 mRNG = non_crypto::XorShift128PlusRNG(RandomSeed<0>(), RandomSeed<1>());
934 }
935
936 void SetProbabilities(int64_t aAvgDelayFirst, int64_t aAvgDelayNormal,
937 int64_t aAvgDelayPageReuse) MOZ_EXCLUDES(mMutex)__attribute__((locks_excluded(mMutex))) {
938 MutexAutoLock lock(mMutex);
939
940 mAvgFirstAllocDelay = CheckProbability(aAvgDelayFirst);
941 mAvgAllocDelay = CheckProbability(aAvgDelayNormal);
942 mAvgPageReuseDelay = CheckProbability(aAvgDelayPageReuse);
943 }
944
945 static void DisableOnCurrentThread() {
946 MOZ_ASSERT(!tlsIsDisabled.get())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(!tlsIsDisabled.get())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(!tlsIsDisabled.get()))), 0))
) { do { } while (false); MOZ_ReportAssertionFailure("!tlsIsDisabled.get()"
, "/root/firefox-clang/memory/build/PHC.cpp", 946); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "!tlsIsDisabled.get()" ")"); do { MOZ_CrashSequence
(__null, 946); __attribute__((nomerge)) ::abort(); } while (false
); } } while (false)
;
947 tlsIsDisabled.set(true);
948 }
949
950 void EnableOnCurrentThread() {
951 MOZ_ASSERT(tlsIsDisabled.get())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(tlsIsDisabled.get())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(tlsIsDisabled.get()))), 0)))
{ do { } while (false); MOZ_ReportAssertionFailure("tlsIsDisabled.get()"
, "/root/firefox-clang/memory/build/PHC.cpp", 951); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "tlsIsDisabled.get()" ")"); do { MOZ_CrashSequence
(__null, 951); __attribute__((nomerge)) ::abort(); } while (false
); } } while (false)
;
952 tlsIsDisabled.set(false);
953 }
954
955 static bool IsDisabledOnCurrentThread() { return tlsIsDisabled.get(); }
956
957 static Time Now() {
958 if (!sPHC) {
959 return 0;
960 }
961
962 return sPHC->mNow;
963 }
964
965 void AdvanceNow(uint32_t delay = 0) {
966 mNow += tlsLastDelay.get() - delay;
967 tlsLastDelay.set(delay);
968 }
969
970 // Decrements the delay and returns true if it's time to make a new PHC
971 // allocation.
972 static bool DecrementDelay() {
973 const Delay alloc_delay = tlsAllocDelay.get();
974
975 if (MOZ_LIKELY(alloc_delay > 0)(__builtin_expect(!!(alloc_delay > 0), 1))) {
976 tlsAllocDelay.set(alloc_delay - 1);
977 return false;
978 }
979    // The local delay has expired, so check the shared delay. This path is also
980    // executed on a new thread's first allocation; the result is the same: all
981 // the thread's TLS fields will be initialised.
982
983 // This accesses sPHC but we want to ensure it's still a static member
984 // function so that sPHC isn't dereferenced until after the hot path above.
985 MOZ_ASSERT(sPHC)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(sPHC)>::isValid, "invalid assertion condition"); if
((__builtin_expect(!!(!(!!(sPHC))), 0))) { do { } while (false
); MOZ_ReportAssertionFailure("sPHC", "/root/firefox-clang/memory/build/PHC.cpp"
, 985); AnnotateMozCrashReason("MOZ_ASSERT" "(" "sPHC" ")"); do
{ MOZ_CrashSequence(__null, 985); __attribute__((nomerge)) ::
abort(); } while (false); } } while (false)
;
986 sPHC->AdvanceNow();
987
988 // Use an atomic fetch-and-subtract. This uses unsigned underflow semantics
989 // to avoid doing a full compare-and-swap.
990 Delay new_delay = (sAllocDelay -= kDelayDecrementAmount);
991 Delay old_delay = new_delay + kDelayDecrementAmount;
992 if (MOZ_LIKELY(new_delay < DELAY_MAX)(__builtin_expect(!!(new_delay < DELAY_MAX), 1))) {
993      // Normal case: we decremented the shared delay but it's not yet
994 // underflowed.
995 tlsAllocDelay.set(kDelayDecrementAmount);
996 tlsLastDelay.set(kDelayDecrementAmount);
997 Log("Update sAllocDelay <- %zu, tlsAllocDelay <- %zu\n",
998 size_t(new_delay), size_t(kDelayDecrementAmount));
999 return false;
1000 }
1001
1002 if (old_delay < new_delay) {
1003 // The shared delay only just underflowed, so unless we hit exactly zero
1004 // we should set our local counter and continue.
1005 Log("Update sAllocDelay <- %zu, tlsAllocDelay <- %zu\n",
1006 size_t(new_delay), size_t(old_delay));
1007 if (old_delay == 0) {
1008        // We don't need to set tlsAllocDelay because it's already zero; we know
1009 // because the condition at the beginning of this function failed.
1010 return true;
1011 }
1012 tlsAllocDelay.set(old_delay);
1013 tlsLastDelay.set(old_delay);
1014 return false;
1015 }
1016
1017    // The delay underflowed on another thread or in a previous failed allocation
1018    // by this thread. Return true and attempt the next allocation; if the
1019    // other thread wins, we'll check for that before committing.
1020 Log("Update sAllocDelay <- %zu, tlsAllocDelay <- %zu\n", size_t(new_delay),
1021 size_t(alloc_delay));
1022 return true;
1023 }
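
  // For illustration only: a minimal standalone sketch of the fetch-and-subtract
  // idiom DecrementDelay() uses above, written with plain std::atomic rather than
  // the mozilla types. All names and constants here are placeholders, and the
  // snippet would live in its own translation unit, not inside PHC.
  #include <atomic>
  #include <cstdint>

  namespace delay_sketch {
  constexpr uint32_t kDelayMax = 1u << 31;  // values >= this mean "underflowed"
  std::atomic<uint32_t> sharedDelay{5};     // shared budget of allocations to skip

  // Returns true when the budget is exhausted, i.e. it is time to attempt an
  // allocation; false while the counter is still counting down normally.
  bool DecrementShared(uint32_t amount) {
    uint32_t newValue = sharedDelay.fetch_sub(amount) - amount;  // value after our subtraction
    uint32_t oldValue = newValue + amount;                       // value we saw before it
    if (newValue < kDelayMax) {
      return false;  // normal decrement, no underflow yet
    }
    if (oldValue < newValue) {
      // Our subtraction is the one that wrapped past zero: the budget ran out
      // on this very call.
      return true;
    }
    // The counter had already wrapped before we touched it (another thread or
    // an earlier failed attempt), so it is also time to try an allocation.
    return true;
  }
  }  // namespace delay_sketch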
1024
1025 static void ResetLocalAllocDelay(Delay aDelay = 0) {
1026 // We could take some delay from the shared delay but we'd need a
1027 // compare-and-swap because this is called on paths that don't make
1028 // allocations. Or we can set the local delay to zero and let it get
1029 // initialised on the next allocation.
1030 tlsAllocDelay.set(aDelay);
1031 tlsLastDelay.set(aDelay);
1032 }
1033
1034 static void ForceSetNewAllocDelay(Delay aNewAllocDelay) {
1035 Log("Setting sAllocDelay <- %zu\n", size_t(aNewAllocDelay));
1036 sAllocDelay = aNewAllocDelay;
1037 ResetLocalAllocDelay();
1038 }
1039
1040 // Set a new allocation delay and return true if the delay was less than zero
1041  // (but it's unsigned so interpret it as signed), indicating that we won the
1042 // race to make the next allocation.
1043 static bool SetNewAllocDelay(Delay aNewAllocDelay) {
1044 bool cas_retry;
1045 do {
1046      // We read the current delay on every iteration; we consider that the PHC
1047 // allocation is still "up for grabs" if sAllocDelay < 0. This is safe
1048      // even while other threads continue to fetch-and-subtract sAllocDelay
1049 // in DecrementDelay(), up to DELAY_MAX (2^31) calls to DecrementDelay().
1050 Delay read_delay = sAllocDelay;
1051 if (read_delay < DELAY_MAX) {
1052 // Another thread already set a valid delay.
1053 Log("Observe delay %zu this thread lost the race\n",
1054 size_t(read_delay));
1055 ResetLocalAllocDelay();
1056 return false;
1057 } else {
1058 Log("Preparing for CAS, read sAllocDelay %zu\n", size_t(read_delay));
1059 }
1060
1061 cas_retry = !sAllocDelay.compareExchange(read_delay, aNewAllocDelay);
1062 if (cas_retry) {
1063 Log("Lost the CAS, sAllocDelay is now %zu\n", size_t(sAllocDelay));
1064 cpu_pause();
1065 // We raced against another thread and lost.
1066 }
1067 } while (cas_retry);
1068 Log("Won the CAS, set sAllocDelay = %zu\n", size_t(sAllocDelay));
1069 ResetLocalAllocDelay();
1070 return true;
1071 }
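
  // For illustration only: the same race, sketched with std::atomic's
  // compare_exchange. Many threads may observe the underflowed counter, but only
  // the one whose exchange succeeds installs the next delay; the rest back off.
  // Standalone placeholder code, not part of PHC.
  #include <atomic>
  #include <cstdint>

  namespace cas_sketch {
  constexpr uint32_t kDelayMax = 1u << 31;
  std::atomic<uint32_t> sharedDelay{0xFFFFFFF0u};  // starts in an "underflowed" state

  // Returns true if this thread won the race and installed aNewDelay.
  bool TryInstall(uint32_t aNewDelay) {
    uint32_t observed = sharedDelay.load();
    while (observed >= kDelayMax) {
      if (sharedDelay.compare_exchange_weak(observed, aNewDelay)) {
        return true;  // we won; other threads now count down from aNewDelay
      }
      // compare_exchange_weak refreshed `observed`; loop and re-check it.
    }
    return false;  // someone else already installed a valid delay
  }
  }  // namespace cas_sketch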
1072
1073 static Delay LocalAllocDelay() { return tlsAllocDelay.get(); }
1074 static Delay SharedAllocDelay() { return sAllocDelay; }
1075
1076 static Delay LastDelay() { return tlsLastDelay.get(); }
1077
1078 Maybe<uintptr_t> PopNextFreeIfAllocatable(Time now) MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
1079 if (!mFreePageListHead) {
1080 return Nothing();
1081 }
1082
1083 uintptr_t index = mFreePageListHead.value();
1084
1085 MOZ_RELEASE_ASSERT(index < kNumAllocPages)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(index < kNumAllocPages)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(index < kNumAllocPages)))
, 0))) { do { } while (false); MOZ_ReportAssertionFailure("index < kNumAllocPages"
, "/root/firefox-clang/memory/build/PHC.cpp", 1085); AnnotateMozCrashReason
("MOZ_RELEASE_ASSERT" "(" "index < kNumAllocPages" ")"); do
{ MOZ_CrashSequence(__null, 1085); __attribute__((nomerge)) ::
abort(); } while (false); } } while (false)
;
1086 AllocPageInfo& page = mAllocPages[index];
1087 page.AssertNotInUse();
1088
1089 if (!page.IsPageAllocatable(now)) {
1090 return Nothing();
1091 }
1092
1093 mFreePageListHead = page.mNextPage;
1094 page.mNextPage = Nothing();
1095 if (!mFreePageListHead) {
1096 mFreePageListTail = Nothing();
1097 }
1098
1099 return Some(index);
1100 }
1101
1102 void UnpopNextFree(uintptr_t index) MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
1103 AllocPageInfo& page = mAllocPages[index];
1104 MOZ_ASSERT(!page.mNextPage)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(!page.mNextPage)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(!page.mNextPage))), 0))) { do
{ } while (false); MOZ_ReportAssertionFailure("!page.mNextPage"
, "/root/firefox-clang/memory/build/PHC.cpp", 1104); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "!page.mNextPage" ")"); do { MOZ_CrashSequence
(__null, 1104); __attribute__((nomerge)) ::abort(); } while (
false); } } while (false)
;
1105
1106 page.mNextPage = mFreePageListHead;
1107 mFreePageListHead = Some(index);
1108 if (!mFreePageListTail) {
1109 mFreePageListTail = Some(index);
1110 }
1111 }
1112
1113 void AppendPageToFreeList(uintptr_t aIndex) MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
1114 MOZ_RELEASE_ASSERT(aIndex < kNumAllocPages)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(aIndex < kNumAllocPages)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(aIndex < kNumAllocPages))
), 0))) { do { } while (false); MOZ_ReportAssertionFailure("aIndex < kNumAllocPages"
, "/root/firefox-clang/memory/build/PHC.cpp", 1114); AnnotateMozCrashReason
("MOZ_RELEASE_ASSERT" "(" "aIndex < kNumAllocPages" ")"); do
{ MOZ_CrashSequence(__null, 1114); __attribute__((nomerge)) ::
abort(); } while (false); } } while (false)
;
1115 AllocPageInfo& page = mAllocPages[aIndex];
1116 MOZ_ASSERT(!page.mNextPage)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(!page.mNextPage)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(!page.mNextPage))), 0))) { do
{ } while (false); MOZ_ReportAssertionFailure("!page.mNextPage"
, "/root/firefox-clang/memory/build/PHC.cpp", 1116); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "!page.mNextPage" ")"); do { MOZ_CrashSequence
(__null, 1116); __attribute__((nomerge)) ::abort(); } while (
false); } } while (false)
;
1117 MOZ_ASSERT(mFreePageListHead != Some(aIndex) &&do { static_assert( mozilla::detail::AssertionConditionType<
decltype(mFreePageListHead != Some(aIndex) && mFreePageListTail
!= Some(aIndex))>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(mFreePageListHead != Some(aIndex
) && mFreePageListTail != Some(aIndex)))), 0))) { do {
} while (false); MOZ_ReportAssertionFailure("mFreePageListHead != Some(aIndex) && mFreePageListTail != Some(aIndex)"
, "/root/firefox-clang/memory/build/PHC.cpp", 1118); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "mFreePageListHead != Some(aIndex) && mFreePageListTail != Some(aIndex)"
")"); do { MOZ_CrashSequence(__null, 1118); __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
1118 mFreePageListTail != Some(aIndex))do { static_assert( mozilla::detail::AssertionConditionType<
decltype(mFreePageListHead != Some(aIndex) && mFreePageListTail
!= Some(aIndex))>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(mFreePageListHead != Some(aIndex
) && mFreePageListTail != Some(aIndex)))), 0))) { do {
} while (false); MOZ_ReportAssertionFailure("mFreePageListHead != Some(aIndex) && mFreePageListTail != Some(aIndex)"
, "/root/firefox-clang/memory/build/PHC.cpp", 1118); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "mFreePageListHead != Some(aIndex) && mFreePageListTail != Some(aIndex)"
")"); do { MOZ_CrashSequence(__null, 1118); __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1119
1120 if (!mFreePageListTail) {
1121      // The list is empty, so this page will become both the beginning and the end.
1122 MOZ_ASSERT(!mFreePageListHead)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(!mFreePageListHead)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(!mFreePageListHead))), 0))) {
do { } while (false); MOZ_ReportAssertionFailure("!mFreePageListHead"
, "/root/firefox-clang/memory/build/PHC.cpp", 1122); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "!mFreePageListHead" ")"); do { MOZ_CrashSequence
(__null, 1122); __attribute__((nomerge)) ::abort(); } while (
false); } } while (false)
;
1123 mFreePageListHead = Some(aIndex);
1124 } else {
1125 MOZ_ASSERT(mFreePageListTail.value() < kNumAllocPages)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(mFreePageListTail.value() < kNumAllocPages)>::
isValid, "invalid assertion condition"); if ((__builtin_expect
(!!(!(!!(mFreePageListTail.value() < kNumAllocPages))), 0)
)) { do { } while (false); MOZ_ReportAssertionFailure("mFreePageListTail.value() < kNumAllocPages"
, "/root/firefox-clang/memory/build/PHC.cpp", 1125); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "mFreePageListTail.value() < kNumAllocPages"
")"); do { MOZ_CrashSequence(__null, 1125); __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1126 AllocPageInfo& tail_page = mAllocPages[mFreePageListTail.value()];
1127 MOZ_ASSERT(!tail_page.mNextPage)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(!tail_page.mNextPage)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(!tail_page.mNextPage))), 0))
) { do { } while (false); MOZ_ReportAssertionFailure("!tail_page.mNextPage"
, "/root/firefox-clang/memory/build/PHC.cpp", 1127); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "!tail_page.mNextPage" ")"); do { MOZ_CrashSequence
(__null, 1127); __attribute__((nomerge)) ::abort(); } while (
false); } } while (false)
;
1128 tail_page.mNextPage = Some(aIndex);
1129 }
1130 page.mNextPage = Nothing();
1131 mFreePageListTail = Some(aIndex);
1132 }
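
  // For illustration only: a compact standalone sketch of the index-linked FIFO
  // used for free pages above. Slots are array entries, links are optional
  // indices, allocation pops from the head and freeing appends to the tail, so
  // the list stays ordered by "time freed". Placeholder types, not PHC's.
  #include <array>
  #include <cstddef>
  #include <optional>

  namespace freelist_sketch {
  constexpr std::size_t kSlots = 4;

  struct Slot {
    std::optional<std::size_t> next;  // index of the next slot in the free list
  };

  struct FreeList {
    std::array<Slot, kSlots> slots{};
    std::optional<std::size_t> head;
    std::optional<std::size_t> tail;

    void Append(std::size_t i) {  // the freed slot becomes the new tail
      slots[i].next.reset();
      if (!tail) {
        head = i;  // list was empty; this slot is also the head
      } else {
        slots[*tail].next = i;
      }
      tail = i;
    }

    std::optional<std::size_t> Pop() {  // take the oldest-freed slot, if any
      if (!head) {
        return std::nullopt;
      }
      std::size_t i = *head;
      head = slots[i].next;
      slots[i].next.reset();
      if (!head) {
        tail.reset();  // we removed the last element
      }
      return i;
    }
  };
  }  // namespace freelist_sketch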
1133
1134 private:
1135 template <int N>
1136 uint64_t RandomSeed() {
1137 // An older version of this code used RandomUint64() here, but on Mac that
1138 // function uses arc4random(), which can allocate, which would cause
1139 // re-entry, which would be bad. So we just use time(), a local variable
1140 // address and a global variable address. These are mediocre sources of
1141 // entropy, but good enough for PHC.
1142 static_assert(N == 0 || N == 1 || N == 2, "must be 0, 1 or 2");
1143 uint64_t seed;
1144 if (N == 0) {
1145 time_t t = time(nullptr);
1146 seed = t ^ (t << 32);
1147 } else if (N == 1) {
1148 seed = uintptr_t(&seed) ^ (uintptr_t(&seed) << 32);
1149 } else {
1150 seed = uintptr_t(&sRegion) ^ (uintptr_t(&sRegion) << 32);
1151 }
1152 return seed;
1153 }
1154
1155 public:
1156 // Attempt a page allocation if the time and the size are right. Allocated
1157 // memory is zeroed if aZero is true. On failure, the caller should attempt a
1158 // normal allocation via MozJemalloc. Can be called in a context where
1159 // PHC::mMutex is locked.
1160 void* MaybePageAlloc(const Maybe<arena_id_t>& aArenaId, size_t aReqSize,
1161 size_t aAlignment, bool aZero);
1162
1163 void FreePage(uintptr_t aIndex, const Maybe<arena_id_t>& aArenaId,
1164 const StackTrace& aFreeStack, Delay aReuseDelay);
1165
1166 // This handles both free and moz_arena_free.
1167 void PageFree(const Maybe<arena_id_t>& aArenaId, void* aPtr);
1168
1169 Maybe<void*> PageRealloc(const Maybe<arena_id_t>& aArenaId, void* aOldPtr,
1170 size_t aNewSize);
1171
1172 void PagePtrInfo(const void* aPtr, jemalloc_ptr_info_t* aInfo);
1173
1174 size_t PtrUsableSize(usable_ptr_t aPtr);
1175
1176 bool IsPHCAllocation(const void* aPtr, mozilla::phc::AddrInfo* aOut);
1177
1178 void Crash(const char* aMessage);
1179
1180 private:
1181  // To improve locality we try to order the fields of this class by how
1182  // frequently they are modified, placing the fields that are modified together
1183  // early and ideally within a single cache line.
1184 // The mutex that protects the other members.
1185 alignas(kCacheLineSize) Mutex mMutex MOZ_UNANNOTATED;
1186
1187 // The current time. We use ReleaseAcquire semantics since we attempt to
1188 // update this by larger increments and don't want to lose an entire update.
1189 Atomic<Time, ReleaseAcquire> mNow;
1190
1191 // This will only ever be updated from one thread. The other threads should
1192 // eventually get the update.
1193 Atomic<PHCState, Relaxed> mPhcState =
1194 Atomic<PHCState, Relaxed>(DEFAULT_STATEmozilla::phc::OnlyFree);
1195
1196 // RNG for deciding which allocations to treat specially. It doesn't need to
1197 // be high quality.
1198 //
1199 // This is a raw pointer for the reason explained in the comment above
1200 // PHC's constructor. Don't change it to UniquePtr or anything like that.
1201 non_crypto::XorShift128PlusRNG mRNG MOZ_GUARDED_BY(mMutex)__attribute__((guarded_by(mMutex)));
1202
1203 // A linked list of free pages. Pages are allocated from the head of the list
1204 // and returned to the tail. The list will naturally order itself by "last
1205 // freed time" so if the head of the list can't satisfy an allocation due to
1206 // time then none of the pages can.
1207 Maybe<uintptr_t> mFreePageListHead MOZ_GUARDED_BY(mMutex)__attribute__((guarded_by(mMutex)));
1208 Maybe<uintptr_t> mFreePageListTail MOZ_GUARDED_BY(mMutex)__attribute__((guarded_by(mMutex)));
1209
1210#if PHC_LOGGING0
1211 // How many allocations that could have been page allocs actually were? As
1212  // constrained by kNumAllocPages. If the hit ratio isn't close to 100% it's
1213 // likely that the global constants are poorly chosen.
1214 size_t mPageAllocHits MOZ_GUARDED_BY(mMutex)__attribute__((guarded_by(mMutex))) = 0;
1215 size_t mPageAllocMisses MOZ_GUARDED_BY(mMutex)__attribute__((guarded_by(mMutex))) = 0;
1216#endif
1217
1218 // The remaining fields are updated much less often, place them on the next
1219 // cache line.
1220
1221 // The average delay before doing any page allocations at the start of a
1222 // process. Note that roughly 1 million allocations occur in the main process
1223  // while starting the browser. The delay range is 1..mAvgFirstAllocDelay*2.
1224 alignas(kCacheLineSize) Delay mAvgFirstAllocDelay
1225 MOZ_GUARDED_BY(mMutex)__attribute__((guarded_by(mMutex))) = 64 * 1024;
1226
1227 // The average delay until the next attempted page allocation, once we get
1228  // past the first delay. The delay range is 1..mAvgAllocDelay*2.
1229 Delay mAvgAllocDelay MOZ_GUARDED_BY(mMutex)__attribute__((guarded_by(mMutex))) = 16 * 1024;
1230
1231 // The average delay before reusing a freed page. Should be significantly
1232  // larger than mAvgAllocDelay, otherwise there's not much point in having it.
1233  // The delay range is (mAvgPageReuseDelay / 2)..(mAvgPageReuseDelay / 2 * 3). This is
1234 // different to the other delay ranges in not having a minimum of 1, because
1235 // that's such a short delay that there is a high likelihood of bad stacks in
1236 // any crash report.
1237 Delay mAvgPageReuseDelay MOZ_GUARDED_BY(mMutex)__attribute__((guarded_by(mMutex))) = 256 * 1024;
1238
1239 // When true, PHC does as little as possible.
1240 //
1241 // (a) It does not allocate any new page allocations.
1242 //
1243 // (b) It avoids doing any operations that might call malloc/free/etc., which
1244 // would cause re-entry into PHC. (In practice, MozStackWalk() is the
1245 // only such operation.) Note that calls to the functions in MozJemalloc
1246 // are ok.
1247 //
1248 // For example, replace_malloc() will just fall back to mozjemalloc. However,
1249 // operations involving existing allocations are more complex, because those
1250 // existing allocations may be page allocations. For example, if
1251 // replace_free() is passed a page allocation on a PHC-disabled thread, it
1252 // will free the page allocation in the usual way, but it will get a dummy
1253 // freeStack in order to avoid calling MozStackWalk(), as per (b) above.
1254 //
1255 // This single disabling mechanism has two distinct uses.
1256 //
1257 // - It's used to prevent re-entry into PHC, which can cause correctness
1258 // problems. For example, consider this sequence.
1259 //
1260 // 1. enter replace_free()
1261 // 2. which calls PageFree()
1262 // 3. which calls MozStackWalk()
1263 // 4. which locks a mutex M, and then calls malloc
1264 // 5. enter replace_malloc()
1265 // 6. which calls MaybePageAlloc()
1266 // 7. which calls MozStackWalk()
1267 // 8. which (re)locks a mutex M --> deadlock
1268 //
1269 // We avoid this sequence by "disabling" the thread in PageFree() (at step
1270 // 2), which causes MaybePageAlloc() to fail, avoiding the call to
1271 // MozStackWalk() (at step 7).
1272 //
1273 // In practice, realloc or free of a PHC allocation is unlikely on a thread
1274 // that is disabled because of this use: MozStackWalk() will probably only
1275 // realloc/free allocations that it allocated itself, but those won't be
1276 // page allocations because PHC is disabled before calling MozStackWalk().
1277 //
1278 // (Note that MaybePageAlloc() could safely do a page allocation so long as
1279 // it avoided calling MozStackWalk() by getting a dummy allocStack. But it
1280 // wouldn't be useful, and it would prevent the second use below.)
1281 //
1282 // - It's used to prevent PHC allocations in some tests that rely on
1283 // mozjemalloc's exact allocation behaviour, which PHC does not replicate
1284 // exactly. (Note that (b) isn't necessary for this use -- MozStackWalk()
1285 // could be safely called -- but it is necessary for the first use above.)
1286 //
1287 static PHC_THREAD_LOCAL(bool)__thread ::mozilla::detail::ThreadLocal< bool, ::mozilla::
detail::ThreadLocalNativeStorage>
tlsIsDisabled;
1288
1289 // Delay until the next attempt at a page allocation. The delay is made up of
1290  // two parts: the global delay and each thread's local portion of that delay:
1291  //
1292  //   delay = sAllocDelay + sum_all_threads(tlsAllocDelay)
1293 //
1294 // Threads use their local delay to reduce contention on the shared delay.
1295 //
1296 // See the comment in MaybePageAlloc() for an explanation of why it uses
1297 // ReleaseAcquire semantics.
1298 static Atomic<Delay, ReleaseAcquire> sAllocDelay;
1299 static PHC_THREAD_LOCAL(Delay)__thread ::mozilla::detail::ThreadLocal< Delay, ::mozilla::
detail::ThreadLocalNativeStorage>
tlsAllocDelay;
1300
1301 // The last value we set tlsAllocDelay to before starting to count down.
1302 static PHC_THREAD_LOCAL(Delay)__thread ::mozilla::detail::ThreadLocal< Delay, ::mozilla::
detail::ThreadLocalNativeStorage>
tlsLastDelay;
1303
1304  // Using Array from mfbt/Array.h rather than a plain C array makes MOZ_GUARDED_BY more reliable.
1305 Array<AllocPageInfo, kNumAllocPages> mAllocPages MOZ_GUARDED_BY(mMutex)__attribute__((guarded_by(mMutex)));
1306
1307 public:
1308 Delay GetAvgAllocDelay() MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) { return mAvgAllocDelay; }
1309 Delay GetAvgFirstAllocDelay() MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
1310 return mAvgFirstAllocDelay;
1311 }
1312 Delay GetAvgPageReuseDelay() MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
1313 return mAvgPageReuseDelay;
1314 }
1315 Delay ReuseDelay() MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
1316 Delay avg_reuse_delay = GetAvgPageReuseDelay();
1317 return (avg_reuse_delay / 2) +
1318 Rnd64ToDelay(avg_reuse_delay / 2, Random64());
1319 }
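
  // Worked example for ReuseDelay() above, taking the default
  // mAvgPageReuseDelay of 256*1024 and assuming (as the field comments state)
  // that Rnd64ToDelay(avg, r) produces a value in 1..2*avg:
  //
  //   fixed part  = 256*1024 / 2             = 131072
  //   random part = 1 .. 2 * (256*1024 / 2)  = 1 .. 262144
  //   total       = 131073 .. 393216         (roughly avg/2 .. 3*avg/2)
  //
  // so a freed page is never eligible for reuse sooner than half the average
  // reuse delay, which keeps the bad stacks that come from very quick reuse
  // out of crash reports.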
1320
1321 // Both of these are accessed early on hot code paths. We make them both
1322  // static variables rather than making sRegion a member of sPHC to keep these
1323  // hot code paths as fast as possible. They're both "write once" so they can
1324 // share a cache line.
1325 static PHCRegion sRegion;
1326 static PHC* sPHC;
1327};
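
// For illustration only: a standalone sketch of the kind of padding the
// analyzer warning at the top of this report is about. When small fields are
// interleaved with cache-line-aligned fields, the compiler must pad each small
// field up to the next 64-byte boundary; grouping the small fields together
// removes that padding. The structs below are placeholders, not the real PHC
// layout, and the exact sizes depend on the ABI.
#include <cstddef>
#include <cstdint>

namespace padding_sketch {
constexpr std::size_t kLine = 64;

struct Interleaved {
  std::uint32_t a;  // 4 bytes, then ~60 bytes of padding before the next line
  alignas(kLine) char line1[kLine];
  std::uint32_t b;  // 4 more bytes, padded again up to a 64-byte boundary
  alignas(kLine) char line2[kLine];
};  // typically 256 bytes

struct Grouped {
  std::uint32_t a;  // the small fields share one cache line
  std::uint32_t b;
  alignas(kLine) char line1[kLine];
  alignas(kLine) char line2[kLine];
};  // typically 192 bytes

static_assert(sizeof(Grouped) < sizeof(Interleaved),
              "grouping the small fields saves a whole cache line here");
}  // namespace padding_sketch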
1328
1329// These globals are read together and hardly ever written. They should be on
1330// the same cache line. They should be in a different cache line from data that
1331// is manipulated often (mMutex and mNow are members of sPHC for that reason) so
1332// that this cache line can be shared among cores. This makes a measurable
1333// impact on calls to maybe_init().
1334alignas(kCacheLineSize) PHCRegion PHC::sRegion;
1335PHC* PHC::sPHC;
1336
1337PHC_THREAD_LOCAL(bool)__thread ::mozilla::detail::ThreadLocal< bool, ::mozilla::
detail::ThreadLocalNativeStorage>
PHC::tlsIsDisabled;
1338PHC_THREAD_LOCAL(Delay)__thread ::mozilla::detail::ThreadLocal< Delay, ::mozilla::
detail::ThreadLocalNativeStorage>
PHC::tlsAllocDelay;
1339Atomic<Delay, ReleaseAcquire> PHC::sAllocDelay;
1340PHC_THREAD_LOCAL(Delay)__thread ::mozilla::detail::ThreadLocal< Delay, ::mozilla::
detail::ThreadLocalNativeStorage>
PHC::tlsLastDelay;
1341
1342// When PHC wants to crash we first have to unlock so that the crash reporter
1343// can call into PHC to look up its pointer. That also means that before calling
1344// PHCCrash, please ensure that the state is consistent. Because this can report an
1345// arbitrary string, use of it must be reviewed by Firefox data stewards.
1346void PHC::Crash(const char* aMessage) MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
1347 mMutex.Unlock();
1348 MOZ_CRASH_UNSAFE(aMessage)MOZ_Crash("/root/firefox-clang/memory/build/PHC.cpp", 1348, aMessage
)
;
1349}
1350
1351class AutoDisableOnCurrentThread {
1352 public:
1353 AutoDisableOnCurrentThread(const AutoDisableOnCurrentThread&) = delete;
1354
1355 const AutoDisableOnCurrentThread& operator=(
1356 const AutoDisableOnCurrentThread&) = delete;
1357
1358 explicit AutoDisableOnCurrentThread() { PHC::DisableOnCurrentThread(); }
1359 ~AutoDisableOnCurrentThread() { PHC::sPHC->EnableOnCurrentThread(); }
1360};
1361
1362//---------------------------------------------------------------------------
1363// Initialisation
1364//---------------------------------------------------------------------------
1365
1366// WARNING: this function runs *very* early -- before all static initializers
1367// have run. For this reason, non-scalar globals (sPHC) are allocated
1368// dynamically (so we can guarantee their construction in this function) rather
1369// than statically. sRegion is allocated statically to avoid an extra
1370// dereference.
1371static bool phc_init() {
1372 if (GetKernelPageSize() != kPageSize) {
1373 return false;
1374 }
1375
1376 if (!PHC::sRegion.AllocVirtualAddresses()) {
1377 return false;
1378 }
1379
1380 // sPHC is never freed. It lives for the life of the process.
1381 PHC::sPHC = InfallibleAllocPolicy::new_<PHC>();
1382
1383#ifndef XP_WIN
1384 // Avoid deadlocks when forking by acquiring our state lock prior to forking
1385 // and releasing it after forking. See |LogAlloc|'s |phc_init| for
1386 // in-depth details.
1387 pthread_atfork(PHC::prefork, PHC::postfork_parent, PHC::postfork_child);
1388#endif
1389
1390 return true;
1391}
1392
1393static inline bool maybe_init() {
1394 // This runs on hot paths and we can save some memory accesses by using sPHC
1395 // to test if we've already initialised PHC successfully.
1396 if (MOZ_UNLIKELY(!PHC::sPHC)(__builtin_expect(!!(!PHC::sPHC), 0))) {
1397 // The lambda will only be called once and is thread safe.
1398 static bool sInitSuccess = []() { return phc_init(); }();
1399 return sInitSuccess;
1400 }
1401
1402 return true;
1403}
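
// For illustration only: a standalone sketch of the "magic static" idiom that
// maybe_init() relies on. The initializer of a function-local static runs
// exactly once even under concurrent callers (guaranteed since C++11), so it
// doubles as a cheap call_once. Placeholder names, not PHC code.
#include <cstdio>

namespace once_sketch {
bool ExpensiveInit() {
  std::puts("initialising (printed at most once)");
  return true;
}

bool EnsureInitialised() {
  // Racing threads block here until the first caller finishes the lambda;
  // every later call just reads the cached result.
  static bool sOk = []() { return ExpensiveInit(); }();
  return sOk;
}
}  // namespace once_sketch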
1404
1405//---------------------------------------------------------------------------
1406// Page allocation operations
1407//---------------------------------------------------------------------------
1408
1409// This is the hot path for testing if we should make a PHC allocation; it
1410// should be inlined into the caller, while the remainder of the tests, which
1411// are in MaybePageAlloc, need not be inlined.
1412static MOZ_ALWAYS_INLINEinline bool ShouldPageAllocHot(size_t aReqSize) {
1413 if (MOZ_UNLIKELY(!maybe_init())(__builtin_expect(!!(!maybe_init()), 0))) {
1414 return false;
1415 }
1416
1417 if (MOZ_UNLIKELY(aReqSize > kPageSize)(__builtin_expect(!!(aReqSize > kPageSize), 0))) {
1418 return false;
1419 }
1420
1421 // Decrement the delay. If it's zero, we do a page allocation and reset the
1422 // delay to a random number.
1423 if (MOZ_LIKELY(!PHC::DecrementDelay())(__builtin_expect(!!(!PHC::DecrementDelay()), 1))) {
1424 return false;
1425 }
1426
1427 return true;
1428}
1429
1430void PHC::LogNoAlloc(size_t aReqSize, size_t aAlignment, Delay newAllocDelay)
1431 MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
1432 // No pages are available, or VirtualAlloc/mprotect failed.
1433#if PHC_LOGGING0
1434 phc::PHCStats stats = GetPageStatsLocked();
1435 Log("No PageAlloc(%zu, %zu), sAllocDelay <- %zu, fullness %zu/%zu/%zu, "
1436 "hits %zu/%zu (%zu%%)\n",
1437 aReqSize, aAlignment, size_t(newAllocDelay), stats.mSlotsAllocated,
1438 stats.mSlotsFreed, kNumAllocPages, PageAllocHits(), PageAllocAttempts(),
1439 PageAllocHitRate());
1440#endif
1441}
1442
1443void* PHC::MaybePageAlloc(const Maybe<arena_id_t>& aArenaId, size_t aReqSize,
1444 size_t aAlignment, bool aZero) {
1445 MOZ_ASSERT(IsPowerOfTwo(aAlignment))do { static_assert( mozilla::detail::AssertionConditionType<
decltype(IsPowerOfTwo(aAlignment))>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(IsPowerOfTwo(aAlignment)))),
0))) { do { } while (false); MOZ_ReportAssertionFailure("IsPowerOfTwo(aAlignment)"
, "/root/firefox-clang/memory/build/PHC.cpp", 1445); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "IsPowerOfTwo(aAlignment)" ")"); do { MOZ_CrashSequence
(__null, 1445); __attribute__((nomerge)) ::abort(); } while (
false); } } while (false)
;
1446 if (!ShouldMakeNewAllocations()) {
1447 // Reset the allocation delay so that we take the fast path most of the
1448    // time. Rather than taking the lock and using the RNG, which are unnecessary
1449    // when PHC is disabled, set the delay to a reasonably high number:
1450 // the default average first allocation delay. This is reset when PHC is
1451 // re-enabled anyway.
1452 ForceSetNewAllocDelay(kDelayResetWhenDisabled);
1453 return nullptr;
1454 }
1455
1456 if (IsDisabledOnCurrentThread()) {
1457 // We don't reset sAllocDelay since that might affect other threads. We
1458 // assume this is okay because either this thread will be re-enabled after
1459    // fewer than DELAY_MAX allocations or there are other active threads
1460    // that will reset sAllocDelay. We do reset our local delay, which will
1461 // cause this thread to "back off" from updating sAllocDelay on future
1462 // allocations.
1463 ResetLocalAllocDelay(kDelayBackoffAmount);
1464 return nullptr;
1465 }
1466
1467 // Disable on this thread *before* getting the stack trace.
1468 AutoDisableOnCurrentThread disable;
1469
1470 // Get the stack trace *before* locking the mutex. If we return nullptr then
1471 // it was a waste, but it's not so frequent, and doing a stack walk while
1472 // the mutex is locked is problematic (see the big comment on
1473 // StackTrace::Fill() for details).
1474 StackTrace allocStack;
1475 allocStack.Fill();
1476
1477 MutexAutoLock lock(mMutex);
1478
1479 Time now = Now();
1480
1481 Delay newAllocDelay = Rnd64ToDelay(GetAvgAllocDelay(), Random64());
1482 if (!SetNewAllocDelay(newAllocDelay)) {
1483 return nullptr;
1484 }
1485
1486 // Pages are allocated from a free list populated in order of when they're
1487 // freed. If the page at the head of the list is too recently freed to be
1488 // reused then no other pages on the list will be either.
1489
1490 Maybe<uintptr_t> mb_index = PopNextFreeIfAllocatable(now);
1491 if (!mb_index) {
1492 IncPageAllocMisses();
1493 LogNoAlloc(aReqSize, aAlignment, newAllocDelay);
1494 return nullptr;
1495 }
1496 uintptr_t index = mb_index.value();
1497
1498#if PHC_LOGGING0
1499 Time lifetime = 0;
1500#endif
1501 uint8_t* pagePtr = sRegion.AllocPagePtr(index);
1502 MOZ_ASSERT(pagePtr)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(pagePtr)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(pagePtr))), 0))) { do { } while
(false); MOZ_ReportAssertionFailure("pagePtr", "/root/firefox-clang/memory/build/PHC.cpp"
, 1502); AnnotateMozCrashReason("MOZ_ASSERT" "(" "pagePtr" ")"
); do { MOZ_CrashSequence(__null, 1502); __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1503 bool ok =
1504#ifdef XP_WIN
1505 !!VirtualAlloc(pagePtr, kPageSize, MEM_COMMIT, PAGE_READWRITE);
1506#else
1507 mprotect(pagePtr, kPageSize, PROT_READ0x1 | PROT_WRITE0x2) == 0;
1508#endif
1509
1510 if (!ok) {
1511 UnpopNextFree(index);
1512 IncPageAllocMisses();
1513 LogNoAlloc(aReqSize, aAlignment, newAllocDelay);
1514 return nullptr;
1515 }
1516
1517 size_t usableSize = MozJemalloc::malloc_good_size(aReqSize);
1518 MOZ_ASSERT(usableSize > 0)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(usableSize > 0)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(usableSize > 0))), 0))) {
do { } while (false); MOZ_ReportAssertionFailure("usableSize > 0"
, "/root/firefox-clang/memory/build/PHC.cpp", 1518); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "usableSize > 0" ")"); do { MOZ_CrashSequence
(__null, 1518); __attribute__((nomerge)) ::abort(); } while (
false); } } while (false)
;
1519
1520 // Put the allocation as close to the end of the page as possible,
1521 // allowing for alignment requirements.
1522 uint8_t* ptr = pagePtr + kPageSize - usableSize;
1523 if (aAlignment != 1) {
1524 ptr = reinterpret_cast<uint8_t*>(
1525 (reinterpret_cast<uintptr_t>(ptr) & ~(aAlignment - 1)));
1526 }
1527
1528#if PHC_LOGGING0
1529 Time then = GetFreeTime(index);
1530 lifetime = then != 0 ? now - then : 0;
1531#endif
1532
1533 SetPageInUse(index, aArenaId, ptr, allocStack);
1534
1535 if (aZero) {
1536 memset(ptr, 0, usableSize);
1537 } else {
1538#ifdef DEBUG1
1539 memset(ptr, kAllocJunk, usableSize);
1540#endif
1541 }
1542
1543 IncPageAllocHits();
1544#if PHC_LOGGING0
1545 phc::PHCStats stats = GetPageStatsLocked();
1546 Log("PageAlloc(%zu, %zu) -> %p[%zu]/%p (%zu) (z%zu), sAllocDelay <- %zu, "
1547 "fullness %zu/%zu/%zu, hits %zu/%zu (%zu%%), lifetime %zu\n",
1548 aReqSize, aAlignment, pagePtr, index, ptr, usableSize,
1549 size_t(newAllocDelay), size_t(SharedAllocDelay()), stats.mSlotsAllocated,
1550 stats.mSlotsFreed, kNumAllocPages, PageAllocHits(), PageAllocAttempts(),
1551 PageAllocHitRate(), lifetime);
1552#endif
1553
1554 return ptr;
1555}
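
// For illustration only: a standalone sketch of the "push the allocation to
// the end of the page, then align its start downwards" placement used above.
// Aligning down can only move the start earlier, so the block still fits in
// the page and its end stays as close as the alignment allows to the guard
// page that follows. Placeholder constants, not PHC code.
#include <cstdint>

namespace placement_sketch {
constexpr uintptr_t kPage = 4096;

// Offset of the allocation within a page that starts at offset 0.
constexpr uintptr_t PlaceAtEnd(uintptr_t aUsableSize, uintptr_t aAlignment) {
  uintptr_t offset = kPage - aUsableSize;  // end the block at the page end
  return offset & ~(aAlignment - 1);       // align the start downwards
}

static_assert(PlaceAtEnd(48, 1) == 4048, "unaligned: flush with the page end");
static_assert(PlaceAtEnd(48, 16) == 4048, "4048 is already 16-byte aligned");
static_assert(PlaceAtEnd(48, 64) == 4032, "rounded down, still inside the page");
static_assert(PlaceAtEnd(48, 64) + 48 <= kPage, "never spills into the guard page");
}  // namespace placement_sketch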
1556
1557void PHC::FreePage(uintptr_t aIndex, const Maybe<arena_id_t>& aArenaId,
1558 const StackTrace& aFreeStack, Delay aReuseDelay)
1559 MOZ_REQUIRES(mMutex)__attribute__((exclusive_locks_required(mMutex))) {
1560 void* pagePtr = sRegion.AllocPagePtr(aIndex);
1561
1562#ifdef XP_WIN
1563 if (!VirtualFree(pagePtr, kPageSize, MEM_DECOMMIT)) {
1564 Crash("VirtualFree failed");
1565 }
1566#else
1567 if (mmap_mmap(pagePtr, kPageSize, PROT_NONE0x0, MAP_FIXED0x10 | MAP_PRIVATE0x02 | MAP_ANON0x20,
1568 -1, 0) == MAP_FAILED((void *) -1)) {
1569 Crash("mmap failed");
1570 }
1571#endif
1572
1573 SetPageFreed(aIndex, aArenaId, aFreeStack, aReuseDelay);
1574}
1575
1576//---------------------------------------------------------------------------
1577// replace-malloc machinery
1578//---------------------------------------------------------------------------
1579
1580// This handles malloc, moz_arena_malloc, and realloc-with-a-nullptr.
1581MOZ_ALWAYS_INLINEinline static void* PageMalloc(const Maybe<arena_id_t>& aArenaId,
1582 size_t aReqSize) {
1583 void* ptr =
1584 ShouldPageAllocHot(aReqSize)
1585 // The test on aArenaId here helps the compiler optimise away
1586 // the construction of Nothing() in the caller.
1587 ? PHC::sPHC->MaybePageAlloc(aArenaId.isSome() ? aArenaId : Nothing(),
1588 aReqSize, /* aAlignment */ 1,
1589 /* aZero */ false)
1590 : nullptr;
1591 return ptr ? ptr
1592 : (aArenaId.isSome()
1593 ? MozJemalloc::moz_arena_malloc(*aArenaId, aReqSize)
1594 : MozJemalloc::malloc(aReqSize));
1595}
1596
1597inline void* MozJemallocPHC::malloc(size_t aReqSize) {
1598 return PageMalloc(Nothing(), aReqSize);
1599}
1600
1601// This handles both calloc and moz_arena_calloc.
1602MOZ_ALWAYS_INLINEinline static void* PageCalloc(const Maybe<arena_id_t>& aArenaId,
1603 size_t aNum, size_t aReqSize) {
1604 CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aNum) * aReqSize;
1605 if (!checkedSize.isValid()) {
1606 return nullptr;
1607 }
1608
1609 void* ptr =
1610 ShouldPageAllocHot(checkedSize.value())
1611 // The test on aArenaId here helps the compiler optimise away
1612 // the construction of Nothing() in the caller.
1613 ? PHC::sPHC->MaybePageAlloc(aArenaId.isSome() ? aArenaId : Nothing(),
1614 checkedSize.value(), /* aAlignment */ 1,
1615 /* aZero */ true)
1616 : nullptr;
1617 return ptr ? ptr
1618 : (aArenaId.isSome()
1619 ? MozJemalloc::moz_arena_calloc(*aArenaId, aNum, aReqSize)
1620 : MozJemalloc::calloc(aNum, aReqSize));
1621}
1622
1623inline void* MozJemallocPHC::calloc(size_t aNum, size_t aReqSize) {
1624 return PageCalloc(Nothing(), aNum, aReqSize);
1625}
1626
1627MOZ_ALWAYS_INLINEinline static bool FastIsPHCPtr(const void* aPtr) {
1628 if (MOZ_UNLIKELY(!maybe_init())(__builtin_expect(!!(!maybe_init()), 0))) {
1629 return false;
1630 }
1631
1632 PtrKind pk = PHC::sRegion.PtrKind(aPtr);
1633 return !pk.IsNothing();
1634}
1635
1636// This function handles both realloc and moz_arena_realloc.
1637//
1638// As always, realloc is complicated, and doubly so when there are two
1639// different kinds of allocations in play. Here are the possible transitions,
1640// and what we do in practice.
1641//
1642// - normal-to-normal: This is straightforward and obviously necessary.
1643//
1644// - normal-to-page: This is disallowed because it would require getting the
1645// arenaId of the normal allocation, which isn't possible in non-DEBUG builds
1646// for security reasons.
1647//
1648// - page-to-page: This is done whenever possible, i.e. whenever the new size
1649// is less than or equal to 4 KiB. This choice counterbalances the
1650// disallowing of normal-to-page allocations, in order to avoid biasing
1651// towards or away from page allocations. It always occurs in-place.
1652//
1653// - page-to-normal: this is done only when necessary, i.e. only when the new
1654// size is greater than 4 KiB. This choice naturally flows from the
1655// prior choice on page-to-page transitions.
1656//
1657// In summary: realloc doesn't change the allocation kind unless it must.
1658//
1659// This function may return:
1660// - Some(pointer) when PHC handled the reallocation.
1661// - Some(nullptr) when PHC should have handled a page-to-normal transition
1662// but couldn't because of OOM.
1663// - Nothing() when PHC is disabled or the original allocation was not
1664// under PHC.
1665MOZ_ALWAYS_INLINEinline static Maybe<void*> MaybePageRealloc(
1666 const Maybe<arena_id_t>& aArenaId, void* aOldPtr, size_t aNewSize) {
1667 if (!aOldPtr) {
1668 // Null pointer. Treat like malloc(aNewSize).
1669 return Some(PageMalloc(aArenaId, aNewSize));
1670 }
1671
1672 if (!FastIsPHCPtr(aOldPtr)) {
1673 // A normal-to-normal transition.
1674 return Nothing();
1675 }
1676
1677 return PHC::sPHC->PageRealloc(aArenaId, aOldPtr, aNewSize);
1678}
1679
1680Maybe<void*> PHC::PageRealloc(const Maybe<arena_id_t>& aArenaId, void* aOldPtr,
1681 size_t aNewSize) MOZ_EXCLUDES(mMutex)__attribute__((locks_excluded(mMutex))) {
1682 PtrKind pk = sRegion.PtrKind(aOldPtr);
1683 MOZ_ASSERT(!pk.IsNothing())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(!pk.IsNothing())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(!pk.IsNothing()))), 0))) { do
{ } while (false); MOZ_ReportAssertionFailure("!pk.IsNothing()"
, "/root/firefox-clang/memory/build/PHC.cpp", 1683); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "!pk.IsNothing()" ")"); do { MOZ_CrashSequence
(__null, 1683); __attribute__((nomerge)) ::abort(); } while (
false); } } while (false)
;
1684
1685 if (pk.IsGuardPage()) {
1686 CrashOnGuardPage(aOldPtr);
1687 }
1688
1689 // At this point we know we have an allocation page.
1690 uintptr_t index = pk.AllocPageIndex();
1691
1692 // A page-to-something transition.
1693 AdvanceNow(LocalAllocDelay());
1694
1695 // Note that `disable` has no effect unless it is emplaced below.
1696 Maybe<AutoDisableOnCurrentThread> disable;
1697 // Get the stack trace *before* locking the mutex.
1698 StackTrace stack;
1699 if (IsDisabledOnCurrentThread()) {
1700 // PHC is disabled on this thread. Leave the stack empty.
1701 } else {
1702 // Disable on this thread *before* getting the stack trace.
1703 disable.emplace();
1704 stack.Fill();
1705 }
1706
1707 MutexAutoLock lock(mMutex);
1708
1709 // Check for realloc() of a freed block.
1710 EnsureValidAndInUse(aOldPtr, index);
1711
1712 if (aNewSize <= kPageSize && ShouldMakeNewAllocations()) {
1713 // A page-to-page transition. Just keep using the page allocation. We do
1714 // this even if the thread is disabled, because it doesn't create a new
1715 // page allocation. Note that ResizePageInUse() checks aArenaId.
1716 //
1717 // Move the bytes with memmove(), because the old allocation and the new
1718 // allocation overlap. Move the usable size rather than the requested size,
1719 // because the user might have used malloc_usable_size() and filled up the
1720 // usable size.
1721 size_t oldUsableSize = PageUsableSize(index);
1722 size_t newUsableSize = MozJemalloc::malloc_good_size(aNewSize);
1723 uint8_t* pagePtr = sRegion.AllocPagePtr(index);
1724 uint8_t* newPtr = pagePtr + kPageSize - newUsableSize;
1725 memmove(newPtr, aOldPtr, std::min(oldUsableSize, aNewSize));
1726 ResizePageInUse(index, aArenaId, newPtr, stack);
1727 Log("PageRealloc-Reuse(%p, %zu) -> %p\n", aOldPtr, aNewSize, newPtr);
1728 return Some(newPtr);
1729 }
1730
1731 // A page-to-normal transition (with the new size greater than page-sized).
1732 // (Note that aArenaId is checked below.)
1733 void* newPtr;
1734 if (aArenaId.isSome()) {
1735 newPtr = MozJemalloc::moz_arena_malloc(*aArenaId, aNewSize);
1736 } else {
1737 Maybe<arena_id_t> oldArenaId = PageArena(index);
1738 newPtr = (oldArenaId.isSome()
1739 ? MozJemalloc::moz_arena_malloc(*oldArenaId, aNewSize)
1740 : MozJemalloc::malloc(aNewSize));
1741 }
1742 if (!newPtr) {
1743 return Some(nullptr);
1744 }
1745
1746 Delay reuseDelay = ReuseDelay();
1747
1748 // Copy the usable size rather than the requested size, because the user
1749 // might have used malloc_usable_size() and filled up the usable size. Note
1750 // that FreePage() checks aArenaId (via SetPageFreed()).
1751 size_t oldUsableSize = PageUsableSize(index);
1752 memcpy(newPtr, aOldPtr, std::min(oldUsableSize, aNewSize));
1753 FreePage(index, aArenaId, stack, reuseDelay);
1754 Log("PageRealloc-Free(%p[%zu], %zu) -> %p, %zu delay, reuse at ~%zu\n",
1755 aOldPtr, index, aNewSize, newPtr, size_t(reuseDelay),
1756 size_t(Now()) + reuseDelay);
1757
1758 return Some(newPtr);
1759}
1760
1761MOZ_ALWAYS_INLINEinline static void* PageRealloc(const Maybe<arena_id_t>& aArenaId,
1762 void* aOldPtr, size_t aNewSize) {
1763 Maybe<void*> ptr = MaybePageRealloc(aArenaId, aOldPtr, aNewSize);
1764
1765 return ptr.isSome()
1766 ? *ptr
1767 : (aArenaId.isSome() ? MozJemalloc::moz_arena_realloc(
1768 *aArenaId, aOldPtr, aNewSize)
1769 : MozJemalloc::realloc(aOldPtr, aNewSize));
1770}
1771
1772inline void* MozJemallocPHC::realloc(void* aOldPtr, size_t aNewSize) {
1773 return PageRealloc(Nothing(), aOldPtr, aNewSize);
1774}
1775
1776void PHC::PageFree(const Maybe<arena_id_t>& aArenaId, void* aPtr)
1777 MOZ_EXCLUDES(mMutex)__attribute__((locks_excluded(mMutex))) {
1778 PtrKind pk = sRegion.PtrKind(aPtr);
1779 MOZ_ASSERT(!pk.IsNothing())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(!pk.IsNothing())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(!pk.IsNothing()))), 0))) { do
{ } while (false); MOZ_ReportAssertionFailure("!pk.IsNothing()"
, "/root/firefox-clang/memory/build/PHC.cpp", 1779); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "!pk.IsNothing()" ")"); do { MOZ_CrashSequence
(__null, 1779); __attribute__((nomerge)) ::abort(); } while (
false); } } while (false)
;
1780 if (pk.IsGuardPage()) {
1781 PHC::CrashOnGuardPage(aPtr);
1782 }
1783
1784 // At this point we know we have an allocation page.
1785 AdvanceNow(LocalAllocDelay());
1786 uintptr_t index = pk.AllocPageIndex();
1787
1788 // Note that `disable` has no effect unless it is emplaced below.
1789 Maybe<AutoDisableOnCurrentThread> disable;
1790 // Get the stack trace *before* locking the mutex.
1791 StackTrace freeStack;
1792 if (IsDisabledOnCurrentThread()) {
1793 // PHC is disabled on this thread. Leave the stack empty.
1794 } else {
1795 // Disable on this thread *before* getting the stack trace.
1796 disable.emplace();
1797 freeStack.Fill();
1798 }
1799
1800 MutexAutoLock lock(mMutex);
1801
1802 // Check for a double-free.
1803 EnsureValidAndInUse(aPtr, index);
1804
1805 // Note that FreePage() checks aArenaId (via SetPageFreed()).
1806 Delay reuseDelay = ReuseDelay();
1807 FreePage(index, aArenaId, freeStack, reuseDelay);
1808
1809#if PHC_LOGGING0
1810 phc::PHCStats stats = GetPageStatsLocked();
1811 Log("PageFree(%p[%zu]), %zu delay, reuse at ~%zu, fullness %zu/%zu/%zu\n",
1812 aPtr, index, size_t(reuseDelay), size_t(Now()) + reuseDelay,
1813 stats.mSlotsAllocated, stats.mSlotsFreed, kNumAllocPages);
1814#endif
1815}
1816
1817MOZ_ALWAYS_INLINEinline static void PageFree(const Maybe<arena_id_t>& aArenaId,
1818 void* aPtr) {
1819 if (MOZ_UNLIKELY(FastIsPHCPtr(aPtr))(__builtin_expect(!!(FastIsPHCPtr(aPtr)), 0))) {
1820    // The ternary expression here helps the compiler optimise away the
1821 // construction of Nothing() in the caller.
1822 PHC::sPHC->PageFree(aArenaId.isSome() ? aArenaId : Nothing(), aPtr);
1823 return;
1824 }
1825
1826 aArenaId.isSome() ? MozJemalloc::moz_arena_free(*aArenaId, aPtr)
1827 : MozJemalloc::free(aPtr);
1828}
1829
1830inline void MozJemallocPHC::free(void* aPtr) { PageFree(Nothing(), aPtr); }
1831
1832// This handles memalign and moz_arena_memalign.
1833MOZ_ALWAYS_INLINEinline static void* PageMemalign(const Maybe<arena_id_t>& aArenaId,
1834 size_t aAlignment,
1835 size_t aReqSize) {
1836 MOZ_RELEASE_ASSERT(IsPowerOfTwo(aAlignment))do { static_assert( mozilla::detail::AssertionConditionType<
decltype(IsPowerOfTwo(aAlignment))>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(IsPowerOfTwo(aAlignment)))),
0))) { do { } while (false); MOZ_ReportAssertionFailure("IsPowerOfTwo(aAlignment)"
, "/root/firefox-clang/memory/build/PHC.cpp", 1836); AnnotateMozCrashReason
("MOZ_RELEASE_ASSERT" "(" "IsPowerOfTwo(aAlignment)" ")"); do
{ MOZ_CrashSequence(__null, 1836); __attribute__((nomerge)) ::
abort(); } while (false); } } while (false)
;
1837
1838 // PHC can't satisfy an alignment greater than a page size, so fall back to
1839 // mozjemalloc in that case.
1840 void* ptr = nullptr;
1841 if (ShouldPageAllocHot(aReqSize) && aAlignment <= kPageSize) {
1842 // The test on aArenaId here helps the compiler optimise away
1843 // the construction of Nothing() in the caller.
1844 ptr = PHC::sPHC->MaybePageAlloc(aArenaId.isSome() ? aArenaId : Nothing(),
1845 aReqSize, aAlignment, /* aZero */ false);
1846 }
1847 return ptr ? ptr
1848 : (aArenaId.isSome()
1849 ? MozJemalloc::moz_arena_memalign(*aArenaId, aAlignment,
1850 aReqSize)
1851 : MozJemalloc::memalign(aAlignment, aReqSize));
1852}
1853
1854inline void* MozJemallocPHC::memalign(size_t aAlignment, size_t aReqSize) {
1855 return PageMemalign(Nothing(), aAlignment, aReqSize);
1856}
1857
1858inline size_t MozJemallocPHC::malloc_usable_size(usable_ptr_t aPtr) {
1859 if (!FastIsPHCPtr(aPtr)) {
1860 // Not a page allocation. Measure it normally.
1861 return MozJemalloc::malloc_usable_size(aPtr);
1862 }
1863
1864 return PHC::sPHC->PtrUsableSize(aPtr);
1865}
1866
1867size_t PHC::PtrUsableSize(usable_ptr_t aPtr) MOZ_EXCLUDES(mMutex)__attribute__((locks_excluded(mMutex))) {
1868 PtrKind pk = sRegion.PtrKind(aPtr);
1869 if (pk.IsGuardPage()) {
1870 CrashOnGuardPage(const_cast<void*>(aPtr));
1871 }
1872
1873 // At this point we know aPtr lands within an allocation page, due to the
1874 // math done in the PtrKind constructor. But if aPtr points to memory
1875 // before the base address of the allocation, we return 0.
1876 uintptr_t index = pk.AllocPageIndex();
1877
1878 MutexAutoLock lock(mMutex);
1879
1880 void* pageBaseAddr = AllocPageBaseAddr(index);
1881
1882 if (MOZ_UNLIKELY(aPtr < pageBaseAddr)(__builtin_expect(!!(aPtr < pageBaseAddr), 0))) {
1883 return 0;
1884 }
1885
1886 return PageUsableSize(index);
1887}
1888
1889static size_t metadata_size() {
1890 return MozJemalloc::malloc_usable_size(PHC::sPHC);
1891}
1892
1893inline void MozJemallocPHC::jemalloc_stats_internal(
1894 jemalloc_stats_t* aStats, jemalloc_bin_stats_t* aBinStats) {
1895 MozJemalloc::jemalloc_stats_internal(aStats, aBinStats);
1896
1897 if (!maybe_init()) {
1898 // If we're not initialised, then we're not using any additional memory and
1899 // have nothing to add to the report.
1900 return;
1901 }
1902
1903  // We allocate our memory from jemalloc, so it has already counted our memory
1904  // usage within "mapped" and "allocated". We must subtract the memory we
1905  // allocated from jemalloc from "allocated" before adding in only the parts
1906  // that we have handed out to Firefox.
1907
1908 aStats->allocated -= kAllPagesJemallocSize;
1909
1910 aStats->allocated += PHC::sPHC->AllocatedBytes();
1911
1912 // guards is the gap between `allocated` and `mapped`. In some ways this
1913  // almost fits into aStats->wasted since it feels like wasted memory. However,
1914 // wasted should only include committed memory and these guard pages are
1915 // uncommitted. Therefore we don't include it anywhere.
1916 // size_t guards = mapped - allocated;
1917
1918 // aStats.page_cache and aStats.bin_unused are left unchanged because PHC
1919 // doesn't have anything corresponding to those.
1920
1921 // The metadata is stored in normal heap allocations, so they're measured by
1922 // mozjemalloc as `allocated`. Move them into `bookkeeping`.
1923 // They're also reported under explicit/heap-overhead/phc/fragmentation in
1924 // about:memory.
1925 size_t bookkeeping = metadata_size();
1926 aStats->allocated -= bookkeeping;
1927 aStats->bookkeeping += bookkeeping;
1928}
1929
1930inline void MozJemallocPHC::jemalloc_stats_lite(jemalloc_stats_lite_t* aStats) {
1931 MozJemalloc::jemalloc_stats_lite(aStats);
1932}
1933
1934inline void MozJemallocPHC::jemalloc_ptr_info(const void* aPtr,
1935 jemalloc_ptr_info_t* aInfo) {
1936 if (!FastIsPHCPtr(aPtr)) {
1937 // Not a page allocation.
1938 MozJemalloc::jemalloc_ptr_info(aPtr, aInfo);
1939 return;
1940 }
1941
1942 PHC::sPHC->PagePtrInfo(aPtr, aInfo);
1943}
1944
1945void PHC::PagePtrInfo(const void* aPtr, jemalloc_ptr_info_t* aInfo)
1946 MOZ_EXCLUDES(mMutex)__attribute__((locks_excluded(mMutex))) {
1947 // We need to implement this properly, because various code locations do
1948 // things like checking that allocations are in the expected arena.
1949
1950 PtrKind pk = sRegion.PtrKind(aPtr);
1951 if (pk.IsGuardPage()) {
1952 // Treat a guard page as unknown because there's no better alternative.
1953 *aInfo = {TagUnknown, nullptr, 0, 0};
1954 return;
1955 }
1956
1957 // At this point we know we have an allocation page.
1958 uintptr_t index = pk.AllocPageIndex();
1959
1960 MutexAutoLock lock(mMutex);
1961
1962 FillJemallocPtrInfo(aPtr, index, aInfo);
1963#if DEBUG1
1964 Log("JemallocPtrInfo(%p[%zu]) -> {%zu, %p, %zu, %zu}\n", aPtr, index,
1965 size_t(aInfo->tag), aInfo->addr, aInfo->size, aInfo->arenaId);
1966#else
1967 Log("JemallocPtrInfo(%p[%zu]) -> {%zu, %p, %zu}\n", aPtr, index,
1968 size_t(aInfo->tag), aInfo->addr, aInfo->size);
1969#endif
1970}
1971
1972inline void* MozJemallocPHC::moz_arena_malloc(arena_id_t aArenaId,
1973 size_t aReqSize) {
1974 return PageMalloc(Some(aArenaId), aReqSize);
1975}
1976
1977inline void* MozJemallocPHC::moz_arena_calloc(arena_id_t aArenaId, size_t aNum,
1978 size_t aReqSize) {
1979 return PageCalloc(Some(aArenaId), aNum, aReqSize);
1980}
1981
1982inline void* MozJemallocPHC::moz_arena_realloc(arena_id_t aArenaId,
1983 void* aOldPtr, size_t aNewSize) {
1984 return PageRealloc(Some(aArenaId), aOldPtr, aNewSize);
1985}
1986
1987inline void MozJemallocPHC::moz_arena_free(arena_id_t aArenaId, void* aPtr) {
1988 return PageFree(Some(aArenaId), aPtr);
1989}
1990
1991inline void* MozJemallocPHC::moz_arena_memalign(arena_id_t aArenaId,
1992 size_t aAlignment,
1993 size_t aReqSize) {
1994 return PageMemalign(Some(aArenaId), aAlignment, aReqSize);
1995}
1996
1997bool PHC::IsPHCAllocation(const void* aPtr, mozilla::phc::AddrInfo* aOut) {
1998 PtrKind pk = sRegion.PtrKind(aPtr);
1999 MOZ_ASSERT(!pk.IsNothing())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(!pk.IsNothing())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(!pk.IsNothing()))), 0))) { do
{ } while (false); MOZ_ReportAssertionFailure("!pk.IsNothing()"
, "/root/firefox-clang/memory/build/PHC.cpp", 1999); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "!pk.IsNothing()" ")"); do { MOZ_CrashSequence
(__null, 1999); __attribute__((nomerge)) ::abort(); } while (
false); } } while (false)
;
2000
2001 bool isGuardPage = false;
2002 if (pk.IsGuardPage()) {
2003 if ((uintptr_t(aPtr) % kPageSize) < (kPageSize / 2)) {
2004 // The address is in the lower half of a guard page, so it's probably an
2005 // overflow. But first check that it is not on the very first guard
2006 // page, in which case it cannot be an overflow, and we ignore it.
2007 if (sRegion.IsInFirstGuardPage(aPtr)) {
2008 return false;
2009 }
2010
2011 // Get the allocation page preceding this guard page.
2012 pk = sRegion.PtrKind(static_cast<const uint8_t*>(aPtr) - kPageSize);
2013
2014 } else {
2015 // The address is in the upper half of a guard page, so it's probably an
2016 // underflow. Get the allocation page following this guard page.
2017 pk = sRegion.PtrKind(static_cast<const uint8_t*>(aPtr) + kPageSize);
2018 }
2019
2020 // Make a note of the fact that we hit a guard page.
2021 isGuardPage = true;
2022 }
2023
2024 // At this point we know we have an allocation page.
2025 uintptr_t index = pk.AllocPageIndex();
2026
2027 if (aOut) {
2028 if (mMutex.TryLock()) {
2029 FillAddrInfo(index, aPtr, isGuardPage, *aOut);
2030 Log("IsPHCAllocation: %zu, %p, %zu, %zu, %zu\n", size_t(aOut->mKind),
2031 aOut->mBaseAddr, aOut->mUsableSize,
2032 aOut->mAllocStack.isSome() ? aOut->mAllocStack->mLength : 0,
2033 aOut->mFreeStack.isSome() ? aOut->mFreeStack->mLength : 0);
2034 mMutex.Unlock();
2035 } else {
2036 Log("IsPHCAllocation: PHC is locked\n");
2037 aOut->mPhcWasLocked = true;
2038 }
2039 }
2040 return true;
2041}
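
// For illustration only: a standalone sketch of the guard-page classification
// used in IsPHCAllocation() above. An address in the lower half of a guard
// page most likely overflowed off the allocation page just below it; an
// address in the upper half most likely underflowed off the allocation page
// just above it. Placeholder page size and names, not PHC code.
#include <cstdint>

namespace guard_sketch {
constexpr uintptr_t kPage = 4096;

enum class Guess { OverflowFromPageBelow, UnderflowFromPageAbove };

constexpr Guess ClassifyGuardHit(uintptr_t aAddr) {
  // Which half of its (guard) page does the faulting address fall into?
  return (aAddr % kPage) < kPage / 2 ? Guess::OverflowFromPageBelow
                                     : Guess::UnderflowFromPageAbove;
}

static_assert(ClassifyGuardHit(3 * kPage + 8) == Guess::OverflowFromPageBelow,
              "just past the end of the allocation page below the guard page");
static_assert(ClassifyGuardHit(4 * kPage - 8) == Guess::UnderflowFromPageAbove,
              "just before the start of the allocation page above the guard page");
}  // namespace guard_sketch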
2042
2043namespace mozilla::phc {
2044
2045bool IsPHCAllocation(const void* aPtr, AddrInfo* aOut) {
2046 if (!FastIsPHCPtr(aPtr)) {
2047 return false;
2048 }
2049
2050 return PHC::sPHC->IsPHCAllocation(aPtr, aOut);
2051}
2052
2053void DisablePHCOnCurrentThread() {
2054 PHC::DisableOnCurrentThread();
2055 Log("DisablePHCOnCurrentThread: %zu\n", 0ul);
2056}
2057
2058void ReenablePHCOnCurrentThread() {
2059 PHC::sPHC->EnableOnCurrentThread();
2060 Log("ReenablePHCOnCurrentThread: %zu\n", 0ul);
2061}
2062
2063bool IsPHCEnabledOnCurrentThread() {
2064 bool enabled = !PHC::IsDisabledOnCurrentThread();
2065 Log("IsPHCEnabledOnCurrentThread: %zu\n", size_t(enabled));
2066 return enabled;
2067}
2068
2069void PHCMemoryUsage(MemoryUsage& aMemoryUsage) {
2070 if (!maybe_init()) {
2071 aMemoryUsage = MemoryUsage();
2072 return;
2073 }
2074
2075 aMemoryUsage.mMetadataBytes = metadata_size();
2076 if (PHC::sPHC) {
2077 aMemoryUsage.mFragmentationBytes = PHC::sPHC->FragmentationBytes();
2078 } else {
2079 aMemoryUsage.mFragmentationBytes = 0;
2080 }
2081}
2082
2083void GetPHCStats(PHCStats& aStats) {
2084 if (!maybe_init()) {
2085 aStats = PHCStats();
2086 return;
2087 }
2088
2089 aStats = PHC::sPHC->GetPageStats();
2090}
2091
2092// Enable or disable PHC at runtime. If PHC is disabled, it will still trap
2093// bad uses of previous allocations, but won't track any new allocations.
2094void SetPHCState(PHCState aState) {
2095 if (!maybe_init()) {
2096 return;
2097 }
2098
2099 PHC::sPHC->SetState(aState);
2100}
2101
2102void SetPHCProbabilities(int64_t aAvgDelayFirst, int64_t aAvgDelayNormal,
2103 int64_t aAvgDelayPageReuse) {
2104 if (!maybe_init()) {
2105 return;
2106 }
2107
2108 PHC::sPHC->SetProbabilities(aAvgDelayFirst, aAvgDelayNormal,
2109 aAvgDelayPageReuse);
2110}
2111
2112} // namespace mozilla::phc
2113
2114#if PHC_LOGGING0
2115static size_t GetPid() { return size_t(getpid()); }
2116
2117static size_t GetTid() {
2118# if defined(XP_WIN)
2119 return size_t(GetCurrentThreadId());
2120# else
2121 return size_t(pthread_self());
2122# endif
2123}
2124#endif // PHC_LOGGING
2125
2126static void Log(const char* fmt, ...) {
2127#if PHC_LOGGING0
2128# if defined(XP_WIN)
2129# define LOG_STDERR \
2130 reinterpret_cast<intptr_t>(GetStdHandle(STD_ERROR_HANDLE))
2131# else
2132# define LOG_STDERR 2
2133# endif
2134
2135 char buf[256];
2136 size_t pos = SNPrintf(buf, sizeof(buf), "PHC[%zu,%zu,~%zu] ", GetPid(),
2137 GetTid(), size_t(PHC::Now()));
2138 va_list vargs;
2139 va_start(vargs, fmt)__builtin_va_start(vargs, fmt);
2140 pos += VSNPrintf(&buf[pos], sizeof(buf) - pos, fmt, vargs);
2141 MOZ_ASSERT(pos < sizeof(buf))do { static_assert( mozilla::detail::AssertionConditionType<
decltype(pos < sizeof(buf))>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(pos < sizeof(buf)))), 0))
) { do { } while (false); MOZ_ReportAssertionFailure("pos < sizeof(buf)"
, "/root/firefox-clang/memory/build/PHC.cpp", 2141); AnnotateMozCrashReason
("MOZ_ASSERT" "(" "pos < sizeof(buf)" ")"); do { MOZ_CrashSequence
(__null, 2141); __attribute__((nomerge)) ::abort(); } while (
false); } } while (false)
;
2142 va_end(vargs)__builtin_va_end(vargs);
2143
2144 FdPuts(LOG_STDERR, buf, pos);
2145#endif // PHC_LOGGING
2146}