Bug Summary

File: /var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp
Warning: line 504, column 7
Excessive padding in 'class PHC' (80 padding bytes, where 16 is optimal). Optimal fields order: mAvgFirstAllocDelay, mPhcState, mNow, mRNG, mFreePageListHead, mFreePageListTail, mMutex, mAllocPages, mAvgAllocDelay, mAvgPageReuseDelay, consider reordering the fields or adding explicit padding members

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name Unified_cpp_memory_build0.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -ffp-contract=off -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/memory/build -fcoverage-compilation-dir=/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/memory/build -resource-dir /usr/lib/llvm-19/lib/clang/19 -include /var/lib/jenkins/workspace/firefox-scan-build/config/gcc_hidden.h -include /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/mozilla-config.h -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/system_wrappers -U _FORTIFY_SOURCE -D _FORTIFY_SOURCE=2 -D DEBUG=1 -D MOZ_MEMORY_IMPL -D MOZ_PHC -D MOZ_REPLACE_MALLOC_STATIC -D MOZ_HAS_MOZGLUE -D IMPL_MFBT -I /var/lib/jenkins/workspace/firefox-scan-build/memory/build -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/memory/build -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include -I 
/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/nspr -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/nss -D MOZILLA_CLIENT -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/x86_64-linux-gnu/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14/backward -internal-isystem /usr/lib/llvm-19/lib/clang/19/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-error=tautological-type-limit-compare -Wno-invalid-offsetof -Wno-range-loop-analysis -Wno-deprecated-anon-enum-enum-conversion -Wno-deprecated-enum-enum-conversion -Wno-deprecated-this-capture -Wno-inline-new-delete -Wno-error=deprecated-declarations -Wno-error=array-bounds -Wno-error=free-nonheap-object -Wno-error=atomic-alignment -Wno-error=deprecated-builtins -Wno-psabi -Wno-error=builtin-macro-redefined -Wno-vla-cxx-extension -Wno-unknown-warning-option -fdeprecated-macro -ferror-limit 19 -fstrict-flex-arrays=1 -stack-protector 2 -fstack-clash-protection -ftrivial-auto-var-init=pattern -fno-rtti -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -fno-sized-deallocation -fno-aligned-allocation -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2024-09-22-115206-3586786-1 -x c++ Unified_cpp_memory_build0.cpp
1/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2/* vim: set ts=8 sts=2 et sw=2 tw=80: */
3/* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7// PHC is a probabilistic heap checker. A tiny fraction of randomly chosen heap
8// allocations are subject to some expensive checking via the use of OS page
9// access protection. A failed check triggers a crash, whereupon useful
10// information about the failure is put into the crash report. The cost and
11// coverage for each user is minimal, but spread over the entire user base the
12// coverage becomes significant.
13//
14// The idea comes from Chromium, where it is called GWP-ASAN. (Firefox uses PHC
15// as the name because GWP-ASAN is long, awkward, and doesn't have any
16// particular meaning.)
17//
18// In the current implementation up to 64 allocations per process can become
19// PHC allocations. These allocations must be page-sized or smaller. Each PHC
20// allocation gets its own page, and when the allocation is freed its page is
21// marked inaccessible until the page is reused for another allocation. This
22// means that a use-after-free defect (which includes double-frees) will be
23// caught if the use occurs before the page is reused for another allocation.
24// The crash report will contain stack traces for the allocation site, the free
25// site, and the use-after-free site, which is often enough to diagnose the
26// defect.
27//
28// Also, each PHC allocation is followed by a guard page. The PHC allocation is
29// positioned so that its end abuts the guard page (or as close as possible,
30// given alignment constraints). This means that a bounds violation at the end
31// of the allocation (overflow) will be caught. The crash report will contain
32// stack traces for the allocation site and the bounds violation use site,
33// which is often enough to diagnose the defect.
34//
35// (A bounds violation at the start of the allocation (underflow) will not be
36// caught, unless it is sufficiently large to hit the preceding allocation's
37// guard page, which is not that likely. It would be possible to look more
38// assiduously for underflow by randomly placing some allocations at the end of
39// the page and some at the start of the page, and GWP-ASAN does this. PHC does
40// not, however, because overflow is likely to be much more common than
41// underflow in practice.)
42//
43// We use a simple heuristic to categorize a guard page access as overflow or
44// underflow: if the address falls in the lower half of the guard page, we
45// assume it is overflow, otherwise we assume it is underflow. More
46// sophisticated heuristics are possible, but this one is very simple, and it is
47// likely that most overflows/underflows in practice are very close to the page
48// boundary.
49//
50// The design space for the randomization strategy is large. The current
51// implementation has a large random delay before it starts operating, and a
52// small random delay between each PHC allocation attempt. Each freed PHC
53// allocation is quarantined for a medium random delay before being reused, in
54// order to increase the chance of catching UAFs.
55//
56// The basic cost of PHC's operation is as follows.
57//
58// - The physical memory cost is 64 pages plus some metadata (including stack
59// traces) for each page. This amounts to 256 KiB per process on
60// architectures with 4 KiB pages and 1024 KiB on macOS/AArch64 which uses
61// 16 KiB pages.
62//
63// - The virtual memory cost is the physical memory cost plus the guard pages:
64// another 64 pages. This amounts to another 256 KiB per process on
65// architectures with 4 KiB pages and 1024 KiB on macOS/AArch64 which uses
66// 16 KiB pages. PHC is currently only enabled on 64-bit platforms so the
67// impact of the virtual memory usage is negligible.
68//
69// - Every allocation requires a size check and a decrement-and-check of an
70// atomic counter. When the counter reaches zero a PHC allocation can occur,
71// which involves marking a page as accessible and getting a stack trace for
72// the allocation site. Otherwise, mozjemalloc performs the allocation.
73//
74// - Every deallocation requires a range check on the pointer to see if it
75// involves a PHC allocation. (The choice to only do PHC allocations that are
76// a page or smaller enables this range check, because the 64 pages are
77// contiguous. Allowing larger allocations would make this more complicated,
78// and we definitely don't want something as slow as a hash table lookup on
79// every deallocation.) PHC deallocations involve marking a page as
80// inaccessible and getting a stack trace for the deallocation site.
81//
82// Note that calls to realloc(), free(), and malloc_usable_size() will
83// immediately crash if the given pointer falls within a page allocation's
84// page, but does not point to the start of the allocation itself.
85//
86// void* p = malloc(64);
87// free(p + 1); // p+1 doesn't point to the allocation start; crash
88//
89// Such crashes will not have the PHC fields in the crash report.
90//
91// PHC-specific tests can be run with the following commands:
92// - gtests: `./mach gtest '*PHC*'`
93// - xpcshell-tests: `./mach test toolkit/crashreporter/test/unit`
94// - This runs some non-PHC tests as well.
95
96#include "PHC.h"
97
98#include <stdlib.h>
99#include <time.h>
100
101#include <algorithm>
102
103#ifdef XP_WIN
104# include <process.h>
105#else
106# include <sys/mman.h>
107# include <sys/types.h>
108# include <pthread.h>
109# include <unistd.h>
110#endif
111
112#include "mozjemalloc.h"
113
114#include "mozjemalloc.h"
115#include "FdPrintf.h"
116#include "Mutex.h"
117#include "mozilla/Assertions.h"
118#include "mozilla/Atomics.h"
119#include "mozilla/Attributes.h"
120#include "mozilla/CheckedInt.h"
121#include "mozilla/Maybe.h"
122#include "mozilla/StackWalk.h"
123#include "mozilla/ThreadLocal.h"
124#include "mozilla/XorShift128PlusRNG.h"
125
126using namespace mozilla;
127
128//---------------------------------------------------------------------------
129// Utilities
130//---------------------------------------------------------------------------
131
132#ifdef ANDROID
133// Android doesn't have pthread_atfork defined in pthread.h.
134extern "C" MOZ_EXPORT__attribute__((visibility("default"))) int pthread_atfork(void (*)(void), void (*)(void),
135 void (*)(void));
136#endif
137
138#ifndef DISALLOW_COPY_AND_ASSIGN
// Declares (without defining) the copy constructor and copy-assignment
// operator, making a class non-copyable. (The analyzer report has fused the
// macro's expansion into the #define line below.)
139# define DISALLOW_COPY_AND_ASSIGN(T)T(const T&); void operator=(const T&) \
140 T(const T&); \
141 void operator=(const T&)
142#endif
143
144// This class provides infallible operations for the small number of heap
145// allocations that PHC does for itself. It would be nice if we could use the
146// InfallibleAllocPolicy from mozalloc, but PHC cannot use mozalloc.
147class InfallibleAllocPolicy {
148 public:
  // Crash the process (via MOZ_CRASH) if an allocation returned null; PHC's
  // own internal allocations are treated as infallible.
149 static void AbortOnFailure(const void* aP) {
150 if (!aP) {
151 MOZ_CRASH("PHC failed to allocate")do { do { } while (false); MOZ_ReportCrash("" "PHC failed to allocate"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 151); AnnotateMozCrashReason("MOZ_CRASH(" "PHC failed to allocate"
")"); do { *((volatile int*)__null) = 151; __attribute__((nomerge
)) ::abort(); } while (false); } while (false)
;
152 }
153 }
154
  // Infallible replacement for `new T`: allocate through mozjemalloc, abort
  // on failure, then placement-new default-construct the object.
155 template <class T>
156 static T* new_() {
157 void* p = MozJemalloc::malloc(sizeof(T));
158 AbortOnFailure(p);
159 return new (p) T;
160 }
161};
162
163//---------------------------------------------------------------------------
164// Stack traces
165//---------------------------------------------------------------------------
166
167// This code is similar to the equivalent code within DMD.
168
169class StackTrace : public phc::StackTrace {
170 public:
171 StackTrace() = default;
172
  // Discard any previously captured frames.
173 void Clear() { mLength = 0; }
174
  // Capture the current call stack. Per the warning at its definition below,
  // this must be called with PHC::mMutex NOT held.
175 void Fill();
176
177 private:
  // Per-frame callback for the stack walkers used by Fill(); appends aPc to
  // mPcs and bumps mLength. aSp is unused.
178 static void StackWalkCallback(uint32_t aFrameNumber, void* aPc, void* aSp,
179 void* aClosure) {
180 StackTrace* st = (StackTrace*)aClosure;
181 MOZ_ASSERT(st->mLength < kMaxFrames)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(st->mLength < kMaxFrames)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(st->mLength < kMaxFrames
))), 0))) { do { } while (false); MOZ_ReportAssertionFailure(
"st->mLength < kMaxFrames", "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 181); AnnotateMozCrashReason("MOZ_ASSERT" "(" "st->mLength < kMaxFrames"
")"); do { *((volatile int*)__null) = 181; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
182 st->mPcs[st->mLength] = aPc;
183 st->mLength++;
  // Sanity-check that our frame count stays in sync with the walker's.
184 MOZ_ASSERT(st->mLength == aFrameNumber)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(st->mLength == aFrameNumber)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(st->mLength == aFrameNumber
))), 0))) { do { } while (false); MOZ_ReportAssertionFailure(
"st->mLength == aFrameNumber", "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 184); AnnotateMozCrashReason("MOZ_ASSERT" "(" "st->mLength == aFrameNumber"
")"); do { *((volatile int*)__null) = 184; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
185 }
186};
187
188// WARNING WARNING WARNING: this function must only be called when PHC::mMutex
189// is *not* locked, otherwise we might get deadlocks.
190//
191// How? On Windows, MozStackWalk() can lock a mutex, M, from the shared library
192// loader. Another thread might call malloc() while holding M locked (when
193// loading a shared library) and try to lock PHC::mMutex, causing a deadlock.
194// So PHC::mMutex can't be locked during the call to MozStackWalk(). (For
195// details, see https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8. On
196// Linux, something similar can happen; see bug 824340. So we just disallow it
197// on all platforms.)
198//
199// In DMD, to avoid this problem we temporarily unlock the equivalent mutex for
200// the MozStackWalk() call. But that's grotty, and things are a bit different
201// here, so we just require that stack traces be obtained before locking
202// PHC::mMutex.
203//
204// Unfortunately, there is no reliable way at compile-time or run-time to ensure
205// this pre-condition. Hence this large comment.
206//
207void StackTrace::Fill() {
 // Start from an empty trace so an early-exiting walk leaves mLength == 0.
208 mLength = 0;
209
210// These ifdefs should be kept in sync with the conditions in
211// phc_implies_frame_pointers in build/moz.configure/memory.configure
212#if defined(XP_WIN) && defined(_M_IX86)
213 // This avoids MozStackWalk(), which causes unusably slow startup on Win32
214 // when it is called during static initialization (see bug 1241684).
215 //
216 // This code is cribbed from the Gecko Profiler, which also uses
217 // FramePointerStackWalk() on Win32: Registers::SyncPopulate() for the
218 // frame pointer, and GetStackTop() for the stack end.
219 CONTEXT context;
220 RtlCaptureContext(&context);
221 void** fp = reinterpret_cast<void**>(context.Ebp);
222
223 PNT_TIB pTib = reinterpret_cast<PNT_TIB>(NtCurrentTeb());
224 void* stackEnd = static_cast<void*>(pTib->StackBase);
225 FramePointerStackWalk(StackWalkCallback, kMaxFrames, this, fp, stackEnd);
226#elif defined(XP_DARWIN)
227 // This avoids MozStackWalk(), which has become unusably slow on Mac due to
228 // changes in libunwind.
229 //
230 // This code is cribbed from the Gecko Profiler, which also uses
231 // FramePointerStackWalk() on Mac: Registers::SyncPopulate() for the frame
232 // pointer, and GetStackTop() for the stack end.
233# pragma GCC diagnostic push
234# pragma GCC diagnostic ignored "-Wframe-address"
235 void** fp = reinterpret_cast<void**>(__builtin_frame_address(1));
236# pragma GCC diagnostic pop
237 void* stackEnd = pthread_get_stackaddr_np(pthread_self());
238 FramePointerStackWalk(StackWalkCallback, kMaxFrames, this, fp, stackEnd);
239#else
 // All other platforms: generic walker, capped at kMaxFrames frames.
240 MozStackWalk(StackWalkCallback, nullptr, kMaxFrames, this);
241#endif
242}
243
244//---------------------------------------------------------------------------
245// Logging
246//---------------------------------------------------------------------------
247
248// Change this to 1 to enable some PHC logging. Useful for debugging.
249#define PHC_LOGGING0 0
250
251#if PHC_LOGGING0
252
// Helpers compiled only when logging is enabled: identify the process and
// thread in each log line.
253static size_t GetPid() { return size_t(getpid()); }
254
255static size_t GetTid() {
256# if defined(XP_WIN)
257 return size_t(GetCurrentThreadId());
258# else
259 return size_t(pthread_self());
260# endif
261}
262
// LOG_STDERR is the fd/handle FdPrintf() writes to (stderr on both paths).
263# if defined(XP_WIN)
264# define LOG_STDERR \
265 reinterpret_cast<intptr_t>(GetStdHandle(STD_ERROR_HANDLE))
266# else
267# define LOG_STDERR 2
268# endif
// Each log line is tagged with pid, tid, and the approximate PHC clock.
269# define LOG(fmt, ...) \
270 FdPrintf(LOG_STDERR, "PHC[%zu,%zu,~%zu] " fmt, GetPid(), GetTid(), \
271 size_t(PHC::Now()), ##__VA_ARGS__)
272
273#else
274
// Logging disabled: LOG() compiles away to nothing.
275# define LOG(fmt, ...)
276
277#endif // PHC_LOGGING
278
279//---------------------------------------------------------------------------
280// Global state
281//---------------------------------------------------------------------------
282
283// Throughout this entire file time is measured as the number of sub-page
284// allocations performed (by PHC and mozjemalloc combined). `Time` is 64-bit
285// because we could have more than 2**32 allocations in a long-running session.
286// `Delay` is 32-bit because the delays used within PHC are always much smaller
287// than 2**32. Delay must be unsigned so that IsPowerOfTwo() can work on some
288// Delay values.
289using Time = uint64_t; // A moment in time.
290using Delay = uint32_t; // A time duration.
// Half the uint32_t range. NOTE(review): the analyzer report has fused the
// UINT32_MAX macro's expansion (4294967295U) into this line.
291static constexpr Delay DELAY_MAX = UINT32_MAX(4294967295U) / 2;
292
293// PHC only runs if the page size is 4 KiB; anything more is uncommon and would
294// use too much memory. So we hardwire this size for all platforms but macOS
295// on ARM processors. For the latter we make an exception because the minimum
296// page size supported is 16KiB so there's no way to go below that.
297static const size_t kPageSize =
298#if defined(XP_DARWIN) && defined(__aarch64__)
299 16384
300#else
301 4096
302#endif
303 ;
304
305// We align the PHC area to a multiple of the jemalloc and JS GC chunk size
306// (both use 1MB aligned chunks) so that their address computations don't lead
307// from non-PHC memory into PHC memory causing misleading PHC stacks to be
308// attached to a crash report.
309static const size_t kPhcAlign = 1024 * 1024;
310
311static_assert(IsPowerOfTwo(kPhcAlign));
312static_assert((kPhcAlign % kPageSize) == 0);
313
314// There are two kinds of page.
315// - Allocation pages, from which allocations are made.
316// - Guard pages, which are never touched by PHC.
317//
318// These page kinds are interleaved; each allocation page has a guard page on
319// either side.
320#ifdef EARLY_BETA_OR_EARLIER1
321static const size_t kNumAllocPages = kPageSize == 4096 ? 4096 : 1024;
322#else
323// This will use between 82KiB and 1.1MiB per process (depending on how many
324// objects are currently allocated). We will tune this in the future.
325static const size_t kNumAllocPages = kPageSize == 4096 ? 256 : 64;
326#endif
// One guard page per allocation page, plus one extra leading guard page.
327static const size_t kNumAllPages = kNumAllocPages * 2 + 1;
328
329// The total size of the allocation pages and guard pages.
330static const size_t kAllPagesSize = kNumAllPages * kPageSize;
331
332// jemalloc adds a guard page to the end of our allocation, see the comment in
333// AllocAllPages() for more information.
334static const size_t kAllPagesJemallocSize = kAllPagesSize - kPageSize;
335
336// The amount to decrement from the shared allocation delay each time a thread's
337// local allocation delay reaches zero.
338static const Delay kDelayDecrementAmount = 256;
339
340// When PHC is disabled on the current thread wait this many allocations before
341// accessing sAllocDelay once more.
342static const Delay kDelayBackoffAmount = 64;
343
344// When PHC is disabled globally reset the shared delay by this many allocations
345// to keep code running on the fast path.
346static const Delay kDelayResetWhenDisabled = 64 * 1024;
347
348// The default state for PHC. Either Enabled or OnlyFree.
// NOTE(review): the report fused the macro's expansion into this line; the
// underlying source defines DEFAULT_STATE as mozilla::phc::OnlyFree.
349#define DEFAULT_STATEmozilla::phc::OnlyFree mozilla::phc::OnlyFree
350
351// The maximum time.
352static const Time kMaxTime = ~(Time(0));
353
354// Truncate aRnd to the range (1 .. aAvgDelay*2). If aRnd is random, this
355// results in an average value of aAvgDelay + 0.5, which is close enough to
356// aAvgDelay. aAvgDelay must be a power-of-two for speed.
357constexpr Delay Rnd64ToDelay(Delay aAvgDelay, uint64_t aRnd) {
358 MOZ_ASSERT(IsPowerOfTwo(aAvgDelay), "must be a power of two")do { static_assert( mozilla::detail::AssertionConditionType<
decltype(IsPowerOfTwo(aAvgDelay))>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(IsPowerOfTwo(aAvgDelay)))), 0
))) { do { } while (false); MOZ_ReportAssertionFailure("IsPowerOfTwo(aAvgDelay)"
" (" "must be a power of two" ")", "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 358); AnnotateMozCrashReason("MOZ_ASSERT" "(" "IsPowerOfTwo(aAvgDelay)"
") (" "must be a power of two" ")"); do { *((volatile int*)__null
) = 358; __attribute__((nomerge)) ::abort(); } while (false);
} } while (false)
;
359
 // Masking with (2*aAvgDelay - 1) is valid because aAvgDelay is a power of
 // two (asserted above); the +1 puts the result in (1 .. aAvgDelay*2).
360 return (aRnd & (uint64_t(aAvgDelay) * 2 - 1)) + 1;
361}
362
363static Delay CheckProbability(int64_t aProb) {
 // Clamp a pref-supplied value to [2, 2^31] and round it up to a power of
 // two, as Rnd64ToDelay() requires power-of-two average delays.
364 // Limit delays calculated from prefs to 0x80000000, this is the largest
365 // power-of-two that fits in a Delay since it is a uint32_t.
366 // The minimum is 2 that way not every allocation goes straight to PHC.
367 return RoundUpPow2(
368 std::min(std::max(aProb, int64_t(2)), int64_t(0x80000000)));
369}
370
371// Maps a pointer to a PHC-specific structure:
372// - Nothing
373// - A guard page (it is unspecified which one)
374// - An allocation page (with an index < kNumAllocPages)
375//
376// The standard way of handling a PtrKind is to check IsNothing(), and if that
377// fails, to check IsGuardPage(), and if that fails, to call AllocPage().
378class PtrKind {
379 private:
380 enum class Tag : uint8_t {
381 Nothing,
382 GuardPage,
383 AllocPage,
384 };
385
386 Tag mTag;
387 uintptr_t mIndex; // Only used if mTag == Tag::AllocPage.
388
389 public:
390 // Detect what a pointer points to. This constructor must be fast because it
391 // is called for every call to free(), realloc(), malloc_usable_size(), and
392 // jemalloc_ptr_info().
393 PtrKind(const void* aPtr, const uint8_t* aPagesStart,
394 const uint8_t* aPagesLimit) {
395 if (!(aPagesStart <= aPtr && aPtr < aPagesLimit)) {
396 mTag = Tag::Nothing;
397 } else {
 // Within the PHC region: classify by page index parity (pages alternate
 // guard / alloc, starting with a guard page at index 0).
398 uintptr_t offset = static_cast<const uint8_t*>(aPtr) - aPagesStart;
399 uintptr_t allPageIndex = offset / kPageSize;
400 MOZ_ASSERT(allPageIndex < kNumAllPages)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(allPageIndex < kNumAllPages)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(allPageIndex < kNumAllPages
))), 0))) { do { } while (false); MOZ_ReportAssertionFailure(
"allPageIndex < kNumAllPages", "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 400); AnnotateMozCrashReason("MOZ_ASSERT" "(" "allPageIndex < kNumAllPages"
")"); do { *((volatile int*)__null) = 400; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
401 if (allPageIndex & 1) {
402 // Odd-indexed pages are allocation pages.
403 uintptr_t allocPageIndex = allPageIndex / 2;
404 MOZ_ASSERT(allocPageIndex < kNumAllocPages)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(allocPageIndex < kNumAllocPages)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(allocPageIndex < kNumAllocPages
))), 0))) { do { } while (false); MOZ_ReportAssertionFailure(
"allocPageIndex < kNumAllocPages", "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 404); AnnotateMozCrashReason("MOZ_ASSERT" "(" "allocPageIndex < kNumAllocPages"
")"); do { *((volatile int*)__null) = 404; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
405 mTag = Tag::AllocPage;
406 mIndex = allocPageIndex;
407 } else {
408 // Even-numbered pages are guard pages.
409 mTag = Tag::GuardPage;
410 }
411 }
412 }
413
 // Cheap tag queries; check these before calling AllocPageIndex().
414 bool IsNothing() const { return mTag == Tag::Nothing; }
415 bool IsGuardPage() const { return mTag == Tag::GuardPage; }
416
417 // This should only be called after IsNothing() and IsGuardPage() have been
418 // checked and failed.
419 uintptr_t AllocPageIndex() const {
420 MOZ_RELEASE_ASSERT(mTag == Tag::AllocPage)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(mTag == Tag::AllocPage)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(mTag == Tag::AllocPage))), 0
))) { do { } while (false); MOZ_ReportAssertionFailure("mTag == Tag::AllocPage"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 420); AnnotateMozCrashReason("MOZ_RELEASE_ASSERT" "(" "mTag == Tag::AllocPage"
")"); do { *((volatile int*)__null) = 420; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
421 return mIndex;
422 }
423};
424
425// On MacOS, the first __thread/thread_local access calls malloc, which leads
426// to an infinite loop. So we use pthread-based TLS instead, which somehow
427// doesn't have this problem.
428#if !defined(XP_DARWIN)
// Non-Darwin: use the native __thread-based MOZ_THREAD_LOCAL storage. (The
// analyzer report has fused the macro expansions into the lines below.)
429# define PHC_THREAD_LOCAL(T)__thread ::mozilla::detail::ThreadLocal< T, ::mozilla::detail
::ThreadLocalNativeStorage>
 MOZ_THREAD_LOCAL(T)__thread ::mozilla::detail::ThreadLocal< T, ::mozilla::detail
::ThreadLocalNativeStorage>
430#else
// Darwin: pthread-key-based TLS, per the rationale in the comment above.
431# define PHC_THREAD_LOCAL(T)__thread ::mozilla::detail::ThreadLocal< T, ::mozilla::detail
::ThreadLocalNativeStorage>
 \
432 detail::ThreadLocal<T, detail::ThreadLocalKeyStorage>
433#endif
434
435// The virtual address space reserved by PHC. It is shared, immutable global
436// state. Initialized by phc_init() and never changed after that. phc_init()
437// runs early enough that no synchronization is needed.
438class PHCRegion {
439 private:
440 // The bounds of the allocated pages.
441 uint8_t* const mPagesStart;
442 uint8_t* const mPagesLimit;
443
444 // Allocates the allocation pages and the guard pages, contiguously.
445 uint8_t* AllocAllPages() {
446 // The memory allocated here is never freed, because it would happen at
447 // process termination when it would be of little use.
448
449 // We can rely on jemalloc's behaviour that when it allocates memory aligned
450 // with its own chunk size it will over-allocate and guarantee that the
451 // memory after the end of our allocation, but before the next chunk, is
452 // decommitted and inaccessible. Elsewhere in PHC we assume that we own
453 // that page (so that memory errors in it get caught by PHC) but here we
454 // use kAllPagesJemallocSize which subtracts jemalloc's guard page.
455 void* pages = MozJemalloc::memalign(kPhcAlign, kAllPagesJemallocSize);
456 if (!pages) {
457 MOZ_CRASH()do { do { } while (false); MOZ_ReportCrash("" , "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 457); AnnotateMozCrashReason("MOZ_CRASH(" ")"); do { *((volatile
int*)__null) = 457; __attribute__((nomerge)) ::abort(); } while
(false); } while (false)
;
458 }
459
460 // Make the pages inaccessible.
461#ifdef XP_WIN
462 if (!VirtualFree(pages, kAllPagesJemallocSize, MEM_DECOMMIT)) {
463 MOZ_CRASH("VirtualFree failed")do { do { } while (false); MOZ_ReportCrash("" "VirtualFree failed"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 463); AnnotateMozCrashReason("MOZ_CRASH(" "VirtualFree failed"
")"); do { *((volatile int*)__null) = 463; __attribute__((nomerge
)) ::abort(); } while (false); } while (false)
;
464 }
465#else
 // PROT_NONE over the whole region; MAP_FIXED overwrites the jemalloc
 // mapping in place. (Expanded constants were fused in by the report.)
466 if (mmap_mmap(pages, kAllPagesJemallocSize, PROT_NONE0x0,
467 MAP_FIXED0x10 | MAP_PRIVATE0x02 | MAP_ANON0x20, -1, 0) == MAP_FAILED((void *) -1)) {
468 MOZ_CRASH("mmap failed")do { do { } while (false); MOZ_ReportCrash("" "mmap failed", "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 468); AnnotateMozCrashReason("MOZ_CRASH(" "mmap failed" ")"
); do { *((volatile int*)__null) = 468; __attribute__((nomerge
)) ::abort(); } while (false); } while (false)
;
469 }
470#endif
471
472 return static_cast<uint8_t*>(pages);
473 }
474
475 public:
476 PHCRegion();
477
 // Classify aPtr against this region's bounds (nothing / guard page /
 // allocation page).
478 class PtrKind PtrKind(const void* aPtr) {
479 class PtrKind pk(aPtr, mPagesStart, mPagesLimit);
480 return pk;
481 }
482
 // True iff aPtr lies in the region's leading guard page.
483 bool IsInFirstGuardPage(const void* aPtr) {
484 return mPagesStart <= aPtr && aPtr < mPagesStart + kPageSize;
485 }
486
487 // Get the address of the allocation page referred to via an index. Used when
488 // marking the page as accessible/inaccessible.
489 uint8_t* AllocPagePtr(uintptr_t aIndex) {
490 MOZ_ASSERT(aIndex < kNumAllocPages)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(aIndex < kNumAllocPages)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(aIndex < kNumAllocPages))
), 0))) { do { } while (false); MOZ_ReportAssertionFailure("aIndex < kNumAllocPages"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 490); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aIndex < kNumAllocPages"
")"); do { *((volatile int*)__null) = 490; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
491 // Multiply by two and add one to account for allocation pages *and* guard
492 // pages.
493 return mPagesStart + (2 * aIndex + 1) * kPageSize;
494 }
495};
496
497// This type is used as a proof-of-lock token, to make it clear which functions
498// require mMutex to be locked.
// Taking a PHCLock parameter documents (but cannot enforce) that the caller
// already holds the lock; it is just a const reference to the RAII guard.
499using PHCLock = const MutexAutoLock&;
500
501// Shared, mutable global state. Many fields are protected by mMutex; functions
502// that access those fields should take a PHCLock as proof that mMutex is held.
503// Other fields are TLS or Atomic and don't need the lock.
504class PHC {
Excessive padding in 'class PHC' (80 padding bytes, where 16 is optimal). Optimal fields order: mAvgFirstAllocDelay, mPhcState, mNow, mRNG, mFreePageListHead, mFreePageListTail, mMutex, mAllocPages, mAvgAllocDelay, mAvgPageReuseDelay, consider reordering the fields or adding explicit padding members
505 enum class AllocPageState {
506 NeverAllocated = 0,
507 InUse = 1,
508 Freed = 2,
509 };
510
  // Metadata for each allocation page.
  class AllocPageInfo {
   public:
    AllocPageInfo()
        : mState(AllocPageState::NeverAllocated),
          mBaseAddr(nullptr),
          mReuseTime(0) {}

    // The current allocation page state.
    AllocPageState mState;

    // The arena that the allocation is nominally from. This isn't meaningful
    // within PHC, which has no arenas. But it is necessary for reallocation of
    // page allocations as normal allocations, such as in this code:
    //
    //   p = moz_arena_malloc(arenaId, 4096);
    //   realloc(p, 8192);
    //
    // The realloc is more than one page, and thus too large for PHC to handle.
    // Therefore, if PHC handles the first allocation, it must ask mozjemalloc
    // to allocate the 8192 bytes in the correct arena, and to do that, it must
    // call MozJemalloc::moz_arena_malloc with the correct arenaId under the
    // covers. Therefore it must record that arenaId.
    //
    // This field is also needed for jemalloc_ptr_info() to work, because it
    // also returns the arena ID (but only in debug builds).
    //
    // - NeverAllocated: must be 0.
    // - InUse | Freed: can be any valid arena ID value.
    Maybe<arena_id_t> mArenaId;

    // The starting address of the allocation. Will not be the same as the page
    // address unless the allocation is a full page.
    // - NeverAllocated: must be 0.
    // - InUse | Freed: must be within the allocation page.
    uint8_t* mBaseAddr;

    // Usable size is computed as the number of bytes between the pointer and
    // the end of the allocation page. This might be bigger than the requested
    // size, especially if an outsized alignment is requested.
    size_t UsableSize() const {
      return mState == AllocPageState::NeverAllocated
                 ? 0
                 : kPageSize - (reinterpret_cast<uintptr_t>(mBaseAddr) &
                                (kPageSize - 1));
    }

    // The internal fragmentation for this allocation (zero unless InUse).
    size_t FragmentationBytes() const {
      MOZ_ASSERT(kPageSize >= UsableSize());
      return mState == AllocPageState::InUse ? kPageSize - UsableSize() : 0;
    }

    // The allocation stack.
    // - NeverAllocated: Nothing.
    // - InUse | Freed: Some.
    Maybe<StackTrace> mAllocStack;

    // The free stack.
    // - NeverAllocated | InUse: Nothing.
    // - Freed: Some.
    Maybe<StackTrace> mFreeStack;

    // The time at which the page is available for reuse, as measured against
    // mNow. When the page is in use this value will be kMaxTime.
    // - NeverAllocated: must be 0.
    // - InUse: must be kMaxTime.
    // - Freed: must be > 0 and < kMaxTime.
    Time mReuseTime;

    // The next index for a free list of pages.
    Maybe<uintptr_t> mNextPage;
  };
584
 public:
  // The RNG seeds here are poor, but non-reentrant since this can be called
  // from malloc(). SetState() will reset the RNG later.
  PHC() : mRNG(RandomSeed<1>(), RandomSeed<2>()) {
    mMutex.Init();
    // The three TLS cells must be usable before any allocation is serviced;
    // failure to initialise them is unrecoverable.
    if (!tlsIsDisabled.init()) {
      MOZ_CRASH();
    }
    if (!tlsAllocDelay.init()) {
      MOZ_CRASH();
    }
    if (!tlsLastDelay.init()) {
      MOZ_CRASH();
    }

    // This constructor is part of PHC's very early initialisation,
    // see phc_init(), and if PHC is default-on it'll start marking allocations
    // and we must setup the delay. However once XPCOM starts it'll call
    // SetState() which will re-initialise the RNG and allocation delay.
    MutexAutoLock lock(mMutex);

    ForceSetNewAllocDelay(Rnd64ToDelay(mAvgFirstAllocDelay, Random64(lock)));

    // Initially every allocation page is on the free list.
    for (uintptr_t i = 0; i < kNumAllocPages; i++) {
      AppendPageToFreeList(lock, i);
    }
  }
612
613 uint64_t Random64(PHCLock) { return mRNG.next(); }
614
615 bool IsPageInUse(PHCLock, uintptr_t aIndex) {
616 return mAllocPages[aIndex].mState == AllocPageState::InUse;
617 }
618
619 // Is the page free? And if so, has enough time passed that we can use it?
620 bool IsPageAllocatable(PHCLock, uintptr_t aIndex, Time aNow) {
621 const AllocPageInfo& page = mAllocPages[aIndex];
622 return page.mState != AllocPageState::InUse && aNow >= page.mReuseTime;
623 }
624
625 // Get the address of the allocation page referred to via an index. Used
626 // when checking pointers against page boundaries.
627 uint8_t* AllocPageBaseAddr(PHCLock, uintptr_t aIndex) {
628 return mAllocPages[aIndex].mBaseAddr;
629 }
630
631 Maybe<arena_id_t> PageArena(PHCLock aLock, uintptr_t aIndex) {
632 const AllocPageInfo& page = mAllocPages[aIndex];
633 AssertAllocPageInUse(aLock, page);
634
635 return page.mArenaId;
636 }
637
638 size_t PageUsableSize(PHCLock aLock, uintptr_t aIndex) {
639 const AllocPageInfo& page = mAllocPages[aIndex];
640 AssertAllocPageInUse(aLock, page);
641
642 return page.UsableSize();
643 }
644
645 // The total fragmentation in PHC
646 size_t FragmentationBytes() const {
647 size_t sum = 0;
648 for (const auto& page : mAllocPages) {
649 sum += page.FragmentationBytes();
650 }
651 return sum;
652 }
653
  // Transition the page at aIndex to InUse and record the allocation's
  // metadata. The page must not currently be in use, and the caller must
  // already have removed it from the free page list.
  void SetPageInUse(PHCLock aLock, uintptr_t aIndex,
                    const Maybe<arena_id_t>& aArenaId, uint8_t* aBaseAddr,
                    const StackTrace& aAllocStack) {
    AllocPageInfo& page = mAllocPages[aIndex];
    AssertAllocPageNotInUse(aLock, page);

    page.mState = AllocPageState::InUse;
    page.mArenaId = aArenaId;
    page.mBaseAddr = aBaseAddr;
    page.mAllocStack = Some(aAllocStack);
    page.mFreeStack = Nothing();
    // kMaxTime marks the page as unavailable for reuse while it is live.
    page.mReuseTime = kMaxTime;
    MOZ_ASSERT(!page.mNextPage);
  }
668
#if PHC_LOGGING
  // The time at which the page at aIndex was last freed (logging builds only).
  Time GetFreeTime(uintptr_t aIndex) const { return mFreeTime[aIndex]; }
#endif
672
673 void ResizePageInUse(PHCLock aLock, uintptr_t aIndex,
674 const Maybe<arena_id_t>& aArenaId, uint8_t* aNewBaseAddr,
675 const StackTrace& aAllocStack) {
676 AllocPageInfo& page = mAllocPages[aIndex];
677 AssertAllocPageInUse(aLock, page);
678
679 // page.mState is not changed.
680 if (aArenaId.isSome()) {
681 // Crash if the arenas don't match.
682 MOZ_RELEASE_ASSERT(page.mArenaId == aArenaId)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(page.mArenaId == aArenaId)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(page.mArenaId == aArenaId)))
, 0))) { do { } while (false); MOZ_ReportAssertionFailure("page.mArenaId == aArenaId"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 682); AnnotateMozCrashReason("MOZ_RELEASE_ASSERT" "(" "page.mArenaId == aArenaId"
")"); do { *((volatile int*)__null) = 682; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
683 }
684 page.mBaseAddr = aNewBaseAddr;
685 // We could just keep the original alloc stack, but the realloc stack is
686 // more recent and therefore seems more useful.
687 page.mAllocStack = Some(aAllocStack);
688 // page.mFreeStack is not changed.
689 // page.mReuseTime is not changed.
690 // page.mNextPage is not changed.
691 };
692
  // Transition an in-use page to Freed: record the free stack and the time at
  // which the page may be reused, then append it to the free page list.
  void SetPageFreed(PHCLock aLock, uintptr_t aIndex,
                    const Maybe<arena_id_t>& aArenaId,
                    const StackTrace& aFreeStack, Delay aReuseDelay) {
    AllocPageInfo& page = mAllocPages[aIndex];
    AssertAllocPageInUse(aLock, page);

    page.mState = AllocPageState::Freed;

    // page.mArenaId is left unchanged, for jemalloc_ptr_info() calls that
    // occur after freeing (e.g. in the PtrInfo test in TestJemalloc.cpp).
    if (aArenaId.isSome()) {
      // Crash if the arenas don't match.
      MOZ_RELEASE_ASSERT(page.mArenaId == aArenaId);
    }

    // The usable size is left unchanged, for reporting on UAF, and for
    // jemalloc_ptr_info() calls that occur after freeing (e.g. in the PtrInfo
    // test in TestJemalloc.cpp).

    // page.mAllocStack is left unchanged, for reporting on UAF.

    page.mFreeStack = Some(aFreeStack);
    Time now = Now();
#if PHC_LOGGING
    mFreeTime[aIndex] = now;
#endif
    // The page becomes allocatable again once mNow reaches this time.
    page.mReuseTime = now + aReuseDelay;

    MOZ_ASSERT(!page.mNextPage);
    AppendPageToFreeList(aLock, aIndex);
  }
724
  // Deliberately crash by touching aPtr (a guard page) so that the crash is
  // reported through the usual PHC crash machinery as a bounds violation.
  static void CrashOnGuardPage(void* aPtr) {
    // An operation on a guard page? This is a bounds violation. Deliberately
    // touch the page in question to cause a crash that triggers the usual PHC
    // machinery.
    LOG("CrashOnGuardPage(%p), bounds violation\n", aPtr);
    *static_cast<uint8_t*>(aPtr) = 0;
    MOZ_CRASH("unreachable");
  }
733
  // Check that aPtr is the start of the allocation at aIndex. If the page has
  // been freed, deliberately touch it to crash through the usual PHC
  // machinery — after first unlocking mMutex (see comment below).
  void EnsureValidAndInUse(PHCLock, void* aPtr, uintptr_t aIndex)
      MOZ_REQUIRES(mMutex) {
    const AllocPageInfo& page = mAllocPages[aIndex];

    // The pointer must point to the start of the allocation.
    MOZ_RELEASE_ASSERT(page.mBaseAddr == aPtr);

    if (page.mState == AllocPageState::Freed) {
      LOG("EnsureValidAndInUse(%p), use-after-free\n", aPtr);
      // An operation on a freed page? This is a particular kind of
      // use-after-free. Deliberately touch the page in question, in order to
      // cause a crash that triggers the usual PHC machinery. But unlock mMutex
      // first, because that self-same PHC machinery needs to re-lock it, and
      // the crash causes non-local control flow so mMutex won't be unlocked
      // the normal way in the caller.
      mMutex.Unlock();
      *static_cast<uint8_t*>(aPtr) = 0;
      MOZ_CRASH("unreachable");
    }
  }
754
  // Fill aOut with the kind, bounds and stacks for the page at aIndex (or
  // mark it as a guard page). This expects mMutex to be locked but can't
  // check it with a parameter since we try-lock it.
  void FillAddrInfo(uintptr_t aIndex, const void* aBaseAddr, bool isGuardPage,
                    phc::AddrInfo& aOut) {
    const AllocPageInfo& page = mAllocPages[aIndex];
    if (isGuardPage) {
      aOut.mKind = phc::AddrInfo::Kind::GuardPage;
    } else {
      switch (page.mState) {
        case AllocPageState::NeverAllocated:
          aOut.mKind = phc::AddrInfo::Kind::NeverAllocatedPage;
          break;

        case AllocPageState::InUse:
          aOut.mKind = phc::AddrInfo::Kind::InUsePage;
          break;

        case AllocPageState::Freed:
          aOut.mKind = phc::AddrInfo::Kind::FreedPage;
          break;

        default:
          MOZ_CRASH();
      }
    }
    // Base address, usable size and stacks are reported even for guard pages;
    // for NeverAllocated pages they are the field defaults.
    aOut.mBaseAddr = page.mBaseAddr;
    aOut.mUsableSize = page.UsableSize();
    aOut.mAllocStack = page.mAllocStack;
    aOut.mFreeStack = page.mFreeStack;
  }
785
  // Fill *aInfo for jemalloc_ptr_info(): TagLiveAlloc or TagFreedAlloc when
  // aPtr lies within the (former) allocation's usable bounds, otherwise
  // TagUnknown.
  void FillJemallocPtrInfo(PHCLock, const void* aPtr, uintptr_t aIndex,
                           jemalloc_ptr_info_t* aInfo) {
    const AllocPageInfo& page = mAllocPages[aIndex];
    switch (page.mState) {
      case AllocPageState::NeverAllocated:
        break;

      case AllocPageState::InUse: {
        // Only return TagLiveAlloc if the pointer is within the bounds of the
        // allocation's usable size.
        uint8_t* base = page.mBaseAddr;
        uint8_t* limit = base + page.UsableSize();
        if (base <= aPtr && aPtr < limit) {
          *aInfo = {TagLiveAlloc, page.mBaseAddr, page.UsableSize(),
                    page.mArenaId.valueOr(0)};
          return;
        }
        break;
      }

      case AllocPageState::Freed: {
        // Only return TagFreedAlloc if the pointer is within the bounds of the
        // former allocation's usable size.
        uint8_t* base = page.mBaseAddr;
        uint8_t* limit = base + page.UsableSize();
        if (base <= aPtr && aPtr < limit) {
          *aInfo = {TagFreedAlloc, page.mBaseAddr, page.UsableSize(),
                    page.mArenaId.valueOr(0)};
          return;
        }
        break;
      }

      default:
        MOZ_CRASH();
    }

    // Pointers into guard pages will end up here, as will pointers into
    // allocation pages that aren't within the allocation's bounds.
    *aInfo = {TagUnknown, nullptr, 0, 0};
  }
827
#ifndef XP_WIN
  // fork() hooks: hold mMutex across the fork so the child cannot inherit it
  // mid-operation. The child re-initialises the mutex rather than unlocking
  // it.
  static void prefork() MOZ_NO_THREAD_SAFETY_ANALYSIS {
    PHC::sPHC->mMutex.Lock();
  }
  static void postfork_parent() MOZ_NO_THREAD_SAFETY_ANALYSIS {
    PHC::sPHC->mMutex.Unlock();
  }
  static void postfork_child() { PHC::sPHC->mMutex.Init(); }
#endif
837
#if PHC_LOGGING
  // Page-allocation hit/miss counters (logging builds only).
  void IncPageAllocHits(PHCLock) { mPageAllocHits++; }
  void IncPageAllocMisses(PHCLock) { mPageAllocMisses++; }
#else
  // No-ops when logging is compiled out.
  void IncPageAllocHits(PHCLock) {}
  void IncPageAllocMisses(PHCLock) {}
#endif
845
846 phc::PHCStats GetPageStats(PHCLock) {
847 phc::PHCStats stats;
848
849 for (const auto& page : mAllocPages) {
850 stats.mSlotsAllocated += page.mState == AllocPageState::InUse ? 1 : 0;
851 stats.mSlotsFreed += page.mState == AllocPageState::Freed ? 1 : 0;
852 }
853 stats.mSlotsUnused =
854 kNumAllocPages - stats.mSlotsAllocated - stats.mSlotsFreed;
855
856 return stats;
857 }
858
#if PHC_LOGGING
  // Raw hit count and total attempt count (logging builds only).
  size_t PageAllocHits(PHCLock) { return mPageAllocHits; }
  size_t PageAllocAttempts(PHCLock) {
    return mPageAllocHits + mPageAllocMisses;
  }

  // This is an integer because FdPrintf only supports integer printing.
  size_t PageAllocHitRate(PHCLock) {
    return mPageAllocHits * 100 / (mPageAllocHits + mPageAllocMisses);
  }
#endif
870
  // Should we make new PHC allocations? Only while the state is Enabled.
  bool ShouldMakeNewAllocations() const {
    return mPhcState == mozilla::phc::Enabled;
  }
875
  using PHCState = mozilla::phc::PHCState;

  // Switch PHC's state. On the transition into Enabled the RNG is re-seeded
  // (with better entropy than the constructor had available) and a fresh
  // first-allocation delay is installed.
  void SetState(PHCState aState) {
    if (mPhcState != PHCState::Enabled && aState == PHCState::Enabled) {
      MutexAutoLock lock(mMutex);
      // Reset the RNG at this point with a better seed.
      ResetRNG(lock);

      ForceSetNewAllocDelay(Rnd64ToDelay(mAvgFirstAllocDelay, Random64(lock)));
    }

    mPhcState = aState;
  }
888
  // Re-seed the RNG; the MutexAutoLock parameter is the caller's proof that
  // mMutex is held.
  void ResetRNG(MutexAutoLock&) {
    mRNG = non_crypto::XorShift128PlusRNG(RandomSeed<0>(), RandomSeed<1>());
  }
892
893 void SetProbabilities(int64_t aAvgDelayFirst, int64_t aAvgDelayNormal,
894 int64_t aAvgDelayPageReuse) {
895 MutexAutoLock lock(mMutex);
896
897 mAvgFirstAllocDelay = CheckProbability(aAvgDelayFirst);
898 mAvgAllocDelay = CheckProbability(aAvgDelayNormal);
899 mAvgPageReuseDelay = CheckProbability(aAvgDelayPageReuse);
900 }
901
  // Mark PHC as disabled on the current thread. The thread must not already
  // be disabled.
  static void DisableOnCurrentThread() {
    MOZ_ASSERT(!tlsIsDisabled.get());
    tlsIsDisabled.set(true);
  }
906
  // Re-enable PHC on the current thread. The thread must currently be
  // disabled.
  void EnableOnCurrentThread() {
    MOZ_ASSERT(tlsIsDisabled.get());
    tlsIsDisabled.set(false);
  }
911
  // True if PHC has been disabled on the current thread.
  static bool IsDisabledOnCurrentThread() { return tlsIsDisabled.get(); }
913
914 static Time Now() {
915 if (!sPHC) {
916 return 0;
917 }
918
919 return sPHC->mNow;
920 }
921
  // Advance mNow by the part of this thread's last-recorded delay that has
  // elapsed (tlsLastDelay minus the delay still outstanding), then record the
  // new outstanding delay.
  void AdvanceNow(uint32_t delay = 0) {
    mNow += tlsLastDelay.get() - delay;
    tlsLastDelay.set(delay);
  }
926
  // Decrements the delay and returns true if it's time to make a new PHC
  // allocation. The hot path only touches the thread-local counter; the
  // shared atomic counter is consulted only when the local one hits zero.
  static bool DecrementDelay() {
    const Delay alloc_delay = tlsAllocDelay.get();

    if (MOZ_LIKELY(alloc_delay > 0)) {
      tlsAllocDelay.set(alloc_delay - 1);
      return false;
    }
    // The local delay has expired, check the shared delay. This path is also
    // executed on a new thread's first allocation, the result is the same: all
    // the thread's TLS fields will be initialised.

    // This accesses sPHC but we want to ensure it's still a static member
    // function so that sPHC isn't dereferenced until after the hot path above.
    MOZ_ASSERT(sPHC);
    sPHC->AdvanceNow();

    // Use an atomic fetch-and-subtract. This uses unsigned underflow semantics
    // to avoid doing a full compare-and-swap.
    Delay new_delay = (sAllocDelay -= kDelayDecrementAmount);
    Delay old_delay = new_delay + kDelayDecrementAmount;
    if (MOZ_LIKELY(new_delay < DELAY_MAX)) {
      // Normal case, we decremented the shared delay but it's not yet
      // underflowed.
      tlsAllocDelay.set(kDelayDecrementAmount);
      tlsLastDelay.set(kDelayDecrementAmount);
      LOG("Update sAllocDelay <- %zu, tlsAllocDelay <- %zu\n",
          size_t(new_delay), size_t(kDelayDecrementAmount));
      return false;
    }

    if (old_delay < new_delay) {
      // The shared delay only just underflowed, so unless we hit exactly zero
      // we should set our local counter and continue.
      LOG("Update sAllocDelay <- %zu, tlsAllocDelay <- %zu\n",
          size_t(new_delay), size_t(old_delay));
      if (old_delay == 0) {
        // We don't need to set tlsAllocDelay because it's already zero, we know
        // because the condition at the beginning of this function failed.
        return true;
      }
      tlsAllocDelay.set(old_delay);
      tlsLastDelay.set(old_delay);
      return false;
    }

    // The delay underflowed on another thread or a previous failed allocation
    // by this thread. Return true and attempt the next allocation, if the
    // other thread wins we'll check for that before committing.
    LOG("Update sAllocDelay <- %zu, tlsAllocDelay <- %zu\n", size_t(new_delay),
        size_t(alloc_delay));
    return true;
  }
981
982 static void ResetLocalAllocDelay(Delay aDelay = 0) {
983 // We could take some delay from the shared delay but we'd need a
984 // compare-and-swap because this is called on paths that don't make
985 // allocations. Or we can set the local delay to zero and let it get
986 // initialised on the next allocation.
987 tlsAllocDelay.set(aDelay);
988 tlsLastDelay.set(aDelay);
989 }
990
  // Unconditionally install a new shared allocation delay and clear this
  // thread's local counters.
  static void ForceSetNewAllocDelay(Delay aNewAllocDelay) {
    LOG("Setting sAllocDelay <- %zu\n", size_t(aNewAllocDelay));
    sAllocDelay = aNewAllocDelay;
    ResetLocalAllocDelay();
  }
996
  // Set a new allocation delay and return true if the delay was less than zero
  // (but it's unsigned so interpret it as signed) indicating that we won the
  // race to make the next allocation.
  static bool SetNewAllocDelay(Delay aNewAllocDelay) {
    bool cas_retry;
    do {
      // We read the current delay on every iteration, we consider that the PHC
      // allocation is still "up for grabs" if sAllocDelay < 0. This is safe
      // even while other threads continuing to fetch-and-subtract sAllocDelay
      // in DecrementDelay(), up to DELAY_MAX (2^31) calls to DecrementDelay().
      Delay read_delay = sAllocDelay;
      if (read_delay < DELAY_MAX) {
        // Another thread already set a valid delay.
        LOG("Observe delay %zu this thread lost the race\n",
            size_t(read_delay));
        ResetLocalAllocDelay();
        return false;
      } else {
        LOG("Preparing for CAS, read sAllocDelay %zu\n", size_t(read_delay));
      }

      cas_retry = !sAllocDelay.compareExchange(read_delay, aNewAllocDelay);
      if (cas_retry) {
        LOG("Lost the CAS, sAllocDelay is now %zu\n", size_t(sAllocDelay));
        cpu_pause();
        // We raced against another thread and lost.
      }
    } while (cas_retry);
    LOG("Won the CAS, set sAllocDelay = %zu\n", size_t(sAllocDelay));
    ResetLocalAllocDelay();
    return true;
  }
1029
  // Accessors for the thread-local and shared delay counters.
  static Delay LocalAllocDelay() { return tlsAllocDelay.get(); }
  static Delay SharedAllocDelay() { return sAllocDelay; }

  // The delay this thread most recently took from the shared counter.
  static Delay LastDelay() { return tlsLastDelay.get(); }
1034
  // Pop the head of the free page list if that page may be reused now.
  // Returns Nothing() if the list is empty or the head page's reuse time
  // hasn't yet arrived.
  Maybe<uintptr_t> PopNextFreeIfAllocatable(const MutexAutoLock& lock,
                                            Time now) {
    if (!mFreePageListHead) {
      return Nothing();
    }

    uintptr_t index = mFreePageListHead.value();

    MOZ_RELEASE_ASSERT(index < kNumAllocPages);
    AllocPageInfo& page = mAllocPages[index];
    AssertAllocPageNotInUse(lock, page);

    if (!IsPageAllocatable(lock, index, now)) {
      return Nothing();
    }

    // Unlink the page; keep the tail pointer consistent when the list
    // becomes empty.
    mFreePageListHead = page.mNextPage;
    page.mNextPage = Nothing();
    if (!mFreePageListHead) {
      mFreePageListTail = Nothing();
    }

    return Some(index);
  }
1059
  // Push index back onto the head of the free page list: the undo of
  // PopNextFreeIfAllocatable() for an allocation that didn't go ahead.
  void UnpopNextFree(const MutexAutoLock& lock, uintptr_t index) {
    AllocPageInfo& page = mAllocPages[index];
    MOZ_ASSERT(!page.mNextPage);

    page.mNextPage = mFreePageListHead;
    mFreePageListHead = Some(index);
    if (!mFreePageListTail) {
      mFreePageListTail = Some(index);
    }
  }
1070
  // Append aIndex to the tail of the free page list. The page must not
  // already be linked into the list.
  void AppendPageToFreeList(const MutexAutoLock& lock, uintptr_t aIndex) {
    MOZ_RELEASE_ASSERT(aIndex < kNumAllocPages);
    AllocPageInfo& page = mAllocPages[aIndex];
    MOZ_ASSERT(!page.mNextPage);
    MOZ_ASSERT(mFreePageListHead != Some(aIndex) &&
               mFreePageListTail != Some(aIndex));

    if (!mFreePageListTail) {
      // The list is empty this page will become the beginning and end.
      MOZ_ASSERT(!mFreePageListHead);
      mFreePageListHead = Some(aIndex);
    } else {
      MOZ_ASSERT(mFreePageListTail.value() < kNumAllocPages);
      AllocPageInfo& tail_page = mAllocPages[mFreePageListTail.value()];
      MOZ_ASSERT(!tail_page.mNextPage);
      tail_page.mNextPage = Some(aIndex);
    }
    page.mNextPage = Nothing();
    mFreePageListTail = Some(aIndex);
  }
1091
 private:
  // A 64-bit seed derived from a mediocre-but-allocation-free entropy source
  // selected by N (0: time(), 1: a stack address, 2: a global's address).
  template <int N>
  uint64_t RandomSeed() {
    // An older version of this code used RandomUint64() here, but on Mac that
    // function uses arc4random(), which can allocate, which would cause
    // re-entry, which would be bad. So we just use time(), a local variable
    // address and a global variable address. These are mediocre sources of
    // entropy, but good enough for PHC.
    static_assert(N == 0 || N == 1 || N == 2, "must be 0, 1 or 2");
    uint64_t seed;
    if (N == 0) {
      time_t t = time(nullptr);
      seed = t ^ (t << 32);
    } else if (N == 1) {
      seed = uintptr_t(&seed) ^ (uintptr_t(&seed) << 32);
    } else {
      seed = uintptr_t(&sRegion) ^ (uintptr_t(&sRegion) << 32);
    }
    return seed;
  }
1112
1113 void AssertAllocPageInUse(PHCLock, const AllocPageInfo& aPage) {
1114 MOZ_ASSERT(aPage.mState == AllocPageState::InUse)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(aPage.mState == AllocPageState::InUse)>::isValid,
"invalid assertion condition"); if ((__builtin_expect(!!(!(!
!(aPage.mState == AllocPageState::InUse))), 0))) { do { } while
(false); MOZ_ReportAssertionFailure("aPage.mState == AllocPageState::InUse"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1114); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPage.mState == AllocPageState::InUse"
")"); do { *((volatile int*)__null) = 1114; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1115 // There is nothing to assert about aPage.mArenaId.
1116 MOZ_ASSERT(aPage.mBaseAddr)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(aPage.mBaseAddr)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(aPage.mBaseAddr))), 0))) { do
{ } while (false); MOZ_ReportAssertionFailure("aPage.mBaseAddr"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1116); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPage.mBaseAddr"
")"); do { *((volatile int*)__null) = 1116; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1117 MOZ_ASSERT(aPage.UsableSize() > 0)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(aPage.UsableSize() > 0)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(aPage.UsableSize() > 0)))
, 0))) { do { } while (false); MOZ_ReportAssertionFailure("aPage.UsableSize() > 0"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1117); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPage.UsableSize() > 0"
")"); do { *((volatile int*)__null) = 1117; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1118 MOZ_ASSERT(aPage.mAllocStack.isSome())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(aPage.mAllocStack.isSome())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(aPage.mAllocStack.isSome()))
), 0))) { do { } while (false); MOZ_ReportAssertionFailure("aPage.mAllocStack.isSome()"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1118); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPage.mAllocStack.isSome()"
")"); do { *((volatile int*)__null) = 1118; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1119 MOZ_ASSERT(aPage.mFreeStack.isNothing())do { static_assert( mozilla::detail::AssertionConditionType<
decltype(aPage.mFreeStack.isNothing())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(aPage.mFreeStack.isNothing()
))), 0))) { do { } while (false); MOZ_ReportAssertionFailure(
"aPage.mFreeStack.isNothing()", "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1119); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPage.mFreeStack.isNothing()"
")"); do { *((volatile int*)__null) = 1119; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1120 MOZ_ASSERT(aPage.mReuseTime == kMaxTime)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(aPage.mReuseTime == kMaxTime)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(aPage.mReuseTime == kMaxTime
))), 0))) { do { } while (false); MOZ_ReportAssertionFailure(
"aPage.mReuseTime == kMaxTime", "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1120); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPage.mReuseTime == kMaxTime"
")"); do { *((volatile int*)__null) = 1120; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1121 MOZ_ASSERT(!aPage.mNextPage)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(!aPage.mNextPage)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(!aPage.mNextPage))), 0))) { do
{ } while (false); MOZ_ReportAssertionFailure("!aPage.mNextPage"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1121); AnnotateMozCrashReason("MOZ_ASSERT" "(" "!aPage.mNextPage"
")"); do { *((volatile int*)__null) = 1121; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1122 }
1123
1124 void AssertAllocPageNotInUse(PHCLock, const AllocPageInfo& aPage) {
1125 // We can assert a lot about `NeverAllocated` pages, but not much about
1126 // `Freed` pages.
1127#ifdef DEBUG1
1128 bool isFresh = aPage.mState == AllocPageState::NeverAllocated;
1129 MOZ_ASSERT(isFresh || aPage.mState == AllocPageState::Freed)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(isFresh || aPage.mState == AllocPageState::Freed)>
::isValid, "invalid assertion condition"); if ((__builtin_expect
(!!(!(!!(isFresh || aPage.mState == AllocPageState::Freed))),
0))) { do { } while (false); MOZ_ReportAssertionFailure("isFresh || aPage.mState == AllocPageState::Freed"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1129); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isFresh || aPage.mState == AllocPageState::Freed"
")"); do { *((volatile int*)__null) = 1129; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1130 MOZ_ASSERT_IF(isFresh, aPage.mArenaId == Nothing())do { if (isFresh) { do { static_assert( mozilla::detail::AssertionConditionType
<decltype(aPage.mArenaId == Nothing())>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(aPage.mArenaId == Nothing())
)), 0))) { do { } while (false); MOZ_ReportAssertionFailure("aPage.mArenaId == Nothing()"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1130); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPage.mArenaId == Nothing()"
")"); do { *((volatile int*)__null) = 1130; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false); } } while (
false)
;
1131 MOZ_ASSERT(isFresh == (aPage.mBaseAddr == nullptr))do { static_assert( mozilla::detail::AssertionConditionType<
decltype(isFresh == (aPage.mBaseAddr == nullptr))>::isValid
, "invalid assertion condition"); if ((__builtin_expect(!!(!(
!!(isFresh == (aPage.mBaseAddr == nullptr)))), 0))) { do { } while
(false); MOZ_ReportAssertionFailure("isFresh == (aPage.mBaseAddr == nullptr)"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1131); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isFresh == (aPage.mBaseAddr == nullptr)"
")"); do { *((volatile int*)__null) = 1131; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1132 MOZ_ASSERT(isFresh == (aPage.mAllocStack.isNothing()))do { static_assert( mozilla::detail::AssertionConditionType<
decltype(isFresh == (aPage.mAllocStack.isNothing()))>::isValid
, "invalid assertion condition"); if ((__builtin_expect(!!(!(
!!(isFresh == (aPage.mAllocStack.isNothing())))), 0))) { do {
} while (false); MOZ_ReportAssertionFailure("isFresh == (aPage.mAllocStack.isNothing())"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1132); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isFresh == (aPage.mAllocStack.isNothing())"
")"); do { *((volatile int*)__null) = 1132; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1133 MOZ_ASSERT(isFresh == (aPage.mFreeStack.isNothing()))do { static_assert( mozilla::detail::AssertionConditionType<
decltype(isFresh == (aPage.mFreeStack.isNothing()))>::isValid
, "invalid assertion condition"); if ((__builtin_expect(!!(!(
!!(isFresh == (aPage.mFreeStack.isNothing())))), 0))) { do { }
while (false); MOZ_ReportAssertionFailure("isFresh == (aPage.mFreeStack.isNothing())"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1133); AnnotateMozCrashReason("MOZ_ASSERT" "(" "isFresh == (aPage.mFreeStack.isNothing())"
")"); do { *((volatile int*)__null) = 1133; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1134 MOZ_ASSERT(aPage.mReuseTime != kMaxTime)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(aPage.mReuseTime != kMaxTime)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(aPage.mReuseTime != kMaxTime
))), 0))) { do { } while (false); MOZ_ReportAssertionFailure(
"aPage.mReuseTime != kMaxTime", "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1134); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aPage.mReuseTime != kMaxTime"
")"); do { *((volatile int*)__null) = 1134; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1135#endif
1136 }
1137
1138 // To improve locality we try to order this file by how frequently different
1139 // fields are modified and place all the modified-together fields early and
1140 // ideally within a single cache line.
1141 public:
1142 // The mutex that protects the other members.
1143 alignas(kCacheLineSize) Mutex mMutex MOZ_UNANNOTATED;
1144
1145 private:
1146 // The current time. We use ReleaseAcquire semantics since we attempt to
1147 // update this by larger increments and don't want to lose an entire update.
1148 Atomic<Time, ReleaseAcquire> mNow;
1149
1150 // This will only ever be updated from one thread. The other threads should
1151 // eventually get the update.
1152 Atomic<PHCState, Relaxed> mPhcState =
1153 Atomic<PHCState, Relaxed>(DEFAULT_STATEmozilla::phc::OnlyFree);
1154
1155 // RNG for deciding which allocations to treat specially. It doesn't need to
1156 // be high quality.
1157 //
1158 // This is a raw pointer for the reason explained in the comment above
1159 // PHC's constructor. Don't change it to UniquePtr or anything like that.
1160 non_crypto::XorShift128PlusRNG mRNG;
1161
1162 // A linked list of free pages. Pages are allocated from the head of the list
1163 // and returned to the tail. The list will naturally order itself by "last
1164 // freed time" so if the head of the list can't satisfy an allocation due to
1165 // time then none of the pages can.
1166 Maybe<uintptr_t> mFreePageListHead;
1167 Maybe<uintptr_t> mFreePageListTail;
1168
1169#if PHC_LOGGING0
1170 // How many allocations that could have been page allocs actually were? As
1171 // constrained kNumAllocPages. If the hit ratio isn't close to 100% it's
1172 // likely that the global constants are poorly chosen.
1173 size_t mPageAllocHits = 0;
1174 size_t mPageAllocMisses = 0;
1175#endif
1176
1177 // The remaining fields are updated much less often, place them on the next
1178 // cache line.
1179
1180 // The average delay before doing any page allocations at the start of a
1181 // process. Note that roughly 1 million allocations occur in the main process
1182 // while starting the browser. The delay range is 1..gAvgFirstAllocDelay*2.
1183 alignas(kCacheLineSize) Delay mAvgFirstAllocDelay = 64 * 1024;
1184
1185 // The average delay until the next attempted page allocation, once we get
1186 // past the first delay. The delay range is 1..kAvgAllocDelay*2.
1187 Delay mAvgAllocDelay = 16 * 1024;
1188
1189 // The average delay before reusing a freed page. Should be significantly
1190 // larger than kAvgAllocDelay, otherwise there's not much point in having it.
1191 // The delay range is (kAvgAllocDelay / 2)..(kAvgAllocDelay / 2 * 3). This is
1192 // different to the other delay ranges in not having a minimum of 1, because
1193 // that's such a short delay that there is a high likelihood of bad stacks in
1194 // any crash report.
1195 Delay mAvgPageReuseDelay = 256 * 1024;
1196
1197 // When true, PHC does as little as possible.
1198 //
1199 // (a) It does not allocate any new page allocations.
1200 //
1201 // (b) It avoids doing any operations that might call malloc/free/etc., which
1202 // would cause re-entry into PHC. (In practice, MozStackWalk() is the
1203 // only such operation.) Note that calls to the functions in MozJemalloc
1204 // are ok.
1205 //
1206 // For example, replace_malloc() will just fall back to mozjemalloc. However,
1207 // operations involving existing allocations are more complex, because those
1208 // existing allocations may be page allocations. For example, if
1209 // replace_free() is passed a page allocation on a PHC-disabled thread, it
1210 // will free the page allocation in the usual way, but it will get a dummy
1211 // freeStack in order to avoid calling MozStackWalk(), as per (b) above.
1212 //
1213 // This single disabling mechanism has two distinct uses.
1214 //
1215 // - It's used to prevent re-entry into PHC, which can cause correctness
1216 // problems. For example, consider this sequence.
1217 //
1218 // 1. enter replace_free()
1219 // 2. which calls PageFree()
1220 // 3. which calls MozStackWalk()
1221 // 4. which locks a mutex M, and then calls malloc
1222 // 5. enter replace_malloc()
1223 // 6. which calls MaybePageAlloc()
1224 // 7. which calls MozStackWalk()
1225 // 8. which (re)locks a mutex M --> deadlock
1226 //
1227 // We avoid this sequence by "disabling" the thread in PageFree() (at step
1228 // 2), which causes MaybePageAlloc() to fail, avoiding the call to
1229 // MozStackWalk() (at step 7).
1230 //
1231 // In practice, realloc or free of a PHC allocation is unlikely on a thread
1232 // that is disabled because of this use: MozStackWalk() will probably only
1233 // realloc/free allocations that it allocated itself, but those won't be
1234 // page allocations because PHC is disabled before calling MozStackWalk().
1235 //
1236 // (Note that MaybePageAlloc() could safely do a page allocation so long as
1237 // it avoided calling MozStackWalk() by getting a dummy allocStack. But it
1238 // wouldn't be useful, and it would prevent the second use below.)
1239 //
1240 // - It's used to prevent PHC allocations in some tests that rely on
1241 // mozjemalloc's exact allocation behaviour, which PHC does not replicate
1242 // exactly. (Note that (b) isn't necessary for this use -- MozStackWalk()
1243 // could be safely called -- but it is necessary for the first use above.)
1244 //
1245 static PHC_THREAD_LOCAL(bool)__thread ::mozilla::detail::ThreadLocal< bool, ::mozilla::
detail::ThreadLocalNativeStorage>
tlsIsDisabled;
1246
1247 // Delay until the next attempt at a page allocation. The delay is made up of
1248 // two parts the global delay and each thread's local portion of that delay:
1249 //
1250 // delay = sDelay + sum_all_threads(tlsAllocDelay)
1251 //
1252 // Threads use their local delay to reduce contention on the shared delay.
1253 //
1254 // See the comment in MaybePageAlloc() for an explanation of why it uses
1255 // ReleaseAcquire semantics.
1256 static Atomic<Delay, ReleaseAcquire> sAllocDelay;
1257 static PHC_THREAD_LOCAL(Delay)__thread ::mozilla::detail::ThreadLocal< Delay, ::mozilla::
detail::ThreadLocalNativeStorage>
tlsAllocDelay;
1258
1259 // The last value we set tlsAllocDelay to before starting to count down.
1260 static PHC_THREAD_LOCAL(Delay)__thread ::mozilla::detail::ThreadLocal< Delay, ::mozilla::
detail::ThreadLocalNativeStorage>
tlsLastDelay;
1261
1262 AllocPageInfo mAllocPages[kNumAllocPages];
1263#if PHC_LOGGING0
1264 Time mFreeTime[kNumAllocPages];
1265#endif
1266
1267 public:
1268 Delay GetAvgAllocDelay(const MutexAutoLock&) { return mAvgAllocDelay; }
1269 Delay GetAvgFirstAllocDelay(const MutexAutoLock&) {
1270 return mAvgFirstAllocDelay;
1271 }
1272 Delay GetAvgPageReuseDelay(const MutexAutoLock&) {
1273 return mAvgPageReuseDelay;
1274 }
1275
1276 // Both of these are accessed early on hot code paths. We make them both
1277 // static variables rathan making sRegion a member of sPHC to keep these hot
1278 // code paths as fast as possible. They're both "write once" so they can
1279 // share a cache line.
1280 static PHCRegion* sRegion;
1281 static PHC* sPHC;
1282};
1283
1284// These globals are read together and hardly ever written. They should be on
1285// the same cache line. They should be in a different cache line to data that
1286// is manipulated often (sMutex and mNow are members of sPHC for that reason) so
1287// that this cache line can be shared amoung cores. This makes a measurable
1288// impact to calls to maybe_init()
1289alignas(kCacheLineSize) PHCRegion* PHC::sRegion;
1290PHC* PHC::sPHC;
1291
1292PHC_THREAD_LOCAL(bool)__thread ::mozilla::detail::ThreadLocal< bool, ::mozilla::
detail::ThreadLocalNativeStorage>
PHC::tlsIsDisabled;
1293PHC_THREAD_LOCAL(Delay)__thread ::mozilla::detail::ThreadLocal< Delay, ::mozilla::
detail::ThreadLocalNativeStorage>
PHC::tlsAllocDelay;
1294Atomic<Delay, ReleaseAcquire> PHC::sAllocDelay;
1295PHC_THREAD_LOCAL(Delay)__thread ::mozilla::detail::ThreadLocal< Delay, ::mozilla::
detail::ThreadLocalNativeStorage>
PHC::tlsLastDelay;
1296
1297// This must be defined after the PHC class.
1298PHCRegion::PHCRegion()
1299 : mPagesStart(AllocAllPages()), mPagesLimit(mPagesStart + kAllPagesSize) {
1300 LOG("AllocAllPages at %p..%p\n", mPagesStart, mPagesLimit);
1301}
1302
1303// When PHC wants to crash we first have to unlock so that the crash reporter
1304// can call into PHC to lockup its pointer. That also means that before calling
1305// PHCCrash please ensure that state is consistent. Because this can report an
1306// arbitrary string, use of it must be reviewed by Firefox data stewards.
1307static void PHCCrash(PHCLock, const char* aMessage)
1308 MOZ_REQUIRES(PHC::sPHC->mMutex)__attribute__((exclusive_locks_required(PHC::sPHC->mMutex)
))
{
1309 PHC::sPHC->mMutex.Unlock();
1310 MOZ_CRASH_UNSAFE(aMessage)MOZ_Crash("/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1310, aMessage)
;
1311}
1312
1313class AutoDisableOnCurrentThread {
1314 public:
1315 AutoDisableOnCurrentThread(const AutoDisableOnCurrentThread&) = delete;
1316
1317 const AutoDisableOnCurrentThread& operator=(
1318 const AutoDisableOnCurrentThread&) = delete;
1319
1320 explicit AutoDisableOnCurrentThread() { PHC::DisableOnCurrentThread(); }
1321 ~AutoDisableOnCurrentThread() { PHC::sPHC->EnableOnCurrentThread(); }
1322};
1323
1324//---------------------------------------------------------------------------
1325// Initialisation
1326//---------------------------------------------------------------------------
1327
1328// WARNING: this function runs *very* early -- before all static initializers
1329// have run. For this reason, non-scalar globals (sRegion, sPHC) are allocated
1330// dynamically (so we can guarantee their construction in this function) rather
1331// than statically.
1332static bool phc_init() {
1333 if (GetKernelPageSize() != kPageSize) {
1334 return false;
1335 }
1336
1337 // sRegion and sPHC are never freed. They live for the life of the process.
1338 PHC::sRegion = InfallibleAllocPolicy::new_<PHCRegion>();
1339
1340 PHC::sPHC = InfallibleAllocPolicy::new_<PHC>();
1341
1342#ifndef XP_WIN
1343 // Avoid deadlocks when forking by acquiring our state lock prior to forking
1344 // and releasing it after forking. See |LogAlloc|'s |phc_init| for
1345 // in-depth details.
1346 pthread_atfork(PHC::prefork, PHC::postfork_parent, PHC::postfork_child);
1347#endif
1348
1349 return true;
1350}
1351
1352static inline bool maybe_init() {
1353 // This runs on hot paths and we can save some memory accesses by using sPHC
1354 // to test if we've already initialised PHC successfully.
1355 if (MOZ_UNLIKELY(!PHC::sPHC)(__builtin_expect(!!(!PHC::sPHC), 0))) {
1356 // The lambda will only be called once and is thread safe.
1357 static bool sInitSuccess = []() { return phc_init(); }();
1358 return sInitSuccess;
1359 }
1360
1361 return true;
1362}
1363
1364//---------------------------------------------------------------------------
1365// Page allocation operations
1366//---------------------------------------------------------------------------
1367
1368// This is the hot-path for testing if we should make a PHC allocation, it
1369// should be inlined into the caller while the remainder of the tests that are
1370// in MaybePageAlloc need not be inlined.
1371static MOZ_ALWAYS_INLINEinline bool ShouldPageAllocHot(size_t aReqSize) {
1372 if (MOZ_UNLIKELY(!maybe_init())(__builtin_expect(!!(!maybe_init()), 0))) {
1373 return false;
1374 }
1375
1376 if (MOZ_UNLIKELY(aReqSize > kPageSize)(__builtin_expect(!!(aReqSize > kPageSize), 0))) {
1377 return false;
1378 }
1379
1380 // Decrement the delay. If it's zero, we do a page allocation and reset the
1381 // delay to a random number.
1382 if (MOZ_LIKELY(!PHC::DecrementDelay())(__builtin_expect(!!(!PHC::DecrementDelay()), 1))) {
1383 return false;
1384 }
1385
1386 return true;
1387}
1388
1389static void LogNoAlloc(size_t aReqSize, size_t aAlignment,
1390 Delay newAllocDelay) {
1391 // No pages are available, or VirtualAlloc/mprotect failed.
1392#if PHC_LOGGING0
1393 phc::PHCStats stats = PHC::sPHC->GetPageStats(lock);
1394#endif
1395 LOG("No PageAlloc(%zu, %zu), sAllocDelay <- %zu, fullness %zu/%zu/%zu, "
1396 "hits %zu/%zu (%zu%%)\n",
1397 aReqSize, aAlignment, size_t(newAllocDelay), stats.mSlotsAllocated,
1398 stats.mSlotsFreed, kNumAllocPages, PHC::sPHC->PageAllocHits(lock),
1399 PHC::sPHC->PageAllocAttempts(lock), PHC::sPHC->PageAllocHitRate(lock));
1400}
1401
1402// Attempt a page allocation if the time and the size are right. Allocated
1403// memory is zeroed if aZero is true. On failure, the caller should attempt a
1404// normal allocation via MozJemalloc. Can be called in a context where
1405// PHC::mMutex is locked.
1406static void* MaybePageAlloc(const Maybe<arena_id_t>& aArenaId, size_t aReqSize,
1407 size_t aAlignment, bool aZero) {
1408 MOZ_ASSERT(IsPowerOfTwo(aAlignment))do { static_assert( mozilla::detail::AssertionConditionType<
decltype(IsPowerOfTwo(aAlignment))>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(IsPowerOfTwo(aAlignment)))),
0))) { do { } while (false); MOZ_ReportAssertionFailure("IsPowerOfTwo(aAlignment)"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1408); AnnotateMozCrashReason("MOZ_ASSERT" "(" "IsPowerOfTwo(aAlignment)"
")"); do { *((volatile int*)__null) = 1408; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1409 MOZ_ASSERT(PHC::sPHC)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(PHC::sPHC)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(PHC::sPHC))), 0))) { do { } while
(false); MOZ_ReportAssertionFailure("PHC::sPHC", "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1409); AnnotateMozCrashReason("MOZ_ASSERT" "(" "PHC::sPHC" ")"
); do { *((volatile int*)__null) = 1409; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1410 if (!PHC::sPHC->ShouldMakeNewAllocations()) {
1411 // Reset the allocation delay so that we take the fast path most of the
1412 // time. Rather than take the lock and use the RNG which are unnecessary
1413 // when PHC is disabled, instead set the delay to a reasonably high number,
1414 // the default average first allocation delay. This is reset when PHC is
1415 // re-enabled anyway.
1416 PHC::ForceSetNewAllocDelay(kDelayResetWhenDisabled);
1417 return nullptr;
1418 }
1419
1420 if (PHC::IsDisabledOnCurrentThread()) {
1421 // We don't reset sAllocDelay since that might affect other threads. We
1422 // assume this is okay because either this thread will be re-enabled after
1423 // less than DELAY_MAX allocations or that there are other active threads
1424 // that will reset sAllocDelay. We do reset our local delay which will
1425 // cause this thread to "back off" from updating sAllocDelay on future
1426 // allocations.
1427 PHC::ResetLocalAllocDelay(kDelayBackoffAmount);
1428 return nullptr;
1429 }
1430
1431 // Disable on this thread *before* getting the stack trace.
1432 AutoDisableOnCurrentThread disable;
1433
1434 // Get the stack trace *before* locking the mutex. If we return nullptr then
1435 // it was a waste, but it's not so frequent, and doing a stack walk while
1436 // the mutex is locked is problematic (see the big comment on
1437 // StackTrace::Fill() for details).
1438 StackTrace allocStack;
1439 allocStack.Fill();
1440
1441 MutexAutoLock lock(PHC::sPHC->mMutex);
1442
1443 Time now = PHC::Now();
1444
1445 Delay newAllocDelay = Rnd64ToDelay(PHC::sPHC->GetAvgAllocDelay(lock),
1446 PHC::sPHC->Random64(lock));
1447 if (!PHC::sPHC->SetNewAllocDelay(newAllocDelay)) {
1448 return nullptr;
1449 }
1450
1451 // Pages are allocated from a free list populated in order of when they're
1452 // freed. If the page at the head of the list is too recently freed to be
1453 // reused then no other pages on the list will be either.
1454
1455 Maybe<uintptr_t> mb_index = PHC::sPHC->PopNextFreeIfAllocatable(lock, now);
1456 if (!mb_index) {
1457 PHC::sPHC->IncPageAllocMisses(lock);
1458 LogNoAlloc(aReqSize, aAlignment, newAllocDelay);
1459 return nullptr;
1460 }
1461 uintptr_t index = mb_index.value();
1462
1463#if PHC_LOGGING0
1464 Time lifetime = 0;
1465#endif
1466 uint8_t* pagePtr = PHC::sRegion->AllocPagePtr(index);
1467 MOZ_ASSERT(pagePtr)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(pagePtr)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(pagePtr))), 0))) { do { } while
(false); MOZ_ReportAssertionFailure("pagePtr", "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1467); AnnotateMozCrashReason("MOZ_ASSERT" "(" "pagePtr" ")"
); do { *((volatile int*)__null) = 1467; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1468 bool ok =
1469#ifdef XP_WIN
1470 !!VirtualAlloc(pagePtr, kPageSize, MEM_COMMIT, PAGE_READWRITE);
1471#else
1472 mprotect(pagePtr, kPageSize, PROT_READ0x1 | PROT_WRITE0x2) == 0;
1473#endif
1474
1475 if (!ok) {
1476 PHC::sPHC->UnpopNextFree(lock, index);
1477 PHC::sPHC->IncPageAllocMisses(lock);
1478 LogNoAlloc(aReqSize, aAlignment, newAllocDelay);
1479 return nullptr;
1480 }
1481
1482 size_t usableSize = MozJemalloc::malloc_good_size(aReqSize);
1483 MOZ_ASSERT(usableSize > 0)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(usableSize > 0)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(usableSize > 0))), 0))) {
do { } while (false); MOZ_ReportAssertionFailure("usableSize > 0"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1483); AnnotateMozCrashReason("MOZ_ASSERT" "(" "usableSize > 0"
")"); do { *((volatile int*)__null) = 1483; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1484
1485 // Put the allocation as close to the end of the page as possible,
1486 // allowing for alignment requirements.
1487 uint8_t* ptr = pagePtr + kPageSize - usableSize;
1488 if (aAlignment != 1) {
1489 ptr = reinterpret_cast<uint8_t*>(
1490 (reinterpret_cast<uintptr_t>(ptr) & ~(aAlignment - 1)));
1491 }
1492
1493#if PHC_LOGGING0
1494 Time then = PHC::sPHC->GetFreeTime(i);
1495 lifetime = then != 0 ? now - then : 0;
1496#endif
1497
1498 PHC::sPHC->SetPageInUse(lock, index, aArenaId, ptr, allocStack);
1499
1500 if (aZero) {
1501 memset(ptr, 0, usableSize);
1502 } else {
1503#ifdef DEBUG1
1504 memset(ptr, kAllocJunk, usableSize);
1505#endif
1506 }
1507
1508 PHC::sPHC->IncPageAllocHits(lock);
1509#if PHC_LOGGING0
1510 phc::PHCStats stats = PHC::sPHC->GetPageStats(lock);
1511#endif
1512 LOG("PageAlloc(%zu, %zu) -> %p[%zu]/%p (%zu) (z%zu), sAllocDelay <- %zu, "
1513 "fullness %zu/%zu/%zu, hits %zu/%zu (%zu%%), lifetime %zu\n",
1514 aReqSize, aAlignment, pagePtr, i, ptr, usableSize, size_t(newAllocDelay),
1515 size_t(PHC::SharedAllocDelay()), stats.mSlotsAllocated, stats.mSlotsFreed,
1516 kNumAllocPages, PHC::sPHC->PageAllocHits(lock),
1517 PHC::sPHC->PageAllocAttempts(lock), PHC::sPHC->PageAllocHitRate(lock),
1518 lifetime);
1519
1520 return ptr;
1521}
1522
1523static void FreePage(PHCLock aLock, uintptr_t aIndex,
1524 const Maybe<arena_id_t>& aArenaId,
1525 const StackTrace& aFreeStack, Delay aReuseDelay)
1526 MOZ_REQUIRES(PHC::sPHC->mMutex)__attribute__((exclusive_locks_required(PHC::sPHC->mMutex)
))
{
1527 void* pagePtr = PHC::sRegion->AllocPagePtr(aIndex);
1528
1529#ifdef XP_WIN
1530 if (!VirtualFree(pagePtr, kPageSize, MEM_DECOMMIT)) {
1531 PHCCrash(aLock, "VirtualFree failed");
1532 }
1533#else
1534 if (mmap_mmap(pagePtr, kPageSize, PROT_NONE0x0, MAP_FIXED0x10 | MAP_PRIVATE0x02 | MAP_ANON0x20,
1535 -1, 0) == MAP_FAILED((void *) -1)) {
1536 PHCCrash(aLock, "mmap failed");
1537 }
1538#endif
1539
1540 PHC::sPHC->SetPageFreed(aLock, aIndex, aArenaId, aFreeStack, aReuseDelay);
1541}
1542
1543//---------------------------------------------------------------------------
1544// replace-malloc machinery
1545//---------------------------------------------------------------------------
1546
1547// This handles malloc, moz_arena_malloc, and realloc-with-a-nullptr.
1548MOZ_ALWAYS_INLINEinline static void* PageMalloc(const Maybe<arena_id_t>& aArenaId,
1549 size_t aReqSize) {
1550 void* ptr = ShouldPageAllocHot(aReqSize)
1551 // The test on aArenaId here helps the compiler optimise away
1552 // the construction of Nothing() in the caller.
1553 ? MaybePageAlloc(aArenaId.isSome() ? aArenaId : Nothing(),
1554 aReqSize, /* aAlignment */ 1,
1555 /* aZero */ false)
1556 : nullptr;
1557 return ptr ? ptr
1558 : (aArenaId.isSome()
1559 ? MozJemalloc::moz_arena_malloc(*aArenaId, aReqSize)
1560 : MozJemalloc::malloc(aReqSize));
1561}
1562
1563inline void* MozJemallocPHC::malloc(size_t aReqSize) {
1564 return PageMalloc(Nothing(), aReqSize);
1565}
1566
1567static Delay ReuseDelay(PHCLock aLock) {
1568 Delay avg_reuse_delay = PHC::sPHC->GetAvgPageReuseDelay(aLock);
1569 return (avg_reuse_delay / 2) +
1570 Rnd64ToDelay(avg_reuse_delay / 2, PHC::sPHC->Random64(aLock));
1571}
1572
1573// This handles both calloc and moz_arena_calloc.
1574MOZ_ALWAYS_INLINEinline static void* PageCalloc(const Maybe<arena_id_t>& aArenaId,
1575 size_t aNum, size_t aReqSize) {
1576 CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aNum) * aReqSize;
1577 if (!checkedSize.isValid()) {
1578 return nullptr;
1579 }
1580
1581 void* ptr = ShouldPageAllocHot(checkedSize.value())
1582 // The test on aArenaId here helps the compiler optimise away
1583 // the construction of Nothing() in the caller.
1584 ? MaybePageAlloc(aArenaId.isSome() ? aArenaId : Nothing(),
1585 checkedSize.value(), /* aAlignment */ 1,
1586 /* aZero */ true)
1587 : nullptr;
1588 return ptr ? ptr
1589 : (aArenaId.isSome()
1590 ? MozJemalloc::moz_arena_calloc(*aArenaId, aNum, aReqSize)
1591 : MozJemalloc::calloc(aNum, aReqSize));
1592}
1593
1594inline void* MozJemallocPHC::calloc(size_t aNum, size_t aReqSize) {
1595 return PageCalloc(Nothing(), aNum, aReqSize);
1596}
1597
1598// This function handles both realloc and moz_arena_realloc.
1599//
1600// As always, realloc is complicated, and doubly so when there are two
1601// different kinds of allocations in play. Here are the possible transitions,
1602// and what we do in practice.
1603//
1604// - normal-to-normal: This is straightforward and obviously necessary.
1605//
1606// - normal-to-page: This is disallowed because it would require getting the
1607// arenaId of the normal allocation, which isn't possible in non-DEBUG builds
1608// for security reasons.
1609//
1610// - page-to-page: This is done whenever possible, i.e. whenever the new size
1611// is less than or equal to 4 KiB. This choice counterbalances the
1612// disallowing of normal-to-page allocations, in order to avoid biasing
1613// towards or away from page allocations. It always occurs in-place.
1614//
1615// - page-to-normal: this is done only when necessary, i.e. only when the new
1616// size is greater than 4 KiB. This choice naturally flows from the
1617// prior choice on page-to-page transitions.
1618//
1619// In summary: realloc doesn't change the allocation kind unless it must.
1620//
1621// This function may return:
1622// - Some(pointer) when PHC handled the reallocation.
1623// - Some(nullptr) when PHC should have handled a page-to-normal transition
1624// but couldn't because of OOM.
1625// - Nothing() when PHC is disabled or the original allocation was not
1626// under PHC.
1627MOZ_ALWAYS_INLINEinline static Maybe<void*> MaybePageRealloc(
1628 const Maybe<arena_id_t>& aArenaId, void* aOldPtr, size_t aNewSize) {
1629 if (!aOldPtr) {
1630 // Null pointer. Treat like malloc(aNewSize).
1631 return Some(PageMalloc(aArenaId, aNewSize));
1632 }
1633
1634 if (!maybe_init()) {
1635 return Nothing();
1636 }
1637
1638 PtrKind pk = PHC::sRegion->PtrKind(aOldPtr);
1639 if (pk.IsNothing()) {
1640 // A normal-to-normal transition.
1641 return Nothing();
1642 }
1643
1644 if (pk.IsGuardPage()) {
1645 PHC::CrashOnGuardPage(aOldPtr);
1646 }
1647
1648 // At this point we know we have an allocation page.
1649 uintptr_t index = pk.AllocPageIndex();
1650
1651 // A page-to-something transition.
1652 PHC::sPHC->AdvanceNow(PHC::LocalAllocDelay());
1653
1654 // Note that `disable` has no effect unless it is emplaced below.
1655 Maybe<AutoDisableOnCurrentThread> disable;
1656 // Get the stack trace *before* locking the mutex.
1657 StackTrace stack;
1658 if (PHC::IsDisabledOnCurrentThread()) {
1659 // PHC is disabled on this thread. Leave the stack empty.
1660 } else {
1661 // Disable on this thread *before* getting the stack trace.
1662 disable.emplace();
1663 stack.Fill();
1664 }
1665
1666 MutexAutoLock lock(PHC::sPHC->mMutex);
1667
1668 // Check for realloc() of a freed block.
1669 PHC::sPHC->EnsureValidAndInUse(lock, aOldPtr, index);
1670
1671 if (aNewSize <= kPageSize && PHC::sPHC->ShouldMakeNewAllocations()) {
1672 // A page-to-page transition. Just keep using the page allocation. We do
1673 // this even if the thread is disabled, because it doesn't create a new
1674 // page allocation. Note that ResizePageInUse() checks aArenaId.
1675 //
1676 // Move the bytes with memmove(), because the old allocation and the new
1677 // allocation overlap. Move the usable size rather than the requested size,
1678 // because the user might have used malloc_usable_size() and filled up the
1679 // usable size.
1680 size_t oldUsableSize = PHC::sPHC->PageUsableSize(lock, index);
1681 size_t newUsableSize = MozJemalloc::malloc_good_size(aNewSize);
1682 uint8_t* pagePtr = PHC::sRegion->AllocPagePtr(index);
1683 uint8_t* newPtr = pagePtr + kPageSize - newUsableSize;
1684 memmove(newPtr, aOldPtr, std::min(oldUsableSize, aNewSize));
1685 PHC::sPHC->ResizePageInUse(lock, index, aArenaId, newPtr, stack);
1686 LOG("PageRealloc-Reuse(%p, %zu) -> %p\n", aOldPtr, aNewSize, newPtr);
1687 return Some(newPtr);
1688 }
1689
1690 // A page-to-normal transition (with the new size greater than page-sized).
1691 // (Note that aArenaId is checked below.)
1692 void* newPtr;
1693 if (aArenaId.isSome()) {
1694 newPtr = MozJemalloc::moz_arena_malloc(*aArenaId, aNewSize);
1695 } else {
1696 Maybe<arena_id_t> oldArenaId = PHC::sPHC->PageArena(lock, index);
1697 newPtr = (oldArenaId.isSome()
1698 ? MozJemalloc::moz_arena_malloc(*oldArenaId, aNewSize)
1699 : MozJemalloc::malloc(aNewSize));
1700 }
1701 if (!newPtr) {
1702 return Some(nullptr);
1703 }
1704
1705 Delay reuseDelay = ReuseDelay(lock);
1706
1707 // Copy the usable size rather than the requested size, because the user
1708 // might have used malloc_usable_size() and filled up the usable size. Note
1709 // that FreePage() checks aArenaId (via SetPageFreed()).
1710 size_t oldUsableSize = PHC::sPHC->PageUsableSize(lock, index);
1711 memcpy(newPtr, aOldPtr, std::min(oldUsableSize, aNewSize));
1712 FreePage(lock, index, aArenaId, stack, reuseDelay);
1713 LOG("PageRealloc-Free(%p[%zu], %zu) -> %p, %zu delay, reuse at ~%zu\n",
1714 aOldPtr, index, aNewSize, newPtr, size_t(reuseDelay),
1715 size_t(PHC::Now()) + reuseDelay);
1716
1717 return Some(newPtr);
1718}
1719
1720MOZ_ALWAYS_INLINEinline static void* PageRealloc(const Maybe<arena_id_t>& aArenaId,
1721 void* aOldPtr, size_t aNewSize) {
1722 Maybe<void*> ptr = MaybePageRealloc(aArenaId, aOldPtr, aNewSize);
1723
1724 return ptr.isSome()
1725 ? *ptr
1726 : (aArenaId.isSome() ? MozJemalloc::moz_arena_realloc(
1727 *aArenaId, aOldPtr, aNewSize)
1728 : MozJemalloc::realloc(aOldPtr, aNewSize));
1729}
1730
1731inline void* MozJemallocPHC::realloc(void* aOldPtr, size_t aNewSize) {
1732 return PageRealloc(Nothing(), aOldPtr, aNewSize);
1733}
1734
1735// This handles both free and moz_arena_free.
1736static void DoPageFree(const Maybe<arena_id_t>& aArenaId, void* aPtr) {
1737 PtrKind pk = PHC::sRegion->PtrKind(aPtr);
1738 if (pk.IsGuardPage()) {
1739 PHC::CrashOnGuardPage(aPtr);
1740 }
1741
1742 // At this point we know we have an allocation page.
1743 PHC::sPHC->AdvanceNow(PHC::LocalAllocDelay());
1744 uintptr_t index = pk.AllocPageIndex();
1745
1746 // Note that `disable` has no effect unless it is emplaced below.
1747 Maybe<AutoDisableOnCurrentThread> disable;
1748 // Get the stack trace *before* locking the mutex.
1749 StackTrace freeStack;
1750 if (PHC::IsDisabledOnCurrentThread()) {
1751 // PHC is disabled on this thread. Leave the stack empty.
1752 } else {
1753 // Disable on this thread *before* getting the stack trace.
1754 disable.emplace();
1755 freeStack.Fill();
1756 }
1757
1758 MutexAutoLock lock(PHC::sPHC->mMutex);
1759
1760 // Check for a double-free.
1761 PHC::sPHC->EnsureValidAndInUse(lock, aPtr, index);
1762
1763 // Note that FreePage() checks aArenaId (via SetPageFreed()).
1764 Delay reuseDelay = ReuseDelay(lock);
1765 FreePage(lock, index, aArenaId, freeStack, reuseDelay);
1766
1767#if PHC_LOGGING0
1768 phc::PHCStats stats = PHC::sPHC->GetPageStats(lock);
1769#endif
1770 LOG("PageFree(%p[%zu]), %zu delay, reuse at ~%zu, fullness %zu/%zu/%zu\n",
1771 aPtr, index, size_t(reuseDelay), size_t(PHC::Now()) + reuseDelay,
1772 stats.mSlotsAllocated, stats.mSlotsFreed, kNumAllocPages);
1773}
1774
1775MOZ_ALWAYS_INLINEinline static bool FastIsPHCPtr(void* aPtr) {
1776 if (MOZ_UNLIKELY(!maybe_init())(__builtin_expect(!!(!maybe_init()), 0))) {
1777 return false;
1778 }
1779
1780 PtrKind pk = PHC::sRegion->PtrKind(aPtr);
1781 return !pk.IsNothing();
1782}
1783
1784MOZ_ALWAYS_INLINEinline static void PageFree(const Maybe<arena_id_t>& aArenaId,
1785 void* aPtr) {
1786 if (MOZ_UNLIKELY(FastIsPHCPtr(aPtr))(__builtin_expect(!!(FastIsPHCPtr(aPtr)), 0))) {
1787 // The tenery expression here helps the compiler optimise away the
1788 // construction of Nothing() in the caller.
1789 DoPageFree(aArenaId.isSome() ? aArenaId : Nothing(), aPtr);
1790 return;
1791 }
1792
1793 aArenaId.isSome() ? MozJemalloc::moz_arena_free(*aArenaId, aPtr)
1794 : MozJemalloc::free(aPtr);
1795}
1796
1797inline void MozJemallocPHC::free(void* aPtr) { PageFree(Nothing(), aPtr); }
1798
1799// This handles memalign and moz_arena_memalign.
1800MOZ_ALWAYS_INLINEinline static void* PageMemalign(const Maybe<arena_id_t>& aArenaId,
1801 size_t aAlignment,
1802 size_t aReqSize) {
1803 MOZ_RELEASE_ASSERT(IsPowerOfTwo(aAlignment))do { static_assert( mozilla::detail::AssertionConditionType<
decltype(IsPowerOfTwo(aAlignment))>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(IsPowerOfTwo(aAlignment)))),
0))) { do { } while (false); MOZ_ReportAssertionFailure("IsPowerOfTwo(aAlignment)"
, "/var/lib/jenkins/workspace/firefox-scan-build/memory/build/PHC.cpp"
, 1803); AnnotateMozCrashReason("MOZ_RELEASE_ASSERT" "(" "IsPowerOfTwo(aAlignment)"
")"); do { *((volatile int*)__null) = 1803; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
1804
1805 // PHC can't satisfy an alignment greater than a page size, so fall back to
1806 // mozjemalloc in that case.
1807 void* ptr = nullptr;
1808 if (ShouldPageAllocHot(aReqSize) && aAlignment <= kPageSize) {
1809 // The test on aArenaId here helps the compiler optimise away
1810 // the construction of Nothing() in the caller.
1811 ptr = MaybePageAlloc(aArenaId.isSome() ? aArenaId : Nothing(), aReqSize,
1812 aAlignment, /* aZero */ false);
1813 }
1814 return ptr ? ptr
1815 : (aArenaId.isSome()
1816 ? MozJemalloc::moz_arena_memalign(*aArenaId, aAlignment,
1817 aReqSize)
1818 : MozJemalloc::memalign(aAlignment, aReqSize));
1819}
1820
1821inline void* MozJemallocPHC::memalign(size_t aAlignment, size_t aReqSize) {
1822 return PageMemalign(Nothing(), aAlignment, aReqSize);
1823}
1824
1825inline size_t MozJemallocPHC::malloc_usable_size(usable_ptr_t aPtr) {
1826 if (!maybe_init()) {
1827 return MozJemalloc::malloc_usable_size(aPtr);
1828 }
1829
1830 PtrKind pk = PHC::sRegion->PtrKind(aPtr);
1831 if (pk.IsNothing()) {
1832 // Not a page allocation. Measure it normally.
1833 return MozJemalloc::malloc_usable_size(aPtr);
1834 }
1835
1836 if (pk.IsGuardPage()) {
1837 PHC::CrashOnGuardPage(const_cast<void*>(aPtr));
1838 }
1839
1840 // At this point we know aPtr lands within an allocation page, due to the
1841 // math done in the PtrKind constructor. But if aPtr points to memory
1842 // before the base address of the allocation, we return 0.
1843 uintptr_t index = pk.AllocPageIndex();
1844
1845 MutexAutoLock lock(PHC::sPHC->mMutex);
1846
1847 void* pageBaseAddr = PHC::sPHC->AllocPageBaseAddr(lock, index);
1848
1849 if (MOZ_UNLIKELY(aPtr < pageBaseAddr)(__builtin_expect(!!(aPtr < pageBaseAddr), 0))) {
1850 return 0;
1851 }
1852
1853 return PHC::sPHC->PageUsableSize(lock, index);
1854}
1855
1856static size_t metadata_size() {
1857 return MozJemalloc::malloc_usable_size(PHC::sRegion) +
1858 MozJemalloc::malloc_usable_size(PHC::sPHC);
1859}
1860
1861inline void MozJemallocPHC::jemalloc_stats_internal(
1862 jemalloc_stats_t* aStats, jemalloc_bin_stats_t* aBinStats) {
1863 MozJemalloc::jemalloc_stats_internal(aStats, aBinStats);
1864
1865 if (!maybe_init()) {
1866 // If we're not initialised, then we're not using any additional memory and
1867 // have nothing to add to the report.
1868 return;
1869 }
1870
1871 // We allocate our memory from jemalloc so it has already counted our memory
1872 // usage within "mapped" and "allocated", we must subtract the memory we
1873 // allocated from jemalloc from allocated before adding in only the parts that
1874 // we have allocated out to Firefox.
1875
1876 aStats->allocated -= kAllPagesJemallocSize;
1877
1878 size_t allocated = 0;
1879 {
1880 MutexAutoLock lock(PHC::sPHC->mMutex);
1881
1882 // Add usable space of in-use allocations to `allocated`.
1883 for (size_t i = 0; i < kNumAllocPages; i++) {
1884 if (PHC::sPHC->IsPageInUse(lock, i)) {
1885 allocated += PHC::sPHC->PageUsableSize(lock, i);
1886 }
1887 }
1888 }
1889 aStats->allocated += allocated;
1890
1891 // guards is the gap between `allocated` and `mapped`. In some ways this
1892 // almost fits into aStats->wasted since it feels like wasted memory. However
1893 // wasted should only include committed memory and these guard pages are
1894 // uncommitted. Therefore we don't include it anywhere.
1895 // size_t guards = mapped - allocated;
1896
1897 // aStats.page_cache and aStats.bin_unused are left unchanged because PHC
1898 // doesn't have anything corresponding to those.
1899
1900 // The metadata is stored in normal heap allocations, so they're measured by
1901 // mozjemalloc as `allocated`. Move them into `bookkeeping`.
1902 // They're also reported under explicit/heap-overhead/phc/fragmentation in
1903 // about:memory.
1904 size_t bookkeeping = metadata_size();
1905 aStats->allocated -= bookkeeping;
1906 aStats->bookkeeping += bookkeeping;
1907}
1908
1909inline void MozJemallocPHC::jemalloc_ptr_info(const void* aPtr,
1910 jemalloc_ptr_info_t* aInfo) {
1911 if (!maybe_init()) {
1912 return MozJemalloc::jemalloc_ptr_info(aPtr, aInfo);
1913 }
1914
1915 // We need to implement this properly, because various code locations do
1916 // things like checking that allocations are in the expected arena.
1917 PtrKind pk = PHC::sRegion->PtrKind(aPtr);
1918 if (pk.IsNothing()) {
1919 // Not a page allocation.
1920 return MozJemalloc::jemalloc_ptr_info(aPtr, aInfo);
1921 }
1922
1923 if (pk.IsGuardPage()) {
1924 // Treat a guard page as unknown because there's no better alternative.
1925 *aInfo = {TagUnknown, nullptr, 0, 0};
1926 return;
1927 }
1928
1929 // At this point we know we have an allocation page.
1930 uintptr_t index = pk.AllocPageIndex();
1931
1932 MutexAutoLock lock(PHC::sPHC->mMutex);
1933
1934 PHC::sPHC->FillJemallocPtrInfo(lock, aPtr, index, aInfo);
1935#if DEBUG1
1936 LOG("JemallocPtrInfo(%p[%zu]) -> {%zu, %p, %zu, %zu}\n", aPtr, index,
1937 size_t(aInfo->tag), aInfo->addr, aInfo->size, aInfo->arenaId);
1938#else
1939 LOG("JemallocPtrInfo(%p[%zu]) -> {%zu, %p, %zu}\n", aPtr, index,
1940 size_t(aInfo->tag), aInfo->addr, aInfo->size);
1941#endif
1942}
1943
1944inline void* MozJemallocPHC::moz_arena_malloc(arena_id_t aArenaId,
1945 size_t aReqSize) {
1946 return PageMalloc(Some(aArenaId), aReqSize);
1947}
1948
1949inline void* MozJemallocPHC::moz_arena_calloc(arena_id_t aArenaId, size_t aNum,
1950 size_t aReqSize) {
1951 return PageCalloc(Some(aArenaId), aNum, aReqSize);
1952}
1953
1954inline void* MozJemallocPHC::moz_arena_realloc(arena_id_t aArenaId,
1955 void* aOldPtr, size_t aNewSize) {
1956 return PageRealloc(Some(aArenaId), aOldPtr, aNewSize);
1957}
1958
1959inline void MozJemallocPHC::moz_arena_free(arena_id_t aArenaId, void* aPtr) {
1960 return PageFree(Some(aArenaId), aPtr);
1961}
1962
1963inline void* MozJemallocPHC::moz_arena_memalign(arena_id_t aArenaId,
1964 size_t aAlignment,
1965 size_t aReqSize) {
1966 return PageMemalign(Some(aArenaId), aAlignment, aReqSize);
1967}
1968
1969namespace mozilla::phc {
1970
1971bool IsPHCAllocation(const void* aPtr, AddrInfo* aOut) {
1972 if (!maybe_init()) {
1973 return false;
1974 }
1975
1976 PtrKind pk = PHC::sRegion->PtrKind(aPtr);
1977 if (pk.IsNothing()) {
1978 return false;
1979 }
1980
1981 bool isGuardPage = false;
1982 if (pk.IsGuardPage()) {
1983 if ((uintptr_t(aPtr) % kPageSize) < (kPageSize / 2)) {
1984 // The address is in the lower half of a guard page, so it's probably an
1985 // overflow. But first check that it is not on the very first guard
1986 // page, in which case it cannot be an overflow, and we ignore it.
1987 if (PHC::sRegion->IsInFirstGuardPage(aPtr)) {
1988 return false;
1989 }
1990
1991 // Get the allocation page preceding this guard page.
1992 pk = PHC::sRegion->PtrKind(static_cast<const uint8_t*>(aPtr) - kPageSize);
1993
1994 } else {
1995 // The address is in the upper half of a guard page, so it's probably an
1996 // underflow. Get the allocation page following this guard page.
1997 pk = PHC::sRegion->PtrKind(static_cast<const uint8_t*>(aPtr) + kPageSize);
1998 }
1999
2000 // Make a note of the fact that we hit a guard page.
2001 isGuardPage = true;
2002 }
2003
2004 // At this point we know we have an allocation page.
2005 uintptr_t index = pk.AllocPageIndex();
2006
2007 if (aOut) {
2008 if (PHC::sPHC->mMutex.TryLock()) {
2009 PHC::sPHC->FillAddrInfo(index, aPtr, isGuardPage, *aOut);
2010 LOG("IsPHCAllocation: %zu, %p, %zu, %zu, %zu\n", size_t(aOut->mKind),
2011 aOut->mBaseAddr, aOut->mUsableSize,
2012 aOut->mAllocStack.isSome() ? aOut->mAllocStack->mLength : 0,
2013 aOut->mFreeStack.isSome() ? aOut->mFreeStack->mLength : 0);
2014 PHC::sPHC->mMutex.Unlock();
2015 } else {
2016 LOG("IsPHCAllocation: PHC is locked\n");
2017 aOut->mPhcWasLocked = true;
2018 }
2019 }
2020 return true;
2021}
2022
2023void DisablePHCOnCurrentThread() {
2024 PHC::DisableOnCurrentThread();
2025 LOG("DisablePHCOnCurrentThread: %zu\n", 0ul);
2026}
2027
2028void ReenablePHCOnCurrentThread() {
2029 PHC::sPHC->EnableOnCurrentThread();
2030 LOG("ReenablePHCOnCurrentThread: %zu\n", 0ul);
2031}
2032
2033bool IsPHCEnabledOnCurrentThread() {
2034 bool enabled = !PHC::IsDisabledOnCurrentThread();
2035 LOG("IsPHCEnabledOnCurrentThread: %zu\n", size_t(enabled));
2036 return enabled;
2037}
2038
2039void PHCMemoryUsage(MemoryUsage& aMemoryUsage) {
2040 if (!maybe_init()) {
2041 aMemoryUsage = MemoryUsage();
2042 return;
2043 }
2044
2045 aMemoryUsage.mMetadataBytes = metadata_size();
2046 if (PHC::sPHC) {
2047 MutexAutoLock lock(PHC::sPHC->mMutex);
2048 aMemoryUsage.mFragmentationBytes = PHC::sPHC->FragmentationBytes();
2049 } else {
2050 aMemoryUsage.mFragmentationBytes = 0;
2051 }
2052}
2053
2054void GetPHCStats(PHCStats& aStats) {
2055 if (!maybe_init()) {
2056 aStats = PHCStats();
2057 return;
2058 }
2059
2060 MutexAutoLock lock(PHC::sPHC->mMutex);
2061
2062 aStats = PHC::sPHC->GetPageStats(lock);
2063}
2064
2065// Enable or Disable PHC at runtime. If PHC is disabled it will still trap
2066// bad uses of previous allocations, but won't track any new allocations.
2067void SetPHCState(PHCState aState) {
2068 if (!maybe_init()) {
2069 return;
2070 }
2071
2072 PHC::sPHC->SetState(aState);
2073}
2074
2075void SetPHCProbabilities(int64_t aAvgDelayFirst, int64_t aAvgDelayNormal,
2076 int64_t aAvgDelayPageReuse) {
2077 if (!maybe_init()) {
2078 return;
2079 }
2080
2081 PHC::sPHC->SetProbabilities(aAvgDelayFirst, aAvgDelayNormal,
2082 aAvgDelayPageReuse);
2083}
2084
2085} // namespace mozilla::phc