Bug Summary

File: /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/RefPtr.h
Warning: line 80, column 7
Use of memory after it is freed

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name DrawTargetWebgl.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -ffp-contract=off -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dom/canvas -fcoverage-compilation-dir=/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dom/canvas -resource-dir /usr/lib/llvm-20/lib/clang/20 -include /var/lib/jenkins/workspace/firefox-scan-build/config/gcc_hidden.h -include /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/mozilla-config.h -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/stl_wrappers -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/system_wrappers -U _FORTIFY_SOURCE -D _FORTIFY_SOURCE=2 -D _GLIBCXX_ASSERTIONS -D DEBUG=1 -D MOZ_HAS_MOZGLUE -D MOZILLA_INTERNAL_API -D IMPL_LIBXUL -D MOZ_SUPPORT_LEAKCHECKING -D STATIC_EXPORTABLE_JS_API -I /var/lib/jenkins/workspace/firefox-scan-build/dom/canvas -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dom/canvas -I /var/lib/jenkins/workspace/firefox-scan-build/js/xpconnect/wrappers -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/ipc/ipdl/_ipdlheaders -I /var/lib/jenkins/workspace/firefox-scan-build/ipc/chromium/src -I /var/lib/jenkins/workspace/firefox-scan-build/dom/base -I /var/lib/jenkins/workspace/firefox-scan-build/dom/html -I /var/lib/jenkins/workspace/firefox-scan-build/dom/svg -I /var/lib/jenkins/workspace/firefox-scan-build/dom/workers -I /var/lib/jenkins/workspace/firefox-scan-build/dom/xul -I /var/lib/jenkins/workspace/firefox-scan-build/gfx/angle/checkout/include -I /var/lib/jenkins/workspace/firefox-scan-build/gfx/cairo/cairo/src -I /var/lib/jenkins/workspace/firefox-scan-build/gfx/gl -I /var/lib/jenkins/workspace/firefox-scan-build/image -I /var/lib/jenkins/workspace/firefox-scan-build/js/xpconnect/src -I /var/lib/jenkins/workspace/firefox-scan-build/layout/generic -I /var/lib/jenkins/workspace/firefox-scan-build/layout/style -I /var/lib/jenkins/workspace/firefox-scan-build/layout/xul -I /var/lib/jenkins/workspace/firefox-scan-build/media/libyuv/libyuv/include -I /var/lib/jenkins/workspace/firefox-scan-build/gfx/skia -I /var/lib/jenkins/workspace/firefox-scan-build/gfx/skia/skia -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/nspr -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/nss -D MOZILLA_CLIENT -I /usr/include/gtk-3.0 -I /usr/include/pango-1.0 -I 
/usr/include/glib-2.0 -I /usr/lib/x86_64-linux-gnu/glib-2.0/include -I /usr/include/sysprof-6 -I /usr/include/harfbuzz -I /usr/include/freetype2 -I /usr/include/libpng16 -I /usr/include/libmount -I /usr/include/blkid -I /usr/include/fribidi -I /usr/include/cairo -I /usr/include/pixman-1 -I /usr/include/gdk-pixbuf-2.0 -I /usr/include/x86_64-linux-gnu -I /usr/include/webp -I /usr/include/gio-unix-2.0 -I /usr/include/cloudproviders -I /usr/include/atk-1.0 -I /usr/include/at-spi2-atk/2.0 -I /usr/include/at-spi-2.0 -I /usr/include/dbus-1.0 -I /usr/lib/x86_64-linux-gnu/dbus-1.0/include -I /usr/include/gtk-3.0/unix-print -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/x86_64-linux-gnu/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14/backward -internal-isystem /usr/lib/llvm-20/lib/clang/20/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-error=tautological-type-limit-compare -Wno-invalid-offsetof -Wno-range-loop-analysis -Wno-deprecated-anon-enum-enum-conversion -Wno-deprecated-enum-enum-conversion -Wno-deprecated-this-capture -Wno-inline-new-delete -Wno-error=deprecated-declarations -Wno-error=array-bounds -Wno-error=free-nonheap-object -Wno-error=atomic-alignment -Wno-error=deprecated-builtins -Wno-psabi -Wno-error=builtin-macro-redefined -Wno-vla-cxx-extension -Wno-unknown-warning-option -Wno-shorten-64-to-32 -fdeprecated-macro -ferror-limit 19 -fstrict-flex-arrays=1 -stack-protector 2 -fstack-clash-protection -ftrivial-auto-var-init=pattern -fno-rtti -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -fno-sized-deallocation -fno-aligned-allocation -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2025-01-20-090804-167946-1 -x c++ /var/lib/jenkins/workspace/firefox-scan-build/dom/canvas/DrawTargetWebgl.cpp

/var/lib/jenkins/workspace/firefox-scan-build/dom/canvas/DrawTargetWebgl.cpp

1/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2/* vim: set ts=8 sts=2 et sw=2 tw=80: */
3/* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7#include "DrawTargetWebglInternal.h"
8#include "SourceSurfaceWebgl.h"
9
10#include "mozilla/ClearOnShutdown.h"
11#include "mozilla/StaticPrefs_gfx.h"
12#include "mozilla/gfx/AAStroke.h"
13#include "mozilla/gfx/Blur.h"
14#include "mozilla/gfx/DrawTargetSkia.h"
15#include "mozilla/gfx/gfxVars.h"
16#include "mozilla/gfx/Helpers.h"
17#include "mozilla/gfx/HelpersSkia.h"
18#include "mozilla/gfx/Logging.h"
19#include "mozilla/gfx/PathHelpers.h"
20#include "mozilla/gfx/PathSkia.h"
21#include "mozilla/gfx/Swizzle.h"
22#include "mozilla/layers/ImageDataSerializer.h"
23#include "mozilla/layers/RemoteTextureMap.h"
24#include "mozilla/widget/ScreenManager.h"
25#include "skia/include/core/SkPixmap.h"
26#include "nsContentUtils.h"
27
28#include "GLContext.h"
29#include "WebGLContext.h"
30#include "WebGLChild.h"
31#include "WebGLBuffer.h"
32#include "WebGLFramebuffer.h"
33#include "WebGLProgram.h"
34#include "WebGLShader.h"
35#include "WebGLTexture.h"
36#include "WebGLVertexArray.h"
37
38#include "gfxPlatform.h"
39
40#ifdef XP_MACOSX
41# include "mozilla/gfx/ScaledFontMac.h"
42#endif
43
44namespace mozilla::gfx {
45
46BackingTexture::BackingTexture(const IntSize& aSize, SurfaceFormat aFormat,
47 const RefPtr<WebGLTexture>& aTexture)
48 : mSize(aSize), mFormat(aFormat), mTexture(aTexture) {}
49
50#ifdef XP_WIN
51// Work around buggy ANGLE/D3D drivers that may copy blocks of pixels outside
52// the row length. Extra space is reserved at the end of each row up to stride
53// alignment. This does not affect standalone textures.
54static const Etagere::AllocatorOptions kR8AllocatorOptions = {16, 1, 1, 0};
55#endif
56
57SharedTexture::SharedTexture(const IntSize& aSize, SurfaceFormat aFormat,
58 const RefPtr<WebGLTexture>& aTexture)
59 : BackingTexture(aSize, aFormat, aTexture),
60 mAtlasAllocator(
61#ifdef XP_WIN
62 aFormat == SurfaceFormat::A8
63 ? Etagere::etagere_atlas_allocator_with_options(
64 aSize.width, aSize.height, &kR8AllocatorOptions)
65 :
66#endif
67 Etagere::etagere_atlas_allocator_new(aSize.width, aSize.height)) {
68}
69
70SharedTexture::~SharedTexture() {
71 if (mAtlasAllocator) {
72 Etagere::etagere_atlas_allocator_delete(mAtlasAllocator);
73 mAtlasAllocator = nullptr;
74 }
75}
76
77SharedTextureHandle::SharedTextureHandle(Etagere::AllocationId aId,
78 const IntRect& aBounds,
79 SharedTexture* aTexture)
80 : mAllocationId(aId), mBounds(aBounds), mTexture(aTexture) {}
81
82already_AddRefed<SharedTextureHandle> SharedTexture::Allocate(
83 const IntSize& aSize) {
84 Etagere::Allocation alloc = {{0, 0, 0, 0}, Etagere::INVALID_ALLOCATION_ID};
85 if (!mAtlasAllocator ||
86 !Etagere::etagere_atlas_allocator_allocate(mAtlasAllocator, aSize.width,
87 aSize.height, &alloc) ||
88 alloc.id == Etagere::INVALID_ALLOCATION_ID) {
89 return nullptr;
90 }
91 RefPtr<SharedTextureHandle> handle = new SharedTextureHandle(
92 alloc.id,
93 IntRect(IntPoint(alloc.rectangle.min_x, alloc.rectangle.min_y), aSize),
94 this);
95 return handle.forget();
96}
97
98bool SharedTexture::Free(SharedTextureHandle& aHandle) {
99 if (aHandle.mTexture != this) {
100 return false;
101 }
102 if (aHandle.mAllocationId != Etagere::INVALID_ALLOCATION_ID) {
103 if (mAtlasAllocator) {
104 Etagere::etagere_atlas_allocator_deallocate(mAtlasAllocator,
105 aHandle.mAllocationId);
106 }
107 aHandle.mAllocationId = Etagere::INVALID_ALLOCATION_ID;
108 }
109 return true;
110}
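A minimal sketch of how the Allocate/Free pair above is meant to be used; ExampleAtlasRoundTrip is a hypothetical helper written for illustration and is not part of this file:

// Illustration only: sub-allocate a 32x32 region from an existing shared
// atlas page, then release it when it is no longer needed.
static void ExampleAtlasRoundTrip(SharedTexture* aTexture) {
  RefPtr<SharedTextureHandle> handle = aTexture->Allocate(IntSize(32, 32));
  if (!handle) {
    // Etagere had no free rectangle of that size; real callers fall back to
    // another page or a standalone texture.
    return;
  }
  // mBounds, set in the SharedTextureHandle constructor above, records where
  // the region landed inside the page. When done with the region:
  aTexture->Free(*handle);
}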
111
112StandaloneTexture::StandaloneTexture(const IntSize& aSize,
113 SurfaceFormat aFormat,
114 const RefPtr<WebGLTexture>& aTexture)
115 : BackingTexture(aSize, aFormat, aTexture) {}
116
117DrawTargetWebgl::DrawTargetWebgl() = default;
118
119inline void SharedContextWebgl::ClearLastTexture(bool aFullClear) {
120 mLastTexture = nullptr;
121 if (aFullClear) {
122 mLastClipMask = nullptr;
123 }
124}
125
126// Attempts to clear the snapshot state. If the snapshot is only referenced by
127// this target, then it should simply be destroyed. If it is a WebGL surface in
128// use by something else, then special cleanup such as reusing the texture or
129// copy-on-write may be possible.
130void DrawTargetWebgl::ClearSnapshot(bool aCopyOnWrite, bool aNeedHandle) {
131 if (!mSnapshot) {
132 return;
133 }
134 mSharedContext->ClearLastTexture();
135 RefPtr<SourceSurfaceWebgl> snapshot = mSnapshot.forget();
136 if (snapshot->hasOneRef()) {
137 return;
138 }
139 if (aCopyOnWrite) {
140 // WebGL snapshots must be notified that the framebuffer contents will be
141 // changing so that it can copy the data.
142 snapshot->DrawTargetWillChange(aNeedHandle);
143 } else {
144 // If not copying, then give the backing texture to the surface for reuse.
145 snapshot->GiveTexture(
146 mSharedContext->WrapSnapshot(GetSize(), GetFormat(), mTex.forget()));
147 }
148}
149
150DrawTargetWebgl::~DrawTargetWebgl() {
151 ClearSnapshot(false);
152 if (mSharedContext) {
153 // Force any Skia snapshots to copy the shmem before it deallocs.
154 if (mSkia) {
155 mSkia->DetachAllSnapshots();
156 }
157 mSharedContext->ClearLastTexture(true);
158 mClipMask = nullptr;
159 mFramebuffer = nullptr;
160 mTex = nullptr;
161 mSharedContext->mDrawTargetCount--;
162 }
163}
164
165SharedContextWebgl::SharedContextWebgl() = default;
166
167SharedContextWebgl::~SharedContextWebgl() {
168 // Detect context loss before deletion.
169 if (mWebgl) {
170 ExitTlsScope();
171 mWebgl->ActiveTexture(0);
172 }
173 if (mWGRPathBuilder) {
174 WGR::wgr_builder_release(mWGRPathBuilder);
175 mWGRPathBuilder = nullptr;
176 }
177 ClearAllTextures();
178 UnlinkSurfaceTextures();
179 UnlinkGlyphCaches();
180}
181
182gl::GLContext* SharedContextWebgl::GetGLContext() {
183 return mWebgl ? mWebgl->GL() : nullptr;
184}
185
186void SharedContextWebgl::EnterTlsScope() {
187 if (mTlsScope.isSome()) {
188 return;
189 }
190 if (gl::GLContext* gl = GetGLContext()) {
191 mTlsScope = Some(gl->mUseTLSIsCurrent);
192 gl::GLContext::InvalidateCurrentContext();
193 gl->mUseTLSIsCurrent = true;
194 }
195}
196
197void SharedContextWebgl::ExitTlsScope() {
198 if (mTlsScope.isNothing()) {
199 return;
200 }
201 if (gl::GLContext* gl = GetGLContext()) {
202 gl->mUseTLSIsCurrent = mTlsScope.value();
203 }
204 mTlsScope = Nothing();
205}
206
207// Remove any SourceSurface user data associated with this TextureHandle.
208inline void SharedContextWebgl::UnlinkSurfaceTexture(
209 const RefPtr<TextureHandle>& aHandle) {
210 if (RefPtr<SourceSurface> surface = aHandle->GetSurface()) {
211 // Ensure any WebGL snapshot textures get unlinked.
212 if (surface->GetType() == SurfaceType::WEBGL) {
213 static_cast<SourceSurfaceWebgl*>(surface.get())->OnUnlinkTexture(this);
214 }
215 surface->RemoveUserData(aHandle->IsShadow() ? &mShadowTextureKey
216 : &mTextureHandleKey);
217 }
218}
219
220// Unlinks TextureHandles from any SourceSurface user data.
221void SharedContextWebgl::UnlinkSurfaceTextures() {
222 for (RefPtr<TextureHandle> handle = mTextureHandles.getFirst(); handle;
223 handle = handle->getNext()) {
224 UnlinkSurfaceTexture(handle);
225 }
226}
227
228// Unlinks GlyphCaches from any ScaledFont user data.
229void SharedContextWebgl::UnlinkGlyphCaches() {
230 GlyphCache* cache = mGlyphCaches.getFirst();
231 while (cache) {
232 ScaledFont* font = cache->GetFont();
233 // Access the next cache before removing the user data, as it might destroy
234 // the cache.
235 cache = cache->getNext();
236 font->RemoveUserData(&mGlyphCacheKey);
237 }
238}
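The comment above describes a general iteration hazard; here is the same save-the-next-link-first idiom reduced to a standalone sketch (Node and DestroyAll are hypothetical, not Mozilla types):

// Generic form of the idiom: read the next pointer before any operation that
// may destroy the current element.
struct Node {
  Node* mNext = nullptr;
};

static void DestroyAll(Node* aFirst) {
  Node* node = aFirst;
  while (node) {
    Node* next = node->mNext;  // saved before the node is freed
    delete node;               // node is now dangling
    node = next;               // safe: only the saved pointer is used
  }
}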
239
240void SharedContextWebgl::OnMemoryPressure() { mShouldClearCaches = true; }
241
242void SharedContextWebgl::ClearCaches() {
243 OnMemoryPressure();
244 ClearCachesIfNecessary();
245}
246
247// Clear out the entire list of texture handles from any source.
248void SharedContextWebgl::ClearAllTextures() {
249 while (!mTextureHandles.isEmpty()) {
250 PruneTextureHandle(mTextureHandles.popLast());
251 --mNumTextureHandles;
252 }
253}
254
255// Scan through the shared texture pages looking for any that are empty and
256// delete them.
257void SharedContextWebgl::ClearEmptyTextureMemory() {
258 for (auto pos = mSharedTextures.begin(); pos != mSharedTextures.end();) {
259 if (!(*pos)->HasAllocatedHandles()) {
260 RefPtr<SharedTexture> shared = *pos;
261 size_t usedBytes = shared->UsedBytes();
262 mEmptyTextureMemory -= usedBytes;
263 mTotalTextureMemory -= usedBytes;
264 pos = mSharedTextures.erase(pos);
265 } else {
266 ++pos;
267 }
268 }
269}
270
271// If there is a request to clear out the caches because of memory pressure,
272// then first clear out all the texture handles in the texture cache. If there
273// are still empty texture pages being kept around, then clear those too.
274void SharedContextWebgl::ClearCachesIfNecessary() {
275 if (!mShouldClearCaches.exchange(false)) {
276 return;
277 }
278 mZeroBuffer = nullptr;
279 ClearAllTextures();
280 if (mEmptyTextureMemory) {
281 ClearEmptyTextureMemory();
282 }
283 ClearLastTexture();
284}
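A standalone sketch of the exchange-based flag used by OnMemoryPressure and ClearCachesIfNecessary above, with std::atomic<bool> standing in for the mozilla atomic member:

#include <atomic>

// Many code paths may request a clear, but exchange(false) atomically reads
// and resets the flag, so only the first check after the request does work.
static std::atomic<bool> sShouldClear{false};

void RequestClear() { sShouldClear = true; }

void ClearIfNecessary() {
  if (!sShouldClear.exchange(false)) {
    return;  // nothing requested, or another caller already claimed it
  }
  // ... drop caches here ...
}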
285
286// Try to initialize a new WebGL context. Verifies that the requested size does
287// not exceed the available texture limits and that shader creation succeeded.
288bool DrawTargetWebgl::Init(const IntSize& size, const SurfaceFormat format,
289 const RefPtr<SharedContextWebgl>& aSharedContext) {
290 MOZ_ASSERT(format == SurfaceFormat::B8G8R8A8 ||
291 format == SurfaceFormat::B8G8R8X8);
292
293 mSize = size;
294 mFormat = format;
295
296 if (!aSharedContext || aSharedContext->IsContextLost() ||
297 aSharedContext->mDrawTargetCount >=
298 StaticPrefs::gfx_canvas_accelerated_max_draw_target_count()) {
299 return false;
300 }
301 mSharedContext = aSharedContext;
302 mSharedContext->mDrawTargetCount++;
303
304 if (size_t(std::max(size.width, size.height)) >
305 mSharedContext->mMaxTextureSize) {
306 return false;
307 }
308
309 if (!CreateFramebuffer()) {
310 return false;
311 }
312
313 size_t byteSize = layers::ImageDataSerializer::ComputeRGBBufferSize(
314 mSize, SurfaceFormat::B8G8R8A8);
315 if (byteSize == 0) {
316 return false;
317 }
318
319 size_t shmemSize = mozilla::ipc::SharedMemory::PageAlignedSize(byteSize);
320 if (NS_WARN_IF(shmemSize > UINT32_MAX)) {
321 MOZ_ASSERT_UNREACHABLE("Buffer too big?");
323 }
324
325 auto shmem = MakeRefPtr<mozilla::ipc::SharedMemory>();
326 if (NS_WARN_IF(!shmem->Create(shmemSize)) ||
327 NS_WARN_IF(!shmem->Map(shmemSize))) {
328 return false;
329 }
330
331 mShmem = std::move(shmem);
332 mShmemSize = shmemSize;
333
334 mSkia = new DrawTargetSkia;
335 auto stride = layers::ImageDataSerializer::ComputeRGBStride(
336 SurfaceFormat::B8G8R8A8, size.width);
337 if (!mSkia->Init(reinterpret_cast<uint8_t*>(mShmem->Memory()), size, stride,
338 SurfaceFormat::B8G8R8A8, true)) {
339 return false;
340 }
341
342 // Allocate an unclipped copy of the DT pointing to its data.
343 uint8_t* dtData = nullptr;
344 IntSize dtSize;
345 int32_t dtStride = 0;
346 SurfaceFormat dtFormat = SurfaceFormat::UNKNOWN;
347 if (!mSkia->LockBits(&dtData, &dtSize, &dtStride, &dtFormat)) {
348 return false;
349 }
350 mSkiaNoClip = new DrawTargetSkia;
351 if (!mSkiaNoClip->Init(dtData, dtSize, dtStride, dtFormat, true)) {
352 mSkia->ReleaseBits(dtData);
353 return false;
354 }
355 mSkia->ReleaseBits(dtData);
356
357 SetPermitSubpixelAA(IsOpaque(format));
358 return true;
359}
360
361// Set if a non-recoverable error occurred that would stop the canvas from initializing.
362static Atomic<bool> sContextInitError(false);
363
364already_AddRefed<SharedContextWebgl> SharedContextWebgl::Create() {
365 // If context initialization would fail, don't even try to create a context.
366 if (sContextInitError) {
367 return nullptr;
368 }
369 RefPtr<SharedContextWebgl> sharedContext = new SharedContextWebgl;
370 if (!sharedContext->Initialize()) {
371 return nullptr;
372 }
373 return sharedContext.forget();
374}
375
376bool SharedContextWebgl::Initialize() {
377 WebGLContextOptions options = {};
378 options.alpha = true;
379 options.depth = false;
380 options.stencil = false;
381 options.antialias = false;
382 options.preserveDrawingBuffer = true;
383 options.failIfMajorPerformanceCaveat = false;
384
385 const bool resistFingerprinting = nsContentUtils::ShouldResistFingerprinting(
386 "Fallback", RFPTarget::WebGLRenderCapability);
387 const auto initDesc = webgl::InitContextDesc{
388 .isWebgl2 = true,
389 .resistFingerprinting = resistFingerprinting,
390 .principalKey = 0,
391 .size = {1, 1},
392 .options = options,
393 };
394
395 webgl::InitContextResult initResult;
396 mWebgl = WebGLContext::Create(nullptr, initDesc, &initResult);
397 if (!mWebgl) {
398 // There was a non-recoverable error when trying to create a host context.
399 sContextInitError = true;
400 mWebgl = nullptr;
401 return false;
402 }
403 if (mWebgl->IsContextLost()) {
404 mWebgl = nullptr;
405 return false;
406 }
407
408 mMaxTextureSize = initResult.limits.maxTex2dSize;
409
410 if (kIsMacOS) {
411 mRasterizationTruncates = initResult.vendor == gl::GLVendor::ATI;
412 }
413
414 CachePrefs();
415
416 if (!CreateShaders()) {
417 // There was a non-recoverable error when trying to init shaders.
418 sContextInitError = true;
419 mWebgl = nullptr;
420 return false;
421 }
422
423 mWGRPathBuilder = WGR::wgr_new_builder();
424
425 return true;
426}
427
428inline void SharedContextWebgl::BlendFunc(GLenum aSrcFactor,
429 GLenum aDstFactor) {
430 mWebgl->BlendFuncSeparate({}, aSrcFactor, aDstFactor, aSrcFactor, aDstFactor);
431}
432
433void SharedContextWebgl::SetBlendState(CompositionOp aOp,
434 const Maybe<DeviceColor>& aColor) {
435 if (aOp == mLastCompositionOp && mLastBlendColor == aColor) {
436 return;
437 }
438 mLastCompositionOp = aOp;
439 mLastBlendColor = aColor;
440 // AA is not supported for all composition ops, so switching blend modes may
441 // cause a toggle in AA state. Certain ops such as OP_SOURCE require output
442 // alpha that is blended separately from AA coverage. This would require two
443 // stage blending which can incur a substantial performance penalty, so to
444 // work around this currently we just disable AA for those ops.
445
446 // Map the composition op to a WebGL blend mode, if possible.
447 bool enabled = true;
448 switch (aOp) {
449 case CompositionOp::OP_OVER:
450 if (aColor) {
451 // If a color is supplied, then we blend subpixel text.
452 mWebgl->BlendColor(aColor->b, aColor->g, aColor->r, 1.0f);
453 BlendFunc(LOCAL_GL_CONSTANT_COLOR0x8001, LOCAL_GL_ONE_MINUS_SRC_COLOR0x0301);
454 } else {
455 BlendFunc(LOCAL_GL_ONE1, LOCAL_GL_ONE_MINUS_SRC_ALPHA0x0303);
456 }
457 break;
458 case CompositionOp::OP_ADD:
459 BlendFunc(LOCAL_GL_ONE1, LOCAL_GL_ONE1);
460 break;
461 case CompositionOp::OP_ATOP:
462 BlendFunc(LOCAL_GL_DST_ALPHA0x0304, LOCAL_GL_ONE_MINUS_SRC_ALPHA0x0303);
463 break;
464 case CompositionOp::OP_SOURCE:
465 if (aColor) {
466 // If a color is supplied, then we assume there is clipping or AA. This
467 // requires that we still use an over blend func with the clip/AA alpha,
468 // while filling the interior with the unaltered color. Normally this
469 // would require dual source blending, but we can emulate it with only
470 // a blend color.
471 mWebgl->BlendColor(aColor->b, aColor->g, aColor->r, aColor->a);
472 BlendFunc(LOCAL_GL_CONSTANT_COLOR0x8001, LOCAL_GL_ONE_MINUS_SRC_COLOR0x0301);
473 } else {
474 enabled = false;
475 }
476 break;
477 case CompositionOp::OP_CLEAR:
478 // Assume the source is an alpha mask for clearing. Be careful to blend in
479 // the correct alpha if the target is opaque.
480 mWebgl->BlendFuncSeparate(
481 {}, LOCAL_GL_ZERO0, LOCAL_GL_ONE_MINUS_SRC_ALPHA0x0303,
482 IsOpaque(mCurrentTarget->GetFormat()) ? LOCAL_GL_ONE1 : LOCAL_GL_ZERO0,
483 LOCAL_GL_ONE_MINUS_SRC_ALPHA0x0303);
484 break;
485 default:
486 enabled = false;
487 break;
488 }
489
490 mWebgl->SetEnabled(LOCAL_GL_BLEND0x0BE2, {}, enabled);
491}
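To make the blend-color cases above concrete: with BlendColor holding the fill color and BlendFunc(LOCAL_GL_CONSTANT_COLOR, LOCAL_GL_ONE_MINUS_SRC_COLOR), the fixed-function blend computes, per channel and with c standing for the coverage-like value the fragment shader emits,

  dst' = color * c + dst * (1 - c)

so fully covered pixels (c = 1) receive the unaltered color while partially covered clip/AA pixels are feathered like an over blend. That is the single-blend-color emulation of dual source blending described in the OP_SOURCE comment.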
492
493// Ensure the WebGL framebuffer is set to the current target.
494bool SharedContextWebgl::SetTarget(DrawTargetWebgl* aDT) {
495 if (!mWebgl || mWebgl->IsContextLost()) {
496 return false;
497 }
498 if (aDT != mCurrentTarget) {
499 mCurrentTarget = aDT;
500 if (aDT) {
501 mWebgl->BindFramebuffer(LOCAL_GL_FRAMEBUFFER0x8D40, aDT->mFramebuffer);
502 mViewportSize = aDT->GetSize();
503 mWebgl->Viewport(0, 0, mViewportSize.width, mViewportSize.height);
504 }
505 }
506 return true;
507}
508
509// Replace the current clip rect with a new potentially-AA'd clip rect.
510void SharedContextWebgl::SetClipRect(const Rect& aClipRect) {
511 // Only invalidate the clip rect if it actually changes.
512 if (!mClipAARect.IsEqualEdges(aClipRect)) {
513 mClipAARect = aClipRect;
514 // Store the integer-aligned bounds.
515 mClipRect = RoundedOut(aClipRect);
516 }
517}
518
519bool SharedContextWebgl::SetClipMask(const RefPtr<WebGLTexture>& aTex) {
520 if (mLastClipMask != aTex) {
521 if (!mWebgl) {
522 return false;
523 }
524 mWebgl->ActiveTexture(1);
525 mWebgl->BindTexture(LOCAL_GL_TEXTURE_2D0x0DE1, aTex);
526 mWebgl->ActiveTexture(0);
527 mLastClipMask = aTex;
528 }
529 return true;
530}
531
532bool SharedContextWebgl::SetNoClipMask() {
533 if (mNoClipMask) {
534 return SetClipMask(mNoClipMask);
535 }
536 if (!mWebgl) {
537 return false;
538 }
539 mNoClipMask = mWebgl->CreateTexture();
540 if (!mNoClipMask) {
541 return false;
542 }
543 mWebgl->ActiveTexture(1);
544 mWebgl->BindTexture(LOCAL_GL_TEXTURE_2D0x0DE1, mNoClipMask);
545 static const auto solidMask =
546 std::array<const uint8_t, 4>{0xFF, 0xFF, 0xFF, 0xFF};
547 mWebgl->TexImage(0, LOCAL_GL_RGBA80x8058, {0, 0, 0},
548 {LOCAL_GL_RGBA0x1908, LOCAL_GL_UNSIGNED_BYTE0x1401},
549 {LOCAL_GL_TEXTURE_2D0x0DE1,
550 {1, 1, 1},
551 gfxAlphaType::NonPremult,
552 Some(Span{solidMask})});
553 InitTexParameters(mNoClipMask, false);
554 mWebgl->ActiveTexture(0);
555 mLastClipMask = mNoClipMask;
556 return true;
557}
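Why a 1x1 solid-white texture can stand in for "no clipping": the fragment shaders later in this file always sample the clip mask and multiply by it, e.g.

  gl_FragColor = clip * aa * u_color;

so with every texel equal to 1.0, and REPEAT wrapping covering any v_cliptc, the clip factor becomes a no-op instead of requiring a separate unclipped shader variant.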
558
559inline bool DrawTargetWebgl::ClipStack::operator==(
560 const DrawTargetWebgl::ClipStack& aOther) const {
561 // Verify the transform and bounds match.
562 if (!mTransform.FuzzyEquals(aOther.mTransform) ||
563 !mRect.IsEqualInterior(aOther.mRect)) {
564 return false;
565 }
566 // Verify the paths match.
567 if (!mPath) {
568 return !aOther.mPath;
569 }
570 if (!aOther.mPath ||
571 mPath->GetBackendType() != aOther.mPath->GetBackendType()) {
572 return false;
573 }
574 if (mPath->GetBackendType() != BackendType::SKIA) {
575 return mPath == aOther.mPath;
576 }
577 return static_cast<const PathSkia*>(mPath.get())->GetPath() ==
578 static_cast<const PathSkia*>(aOther.mPath.get())->GetPath();
579}
580
581// If the clip region can't be approximated by a simple clip rect, then we need
582// to generate a clip mask that can represent the clip region per-pixel. We
583// render to the Skia target temporarily, transparent outside the clip region,
584// opaque inside, and upload this to a texture that can be used by the shaders.
585bool DrawTargetWebgl::GenerateComplexClipMask() {
586 if (!mClipChanged || (mClipMask && mCachedClipStack == mClipStack)) {
587 mClipChanged = false;
588 // If the clip mask was already generated, use the cached mask and bounds.
589 mSharedContext->SetClipMask(mClipMask);
590 mSharedContext->SetClipRect(mClipBounds);
591 return true;
592 }
593 if (!mWebglValid) {
594 // If the Skia target is currently being used, then we can't render the mask
595 // in it.
596 return false;
597 }
598 RefPtr<WebGLContext> webgl = mSharedContext->mWebgl;
599 if (!webgl) {
600 return false;
601 }
602 bool init = false;
603 if (!mClipMask) {
604 mClipMask = webgl->CreateTexture();
605 if (!mClipMask) {
606 return false;
607 }
608 init = true;
609 }
610 // Try to get the bounds of the clip to limit the size of the mask.
611 if (Maybe<IntRect> clip = mSkia->GetDeviceClipRect(true)) {
612 mClipBounds = *clip;
613 } else {
614 // If we can't get bounds, then just use the entire viewport.
615 mClipBounds = GetRect();
616 }
617 mClipAARect = Rect(mClipBounds);
618 // If initializing the clip mask, then allocate the entire texture to ensure
619 // all pixels get filled with an empty mask regardless. Otherwise, restrict
620 // uploading to only the clip region.
621 RefPtr<DrawTargetSkia> dt = new DrawTargetSkia;
622 if (!dt->Init(mClipBounds.Size(), SurfaceFormat::A8)) {
623 return false;
624 }
625 // Set the clip region and fill the entire inside of it
626 // with opaque white.
627 mCachedClipStack.clear();
628 for (auto& clipStack : mClipStack) {
629 // Record the current state of the clip stack for this mask.
630 mCachedClipStack.push_back(clipStack);
631 dt->SetTransform(
632 Matrix(clipStack.mTransform).PostTranslate(-mClipBounds.TopLeft()));
633 if (clipStack.mPath) {
634 dt->PushClip(clipStack.mPath);
635 } else {
636 dt->PushClipRect(clipStack.mRect);
637 }
638 }
639 dt->SetTransform(Matrix::Translation(-mClipBounds.TopLeft()));
640 dt->FillRect(Rect(mClipBounds), ColorPattern(DeviceColor(1, 1, 1, 1)));
641 // Bind the clip mask for uploading. This is done on texture unit 0 so that
642 // we can work around a Windows Intel driver bug. If done on texture unit 1,
643 // the driver doesn't notice that the texture contents were modified. Force a
644 // re-latch by binding the texture on texture unit 1 only after modification.
645 webgl->BindTexture(LOCAL_GL_TEXTURE_2D0x0DE1, mClipMask);
646 if (init) {
647 mSharedContext->InitTexParameters(mClipMask, false);
648 }
649 RefPtr<DataSourceSurface> data;
650 if (RefPtr<SourceSurface> snapshot = dt->Snapshot()) {
651 data = snapshot->GetDataSurface();
652 }
653 // Finally, upload the texture data and initialize texture storage if
654 // necessary.
655 if (init && mClipBounds.Size() != mSize) {
656 mSharedContext->UploadSurface(nullptr, SurfaceFormat::A8, GetRect(),
657 IntPoint(), true, true);
658 init = false;
659 }
660 mSharedContext->UploadSurface(data, SurfaceFormat::A8,
661 IntRect(IntPoint(), mClipBounds.Size()),
662 mClipBounds.TopLeft(), init);
663 mSharedContext->ClearLastTexture();
664 // Bind the new clip mask to the clip sampler on texture unit 1.
665 mSharedContext->SetClipMask(mClipMask);
666 mSharedContext->SetClipRect(mClipBounds);
667 // We uploaded a surface, just as if we missed the texture cache, so account
668 // for that here.
669 mProfile.OnCacheMiss();
670 return !!data;
671}
672
673bool DrawTargetWebgl::SetSimpleClipRect() {
674 // Determine whether the clipping rectangle is simple enough to accelerate.
675 // Check if there is a device space clip rectangle available from the Skia
676 // target.
677 if (Maybe<IntRect> clip = mSkia->GetDeviceClipRect(false)) {
678 // If the clip is empty, leave the final integer clip rectangle empty to
679 // trivially discard the draw request.
680 // If the clip rect is larger than the viewport, just set it to the
681 // viewport.
682 if (!clip->IsEmpty() && clip->Contains(GetRect())) {
683 clip = Some(GetRect());
684 }
685 mSharedContext->SetClipRect(*clip);
686 mSharedContext->SetNoClipMask();
687 return true;
688 }
689
690 // There was no pixel-aligned clip rect available, so check the clip stack to
691 // see if there is an AA'd axis-aligned rectangle clip.
692 Rect rect(GetRect());
693 for (auto& clipStack : mClipStack) {
694 // If clip is a path or it has a non-axis-aligned transform, then it is
695 // complex.
696 if (clipStack.mPath ||
697 !clipStack.mTransform.PreservesAxisAlignedRectangles()) {
698 return false;
699 }
700 // Transform the rect and intersect it with the current clip.
701 rect =
702 clipStack.mTransform.TransformBounds(clipStack.mRect).Intersect(rect);
703 }
704 mSharedContext->SetClipRect(rect);
705 mSharedContext->SetNoClipMask();
706 return true;
707}
708
709// Installs the Skia clip rectangle, if applicable, onto the shared WebGL
710// context as well as sets the WebGL framebuffer to the current target.
711bool DrawTargetWebgl::PrepareContext(bool aClipped) {
712 if (!aClipped) {
713 // If no clipping requested, just set the clip rect to the viewport.
714 mSharedContext->SetClipRect(GetRect());
715 mSharedContext->SetNoClipMask();
716 // Ensure the clip gets reset if clipping is later requested for the target.
717 mRefreshClipState = true;
718 } else if (mRefreshClipState || !mSharedContext->IsCurrentTarget(this)) {
719 // Try to use a simple clip rect if possible. Otherwise, fall back to
720 // generating a clip mask texture that can represent complex clip regions.
721 if (!SetSimpleClipRect() && !GenerateComplexClipMask()) {
722 return false;
723 }
724 mClipChanged = false;
725 mRefreshClipState = false;
726 }
727 return mSharedContext->SetTarget(this);
728}
729
730bool SharedContextWebgl::IsContextLost() const {
731 return !mWebgl || mWebgl->IsContextLost();
732}
733
734// Signal to CanvasRenderingContext2D when the WebGL context is lost.
735bool DrawTargetWebgl::IsValid() const {
736 return mSharedContext && !mSharedContext->IsContextLost();
737}
738
739bool DrawTargetWebgl::CanCreate(const IntSize& aSize, SurfaceFormat aFormat) {
740 if (!gfxVars::UseAcceleratedCanvas2D()) {
741 return false;
742 }
743
744 if (!Factory::AllowedSurfaceSize(aSize)) {
745 return false;
746 }
747
748 // The interpretation of the min-size and max-size follows from the old
749 // SkiaGL prefs. First just ensure that the context is not unreasonably
750 // small.
751 static const int32_t kMinDimension = 16;
752 if (std::min(aSize.width, aSize.height) < kMinDimension) {
753 return false;
754 }
755
756 int32_t minSize = StaticPrefs::gfx_canvas_accelerated_min_size();
757 if (aSize.width * aSize.height < minSize * minSize) {
758 return false;
759 }
760
761 // Maximum pref allows 3 different options:
762 // 0 means unlimited size,
763 // > 0 means use value as an absolute threshold,
764 // < 0 means use the number of screen pixels as a threshold.
765 int32_t maxSize = StaticPrefs::gfx_canvas_accelerated_max_size();
766 if (maxSize > 0) {
767 if (std::max(aSize.width, aSize.height) > maxSize) {
768 return false;
769 }
770 } else if (maxSize < 0) {
771 // Default to historical mobile screen size of 980x480, like FishIEtank.
772 // In addition, allow acceleration up to this size even if the screen is
773 // smaller. A lot of content expects this size to work well. See Bug 999841
774 static const int32_t kScreenPixels = 980 * 480;
775
776 if (RefPtr<widget::Screen> screen =
777 widget::ScreenManager::GetSingleton().GetPrimaryScreen()) {
778 LayoutDeviceIntSize screenSize = screen->GetRect().Size();
779 if (aSize.width * aSize.height >
780 std::max(screenSize.width * screenSize.height, kScreenPixels)) {
781 return false;
782 }
783 }
784 }
785
786 return true;
787}
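A worked example of the size gates above, with values assumed purely for illustration: if the max-size pref read above returns -1 and the primary screen is 1920x1080, the pixel ceiling is max(1920*1080, 980*480) = max(2073600, 470400) = 2073600. Assuming the min-size pref is satisfied, a 1280x720 canvas (921600 pixels) is accepted, a 2048x1536 canvas (3145728 pixels) is rejected, and a 10x300 canvas is rejected earlier because its smaller dimension is below kMinDimension = 16.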
788
789already_AddRefed<DrawTargetWebgl> DrawTargetWebgl::Create(
790 const IntSize& aSize, SurfaceFormat aFormat,
791 const RefPtr<SharedContextWebgl>& aSharedContext) {
792 // Validate the size and format.
793 if (!CanCreate(aSize, aFormat)) {
794 return nullptr;
795 }
796
797 RefPtr<DrawTargetWebgl> dt = new DrawTargetWebgl;
798 if (!dt->Init(aSize, aFormat, aSharedContext) || !dt->IsValid()) {
799 return nullptr;
800 }
801
802 return dt.forget();
803}
804
805void* DrawTargetWebgl::GetNativeSurface(NativeSurfaceType aType) {
806 switch (aType) {
807 case NativeSurfaceType::WEBGL_CONTEXT:
808 // If the context is lost, then don't attempt to access it.
809 if (mSharedContext->IsContextLost()) {
810 return nullptr;
811 }
812 if (!mWebglValid) {
813 FlushFromSkia();
814 }
815 return mSharedContext->mWebgl.get();
816 default:
817 return nullptr;
818 }
819}
820
821// Wrap a WebGL texture holding a snapshot with a texture handle. Note that
822// while the texture is still in use as the backing texture of a framebuffer,
823// its texture memory is not currently tracked with other texture handles.
824// Once it is finally orphaned and used as a texture handle, it must be added
825// to the resource usage totals.
826already_AddRefed<TextureHandle> SharedContextWebgl::WrapSnapshot(
827 const IntSize& aSize, SurfaceFormat aFormat, RefPtr<WebGLTexture> aTex) {
828 // Ensure there is enough space for the texture.
829 size_t usedBytes = BackingTexture::UsedBytes(aFormat, aSize);
830 PruneTextureMemory(usedBytes, false);
831 // Allocate a handle for the texture
832 RefPtr<StandaloneTexture> handle =
833 new StandaloneTexture(aSize, aFormat, aTex.forget());
834 mStandaloneTextures.push_back(handle);
835 mTextureHandles.insertFront(handle);
836 mTotalTextureMemory += usedBytes;
837 mUsedTextureMemory += usedBytes;
838 ++mNumTextureHandles;
839 return handle.forget();
840}
841
842void SharedContextWebgl::SetTexFilter(WebGLTexture* aTex, bool aFilter) {
843 mWebgl->TexParameter_base(
844 LOCAL_GL_TEXTURE_2D0x0DE1, LOCAL_GL_TEXTURE_MAG_FILTER0x2800,
845 FloatOrInt(aFilter ? LOCAL_GL_LINEAR0x2601 : LOCAL_GL_NEAREST0x2600));
846 mWebgl->TexParameter_base(
847 LOCAL_GL_TEXTURE_2D0x0DE1, LOCAL_GL_TEXTURE_MIN_FILTER0x2801,
848 FloatOrInt(aFilter ? LOCAL_GL_LINEAR0x2601 : LOCAL_GL_NEAREST0x2600));
849}
850
851void SharedContextWebgl::InitTexParameters(WebGLTexture* aTex, bool aFilter) {
852 mWebgl->TexParameter_base(LOCAL_GL_TEXTURE_2D0x0DE1, LOCAL_GL_TEXTURE_WRAP_S0x2802,
853 FloatOrInt(LOCAL_GL_REPEAT0x2901));
854 mWebgl->TexParameter_base(LOCAL_GL_TEXTURE_2D0x0DE1, LOCAL_GL_TEXTURE_WRAP_T0x2803,
855 FloatOrInt(LOCAL_GL_REPEAT0x2901));
856 SetTexFilter(aTex, aFilter);
857}
858
859// Copy the contents of the WebGL framebuffer into a WebGL texture.
860already_AddRefed<TextureHandle> SharedContextWebgl::CopySnapshot(
861 const IntRect& aRect, TextureHandle* aHandle) {
862 if (!mWebgl || mWebgl->IsContextLost()) {
863 return nullptr;
864 }
865
866 // If the target is going away, then we can just directly reuse the
867 // framebuffer texture since it will never change.
868 RefPtr<WebGLTexture> tex = mWebgl->CreateTexture();
869 if (!tex) {
870 return nullptr;
871 }
872
873 // If copying from a non-DT source, we have to bind a scratch framebuffer for
874 // reading.
875 if (aHandle) {
876 if (!mScratchFramebuffer) {
877 mScratchFramebuffer = mWebgl->CreateFramebuffer();
878 }
879 mWebgl->BindFramebuffer(LOCAL_GL_FRAMEBUFFER0x8D40, mScratchFramebuffer);
880
881 webgl::FbAttachInfo attachInfo;
882 attachInfo.tex = aHandle->GetBackingTexture()->GetWebGLTexture();
883 mWebgl->FramebufferAttach(LOCAL_GL_FRAMEBUFFER0x8D40, LOCAL_GL_COLOR_ATTACHMENT00x8CE0,
884 LOCAL_GL_TEXTURE_2D0x0DE1, attachInfo);
885 }
886
887 // Create a texture to hold the copy
888 mWebgl->BindTexture(LOCAL_GL_TEXTURE_2D0x0DE1, tex);
889 mWebgl->TexStorage(LOCAL_GL_TEXTURE_2D0x0DE1, 1, LOCAL_GL_RGBA80x8058,
890 {uint32_t(aRect.width), uint32_t(aRect.height), 1});
891 InitTexParameters(tex);
892 // Copy the framebuffer into the texture
893 mWebgl->CopyTexImage(LOCAL_GL_TEXTURE_2D0x0DE1, 0, 0, {0, 0, 0}, {aRect.x, aRect.y},
894 {uint32_t(aRect.width), uint32_t(aRect.height)});
895 ClearLastTexture();
896
897 SurfaceFormat format =
898 aHandle ? aHandle->GetFormat() : mCurrentTarget->GetFormat();
899 already_AddRefed<TextureHandle> result =
900 WrapSnapshot(aRect.Size(), format, tex.forget());
901
902 // Restore the actual framebuffer after reading is done.
903 if (aHandle && mCurrentTarget) {
904 mWebgl->BindFramebuffer(LOCAL_GL_FRAMEBUFFER0x8D40, mCurrentTarget->mFramebuffer);
905 }
906
907 return result;
908}
909
910inline DrawTargetWebgl::AutoRestoreContext::AutoRestoreContext(
911 DrawTargetWebgl* aTarget)
912 : mTarget(aTarget),
913 mClipAARect(aTarget->mSharedContext->mClipAARect),
914 mLastClipMask(aTarget->mSharedContext->mLastClipMask) {}
915
916inline DrawTargetWebgl::AutoRestoreContext::~AutoRestoreContext() {
917 mTarget->mSharedContext->SetClipRect(mClipAARect);
918 if (mLastClipMask) {
919 mTarget->mSharedContext->SetClipMask(mLastClipMask);
920 }
921 mTarget->mRefreshClipState = true;
922}
923
924// Utility method to install the target before copying a snapshot.
925already_AddRefed<TextureHandle> DrawTargetWebgl::CopySnapshot(
926 const IntRect& aRect) {
927 AutoRestoreContext restore(this);
928 if (!PrepareContext(false)) {
929 return nullptr;
930 }
931 return mSharedContext->CopySnapshot(aRect);
932}
933
934bool DrawTargetWebgl::HasDataSnapshot() const {
935 return (mSkiaValid && !mSkiaLayer) || (mSnapshot && mSnapshot->HasReadData());
936}
937
938bool DrawTargetWebgl::PrepareSkia() {
939 if (!mSkiaValid) {
940 ReadIntoSkia();
941 } else if (mSkiaLayer) {
942 FlattenSkia();
943 }
944 return mSkiaValid;
945}
946
947bool DrawTargetWebgl::EnsureDataSnapshot() {
948 return HasDataSnapshot() || PrepareSkia();
949}
950
951void DrawTargetWebgl::PrepareShmem() { PrepareSkia(); }
952
953// Borrow a snapshot that may be used by another thread for composition. Only
954// Skia snapshots are safe to pass around.
955already_AddRefed<SourceSurface> DrawTargetWebgl::GetDataSnapshot() {
956 PrepareSkia();
957 return mSkia->Snapshot(mFormat);
958}
959
960already_AddRefed<SourceSurface> DrawTargetWebgl::Snapshot() {
961 // If already using the Skia fallback, then just snapshot that.
962 if (mSkiaValid) {
963 return GetDataSnapshot();
964 }
965
966 // There's no valid Skia snapshot, so we need to get one from the WebGL
967 // context.
968 if (!mSnapshot) {
969 // Create a copy-on-write reference to this target.
970 mSnapshot = new SourceSurfaceWebgl(this);
971 }
972 return do_AddRef(mSnapshot);
973}
974
975// If we need to provide a snapshot for another DrawTargetWebgl that shares the
976// same WebGL context, then it is safe to directly return a snapshot. Otherwise,
977// we may be exporting to another thread and require a data snapshot.
978already_AddRefed<SourceSurface> DrawTargetWebgl::GetOptimizedSnapshot(
979 DrawTarget* aTarget) {
980 if (aTarget && aTarget->GetBackendType() == BackendType::WEBGL &&
981 static_cast<DrawTargetWebgl*>(aTarget)->mSharedContext ==
982 mSharedContext) {
983 return Snapshot();
984 }
985 return GetDataSnapshot();
986}
987
988// Read from the WebGL context into a buffer. This handles both swizzling BGRA
989// to RGBA and flipping the image.
990bool SharedContextWebgl::ReadInto(uint8_t* aDstData, int32_t aDstStride,
991 SurfaceFormat aFormat, const IntRect& aBounds,
992 TextureHandle* aHandle) {
993 MOZ_ASSERT(aFormat == SurfaceFormat::B8G8R8A8 ||
994 aFormat == SurfaceFormat::B8G8R8X8);
995
996 // If reading into a new texture, we have to bind it to a scratch framebuffer
997 // for reading.
998 if (aHandle) {
999 if (!mScratchFramebuffer) {
1000 mScratchFramebuffer = mWebgl->CreateFramebuffer();
1001 }
1002 mWebgl->BindFramebuffer(LOCAL_GL_FRAMEBUFFER0x8D40, mScratchFramebuffer);
1003 webgl::FbAttachInfo attachInfo;
1004 attachInfo.tex = aHandle->GetBackingTexture()->GetWebGLTexture();
1005 mWebgl->FramebufferAttach(LOCAL_GL_FRAMEBUFFER0x8D40, LOCAL_GL_COLOR_ATTACHMENT00x8CE0,
1006 LOCAL_GL_TEXTURE_2D0x0DE1, attachInfo);
1007 } else if (mCurrentTarget && mCurrentTarget->mIsClear) {
1008 // If reading from a target that is still clear, then avoid the readback by
1009 // just clearing the data.
1010 SkPixmap(MakeSkiaImageInfo(aBounds.Size(), aFormat), aDstData, aDstStride)
1011 .erase(IsOpaque(aFormat) ? SK_ColorBLACK : SK_ColorTRANSPARENT);
1012 return true;
1013 }
1014
1015 webgl::ReadPixelsDesc desc;
1016 desc.srcOffset = *ivec2::From(aBounds);
1017 desc.size = *uvec2::FromSize(aBounds);
1018 desc.packState.rowLength = aDstStride / 4;
1019 Range<uint8_t> range = {aDstData, size_t(aDstStride) * aBounds.height};
1020 mWebgl->ReadPixelsInto(desc, range);
1021
1022 // Restore the actual framebuffer after reading is done.
1023 if (aHandle && mCurrentTarget) {
1024 mWebgl->BindFramebuffer(LOCAL_GL_FRAMEBUFFER0x8D40, mCurrentTarget->mFramebuffer);
1025 }
1026
1027 return true;
1028}
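One unit detail worth spelling out: packState.rowLength follows GL's pack-row-length convention and is measured in pixels, while aDstStride is in bytes; B8G8R8A8 and B8G8R8X8 are 4 bytes per pixel, hence the division by 4. With illustrative numbers, reading a 100-pixel-wide rect into a destination whose rows are padded to a 416-byte stride gives rowLength = 416 / 4 = 104, so each destination row ends with 4 pixels of untouched padding.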
1029
1030already_AddRefed<DataSourceSurface> SharedContextWebgl::ReadSnapshot(
1031 TextureHandle* aHandle) {
1032 // Allocate a data surface, map it, and read from the WebGL context into the
1033 // surface.
1034 SurfaceFormat format = SurfaceFormat::UNKNOWN;
1035 IntRect bounds;
1036 if (aHandle) {
1037 format = aHandle->GetFormat();
1038 bounds = aHandle->GetBounds();
1039 } else {
1040 format = mCurrentTarget->GetFormat();
1041 bounds = mCurrentTarget->GetRect();
1042 }
1043 RefPtr<DataSourceSurface> surface =
1044 Factory::CreateDataSourceSurface(bounds.Size(), format);
1045 if (!surface) {
1046 return nullptr;
1047 }
1048 DataSourceSurface::ScopedMap dstMap(surface, DataSourceSurface::WRITE);
1049 if (!dstMap.IsMapped() || !ReadInto(dstMap.GetData(), dstMap.GetStride(),
1050 format, bounds, aHandle)) {
1051 return nullptr;
1052 }
1053 return surface.forget();
1054}
1055
1056// Utility method to install the target before reading a snapshot.
1057bool DrawTargetWebgl::ReadInto(uint8_t* aDstData, int32_t aDstStride) {
1058 if (!PrepareContext(false)) {
1059 return false;
1060 }
1061
1062 return mSharedContext->ReadInto(aDstData, aDstStride, GetFormat(), GetRect());
1063}
1064
1065// Utility method to install the target before reading a snapshot.
1066already_AddRefed<DataSourceSurface> DrawTargetWebgl::ReadSnapshot() {
1067 AutoRestoreContext restore(this);
1068 if (!PrepareContext(false)) {
1069 return nullptr;
1070 }
1071 mProfile.OnReadback();
1072 return mSharedContext->ReadSnapshot();
1073}
1074
1075already_AddRefed<SourceSurface> DrawTargetWebgl::GetBackingSurface() {
1076 return Snapshot();
1077}
1078
1079void DrawTargetWebgl::DetachAllSnapshots() {
1080 mSkia->DetachAllSnapshots();
1081 ClearSnapshot();
1082}
1083
1084// Prepare the framebuffer for accelerated drawing. Any cached snapshots will
1085// be invalidated if not detached and copied here. Ensure the WebGL
1086// framebuffer's contents are updated if still somehow stored in the Skia
1087// framebuffer.
1088bool DrawTargetWebgl::MarkChanged() {
1089 if (mSnapshot) {
1090 // Try to copy the target into a new texture if possible.
1091 ClearSnapshot(true, true);
1092 }
1093 if (!mWebglValid && !FlushFromSkia()) {
1094 return false;
1095 }
1096 mSkiaValid = false;
1097 mIsClear = false;
1098 return true;
1099}
1100
1101void DrawTargetWebgl::MarkSkiaChanged(bool aOverwrite) {
1102 if (aOverwrite) {
1103 mSkiaValid = true;
1104 mSkiaLayer = false;
1105 } else if (!mSkiaValid) {
1106 if (ReadIntoSkia()) {
1107 // Signal that we've hit a complete software fallback.
1108 mProfile.OnFallback();
1109 }
1110 } else if (mSkiaLayer) {
1111 FlattenSkia();
1112 }
1113 mWebglValid = false;
1114 mIsClear = false;
1115}
1116
1117// Whether a given composition operator is associative and thus allows drawing
1118// into a separate layer that can be later composited back into the WebGL
1119// context.
1120static inline bool SupportsLayering(const DrawOptions& aOptions) {
1121 switch (aOptions.mCompositionOp) {
1122 case CompositionOp::OP_OVER:
1123 // Layering is only supported for the default source-over composition op.
1124 return true;
1125 default:
1126 return false;
1127 }
1128}
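The associativity behind the comment above, in premultiplied-alpha terms: source-over is C_out = C_src + (1 - A_src) * C_dst (and likewise for alpha), and for any draws A, B over a destination D,

  (A over B) over D = A over (B over D)

so a run of OP_OVER draws can be accumulated into a transparent layer and the finished layer composited onto the WebGL target once, yielding the same pixels as drawing each primitive directly. Ops that lack this property, or that must replace the destination such as OP_SOURCE and OP_CLEAR, cannot be deferred into a layer, which is why MarkSkiaChanged(const DrawOptions&) below falls back to the plain MarkSkiaChanged() path for them.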
1129
1130void DrawTargetWebgl::MarkSkiaChanged(const DrawOptions& aOptions) {
1131 if (SupportsLayering(aOptions)) {
1132 if (!mSkiaValid) {
1133 // If the Skia context needs initialization, clear it and enable layering.
1134 mSkiaValid = true;
1135 if (mWebglValid) {
1136 mProfile.OnLayer();
1137 mSkiaLayer = true;
1138 mSkiaLayerClear = mIsClear;
1139 mSkia->DetachAllSnapshots();
1140 if (mSkiaLayerClear) {
1141 // Avoid blending later by making sure the layer background is filled
1142 // with opaque alpha values if necessary.
1143 mSkiaNoClip->FillRect(Rect(mSkiaNoClip->GetRect()), GetClearPattern(),
1144 DrawOptions(1.0f, CompositionOp::OP_SOURCE));
1145 } else {
1146 mSkiaNoClip->ClearRect(Rect(mSkiaNoClip->GetRect()));
1147 }
1148 }
1149 }
1150 // The WebGL context is no longer up-to-date.
1151 mWebglValid = false;
1152 mIsClear = false;
1153 } else {
1154 // For other composition ops, just overwrite the Skia data.
1155 MarkSkiaChanged();
1156 }
1157}
1158
1159bool DrawTargetWebgl::LockBits(uint8_t** aData, IntSize* aSize,
1160 int32_t* aStride, SurfaceFormat* aFormat,
1161 IntPoint* aOrigin) {
1162 // Can only access pixels if there is valid, flattened Skia data.
1163 if (mSkiaValid && !mSkiaLayer) {
1164 MarkSkiaChanged();
1165 return mSkia->LockBits(aData, aSize, aStride, aFormat, aOrigin);
1166 }
1167 return false;
1168}
1169
1170void DrawTargetWebgl::ReleaseBits(uint8_t* aData) {
1171 // Can only access pixels if there is valid, flattened Skia data.
1172 if (mSkiaValid && !mSkiaLayer) {
1173 mSkia->ReleaseBits(aData);
1174 }
1175}
1176
1177// Format is x, y, alpha
1178static const float kRectVertexData[12] = {0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,
1179 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f};
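These twelve floats are the four corners of a unit rectangle (x and y each 0 or 1) with a constant per-vertex alpha of 1.0 in the third component. In the vertex shaders later in this file each corner c is extruded to c + invScale * (2c - 1), so corner 0 moves to -invScale and corner 1 to 1 + invScale; since invScale is derived from u_aa and the transform's basis length, this grows the rectangle by roughly one screen pixel per edge, giving the fragment shader room to ramp antialiasing coverage down to zero at the boundary.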
1180
1181// Orphans the contents of the path vertex buffer. The beginning of the buffer
1182// always contains data for a simple rectangle draw to avoid needing to switch
1183// buffers.
1184void SharedContextWebgl::ResetPathVertexBuffer(bool aChanged) {
1185 mWebgl->BindBuffer(LOCAL_GL_ARRAY_BUFFER0x8892, mPathVertexBuffer.get());
1186 mWebgl->UninitializedBufferData_SizeOnly(
1187 LOCAL_GL_ARRAY_BUFFER0x8892,
1188 std::max(size_t(mPathVertexCapacity), sizeof(kRectVertexData)),
1189 LOCAL_GL_DYNAMIC_DRAW0x88E8);
1190 mWebgl->BufferSubData(LOCAL_GL_ARRAY_BUFFER0x8892, 0, sizeof(kRectVertexData),
1191 (const uint8_t*)kRectVertexData);
1192 mPathVertexOffset = sizeof(kRectVertexData);
1193 if (aChanged) {
1194 mWGROutputBuffer.reset(
1195 mPathVertexCapacity > 0
1196 ? new (fallible) WGR::OutputVertex[mPathVertexCapacity /
1197 sizeof(WGR::OutputVertex)]
1198 : nullptr);
1199 }
1200}
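For readers unfamiliar with the idiom, the two calls above are the WebGL-host analogue of classic GL buffer orphaning; a minimal sketch in raw desktop GL, illustrative only and assuming a current GL context rather than the WebGLContext wrapper used here:

// Orphan-and-refill: requesting fresh storage with glBufferData lets the
// driver hand back new memory instead of stalling on a buffer the GPU may
// still be reading from earlier draws.
static void OrphanAndPrimeVertexBuffer(GLuint aBuffer, GLsizeiptr aCapacity) {
  static const float kQuad[12] = {0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1};
  glBindBuffer(GL_ARRAY_BUFFER, aBuffer);
  // 1. Orphan: new, uninitialized storage of the full capacity.
  glBufferData(GL_ARRAY_BUFFER, aCapacity, nullptr, GL_DYNAMIC_DRAW);
  // 2. Refill only the small fixed prefix (the rect vertices), mirroring the
  //    BufferSubData of kRectVertexData above; later path data is appended
  //    after this prefix, as tracked by mPathVertexOffset.
  glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(kQuad), kQuad);
}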
1201
1202// Attempts to create all shaders and resources to be used for drawing commands.
1203// Returns whether or not this succeeded.
1204bool SharedContextWebgl::CreateShaders() {
1205 if (!mPathVertexArray) {
1206 mPathVertexArray = mWebgl->CreateVertexArray();
1207 }
1208 if (!mPathVertexBuffer) {
1209 mPathVertexBuffer = mWebgl->CreateBuffer();
1210 mWebgl->BindVertexArray(mPathVertexArray.get());
1211 ResetPathVertexBuffer();
1212 mWebgl->EnableVertexAttribArray(0);
1213
1214 webgl::VertAttribPointerDesc attribDesc;
1215 attribDesc.channels = 3;
1216 attribDesc.type = LOCAL_GL_FLOAT0x1406;
1217 attribDesc.normalized = false;
1218 mWebgl->VertexAttribPointer(0, attribDesc);
1219 }
1220 if (!mSolidProgram) {
1221 // AA is computed by using the basis vectors of the transform to determine
1222 // both the scale and orientation. The scale is then used to extrude the
1223 // rectangle outward by 1 screen-space pixel to account for the AA region.
1224 // The distance to the rectangle edges is passed to the fragment shader in
1225 // an interpolant, biased by 0.5 so it represents the desired coverage. The
1226 // minimum coverage is then chosen by the fragment shader to use as an AA
1227 // coverage value to modulate the color.
1228 auto vsSource =
1229 "attribute vec3 a_vertex;\n"
1230 "uniform vec2 u_transform[3];\n"
1231 "uniform vec2 u_viewport;\n"
1232 "uniform vec4 u_clipbounds;\n"
1233 "uniform float u_aa;\n"
1234 "varying vec2 v_cliptc;\n"
1235 "varying vec4 v_clipdist;\n"
1236 "varying vec4 v_dist;\n"
1237 "varying float v_alpha;\n"
1238 "void main() {\n"
1239 " vec2 scale = vec2(dot(u_transform[0], u_transform[0]),\n"
1240 " dot(u_transform[1], u_transform[1]));\n"
1241 " vec2 invScale = u_aa * inversesqrt(scale + 1.0e-6);\n"
1242 " scale *= invScale;\n"
1243 " vec2 extrude = a_vertex.xy +\n"
1244 " invScale * (2.0 * a_vertex.xy - 1.0);\n"
1245 " vec2 vertex = u_transform[0] * extrude.x +\n"
1246 " u_transform[1] * extrude.y +\n"
1247 " u_transform[2];\n"
1248 " gl_Position = vec4(vertex * 2.0 / u_viewport - 1.0, 0.0, 1.0);\n"
1249 " v_cliptc = vertex / u_viewport;\n"
1250 " v_clipdist = vec4(vertex - u_clipbounds.xy,\n"
1251 " u_clipbounds.zw - vertex);\n"
1252 " float noAA = 1.0 - u_aa;\n"
1253 " v_dist = vec4(extrude, 1.0 - extrude) * scale.xyxy + 0.5 + noAA;\n"
1254 " v_alpha = min(a_vertex.z,\n"
1255 " min(scale.x, 1.0) * min(scale.y, 1.0) + noAA);\n"
1256 "}\n";
1257 auto fsSource =
1258 "precision mediump float;\n"
1259 "uniform vec4 u_color;\n"
1260 "uniform sampler2D u_clipmask;\n"
1261 "varying highp vec2 v_cliptc;\n"
1262 "varying vec4 v_clipdist;\n"
1263 "varying vec4 v_dist;\n"
1264 "varying float v_alpha;\n"
1265 "void main() {\n"
1266 " float clip = texture2D(u_clipmask, v_cliptc).r;\n"
1267 " vec4 dist = min(v_dist, v_clipdist);\n"
1268 " dist.xy = min(dist.xy, dist.zw);\n"
1269 " float aa = clamp(min(dist.x, dist.y), 0.0, v_alpha);\n"
1270 " gl_FragColor = clip * aa * u_color;\n"
1271 "}\n";
1272 RefPtr<WebGLShader> vsId = mWebgl->CreateShader(LOCAL_GL_VERTEX_SHADER0x8B31);
1273 mWebgl->ShaderSource(*vsId, vsSource);
1274 mWebgl->CompileShader(*vsId);
1275 if (!mWebgl->GetCompileResult(*vsId).success) {
1276 return false;
1277 }
1278 RefPtr<WebGLShader> fsId = mWebgl->CreateShader(LOCAL_GL_FRAGMENT_SHADER0x8B30);
1279 mWebgl->ShaderSource(*fsId, fsSource);
1280 mWebgl->CompileShader(*fsId);
1281 if (!mWebgl->GetCompileResult(*fsId).success) {
1282 return false;
1283 }
1284 mSolidProgram = mWebgl->CreateProgram();
1285 mWebgl->AttachShader(*mSolidProgram, *vsId);
1286 mWebgl->AttachShader(*mSolidProgram, *fsId);
1287 mWebgl->BindAttribLocation(*mSolidProgram, 0, "a_vertex");
1288 mWebgl->LinkProgram(*mSolidProgram);
1289 if (!mWebgl->GetLinkResult(*mSolidProgram).success) {
1290 return false;
1291 }
1292 mSolidProgramViewport = GetUniformLocation(mSolidProgram, "u_viewport");
1293 mSolidProgramAA = GetUniformLocation(mSolidProgram, "u_aa");
1294 mSolidProgramTransform = GetUniformLocation(mSolidProgram, "u_transform");
1295 mSolidProgramColor = GetUniformLocation(mSolidProgram, "u_color");
1296 mSolidProgramClipMask = GetUniformLocation(mSolidProgram, "u_clipmask");
1297 mSolidProgramClipBounds = GetUniformLocation(mSolidProgram, "u_clipbounds");
1298 if (!mSolidProgramViewport || !mSolidProgramAA || !mSolidProgramTransform ||
1299 !mSolidProgramColor || !mSolidProgramClipMask ||
1300 !mSolidProgramClipBounds) {
1301 return false;
1302 }
1303 mWebgl->UseProgram(mSolidProgram);
1304 UniformData(LOCAL_GL_INT0x1404, mSolidProgramClipMask, Array<int32_t, 1>{1});
1305 }
1306
1307 if (!mImageProgram) {
1308 auto vsSource =
1309 "attribute vec3 a_vertex;\n"
1310 "uniform vec2 u_viewport;\n"
1311 "uniform vec4 u_clipbounds;\n"
1312 "uniform float u_aa;\n"
1313 "uniform vec2 u_transform[3];\n"
1314 "uniform vec2 u_texmatrix[3];\n"
1315 "varying vec2 v_cliptc;\n"
1316 "varying vec2 v_texcoord;\n"
1317 "varying vec4 v_clipdist;\n"
1318 "varying vec4 v_dist;\n"
1319 "varying float v_alpha;\n"
1320 "void main() {\n"
1321 " vec2 scale = vec2(dot(u_transform[0], u_transform[0]),\n"
1322 " dot(u_transform[1], u_transform[1]));\n"
1323 " vec2 invScale = u_aa * inversesqrt(scale + 1.0e-6);\n"
1324 " scale *= invScale;\n"
1325 " vec2 extrude = a_vertex.xy +\n"
1326 " invScale * (2.0 * a_vertex.xy - 1.0);\n"
1327 " vec2 vertex = u_transform[0] * extrude.x +\n"
1328 " u_transform[1] * extrude.y +\n"
1329 " u_transform[2];\n"
1330 " gl_Position = vec4(vertex * 2.0 / u_viewport - 1.0, 0.0, 1.0);\n"
1331 " v_cliptc = vertex / u_viewport;\n"
1332 " v_clipdist = vec4(vertex - u_clipbounds.xy,\n"
1333 " u_clipbounds.zw - vertex);\n"
1334 " v_texcoord = u_texmatrix[0] * extrude.x +\n"
1335 " u_texmatrix[1] * extrude.y +\n"
1336 " u_texmatrix[2];\n"
1337 " float noAA = 1.0 - u_aa;\n"
1338 " v_dist = vec4(extrude, 1.0 - extrude) * scale.xyxy + 0.5 + noAA;\n"
1339 " v_alpha = min(a_vertex.z,\n"
1340 " min(scale.x, 1.0) * min(scale.y, 1.0) + noAA);\n"
1341 "}\n";
1342 auto fsSource =
1343 "precision mediump float;\n"
1344 "uniform vec4 u_texbounds;\n"
1345 "uniform vec4 u_color;\n"
1346 "uniform float u_swizzle;\n"
1347 "uniform sampler2D u_sampler;\n"
1348 "uniform sampler2D u_clipmask;\n"
1349 "varying highp vec2 v_cliptc;\n"
1350 "varying highp vec2 v_texcoord;\n"
1351 "varying vec4 v_clipdist;\n"
1352 "varying vec4 v_dist;\n"
1353 "varying float v_alpha;\n"
1354 "void main() {\n"
1355 " highp vec2 tc = clamp(v_texcoord, u_texbounds.xy,\n"
1356 " u_texbounds.zw);\n"
1357 " vec4 image = texture2D(u_sampler, tc);\n"
1358 " float clip = texture2D(u_clipmask, v_cliptc).r;\n"
1359 " vec4 dist = min(v_dist, v_clipdist);\n"
1360 " dist.xy = min(dist.xy, dist.zw);\n"
1361 " float aa = clamp(min(dist.x, dist.y), 0.0, v_alpha);\n"
1362 " gl_FragColor = clip * aa * u_color *\n"
1363 " mix(image, image.rrrr, u_swizzle);\n"
1364 "}\n";
1365 RefPtr<WebGLShader> vsId = mWebgl->CreateShader(LOCAL_GL_VERTEX_SHADER);
1366 mWebgl->ShaderSource(*vsId, vsSource);
1367 mWebgl->CompileShader(*vsId);
1368 if (!mWebgl->GetCompileResult(*vsId).success) {
1369 return false;
1370 }
1371 RefPtr<WebGLShader> fsId = mWebgl->CreateShader(LOCAL_GL_FRAGMENT_SHADER);
1372 mWebgl->ShaderSource(*fsId, fsSource);
1373 mWebgl->CompileShader(*fsId);
1374 if (!mWebgl->GetCompileResult(*fsId).success) {
1375 return false;
1376 }
1377 mImageProgram = mWebgl->CreateProgram();
1378 mWebgl->AttachShader(*mImageProgram, *vsId);
1379 mWebgl->AttachShader(*mImageProgram, *fsId);
1380 mWebgl->BindAttribLocation(*mImageProgram, 0, "a_vertex");
1381 mWebgl->LinkProgram(*mImageProgram);
1382 if (!mWebgl->GetLinkResult(*mImageProgram).success) {
1383 return false;
1384 }
1385 mImageProgramViewport = GetUniformLocation(mImageProgram, "u_viewport");
1386 mImageProgramAA = GetUniformLocation(mImageProgram, "u_aa");
1387 mImageProgramTransform = GetUniformLocation(mImageProgram, "u_transform");
1388 mImageProgramTexMatrix = GetUniformLocation(mImageProgram, "u_texmatrix");
1389 mImageProgramTexBounds = GetUniformLocation(mImageProgram, "u_texbounds");
1390 mImageProgramSwizzle = GetUniformLocation(mImageProgram, "u_swizzle");
1391 mImageProgramColor = GetUniformLocation(mImageProgram, "u_color");
1392 mImageProgramSampler = GetUniformLocation(mImageProgram, "u_sampler");
1393 mImageProgramClipMask = GetUniformLocation(mImageProgram, "u_clipmask");
1394 mImageProgramClipBounds = GetUniformLocation(mImageProgram, "u_clipbounds");
1395 if (!mImageProgramViewport || !mImageProgramAA || !mImageProgramTransform ||
1396 !mImageProgramTexMatrix || !mImageProgramTexBounds ||
1397 !mImageProgramSwizzle || !mImageProgramColor || !mImageProgramSampler ||
1398 !mImageProgramClipMask || !mImageProgramClipBounds) {
1399 return false;
1400 }
1401 mWebgl->UseProgram(mImageProgram);
1402 UniformData(LOCAL_GL_INT, mImageProgramSampler, Array<int32_t, 1>{0});
1403 UniformData(LOCAL_GL_INT, mImageProgramClipMask, Array<int32_t, 1>{1});
1404 }
1405 return true;
1406}
1407
1408void SharedContextWebgl::EnableScissor(const IntRect& aRect) {
1409 // Only update scissor state if it actually changes.
1410 if (!mLastScissor.IsEqualEdges(aRect)) {
1411 mLastScissor = aRect;
1412 mWebgl->Scissor(aRect.x, aRect.y, aRect.width, aRect.height);
1413 }
1414 if (!mScissorEnabled) {
1415 mScissorEnabled = true;
1416 mWebgl->SetEnabled(LOCAL_GL_SCISSOR_TEST, {}, true);
1417 }
1418}
1419
1420void SharedContextWebgl::DisableScissor() {
1421 if (mScissorEnabled) {
1422 mScissorEnabled = false;
1423 mWebgl->SetEnabled(LOCAL_GL_SCISSOR_TEST, {}, false);
1424 }
1425}
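
EnableScissor and DisableScissor above cache the last scissor rect and enable flag so redundant GL state changes are skipped. A minimal standalone sketch of that caching pattern (the GL* helpers below are hypothetical stand-ins for the WebGL wrapper calls, not part of this file):

#include <cstdint>
#include <cstdio>

struct RectI {
  int32_t x, y, width, height;
  bool operator==(const RectI& o) const {
    return x == o.x && y == o.y && width == o.width && height == o.height;
  }
};

// Hypothetical backend hooks standing in for mWebgl->Scissor / SetEnabled.
static void GLScissor(const RectI& r) {
  std::printf("glScissor(%d,%d,%d,%d)\n", r.x, r.y, r.width, r.height);
}
static void GLSetScissorTest(bool on) {
  std::printf("scissor test %s\n", on ? "on" : "off");
}

// Mirrors EnableScissor/DisableScissor: only emit GL calls on state changes.
class ScissorState {
  RectI mLast{0, 0, 0, 0};
  bool mEnabled = false;

 public:
  void Enable(const RectI& aRect) {
    if (!(mLast == aRect)) { mLast = aRect; GLScissor(aRect); }
    if (!mEnabled) { mEnabled = true; GLSetScissorTest(true); }
  }
  void Disable() {
    if (mEnabled) { mEnabled = false; GLSetScissorTest(false); }
  }
};

int main() {
  ScissorState s;
  s.Enable({0, 0, 256, 256});
  s.Enable({0, 0, 256, 256});  // no GL traffic: same rect, already enabled
  s.Disable();
}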
1426
1427inline ColorPattern DrawTargetWebgl::GetClearPattern() const {
1428 return ColorPattern(
1429 DeviceColor(0.0f, 0.0f, 0.0f, IsOpaque(mFormat) ? 1.0f : 0.0f));
1430}
1431
1432template <typename R>
1433inline RectDouble DrawTargetWebgl::TransformDouble(const R& aRect) const {
1434 return MatrixDouble(mTransform).TransformBounds(WidenToDouble(aRect));
1435}
1436
1437// Check if the transformed rect clips to the viewport.
1438inline Maybe<Rect> DrawTargetWebgl::RectClippedToViewport(
1439 const RectDouble& aRect) const {
1440 if (!mTransform.PreservesAxisAlignedRectangles()) {
1441 return Nothing();
1442 }
1443
1444 return Some(NarrowToFloat(aRect.SafeIntersect(RectDouble(GetRect()))));
1445}
1446
1447 // Ensure that the rect, after transform, is within reasonable precision limits
1448 // such that when transformed and clipped in the shader it will not round off
1449 // mantissa bits in a way that noticeably diverges from the path geometry
1450 // calculated by the path fallback.
1451template <typename R>
1452static inline bool RectInsidePrecisionLimits(const R& aRect) {
1453 return R(-(1 << 20), -(1 << 20), 2 << 20, 2 << 20).Contains(aRect);
1454}
1455
1456void DrawTargetWebgl::ClearRect(const Rect& aRect) {
1457 if (mIsClear) {
1458 // No need to clear anything if the entire framebuffer is already clear.
1459 return;
1460 }
1461
1462 RectDouble xformRect = TransformDouble(aRect);
1463 bool containsViewport = false;
1464 if (Maybe<Rect> clipped = RectClippedToViewport(xformRect)) {
1465 // If the rect clips to viewport, just clear the clipped rect
1466 // to avoid transform issues.
1467 containsViewport = clipped->Size() == Size(GetSize());
1468 DrawRect(*clipped, GetClearPattern(),
1469 DrawOptions(1.0f, CompositionOp::OP_CLEAR), Nothing(), nullptr,
1470 false);
1471 } else if (RectInsidePrecisionLimits(xformRect)) {
1472 // If the rect transform won't stress precision, then just use it.
1473 DrawRect(aRect, GetClearPattern(),
1474 DrawOptions(1.0f, CompositionOp::OP_CLEAR));
1475 } else {
1476 // Otherwise, using the transform in the shader may lead to inaccuracies, so
1477 // just fall back.
1478 MarkSkiaChanged();
1479 mSkia->ClearRect(aRect);
1480 }
1481
1482 // If the clear rectangle encompasses the entire viewport and is not clipped,
1483 // then mark the target as entirely clear.
1484 if (containsViewport && mSharedContext->IsCurrentTarget(this) &&
1485 !mSharedContext->HasClipMask() &&
1486 mSharedContext->mClipAARect.Contains(Rect(GetRect()))) {
1487 mIsClear = true;
1488 }
1489}
1490
1491static inline DeviceColor PremultiplyColor(const DeviceColor& aColor,
1492 float aAlpha = 1.0f) {
1493 float a = aColor.a * aAlpha;
1494 return DeviceColor(aColor.r * a, aColor.g * a, aColor.b * a, a);
1495}
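
PremultiplyColor is ordinary premultiplied-alpha conversion: each channel is scaled by the effective alpha so that later blending with (ONE, ONE_MINUS_SRC_ALPHA)-style factors composes correctly. A small worked example of the arithmetic:

#include <cstdio>

struct Color { float r, g, b, a; };

// Same arithmetic as PremultiplyColor above: scale RGB by the combined alpha.
static Color Premultiply(const Color& c, float alpha = 1.0f) {
  float a = c.a * alpha;
  return {c.r * a, c.g * a, c.b * a, a};
}

int main() {
  // 50%-opaque red drawn at 50% global alpha becomes 25% premultiplied red.
  Color out = Premultiply({1.0f, 0.0f, 0.0f, 0.5f}, 0.5f);
  std::printf("%.2f %.2f %.2f %.2f\n", out.r, out.g, out.b, out.a);  // 0.25 0.00 0.00 0.25
}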
1496
1497// Attempts to create the framebuffer used for drawing and also any relevant
1498// non-shared resources. Returns whether or not this succeeded.
1499bool DrawTargetWebgl::CreateFramebuffer() {
1500 RefPtr<WebGLContext> webgl = mSharedContext->mWebgl;
1501 if (!mFramebuffer) {
1502 mFramebuffer = webgl->CreateFramebuffer();
1503 }
1504 if (!mTex) {
1505 mTex = webgl->CreateTexture();
1506 webgl->BindTexture(LOCAL_GL_TEXTURE_2D, mTex);
1507 webgl->TexStorage(LOCAL_GL_TEXTURE_2D, 1, LOCAL_GL_RGBA8,
1508 {uint32_t(mSize.width), uint32_t(mSize.height), 1});
1509 mSharedContext->InitTexParameters(mTex);
1510 webgl->BindFramebuffer(LOCAL_GL_FRAMEBUFFER, mFramebuffer);
1511 webgl::FbAttachInfo attachInfo;
1512 attachInfo.tex = mTex;
1513 webgl->FramebufferAttach(LOCAL_GL_FRAMEBUFFER, LOCAL_GL_COLOR_ATTACHMENT0,
1514 LOCAL_GL_TEXTURE_2D, attachInfo);
1515 webgl->Viewport(0, 0, mSize.width, mSize.height);
1516 mSharedContext->DisableScissor();
1517 DeviceColor color = PremultiplyColor(GetClearPattern().mColor);
1518 webgl->ClearColor(color.b, color.g, color.r, color.a);
1519 webgl->Clear(LOCAL_GL_COLOR_BUFFER_BIT);
1520 mSharedContext->ClearTarget();
1521 mSharedContext->ClearLastTexture();
1522 }
1523 return true;
1524}
1525
1526void DrawTargetWebgl::CopySurface(SourceSurface* aSurface,
1527 const IntRect& aSourceRect,
1528 const IntPoint& aDestination) {
1529 // Intersect the source and destination rectangles with the viewport bounds.
1530 IntRect destRect =
1531 IntRect(aDestination, aSourceRect.Size()).SafeIntersect(GetRect());
1532 IntRect srcRect = destRect - aDestination + aSourceRect.TopLeft();
1533 if (srcRect.IsEmpty()) {
1534 return;
1535 }
1536
1537 if (mSkiaValid) {
1538 if (mSkiaLayer) {
1539 if (destRect.Contains(GetRect())) {
1540 // If the destination would overwrite the entire layer, discard the
1541 // layer.
1542 mSkiaLayer = false;
1543 } else if (!IsOpaque(aSurface->GetFormat())) {
1544 // If the surface is not opaque, copying it into the layer results in
1545 // unintended blending rather than a copy to the destination.
1546 FlattenSkia();
1547 }
1548 } else {
1549 // If there is no layer, copying is safe.
1550 MarkSkiaChanged();
1551 }
1552 mSkia->CopySurface(aSurface, srcRect, destRect.TopLeft());
1553 return;
1554 }
1555
1556 IntRect samplingRect;
1557 if (!mSharedContext->IsCompatibleSurface(aSurface)) {
1558 // If this data surface completely overwrites the framebuffer, then just
1559 // copy it to the Skia target.
1560 if (destRect.Contains(GetRect())) {
1561 MarkSkiaChanged(true);
1562 mSkia->DetachAllSnapshots();
1563 mSkiaNoClip->CopySurface(aSurface, srcRect, destRect.TopLeft());
1564 return;
1565 }
1566
1567 // CopySurface usually only samples a surface once, so don't cache the
1568 // entire surface as it is unlikely to be reused. Limit it to the used
1569 // source rectangle instead.
1570 IntRect surfaceRect = aSurface->GetRect();
1571 if (!srcRect.IsEqualEdges(surfaceRect)) {
1572 samplingRect = srcRect.SafeIntersect(surfaceRect);
1573 }
1574 }
1575
1576 Matrix matrix = Matrix::Translation(destRect.TopLeft() - srcRect.TopLeft());
1577 SurfacePattern pattern(aSurface, ExtendMode::CLAMP, matrix,
1578 SamplingFilter::POINT, samplingRect);
1579 DrawRect(Rect(destRect), pattern, DrawOptions(1.0f, CompositionOp::OP_SOURCE),
1580 Nothing(), nullptr, false, false);
1581}
1582
1583void DrawTargetWebgl::PushClip(const Path* aPath) {
1584 if (aPath && aPath->GetBackendType() == BackendType::SKIA) {
1585 // Detect if the path is really just a rect to simplify caching.
1586 if (Maybe<Rect> rect = aPath->AsRect()) {
1587 PushClipRect(*rect);
1588 return;
1589 }
1590 }
1591
1592 mClipChanged = true;
1593 mRefreshClipState = true;
1594 mSkia->PushClip(aPath);
1595
1596 mClipStack.push_back({GetTransform(), Rect(), aPath});
1597}
1598
1599void DrawTargetWebgl::PushClipRect(const Rect& aRect) {
1600 mClipChanged = true;
1601 mRefreshClipState = true;
1602 mSkia->PushClipRect(aRect);
1603
1604 mClipStack.push_back({GetTransform(), aRect, nullptr});
1605}
1606
1607void DrawTargetWebgl::PushDeviceSpaceClipRects(const IntRect* aRects,
1608 uint32_t aCount) {
1609 mClipChanged = true;
1610 mRefreshClipState = true;
1611 mSkia->PushDeviceSpaceClipRects(aRects, aCount);
1612
1613 for (uint32_t i = 0; i < aCount; i++) {
1614 mClipStack.push_back({Matrix(), Rect(aRects[i]), nullptr});
1615 }
1616}
1617
1618void DrawTargetWebgl::PopClip() {
1619 mClipChanged = true;
1620 mRefreshClipState = true;
1621 mSkia->PopClip();
1622
1623 mClipStack.pop_back();
1624}
1625
1626bool DrawTargetWebgl::RemoveAllClips() {
1627 if (mClipStack.empty()) {
1628 return true;
1629 }
1630 if (!mSkia->RemoveAllClips()) {
1631 return false;
1632 }
1633 mClipChanged = true;
1634 mRefreshClipState = true;
1635 mClipStack.clear();
1636 return true;
1637}
1638
1639bool DrawTargetWebgl::CopyToFallback(DrawTarget* aDT) {
1640 aDT->RemoveAllClips();
1641 for (auto& clipStack : mClipStack) {
1642 aDT->SetTransform(clipStack.mTransform);
1643 if (clipStack.mPath) {
1644 aDT->PushClip(clipStack.mPath);
1645 } else {
1646 aDT->PushClipRect(clipStack.mRect);
1647 }
1648 }
1649 aDT->SetTransform(GetTransform());
1650
1651 // An existing data snapshot is required for fallback, as we have to avoid
1652 // trying to touch the WebGL context, which is assumed to be invalid and not
1653 // suitable for readback.
1654 if (HasDataSnapshot()) {
1655 if (RefPtr<SourceSurface> snapshot = Snapshot()) {
1656 aDT->CopySurface(snapshot, snapshot->GetRect(), gfx::IntPoint(0, 0));
1657 return true;
1658 }
1659 }
1660 return false;
1661}
1662
1663// Whether a given composition operator can be mapped to a WebGL blend mode.
1664static inline bool SupportsDrawOptions(const DrawOptions& aOptions) {
1665 switch (aOptions.mCompositionOp) {
1666 case CompositionOp::OP_OVER:
1667 case CompositionOp::OP_ADD:
1668 case CompositionOp::OP_ATOP:
1669 case CompositionOp::OP_SOURCE:
1670 case CompositionOp::OP_CLEAR:
1671 return true;
1672 default:
1673 return false;
1674 }
1675}
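
SupportsDrawOptions whitelists the composition operators that can be realized with fixed-function blending. The mapping below is only an illustration of the usual premultiplied-alpha blend factors; the actual factors are chosen in SetBlendState, which is outside this listing:

#include <optional>
#include <utility>

enum class CompositionOp { OP_OVER, OP_ADD, OP_ATOP, OP_SOURCE, OP_CLEAR, OP_MULTIPLY };
enum BlendFactor { ZERO, ONE, SRC_ALPHA, ONE_MINUS_SRC_ALPHA, DST_ALPHA };

// Typical (srcFactor, dstFactor) pairs for premultiplied alpha. OP_SOURCE and
// OP_CLEAR are emulated with the OVER factors plus a blend/clear color, as
// DrawRectAccel below describes; unsupported ops return nullopt (Skia fallback).
static std::optional<std::pair<BlendFactor, BlendFactor>> ToBlendFuncs(CompositionOp aOp) {
  switch (aOp) {
    case CompositionOp::OP_OVER:
    case CompositionOp::OP_SOURCE:
    case CompositionOp::OP_CLEAR:
      return std::make_pair(ONE, ONE_MINUS_SRC_ALPHA);
    case CompositionOp::OP_ADD:
      return std::make_pair(ONE, ONE);
    case CompositionOp::OP_ATOP:
      return std::make_pair(DST_ALPHA, ONE_MINUS_SRC_ALPHA);
    default:
      return std::nullopt;
  }
}

int main() { return ToBlendFuncs(CompositionOp::OP_MULTIPLY).has_value() ? 1 : 0; }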
1676
1677static inline bool SupportsExtendMode(const SurfacePattern& aPattern) {
1678 switch (aPattern.mExtendMode) {
1679 case ExtendMode::CLAMP:
1680 return true;
1681 case ExtendMode::REPEAT:
1682 case ExtendMode::REPEAT_X:
1683 case ExtendMode::REPEAT_Y:
1684 if ((!aPattern.mSurface ||
1685 aPattern.mSurface->GetType() == SurfaceType::WEBGL) &&
1686 !aPattern.mSamplingRect.IsEmpty()) {
1687 return false;
1688 }
1689 return true;
1690 default:
1691 return false;
1692 }
1693}
1694
1695// Whether a pattern can be mapped to an available WebGL shader.
1696bool SharedContextWebgl::SupportsPattern(const Pattern& aPattern) {
1697 switch (aPattern.GetType()) {
1698 case PatternType::COLOR:
1699 return true;
1700 case PatternType::SURFACE: {
1701 auto surfacePattern = static_cast<const SurfacePattern&>(aPattern);
1702 if (!SupportsExtendMode(surfacePattern)) {
1703 return false;
1704 }
1705 if (surfacePattern.mSurface) {
1706 // If the surface is already uploaded to a texture, then just use it.
1707 if (IsCompatibleSurface(surfacePattern.mSurface)) {
1708 return true;
1709 }
1710
1711 IntSize size = surfacePattern.mSurface->GetSize();
1712 // The maximum size a surface can be before triggering a fallback to
1713 // software. Bound the maximum surface size by the actual texture size
1714 // limit.
1715 int32_t maxSize = int32_t(
1716 std::min(StaticPrefs::gfx_canvas_accelerated_max_surface_size(),
1717 mMaxTextureSize));
1718 // Check if either of the surface dimensions or the sampling rect,
1719 // if supplied, exceed the maximum.
1720 if (std::max(size.width, size.height) > maxSize &&
1721 (surfacePattern.mSamplingRect.IsEmpty() ||
1722 std::max(surfacePattern.mSamplingRect.width,
1723 surfacePattern.mSamplingRect.height) > maxSize)) {
1724 return false;
1725 }
1726 }
1727 return true;
1728 }
1729 default:
1730 // Patterns other than colors and surfaces are currently not accelerated.
1731 return false;
1732 }
1733}
1734
1735bool DrawTargetWebgl::DrawRect(const Rect& aRect, const Pattern& aPattern,
1736 const DrawOptions& aOptions,
1737 Maybe<DeviceColor> aMaskColor,
1738 RefPtr<TextureHandle>* aHandle,
1739 bool aTransformed, bool aClipped,
1740 bool aAccelOnly, bool aForceUpdate,
1741 const StrokeOptions* aStrokeOptions) {
1742 // If there is nothing to draw, then don't draw...
1743 if (aRect.IsEmpty()) {
1744 return true;
1745 }
1746
1747 // If we're already drawing directly to the WebGL context, then we want to
1748 // continue to do so. However, if we're drawing into a Skia layer over the
1749 // WebGL context, then we need to be careful to avoid repeatedly clearing
1750 // and flushing the layer if we hit a drawing request that can be accelerated
1751 // in between layered drawing requests, as clearing and flushing the layer
1752 // can be significantly expensive when repeated. So when a Skia layer is
1753 // active, if it is possible to continue drawing into the layer, then don't
1754 // accelerate the drawing request.
1755 if (mWebglValid || (mSkiaLayer && !mLayerDepth &&
1756 (aAccelOnly || !SupportsLayering(aOptions)))) {
1757 // If we get here, either the WebGL context is being directly drawn to
1758 // or we are going to flush the Skia layer to it before doing so. The shared
1759 // context still needs to be claimed and prepared for drawing. If this
1760 // fails, we just fall back to drawing with Skia below.
1761 if (PrepareContext(aClipped)) {
1762 // The shared context is claimed and the framebuffer is now valid, so try
1763 // accelerated drawing.
1764 return mSharedContext->DrawRectAccel(
1765 aRect, aPattern, aOptions, aMaskColor, aHandle, aTransformed,
1766 aClipped, aAccelOnly, aForceUpdate, aStrokeOptions);
1767 }
1768 }
1769
1770 // Either there is no valid WebGL target to draw into, or we failed to prepare
1771 // it for drawing. The only thing we can do at this point is fall back to
1772 // drawing with Skia. If the request explicitly requires accelerated drawing,
1773 // then draw nothing before returning failure.
1774 if (!aAccelOnly) {
1775 DrawRectFallback(aRect, aPattern, aOptions, aMaskColor, aTransformed,
1776 aClipped, aStrokeOptions);
1777 }
1778 return false;
1779}
1780
1781void DrawTargetWebgl::DrawRectFallback(const Rect& aRect,
1782 const Pattern& aPattern,
1783 const DrawOptions& aOptions,
1784 Maybe<DeviceColor> aMaskColor,
1785 bool aTransformed, bool aClipped,
1786 const StrokeOptions* aStrokeOptions) {
1787 // Invalidate the WebGL target and prepare the Skia target for drawing.
1788 MarkSkiaChanged(aOptions);
1789
1790 if (aTransformed) {
1791 // If transforms are requested, then just translate back to FillRect.
1792 if (aMaskColor) {
1793 mSkia->Mask(ColorPattern(*aMaskColor), aPattern, aOptions);
1794 } else if (aStrokeOptions) {
1795 mSkia->StrokeRect(aRect, aPattern, *aStrokeOptions, aOptions);
1796 } else {
1797 mSkia->FillRect(aRect, aPattern, aOptions);
1798 }
1799 } else if (aClipped) {
1800 // If no transform was requested but clipping is still required, then
1801 // temporarily reset the transform before translating to FillRect.
1802 mSkia->SetTransform(Matrix());
1803 if (aMaskColor) {
1804 auto surfacePattern = static_cast<const SurfacePattern&>(aPattern);
1805 if (surfacePattern.mSamplingRect.IsEmpty()) {
1806 mSkia->MaskSurface(ColorPattern(*aMaskColor), surfacePattern.mSurface,
1807 aRect.TopLeft(), aOptions);
1808 } else {
1809 mSkia->Mask(ColorPattern(*aMaskColor), aPattern, aOptions);
1810 }
1811 } else if (aStrokeOptions) {
1812 mSkia->StrokeRect(aRect, aPattern, *aStrokeOptions, aOptions);
1813 } else {
1814 mSkia->FillRect(aRect, aPattern, aOptions);
1815 }
1816 mSkia->SetTransform(mTransform);
1817 } else if (aPattern.GetType() == PatternType::SURFACE) {
1818 // No transform nor clipping was requested, so it is essentially just a
1819 // copy.
1820 auto surfacePattern = static_cast<const SurfacePattern&>(aPattern);
1821 mSkia->CopySurface(surfacePattern.mSurface,
1822 surfacePattern.mSurface->GetRect(),
1823 IntPoint::Round(aRect.TopLeft()));
1824 } else {
1825 MOZ_ASSERT(false);
1826 }
1827}
1828
1829inline already_AddRefed<WebGLTexture> SharedContextWebgl::GetCompatibleSnapshot(
1830 SourceSurface* aSurface) const {
1831 if (aSurface->GetType() == SurfaceType::WEBGL) {
1832 RefPtr<SourceSurfaceWebgl> webglSurf =
1833 static_cast<SourceSurfaceWebgl*>(aSurface);
1834 if (this == webglSurf->mSharedContext) {
1835 // If there is a snapshot copy in a texture handle, use that.
1836 if (webglSurf->mHandle) {
1837 return do_AddRef(
1838 webglSurf->mHandle->GetBackingTexture()->GetWebGLTexture());
1839 }
1840 if (RefPtr<DrawTargetWebgl> webglDT = webglSurf->GetTarget()) {
1841 // If there is a copy-on-write reference to a target, use its backing
1842 // texture directly. This is only safe if the targets don't match, but
1843 // MarkChanged should ensure that any snapshots were copied into a
1844 // texture handle before we ever get here.
1845 if (!IsCurrentTarget(webglDT)) {
1846 return do_AddRef(webglDT->mTex);
1847 }
1848 }
1849 }
1850 }
1851 return nullptr;
1852}
1853
1854inline bool SharedContextWebgl::IsCompatibleSurface(
1855 SourceSurface* aSurface) const {
1856 return bool(RefPtr<WebGLTexture>(GetCompatibleSnapshot(aSurface)));
1857}
1858
1859bool SharedContextWebgl::UploadSurface(DataSourceSurface* aData,
1860 SurfaceFormat aFormat,
1861 const IntRect& aSrcRect,
1862 const IntPoint& aDstOffset, bool aInit,
1863 bool aZero,
1864 const RefPtr<WebGLTexture>& aTex) {
1865 webgl::TexUnpackBlobDesc texDesc = {
1866 LOCAL_GL_TEXTURE_2D,
1867 {uint32_t(aSrcRect.width), uint32_t(aSrcRect.height), 1}};
1868 if (aData) {
1869 // The surface needs to be uploaded to its backing texture either to
1870 // initialize or update the texture handle contents. Map the data
1871 // contents of the surface so it can be read.
1872 DataSourceSurface::ScopedMap map(aData, DataSourceSurface::READ);
1873 if (!map.IsMapped()) {
1874 return false;
1875 }
1876 int32_t stride = map.GetStride();
1877 int32_t bpp = BytesPerPixel(aFormat);
1878 // Get the data pointer range considering the sampling rect offset and
1879 // size.
1880 Span<const uint8_t> range(
1881 map.GetData() + aSrcRect.y * size_t(stride) + aSrcRect.x * bpp,
1882 std::max(aSrcRect.height - 1, 0) * size_t(stride) +
1883 aSrcRect.width * bpp);
1884 texDesc.cpuData = Some(range);
1885 // If the stride happens to be 4 byte aligned, assume that is the
1886 // desired alignment regardless of format (even A8). Otherwise, we
1887 // default to byte alignment.
1888 texDesc.unpacking.alignmentInTypeElems = stride % 4 ? 1 : 4;
1889 texDesc.unpacking.rowLength = stride / bpp;
1890 } else if (aZero) {
1891 // Create a PBO filled with zero data to initialize the texture data and
1892 // avoid slow initialization inside WebGL.
1893 MOZ_ASSERT(aSrcRect.TopLeft() == IntPoint(0, 0));
1894 size_t size =
1895 size_t(GetAlignedStride<4>(aSrcRect.width, BytesPerPixel(aFormat))) *
1896 aSrcRect.height;
1897 if (!mZeroBuffer || size > mZeroSize) {
1898 mZeroBuffer = mWebgl->CreateBuffer();
1899 mZeroSize = size;
1900 mWebgl->BindBuffer(LOCAL_GL_PIXEL_UNPACK_BUFFER, mZeroBuffer);
1901 // WebGL will zero initialize the empty buffer, so we don't send zero data
1902 // explicitly.
1903 mWebgl->BufferData(LOCAL_GL_PIXEL_UNPACK_BUFFER, size, nullptr,
1904 LOCAL_GL_STATIC_DRAW);
1905 } else {
1906 mWebgl->BindBuffer(LOCAL_GL_PIXEL_UNPACK_BUFFER, mZeroBuffer);
1907 }
1908 texDesc.pboOffset = Some(0);
1909 }
1910 // Upload as RGBA8 to avoid swizzling during upload. Surfaces provide
1911 // data as BGRA, but we manually swizzle that in the shader. An A8
1912 // surface will be stored as an R8 texture that will also be swizzled
1913 // in the shader.
1914 GLenum intFormat =
1915 aFormat == SurfaceFormat::A8 ? LOCAL_GL_R8 : LOCAL_GL_RGBA8;
1916 GLenum extFormat =
1917 aFormat == SurfaceFormat::A8 ? LOCAL_GL_RED : LOCAL_GL_RGBA;
1918 webgl::PackingInfo texPI = {extFormat, LOCAL_GL_UNSIGNED_BYTE};
1919 // Do the (partial) upload for the shared or standalone texture.
1920 if (aTex) {
1921 mWebgl->BindTexture(LOCAL_GL_TEXTURE_2D, aTex);
1922 }
1923 mWebgl->TexImage(0, aInit ? intFormat : 0,
1924 {uint32_t(aDstOffset.x), uint32_t(aDstOffset.y), 0}, texPI,
1925 texDesc);
1926 if (aTex) {
1927 mWebgl->BindTexture(LOCAL_GL_TEXTURE_2D, mLastTexture);
1928 }
1929 if (!aData && aZero) {
1930 mWebgl->BindBuffer(LOCAL_GL_PIXEL_UNPACK_BUFFER, 0);
1931 }
1932 return true;
1933}
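
UploadSurface maps just the sampled sub-rectangle of the source: the base pointer is offset by the sub-rect origin, the span is sized so the last row is not over-read, and the unpack row length is derived from the stride. The pointer and length arithmetic in isolation, assuming a hypothetical 4-byte-per-pixel surface:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

struct SubRect { int32_t x, y, width, height; };

// Byte offset and length of the mapped sub-rect, mirroring the Span above:
// the final row contributes only width * bpp bytes, not a full stride.
static std::pair<size_t, size_t> SubRectRange(const SubRect& r, size_t stride, int32_t bpp) {
  size_t offset = size_t(r.y) * stride + size_t(r.x) * bpp;
  size_t length = size_t(std::max(r.height - 1, 0)) * stride + size_t(r.width) * bpp;
  return {offset, length};
}

int main() {
  // 16x16 sub-rect at (8, 4) of a 256-pixel-wide BGRA surface (stride 1024).
  auto [offset, length] = SubRectRange({8, 4, 16, 16}, 1024, 4);
  assert(offset == 4 * 1024 + 8 * 4);
  assert(length == 15 * 1024 + 16 * 4);
  return 0;
}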
1934
1935// Allocate a new texture handle backed by either a standalone texture or as a
1936// sub-texture of a larger shared texture.
1937already_AddRefed<TextureHandle> SharedContextWebgl::AllocateTextureHandle(
1938 SurfaceFormat aFormat, const IntSize& aSize, bool aAllowShared,
1939 bool aRenderable) {
1940 RefPtr<TextureHandle> handle;
1941 // Calculate the bytes that would be used by this texture handle, and prune
1942 // enough other textures to ensure we have that much usable texture space
1943 // available to allocate.
1944 size_t usedBytes = BackingTexture::UsedBytes(aFormat, aSize);
1945 PruneTextureMemory(usedBytes, false);
1946 // The requested page size for shared textures.
1947 int32_t pageSize = int32_t(std::min(
1948 StaticPrefs::gfx_canvas_accelerated_shared_page_size(), mMaxTextureSize));
1949 if (aAllowShared && std::max(aSize.width, aSize.height) <= pageSize / 2) {
1950 // Ensure that the surface is no bigger than a quadrant of a shared texture
1951 // page. If so, try to allocate it to a shared texture. Look for any
1952 // existing shared texture page with a matching format and allocate
1953 // from that if possible.
1954 for (auto& shared : mSharedTextures) {
1955 if (shared->GetFormat() == aFormat &&
1956 shared->IsRenderable() == aRenderable) {
1957 bool wasEmpty = !shared->HasAllocatedHandles();
1958 handle = shared->Allocate(aSize);
1959 if (handle) {
1960 if (wasEmpty) {
1961 // If the page was previously empty, then deduct it from the
1962 // empty memory reserves.
1963 mEmptyTextureMemory -= shared->UsedBytes();
1964 }
1965 break;
1966 }
1967 }
1968 }
1969 // If we couldn't find an existing shared texture page with matching
1970 // format, then allocate a new page to put the request in.
1971 if (!handle) {
1972 if (RefPtr<WebGLTexture> tex = mWebgl->CreateTexture()) {
1973 RefPtr<SharedTexture> shared =
1974 new SharedTexture(IntSize(pageSize, pageSize), aFormat, tex);
1975 if (aRenderable) {
1976 shared->MarkRenderable();
1977 }
1978 mSharedTextures.push_back(shared);
1979 mTotalTextureMemory += shared->UsedBytes();
1980 handle = shared->Allocate(aSize);
1981 }
1982 }
1983 } else {
1984 // The surface wouldn't fit in a shared texture page, so we need to
1985 // allocate a standalone texture for it instead.
1986 if (RefPtr<WebGLTexture> tex = mWebgl->CreateTexture()) {
1987 RefPtr<StandaloneTexture> standalone =
1988 new StandaloneTexture(aSize, aFormat, tex);
1989 if (aRenderable) {
1990 standalone->MarkRenderable();
1991 }
1992 mStandaloneTextures.push_back(standalone);
1993 mTotalTextureMemory += standalone->UsedBytes();
1994 handle = standalone;
1995 }
1996 }
1997
1998 if (!handle) {
1999 return nullptr;
2000 }
2001
2002 // Insert the new texture handle into the front of the MRU list and
2003 // update used space for it.
2004 mTextureHandles.insertFront(handle);
2005 ++mNumTextureHandles;
2006 mUsedTextureMemory += handle->UsedBytes();
2007
2008 return handle.forget();
2009}
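
AllocateTextureHandle packs small surfaces into quadrants of a shared texture page and gives anything larger a standalone texture. A reduced sketch of just that placement decision, with prefPageSize standing in for the shared-page-size pref read above:

#include <algorithm>
#include <cstdint>
#include <cstdio>

enum class Placement { SharedPage, Standalone };

// A surface goes into a shared page only if it fits within a quadrant of the
// page (so at least four such surfaces can tile one page); otherwise it gets
// its own texture. The page size is clamped to the device texture-size limit.
static Placement ChoosePlacement(int32_t width, int32_t height, int32_t prefPageSize,
                                 int32_t maxTextureSize) {
  int32_t pageSize = std::min(prefPageSize, maxTextureSize);
  return std::max(width, height) <= pageSize / 2 ? Placement::SharedPage
                                                 : Placement::Standalone;
}

int main() {
  std::printf("%d\n", ChoosePlacement(256, 256, 2048, 8192) == Placement::SharedPage);   // 1
  std::printf("%d\n", ChoosePlacement(1500, 300, 2048, 8192) == Placement::Standalone);  // 1
}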
2010
2011static inline SamplingFilter GetSamplingFilter(const Pattern& aPattern) {
2012 return aPattern.GetType() == PatternType::SURFACE
2013 ? static_cast<const SurfacePattern&>(aPattern).mSamplingFilter
2014 : SamplingFilter::GOOD;
2015}
2016
2017static inline bool UseNearestFilter(const Pattern& aPattern) {
2018 return GetSamplingFilter(aPattern) == SamplingFilter::POINT;
2019}
2020
2021// Determine if the rectangle is still axis-aligned and pixel-aligned.
2022static inline Maybe<IntRect> IsAlignedRect(bool aTransformed,
2023 const Matrix& aCurrentTransform,
2024 const Rect& aRect) {
2025 if (!aTransformed || aCurrentTransform.HasOnlyIntegerTranslation()) {
2026 auto intRect = RoundedToInt(aRect);
2027 if (aRect.WithinEpsilonOf(Rect(intRect), 1.0e-3f)) {
2028 if (aTransformed) {
2029 intRect += RoundedToInt(aCurrentTransform.GetTranslation());
2030 }
2031 return Some(intRect);
2032 }
2033 }
2034 return Nothing();
2035}
2036
2037Maybe<uint32_t> SharedContextWebgl::GetUniformLocation(
2038 const RefPtr<WebGLProgram>& aProg, const std::string& aName) const {
2039 if (!aProg || !aProg->LinkInfo()) {
2040 return Nothing();
2041 }
2042
2043 for (const auto& activeUniform : aProg->LinkInfo()->active.activeUniforms) {
2044 if (activeUniform.block_index != -1) continue;
2045
2046 auto locName = activeUniform.name;
2047 const auto indexed = webgl::ParseIndexed(locName);
2048 if (indexed) {
2049 locName = indexed->name;
2050 }
2051
2052 const auto baseLength = locName.size();
2053 for (const auto& pair : activeUniform.locByIndex) {
2054 if (indexed) {
2055 locName.erase(baseLength); // Erase previous "[N]".
2056 locName += '[';
2057 locName += std::to_string(pair.first);
2058 locName += ']';
2059 }
2060 if (locName == aName || locName == aName + "[0]") {
2061 return Some(pair.second);
2062 }
2063 }
2064 }
2065
2066 return Nothing();
2067}
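
GetUniformLocation has to match array uniforms, whose reflected names carry an "[N]" suffix, so a request for "u_transform" must also match "u_transform[0]". The name-matching rule by itself, as a minimal sketch:

#include <cassert>
#include <string>

// An active uniform entry may be reported either plain ("u_color") or
// indexed ("u_transform[0]"). A lookup by base name should accept both.
static bool UniformNameMatches(const std::string& active, const std::string& requested) {
  return active == requested || active == requested + "[0]";
}

int main() {
  assert(UniformNameMatches("u_color", "u_color"));
  assert(UniformNameMatches("u_transform[0]", "u_transform"));
  assert(!UniformNameMatches("u_transform[1]", "u_transform"));
  return 0;
}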
2068
2069template <class T>
2070struct IsUniformDataValT : std::false_type {};
2071template <>
2072struct IsUniformDataValT<webgl::UniformDataVal> : std::true_type {};
2073template <>
2074struct IsUniformDataValT<float> : std::true_type {};
2075template <>
2076struct IsUniformDataValT<int32_t> : std::true_type {};
2077template <>
2078struct IsUniformDataValT<uint32_t> : std::true_type {};
2079
2080template <typename T, typename = std::enable_if_t<IsUniformDataValT<T>::value>>
2081inline Range<const webgl::UniformDataVal> AsUniformDataVal(
2082 const Range<const T>& data) {
2083 return {data.begin().template ReinterpretCast<const webgl::UniformDataVal>(),
2084 data.end().template ReinterpretCast<const webgl::UniformDataVal>()};
2085}
2086
2087template <class T, size_t N>
2088inline void SharedContextWebgl::UniformData(GLenum aFuncElemType,
2089 const Maybe<uint32_t>& aLoc,
2090 const Array<T, N>& aData) {
2091 // We currently always pass false for transpose. If in the future we need
2092 // support for transpose, then caching needs to take that into account.
2093 mWebgl->UniformData(*aLoc, false,
2094 AsUniformDataVal(Range<const T>(Span<const T>(aData))));
2095}
2096
2097template <class T, size_t N>
2098void SharedContextWebgl::MaybeUniformData(GLenum aFuncElemType,
2099 const Maybe<uint32_t>& aLoc,
2100 const Array<T, N>& aData,
2101 Maybe<Array<T, N>>& aCached) {
2102 if (aCached.isNothing() || !(*aCached == aData)) {
2103 aCached = Some(aData);
2104 UniformData(aFuncElemType, aLoc, aData);
2105 }
2106}
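
MaybeUniformData only issues the WebGL call when the cached value differs, which matters because these uniforms are re-submitted for every drawn rectangle. The same memoization pattern in isolation, with a counter standing in for the actual upload call:

#include <array>
#include <cstdio>
#include <optional>

// Upload only when the cached copy differs, mirroring MaybeUniformData above.
template <typename T, size_t N>
void MaybeUpload(const std::array<T, N>& data, std::optional<std::array<T, N>>& cached,
                 int& uploadCount) {
  if (!cached || *cached != data) {
    cached = data;
    ++uploadCount;  // stands in for the actual mWebgl->UniformData call
  }
}

int main() {
  std::optional<std::array<float, 2>> cachedViewport;
  int uploads = 0;
  MaybeUpload<float, 2>({1024.f, 768.f}, cachedViewport, uploads);
  MaybeUpload<float, 2>({1024.f, 768.f}, cachedViewport, uploads);  // skipped
  MaybeUpload<float, 2>({800.f, 600.f}, cachedViewport, uploads);
  std::printf("%d\n", uploads);  // 2
}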
2107
2108inline void SharedContextWebgl::DrawQuad() {
2109 mWebgl->DrawArraysInstanced(LOCAL_GL_TRIANGLE_FAN, 0, 4, 1);
2110}
2111
2112void SharedContextWebgl::DrawTriangles(const PathVertexRange& aRange) {
2113 mWebgl->DrawArraysInstanced(LOCAL_GL_TRIANGLES, GLint(aRange.mOffset),
2114 GLsizei(aRange.mLength), 1);
2115}
2116
2117// Common rectangle and pattern drawing function shared by many DrawTarget
2118// commands. If aMaskColor is specified, the provided surface pattern will be
2119// treated as a mask. If aHandle is specified, then the surface pattern's
2120// texture will be cached in the supplied handle, as opposed to using the
2121 // surface's user data. If aTransformed or aClipped is false, then transforms
2122// and/or clipping will be disabled. If aAccelOnly is specified, then this
2123// function will return before it would have otherwise drawn without
2124// acceleration. If aForceUpdate is specified, then the provided texture handle
2125// will be respecified with the provided surface.
2126bool SharedContextWebgl::DrawRectAccel(
2127 const Rect& aRect, const Pattern& aPattern, const DrawOptions& aOptions,
2128 Maybe<DeviceColor> aMaskColor, RefPtr<TextureHandle>* aHandle,
2129 bool aTransformed, bool aClipped, bool aAccelOnly, bool aForceUpdate,
2130 const StrokeOptions* aStrokeOptions, const PathVertexRange* aVertexRange,
2131 const Matrix* aRectXform) {
2132 // If the rect or clip rect is empty, then there is nothing to draw.
2133 if (aRect.IsEmpty() || mClipRect.IsEmpty()) {
2134 return true;
2135 }
2136
2137 // Check if the drawing options and the pattern support acceleration. Also
2138 // ensure the framebuffer is prepared for drawing. If not, fall back to using
2139 // the Skia target. When we need to forcefully update a texture, we must be
2140 // careful to override any pattern limits, as the caller ensures the pattern
2141 // is otherwise a supported type.
2142 if (!SupportsDrawOptions(aOptions) ||
2143 (!aForceUpdate && !SupportsPattern(aPattern)) || aStrokeOptions ||
2144 !mCurrentTarget->MarkChanged()) {
2145 // If only accelerated drawing was requested, bail out without software
2146 // drawing fallback.
2147 if (!aAccelOnly) {
2148 MOZ_ASSERT(!aVertexRange);
2149 mCurrentTarget->DrawRectFallback(aRect, aPattern, aOptions, aMaskColor,
2150 aTransformed, aClipped, aStrokeOptions);
2151 }
2152 return false;
2153 }
2154
2155 const Matrix& currentTransform = mCurrentTarget->GetTransform();
2156 // rectXform only applies to the rect, but should not apply to the pattern,
2157 // as it might inadvertently alter the pattern.
2158 Matrix rectXform = currentTransform;
2159 if (aRectXform) {
2160 rectXform.PreMultiply(*aRectXform);
2161 }
2162
2163 if (aOptions.mCompositionOp == CompositionOp::OP_SOURCE && aTransformed &&
2164 aClipped &&
2165 (HasClipMask() || !rectXform.PreservesAxisAlignedRectangles() ||
2166 !rectXform.TransformBounds(aRect).Contains(Rect(mClipAARect)) ||
2167 (aPattern.GetType() == PatternType::SURFACE &&
2168 !IsAlignedRect(aTransformed, rectXform, aRect)))) {
2169 // Clear outside the mask region for masks that are not bounded by clip.
2170 return DrawRectAccel(Rect(mClipRect), ColorPattern(DeviceColor(0, 0, 0, 0)),
2171 DrawOptions(1.0f, CompositionOp::OP_SOURCE,
2172 aOptions.mAntialiasMode),
2173 Nothing(), nullptr, false, aClipped, aAccelOnly) &&
2174 DrawRectAccel(aRect, aPattern,
2175 DrawOptions(aOptions.mAlpha, CompositionOp::OP_ADD,
2176 aOptions.mAntialiasMode),
2177 aMaskColor, aHandle, aTransformed, aClipped,
2178 aAccelOnly, aForceUpdate, aStrokeOptions, aVertexRange,
2179 aRectXform);
2180 }
2181 if (aOptions.mCompositionOp == CompositionOp::OP_CLEAR &&
2182 aPattern.GetType() == PatternType::SURFACE && !aMaskColor) {
2183 // If the surface being drawn with clear is not a mask, then its contents
2184 // need to be ignored. Just use a color pattern instead.
2185 return DrawRectAccel(aRect, ColorPattern(DeviceColor(1, 1, 1, 1)), aOptions,
2186 Nothing(), aHandle, aTransformed, aClipped, aAccelOnly,
2187 aForceUpdate, aStrokeOptions, aVertexRange,
2188 aRectXform);
2189 }
2190
2191 // Set up the scissor test to reflect the clipping rectangle, if supplied.
2192 if (!mClipRect.Contains(IntRect(IntPoint(), mViewportSize))) {
2193 EnableScissor(mClipRect);
2194 } else {
2195 DisableScissor();
2196 }
2197
2198 bool success = false;
2199
2200 // Now try to actually draw the pattern...
2201 switch (aPattern.GetType()) {
2202 case PatternType::COLOR: {
2203 if (!aVertexRange) {
2204 // Only an uncached draw if not using the vertex cache.
2205 mCurrentTarget->mProfile.OnUncachedDraw();
2206 }
2207 DeviceColor color = PremultiplyColor(
2208 static_cast<const ColorPattern&>(aPattern).mColor, aOptions.mAlpha);
2209 if (((color.a == 1.0f &&
2210 aOptions.mCompositionOp == CompositionOp::OP_OVER) ||
2211 aOptions.mCompositionOp == CompositionOp::OP_SOURCE ||
2212 aOptions.mCompositionOp == CompositionOp::OP_CLEAR) &&
2213 !aStrokeOptions && !aVertexRange && !HasClipMask() &&
2214 mClipAARect.IsEqualEdges(Rect(mClipRect))) {
2215 // Certain color patterns can be mapped to scissored clears. The
2216 // composition op must effectively overwrite the destination, and the
2217 // transform must map to an axis-aligned integer rectangle.
2218 if (Maybe<IntRect> intRect =
2219 IsAlignedRect(aTransformed, rectXform, aRect)) {
2220 // Only use a clear if the area is larger than a quarter of the
2221 // viewport.
2222 if (intRect->Area() >=
2223 (mViewportSize.width / 2) * (mViewportSize.height / 2)) {
2224 if (!intRect->Contains(mClipRect)) {
2225 EnableScissor(intRect->Intersect(mClipRect));
2226 }
2227 if (aOptions.mCompositionOp == CompositionOp::OP_CLEAR) {
2228 color =
2229 PremultiplyColor(mCurrentTarget->GetClearPattern().mColor);
2230 }
2231 mWebgl->ClearColor(color.b, color.g, color.r, color.a);
2232 mWebgl->Clear(LOCAL_GL_COLOR_BUFFER_BIT);
2233 success = true;
2234 break;
2235 }
2236 }
2237 }
2238 // Map the composition op to a WebGL blend mode, if possible.
2239 Maybe<DeviceColor> blendColor;
2240 if (aOptions.mCompositionOp == CompositionOp::OP_SOURCE ||
2241 aOptions.mCompositionOp == CompositionOp::OP_CLEAR) {
2242 // The source operator can support clipping and AA by emulating it with
2243 // the over op. Supply the color with blend state, and set the shader
2244 // color to white, to avoid needing dual-source blending.
2245 blendColor = Some(color);
2246 // Both source and clear operators should output a mask from the shader.
2247 color = DeviceColor(1, 1, 1, 1);
2248 }
2249 SetBlendState(aOptions.mCompositionOp, blendColor);
2250 // Since it couldn't be mapped to a scissored clear, we need to use the
2251 // solid color shader with supplied transform.
2252 if (mLastProgram != mSolidProgram) {
2253 mWebgl->UseProgram(mSolidProgram);
2254 mLastProgram = mSolidProgram;
2255 }
2256 Array<float, 2> viewportData = {float(mViewportSize.width),
2257 float(mViewportSize.height)};
2258 MaybeUniformData(LOCAL_GL_FLOAT_VEC2, mSolidProgramViewport, viewportData,
2259 mSolidProgramUniformState.mViewport);
2260
2261 // Generated paths provide their own AA as vertex alpha.
2262 Array<float, 1> aaData = {aVertexRange ? 0.0f : 1.0f};
2263 MaybeUniformData(LOCAL_GL_FLOAT, mSolidProgramAA, aaData,
2264 mSolidProgramUniformState.mAA);
2265
2266 // Offset the clip AA bounds by 0.5 to ensure AA falls to 0 at pixel
2267 // boundary.
2268 Array<float, 4> clipData = {mClipAARect.x - 0.5f, mClipAARect.y - 0.5f,
2269 mClipAARect.XMost() + 0.5f,
2270 mClipAARect.YMost() + 0.5f};
2271 MaybeUniformData(LOCAL_GL_FLOAT_VEC4, mSolidProgramClipBounds, clipData,
2272 mSolidProgramUniformState.mClipBounds);
2273
2274 Array<float, 4> colorData = {color.b, color.g, color.r, color.a};
2275 Matrix xform(aRect.width, 0.0f, 0.0f, aRect.height, aRect.x, aRect.y);
2276 if (aTransformed) {
2277 xform *= rectXform;
2278 }
2279 Array<float, 6> xformData = {xform._11, xform._12, xform._21,
2280 xform._22, xform._31, xform._32};
2281 MaybeUniformData(LOCAL_GL_FLOAT_VEC2, mSolidProgramTransform, xformData,
2282 mSolidProgramUniformState.mTransform);
2283
2284 MaybeUniformData(LOCAL_GL_FLOAT_VEC4, mSolidProgramColor, colorData,
2285 mSolidProgramUniformState.mColor);
2286
2287 // Finally draw the colored rectangle.
2288 if (aVertexRange) {
2289 // If there's a vertex range, then we need to draw triangles generated
2290 // from a path stored in the path vertex buffer.
2291 DrawTriangles(*aVertexRange);
2292 } else {
2293 // Otherwise we're drawing a simple filled rectangle.
2294 DrawQuad();
2295 }
2296 success = true;
2297 break;
2298 }
2299 case PatternType::SURFACE: {
2300 auto surfacePattern = static_cast<const SurfacePattern&>(aPattern);
2301 // If a texture handle was supplied, or if the surface already has an
2302 // assigned texture handle stashed in its used data, try to use it.
2303 RefPtr<TextureHandle> handle =
2304 aHandle ? aHandle->get()
2305 : (surfacePattern.mSurface
2306 ? static_cast<TextureHandle*>(
2307 surfacePattern.mSurface->GetUserData(
2308 &mTextureHandleKey))
2309 : nullptr);
2310 IntSize texSize;
2311 IntPoint offset;
2312 SurfaceFormat format;
2313 // Check if the found handle is still valid and if its sampling rect
2314 // matches the requested sampling rect.
2315 if (handle && handle->IsValid() &&
2316 (surfacePattern.mSamplingRect.IsEmpty() ||
2317 handle->GetSamplingRect().IsEqualEdges(
2318 surfacePattern.mSamplingRect)) &&
2319 (surfacePattern.mExtendMode == ExtendMode::CLAMP ||
2320 handle->GetType() == TextureHandle::STANDALONE)) {
2321 texSize = handle->GetSize();
2322 format = handle->GetFormat();
2323 offset = handle->GetSamplingOffset();
2324 } else {
2325 // Otherwise, there is no handle that can be used yet, so extract
2326 // information from the surface pattern.
2327 handle = nullptr;
2328 if (!surfacePattern.mSurface) {
2329 // If there was no actual surface supplied, then we tried to draw
2330 // using a texture handle, but the texture handle wasn't valid.
2331 break;
2332 }
2333 texSize = surfacePattern.mSurface->GetSize();
2334 format = surfacePattern.mSurface->GetFormat();
2335 if (!surfacePattern.mSamplingRect.IsEmpty()) {
2336 texSize = surfacePattern.mSamplingRect.Size();
2337 offset = surfacePattern.mSamplingRect.TopLeft();
2338 }
2339 }
2340
2341 // We need to be able to transform from local space into texture space.
2342 Matrix invMatrix = surfacePattern.mMatrix;
2343 // If drawing a pre-transformed vertex range, then we need to ensure the
2344 // user-space pattern is still transformed to screen-space.
2345 if (aVertexRange && !aTransformed) {
2346 invMatrix *= currentTransform;
2347 }
2348 if (!invMatrix.Invert()) {
2349 break;
2350 }
2351 if (aRectXform) {
2352 // If there is aRectXform, it must be applied to the source rectangle to
2353 // generate the proper input coordinates for the inverse pattern matrix.
2354 invMatrix.PreMultiply(*aRectXform);
2355 }
2356
2357 RefPtr<WebGLTexture> tex;
2358 IntRect bounds;
2359 IntSize backingSize;
2360 RefPtr<DataSourceSurface> data;
2361 if (handle) {
2362 if (aForceUpdate) {
2363 data = surfacePattern.mSurface->GetDataSurface();
2364 if (!data) {
2365 break;
2366 }
2367 // The size of the texture may change if we update contents.
2368 mUsedTextureMemory -= handle->UsedBytes();
2369 handle->UpdateSize(texSize);
2370 mUsedTextureMemory += handle->UsedBytes();
2371 handle->SetSamplingOffset(surfacePattern.mSamplingRect.TopLeft());
2372 }
2373 // If using an existing handle, move it to the front of the MRU list.
2374 handle->remove();
2375 mTextureHandles.insertFront(handle);
2376 } else if ((tex = GetCompatibleSnapshot(surfacePattern.mSurface))) {
2377 backingSize = surfacePattern.mSurface->GetSize();
2378 bounds = IntRect(offset, texSize);
2379 // Count reusing a snapshot texture (no readback) as a cache hit.
2380 mCurrentTarget->mProfile.OnCacheHit();
2381 } else {
2382 // If we get here, we need a data surface for a texture upload.
2383 data = surfacePattern.mSurface->GetDataSurface();
2384 if (!data) {
2385 break;
2386 }
2387 // There is no existing handle. Try to allocate a new one. If the
2388 // surface size may change via a forced update, then don't allocate
2389 // from a shared texture page.
2390 handle = AllocateTextureHandle(
2391 format, texSize,
2392 !aForceUpdate && surfacePattern.mExtendMode == ExtendMode::CLAMP);
2393 if (!handle) {
2394 MOZ_ASSERT(false);
2395 break;
2396 }
2397 // Link the handle to the surface's user data.
2398 handle->SetSamplingOffset(surfacePattern.mSamplingRect.TopLeft());
2399 if (aHandle) {
2400 *aHandle = handle;
2401 } else {
2402 handle->SetSurface(surfacePattern.mSurface);
2403 surfacePattern.mSurface->AddUserData(&mTextureHandleKey, handle.get(),
2404 nullptr);
2405 }
2406 }
2407
2408 // Map the composition op to a WebGL blend mode, if possible. If there is
2409 // a mask color and a texture with multiple channels, assume subpixel
2410 // blending. If we encounter the source op here, then assume the surface
2411 // is opaque (non-opaque is handled above) and emulate it with over.
2412 SetBlendState(aOptions.mCompositionOp,
2413 format != SurfaceFormat::A8 ? aMaskColor : Nothing());
2414 // Switch to the image shader and set up relevant transforms.
2415 if (mLastProgram != mImageProgram) {
2416 mWebgl->UseProgram(mImageProgram);
2417 mLastProgram = mImageProgram;
2418 }
2419
2420 Array<float, 2> viewportData = {float(mViewportSize.width),
2421 float(mViewportSize.height)};
2422 MaybeUniformData(LOCAL_GL_FLOAT_VEC2, mImageProgramViewport, viewportData,
2423 mImageProgramUniformState.mViewport);
2424
2425 // AA is not supported for OP_SOURCE. Generated paths provide their own
2426 // AA as vertex alpha.
2427 Array<float, 1> aaData = {
2428 mLastCompositionOp == CompositionOp::OP_SOURCE || aVertexRange
2429 ? 0.0f
2430 : 1.0f};
2431 MaybeUniformData(LOCAL_GL_FLOAT, mImageProgramAA, aaData,
2432 mImageProgramUniformState.mAA);
2433
2434 // Offset the clip AA bounds by 0.5 to ensure AA falls to 0 at pixel
2435 // boundary.
2436 Array<float, 4> clipData = {mClipAARect.x - 0.5f, mClipAARect.y - 0.5f,
2437 mClipAARect.XMost() + 0.5f,
2438 mClipAARect.YMost() + 0.5f};
2439 MaybeUniformData(LOCAL_GL_FLOAT_VEC4, mImageProgramClipBounds, clipData,
2440 mImageProgramUniformState.mClipBounds);
2441
2442 DeviceColor color =
2443 mLastCompositionOp == CompositionOp::OP_CLEAR
2444 ? DeviceColor(1, 1, 1, 1)
2445 : PremultiplyColor(
2446 aMaskColor && format != SurfaceFormat::A8
2447 ? DeviceColor::Mask(1.0f, aMaskColor->a)
2448 : aMaskColor.valueOr(DeviceColor(1, 1, 1, 1)),
2449 aOptions.mAlpha);
2450 Array<float, 4> colorData = {color.b, color.g, color.r, color.a};
2451 Array<float, 1> swizzleData = {format == SurfaceFormat::A8 ? 1.0f : 0.0f};
2452 Matrix xform(aRect.width, 0.0f, 0.0f, aRect.height, aRect.x, aRect.y);
2453 if (aTransformed) {
2454 xform *= rectXform;
2455 }
2456 Array<float, 6> xformData = {xform._11, xform._12, xform._21,
2457 xform._22, xform._31, xform._32};
2458 MaybeUniformData(LOCAL_GL_FLOAT_VEC2, mImageProgramTransform, xformData,
2459 mImageProgramUniformState.mTransform);
2460
2461 MaybeUniformData(LOCAL_GL_FLOAT_VEC4, mImageProgramColor, colorData,
2462 mImageProgramUniformState.mColor);
2463
2464 MaybeUniformData(LOCAL_GL_FLOAT, mImageProgramSwizzle, swizzleData,
2465 mImageProgramUniformState.mSwizzle);
2466
2467 // Start binding the WebGL state for the texture.
2468 BackingTexture* backing = nullptr;
2469 if (handle) {
2470 backing = handle->GetBackingTexture();
2471 if (!tex) {
2472 tex = backing->GetWebGLTexture();
2473 }
2474 bounds = handle->GetBounds();
2475 backingSize = backing->GetSize();
2476 }
2477 if (mLastTexture != tex) {
2478 mWebgl->BindTexture(LOCAL_GL_TEXTURE_2D, tex);
2479 mLastTexture = tex;
2480 }
2481
2482 if (backing && !backing->IsInitialized()) {
2483 // If this is the first time the texture is used, we need to initialize
2484 // the clamping and filtering state.
2485 backing->MarkInitialized();
2486 InitTexParameters(tex);
2487 if (texSize != backingSize) {
2488 // If this is a shared texture handle whose actual backing texture is
2489 // larger than it, then we need to allocate the texture page to the
2490 // full backing size before we can do a partial upload of the surface.
2491 UploadSurface(nullptr, format, IntRect(IntPoint(), backingSize),
2492 IntPoint(), true, true);
2493 }
2494 }
2495
2496 if (data) {
2497 UploadSurface(data, format, IntRect(offset, texSize), bounds.TopLeft(),
2498 texSize == backingSize);
2499 // Signal that we had to upload new data to the texture cache.
2500 mCurrentTarget->mProfile.OnCacheMiss();
2501 } else {
2502 // Signal that we are reusing data from the texture cache.
2503 mCurrentTarget->mProfile.OnCacheHit();
2504 }
2505
2506 // Set up the texture coordinate matrix to map from the input rectangle to
2507 // the backing texture subrect.
2508 Size backingSizeF(backingSize);
2509 Matrix uvMatrix(aRect.width, 0.0f, 0.0f, aRect.height, aRect.x, aRect.y);
2510 uvMatrix *= invMatrix;
2511 uvMatrix *= Matrix(1.0f / backingSizeF.width, 0.0f, 0.0f,
2512 1.0f / backingSizeF.height,
2513 float(bounds.x - offset.x) / backingSizeF.width,
2514 float(bounds.y - offset.y) / backingSizeF.height);
2515 Array<float, 6> uvData = {uvMatrix._11, uvMatrix._12, uvMatrix._21,
2516 uvMatrix._22, uvMatrix._31, uvMatrix._32};
2517 MaybeUniformData(LOCAL_GL_FLOAT_VEC2, mImageProgramTexMatrix, uvData,
2518 mImageProgramUniformState.mTexMatrix);
2519
2520 // Clamp sampling to within the bounds of the backing texture subrect.
2521 Array<float, 4> texBounds = {
2522 (bounds.x + 0.5f) / backingSizeF.width,
2523 (bounds.y + 0.5f) / backingSizeF.height,
2524 (bounds.XMost() - 0.5f) / backingSizeF.width,
2525 (bounds.YMost() - 0.5f) / backingSizeF.height,
2526 };
2527 switch (surfacePattern.mExtendMode) {
2528 case ExtendMode::REPEAT:
2529 texBounds[0] = -1e16f;
2530 texBounds[1] = -1e16f;
2531 texBounds[2] = 1e16f;
2532 texBounds[3] = 1e16f;
2533 break;
2534 case ExtendMode::REPEAT_X:
2535 texBounds[0] = -1e16f;
2536 texBounds[2] = 1e16f;
2537 break;
2538 case ExtendMode::REPEAT_Y:
2539 texBounds[1] = -1e16f;
2540 texBounds[3] = 1e16f;
2541 break;
2542 default:
2543 break;
2544 }
2545 MaybeUniformData(LOCAL_GL_FLOAT_VEC4, mImageProgramTexBounds, texBounds,
2546 mImageProgramUniformState.mTexBounds);
2547
2548 // Ensure we use nearest filtering when no antialiasing is requested.
2549 if (UseNearestFilter(surfacePattern)) {
2550 SetTexFilter(tex, false);
2551 }
2552
2553 // Finally draw the image rectangle.
2554 if (aVertexRange) {
2555 // If there's a vertex range, then we need to draw triangles generated
2556 // from a path stored in the path vertex buffer.
2557 DrawTriangles(*aVertexRange);
2558 } else {
2559 // Otherwise we're drawing a simple filled rectangle.
2560 DrawQuad();
2561 }
2562
2563 // Restore the default linear filter if overridden.
2564 if (UseNearestFilter(surfacePattern)) {
2565 SetTexFilter(tex, true);
2566 }
2567
2568 success = true;
2569 break;
2570 }
2571 default:
2572 gfxWarning() << "Unknown DrawTargetWebgl::DrawRect pattern type: "
2573 << (int)aPattern.GetType();
2574 break;
2575 }
2576
2577 return success;
2578}
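
One detail worth calling out from DrawRectAccel: texture sampling is clamped to the handle's subrect inset by half a texel per side, so bilinear filtering cannot bleed into a neighboring handle on the same shared page. A sketch of that inset computation in normalized texture coordinates:

#include <array>
#include <cstdio>

struct SubRect { float x, y, width, height; };

// Returns (minU, minV, maxU, maxV) for clamping, inset by half a texel so a
// linearly filtered sample on the edge row/column cannot read outside bounds.
static std::array<float, 4> ClampBounds(const SubRect& b, float texWidth, float texHeight) {
  return {(b.x + 0.5f) / texWidth, (b.y + 0.5f) / texHeight,
          (b.x + b.width - 0.5f) / texWidth, (b.y + b.height - 0.5f) / texHeight};
}

int main() {
  // A 64x64 handle at (128, 0) inside a 2048x2048 shared page.
  auto uv = ClampBounds({128, 0, 64, 64}, 2048, 2048);
  std::printf("%.5f %.5f %.5f %.5f\n", uv[0], uv[1], uv[2], uv[3]);
}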
2579
2580bool SharedContextWebgl::RemoveSharedTexture(
2581 const RefPtr<SharedTexture>& aTexture) {
2582 auto pos =
2583 std::find(mSharedTextures.begin(), mSharedTextures.end(), aTexture);
2584 if (pos == mSharedTextures.end()) {
2585 return false;
2586 }
2587 // Keep around a reserve of empty pages to avoid initialization costs from
2588 // allocating shared pages. If still below the limit of reserved pages, then
2589 // just add it to the reserve. Otherwise, erase the empty texture page.
2590 size_t maxBytes = StaticPrefs::gfx_canvas_accelerated_reserve_empty_cache()
2591 << 20;
2592 size_t usedBytes = aTexture->UsedBytes();
2593 if (mEmptyTextureMemory + usedBytes <= maxBytes) {
2594 mEmptyTextureMemory += usedBytes;
2595 } else {
2596 mTotalTextureMemory -= usedBytes;
2597 mSharedTextures.erase(pos);
2598 ClearLastTexture();
2599 }
2600 return true;
2601}
2602
2603void SharedTextureHandle::Cleanup(SharedContextWebgl& aContext) {
2604 mTexture->Free(*this);
2605
2606 // Check if the shared handle's owning page has no more allocated handles
2607 // after we freed it. If so, remove the empty shared texture page also.
2608 if (!mTexture->HasAllocatedHandles()) {
2609 aContext.RemoveSharedTexture(mTexture);
2610 }
2611}
2612
2613bool SharedContextWebgl::RemoveStandaloneTexture(
2614 const RefPtr<StandaloneTexture>& aTexture) {
2615 auto pos = std::find(mStandaloneTextures.begin(), mStandaloneTextures.end(),
2616 aTexture);
2617 if (pos == mStandaloneTextures.end()) {
2618 return false;
2619 }
2620 mTotalTextureMemory -= aTexture->UsedBytes();
2621 mStandaloneTextures.erase(pos);
2622 ClearLastTexture();
2623 return true;
2624}
2625
2626void StandaloneTexture::Cleanup(SharedContextWebgl& aContext) {
2627 aContext.RemoveStandaloneTexture(this);
2628}
2629
2630// Prune a given texture handle and release its associated resources.
2631void SharedContextWebgl::PruneTextureHandle(
2632 const RefPtr<TextureHandle>& aHandle) {
2633 // Invalidate the handle so nothing will subsequently use its contents.
2634 aHandle->Invalidate();
2635 // If the handle has an associated SourceSurface, unlink it.
2636 UnlinkSurfaceTexture(aHandle);
2637 // If the handle has an associated CacheEntry, unlink it.
2638 if (RefPtr<CacheEntry> entry = aHandle->GetCacheEntry()) {
2639 entry->Unlink();
2640 }
2641 // Deduct the used space from the total.
2642 mUsedTextureMemory -= aHandle->UsedBytes();
2643 // Ensure any allocated shared or standalone texture regions get freed.
2644 aHandle->Cleanup(*this);
2645}
2646
2647// Prune any texture memory above the limit (or margin below the limit) or any
2648// least-recently-used handles that are no longer associated with any usable
2649// surface.
2650bool SharedContextWebgl::PruneTextureMemory(size_t aMargin, bool aPruneUnused) {
2651 // The maximum amount of texture memory that may be used by textures.
2652 size_t maxBytes = StaticPrefs::gfx_canvas_accelerated_cache_size() << 20;
2653 maxBytes -= std::min(maxBytes, aMargin);
2654 size_t maxItems = StaticPrefs::gfx_canvas_accelerated_cache_items();
2655 size_t oldItems = mNumTextureHandles;
2656 while (!mTextureHandles.isEmpty() &&
2657 (mUsedTextureMemory > maxBytes || mNumTextureHandles > maxItems ||
2658 (aPruneUnused && !mTextureHandles.getLast()->IsUsed()))) {
2659 PruneTextureHandle(mTextureHandles.popLast());
2660 --mNumTextureHandles;
2661 }
2662 return mNumTextureHandles < oldItems;
2663}
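
PruneTextureMemory evicts from the least-recently-used end of the handle list until both the byte budget and the item budget are met. A reduced sketch of that eviction loop, with a std::list standing in for the intrusive MRU list used above:

#include <cstddef>
#include <cstdio>
#include <list>

struct Handle { size_t bytes; };

// Evict least-recently-used handles (back of the list) until both budgets
// are satisfied; returns how many handles were pruned.
static size_t Prune(std::list<Handle>& mru, size_t& usedBytes, size_t maxBytes,
                    size_t maxItems) {
  size_t pruned = 0;
  while (!mru.empty() && (usedBytes > maxBytes || mru.size() > maxItems)) {
    usedBytes -= mru.back().bytes;
    mru.pop_back();
    ++pruned;
  }
  return pruned;
}

int main() {
  std::list<Handle> mru = {{4 << 20}, {8 << 20}, {16 << 20}};
  size_t used = 28 << 20;
  std::printf("%zu\n", Prune(mru, used, 12 << 20, 8));  // prints 1: evicts until <= 12 MiB
}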
2664
2665// Attempt to convert a linear gradient to a 1D ramp texture.
2666Maybe<SurfacePattern> DrawTargetWebgl::LinearGradientToSurface(
2667 const RectDouble& aBounds, const Pattern& aPattern) {
2668 MOZ_ASSERT(aPattern.GetType() == PatternType::LINEAR_GRADIENT);
2669 const auto& gradient = static_cast<const LinearGradientPattern&>(aPattern);
2670 // The gradient points must be transformed by the gradient's matrix.
2671 Point gradBegin = gradient.mMatrix.TransformPoint(gradient.mBegin);
2672 Point gradEnd = gradient.mMatrix.TransformPoint(gradient.mEnd);
2673 // Get the gradient points in user-space.
2674 Point begin = mTransform.TransformPoint(gradBegin);
2675 Point end = mTransform.TransformPoint(gradEnd);
2676 // Find the normalized direction of the gradient and its length.
2677 Point dir = end - begin;
2678 float len = dir.Length();
2679 dir = dir / len;
2680 // Restrict the rendered bounds to fall within the canvas.
2681 Rect visBounds = NarrowToFloat(aBounds.SafeIntersect(RectDouble(GetRect())));
2682 // Calculate the distances along the gradient direction of the bounds.
2683 float dist0 = (visBounds.TopLeft() - begin).DotProduct(dir);
2684 float distX = visBounds.width * dir.x;
2685 float distY = visBounds.height * dir.y;
2686 float minDist = floorf(
2687 std::max(dist0 + std::min(distX, 0.0f) + std::min(distY, 0.0f), 0.0f));
2688 float maxDist = ceilf(
2689 std::min(dist0 + std::max(distX, 0.0f) + std::max(distY, 0.0f), len));
2690 // Calculate the approximate size of the ramp texture, and see if it would be
2691 // sufficiently smaller than just rendering the primitive.
2692 float subLen = maxDist - minDist;
2693 if (subLen > 0 && subLen < 0.5f * visBounds.Area()) {
2694 // Create a 1D texture to contain the gradient ramp. Reserve two extra
2695 // texels at the beginning and end of the ramp to account for clamping.
2696 RefPtr<DrawTargetSkia> dt = new DrawTargetSkia;
2697 if (dt->Init(IntSize(int32_t(subLen + 2), 1), SurfaceFormat::B8G8R8A8)) {
2698 // Fill the section of the gradient ramp that is actually used.
2699 dt->FillRect(Rect(dt->GetRect()),
2700 LinearGradientPattern(Point(1 - minDist, 0.0f),
2701 Point(len + 1 - minDist, 0.0f),
2702 gradient.mStops));
2703 if (RefPtr<SourceSurface> snapshot = dt->Snapshot()) {
2704 // Calculate a matrix that will map the gradient ramp texture onto the
2705 // actual direction of the gradient.
2706 Point gradDir = (gradEnd - gradBegin) / len;
2707 Point tangent = Point(-gradDir.y, gradDir.x) / gradDir.Length();
2708 SurfacePattern surfacePattern(
2709 snapshot, ExtendMode::CLAMP,
2710 Matrix(gradDir.x, gradDir.y, tangent.x, tangent.y, gradBegin.x,
2711 gradBegin.y)
2712 .PreTranslate(minDist - 1, 0));
2713 if (SupportsPattern(surfacePattern)) {
2714 return Some(surfacePattern);
2715 }
2716 }
2717 }
2718 }
2719 return Nothing();
2720}
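// Illustrative numbers (not part of the original source) for the heuristic
// above: with a gradient of length len = 100 covering a 200 x 200 visible
// bounds, subLen is at most about 100 texels while
// 0.5f * visBounds.Area() = 20000, so a (subLen + 2) x 1 ramp texture is
// rasterized once instead of evaluating the gradient across the whole rect.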
2721
2722void DrawTargetWebgl::FillRect(const Rect& aRect, const Pattern& aPattern,
2723 const DrawOptions& aOptions) {
2724 RectDouble xformRect = TransformDouble(aRect);
2725 if (aPattern.GetType() == PatternType::COLOR) {
2726 if (Maybe<Rect> clipped = RectClippedToViewport(xformRect)) {
2727 // If the pattern is transform-invariant and the rect clips to the
2728 // viewport, just clip drawing to the viewport to avoid transform
2729 // issues.
2730 DrawRect(*clipped, aPattern, aOptions, Nothing(), nullptr, false);
2731 return;
2732 }
2733 }
2734 if (RectInsidePrecisionLimits(xformRect)) {
2735 if (SupportsPattern(aPattern)) {
2736 DrawRect(aRect, aPattern, aOptions);
2737 return;
2738 }
2739 if (aPattern.GetType() == PatternType::LINEAR_GRADIENT) {
2740 if (Maybe<SurfacePattern> surface =
2741 LinearGradientToSurface(xformRect, aPattern)) {
2742 if (DrawRect(aRect, *surface, aOptions, Nothing(), nullptr, true, true,
2743 true)) {
2744 return;
2745 }
2746 }
2747 }
2748 }
2749
2750 if (!mWebglValid) {
2751 MarkSkiaChanged(aOptions);
2752 mSkia->FillRect(aRect, aPattern, aOptions);
2753 } else {
2754 // If the pattern is unsupported, then transform the rect to a path so it
2755 // can be cached.
2756 SkPath skiaPath;
2757 skiaPath.addRect(RectToSkRect(aRect));
2758 RefPtr<PathSkia> path = new PathSkia(skiaPath, FillRule::FILL_WINDING);
2759 DrawPath(path, aPattern, aOptions);
2760 }
2761}
2762
2763void CacheEntry::Link(const RefPtr<TextureHandle>& aHandle) {
2764 mHandle = aHandle;
2765 mHandle->SetCacheEntry(this);
2766}
2767
2768// When the CacheEntry becomes unused, it marks the corresponding
2769// TextureHandle as unused and unlinks it from the CacheEntry. The
2770// entry is removed from its containing Cache, if applicable.
2771void CacheEntry::Unlink() {
2772 // The entry may not have a valid handle if rasterization failed.
2773 if (mHandle) {
  [Step 8] Taking false branch
2774 mHandle->SetCacheEntry(nullptr);
2775 mHandle = nullptr;
2776 }
2777
2778 RemoveFromList();
  [Step 9] Calling 'CacheEntryImpl::RemoveFromList'
  [Step 27] Returning; memory was released
2779}
2780
2781// Hashes a path and pattern to a single hash value that can be used for quick
2782// comparisons. This currently avoids the expensive hashing of internal path
2783// and pattern data for speed, relying instead on later exact comparisons for
2784// disambiguation.
2785HashNumber PathCacheEntry::HashPath(const QuantizedPath& aPath,
2786 const Pattern* aPattern,
2787 const Matrix& aTransform,
2788 const IntRect& aBounds,
2789 const Point& aOrigin) {
2790 HashNumber hash = 0;
2791 hash = AddToHash(hash, aPath.mPath.num_types);
2792 hash = AddToHash(hash, aPath.mPath.num_points);
2793 if (aPath.mPath.num_points > 0) {
2794 hash = AddToHash(hash, aPath.mPath.points[0].x);
2795 hash = AddToHash(hash, aPath.mPath.points[0].y);
2796 if (aPath.mPath.num_points > 1) {
2797 hash = AddToHash(hash, aPath.mPath.points[1].x);
2798 hash = AddToHash(hash, aPath.mPath.points[1].y);
2799 }
2800 }
2801 // Quantize the relative offset of the path to its bounds.
2802 IntPoint offset = RoundedToInt((aOrigin - Point(aBounds.TopLeft())) * 16.0f);
2803 hash = AddToHash(hash, offset.x);
2804 hash = AddToHash(hash, offset.y);
2805 hash = AddToHash(hash, aBounds.width);
2806 hash = AddToHash(hash, aBounds.height);
2807 if (aPattern) {
2808 hash = AddToHash(hash, (int)aPattern->GetType());
2809 }
2810 return hash;
2811}
2812
2813// When caching rendered geometry, we need to ensure the scale and orientation
2814// are approximately the same. The offset will be considered separately.
2815static inline bool HasMatchingScale(const Matrix& aTransform1,
2816 const Matrix& aTransform2) {
2817 return FuzzyEqual(aTransform1._11, aTransform2._11) &&
2818 FuzzyEqual(aTransform1._22, aTransform2._22) &&
2819 FuzzyEqual(aTransform1._12, aTransform2._12) &&
2820 FuzzyEqual(aTransform1._21, aTransform2._21);
2821}
2822
2823// Determines if an existing path cache entry matches an incoming path and
2824// pattern.
2825inline bool PathCacheEntry::MatchesPath(
2826 const QuantizedPath& aPath, const Pattern* aPattern,
2827 const StrokeOptions* aStrokeOptions, AAStrokeMode aStrokeMode,
2828 const Matrix& aTransform, const IntRect& aBounds, const Point& aOrigin,
2829 HashNumber aHash, float aSigma) {
2830 return aHash == mHash && HasMatchingScale(aTransform, mTransform) &&
2831 // Ensure the clipped relative bounds fit inside those of the entry
2832 aBounds.x - aOrigin.x >= mBounds.x - mOrigin.x &&
2833 (aBounds.x - aOrigin.x) + aBounds.width <=
2834 (mBounds.x - mOrigin.x) + mBounds.width &&
2835 aBounds.y - aOrigin.y >= mBounds.y - mOrigin.y &&
2836 (aBounds.y - aOrigin.y) + aBounds.height <=
2837 (mBounds.y - mOrigin.y) + mBounds.height &&
2838 aPath == mPath &&
2839 (!aPattern ? !mPattern : mPattern && *aPattern == *mPattern) &&
2840 (!aStrokeOptions
2841 ? !mStrokeOptions
2842 : mStrokeOptions && *aStrokeOptions == *mStrokeOptions &&
2843 mAAStrokeMode == aStrokeMode) &&
2844 aSigma == mSigma;
2845}
2846
2847PathCacheEntry::PathCacheEntry(QuantizedPath&& aPath, Pattern* aPattern,
2848 StoredStrokeOptions* aStrokeOptions,
2849 AAStrokeMode aStrokeMode,
2850 const Matrix& aTransform, const IntRect& aBounds,
2851 const Point& aOrigin, HashNumber aHash,
2852 float aSigma)
2853 : CacheEntryImpl<PathCacheEntry>(aTransform, aBounds, aHash),
2854 mPath(std::move(aPath)),
2855 mOrigin(aOrigin),
2856 mPattern(aPattern),
2857 mStrokeOptions(aStrokeOptions),
2858 mAAStrokeMode(aStrokeMode),
2859 mSigma(aSigma) {}
2860
2861// Attempt to find a matching entry in the path cache. If one isn't found,
2862// a new entry will be created. The caller should check whether the contained
2863// texture handle is valid to determine if it will need to render the path
2864// or just reuse the cached texture.
2865already_AddRefed<PathCacheEntry> PathCache::FindOrInsertEntry(
2866 QuantizedPath aPath, const Pattern* aPattern,
2867 const StrokeOptions* aStrokeOptions, AAStrokeMode aStrokeMode,
2868 const Matrix& aTransform, const IntRect& aBounds, const Point& aOrigin,
2869 float aSigma) {
2870 HashNumber hash =
2871 PathCacheEntry::HashPath(aPath, aPattern, aTransform, aBounds, aOrigin);
2872 for (const RefPtr<PathCacheEntry>& entry : GetChain(hash)) {
2873 if (entry->MatchesPath(aPath, aPattern, aStrokeOptions, aStrokeMode,
2874 aTransform, aBounds, aOrigin, hash, aSigma)) {
2875 return do_AddRef(entry);
2876 }
2877 }
2878 Pattern* pattern = nullptr;
2879 if (aPattern) {
2880 pattern = aPattern->CloneWeak();
2881 if (!pattern) {
2882 return nullptr;
2883 }
2884 }
2885 StoredStrokeOptions* strokeOptions = nullptr;
2886 if (aStrokeOptions) {
2887 strokeOptions = aStrokeOptions->Clone();
2888 if (!strokeOptions) {
2889 return nullptr;
2890 }
2891 }
2892 RefPtr<PathCacheEntry> entry =
2893 new PathCacheEntry(std::move(aPath), pattern, strokeOptions, aStrokeMode,
2894 aTransform, aBounds, aOrigin, hash, aSigma);
2895 Insert(entry);
2896 return entry.forget();
2897}
2898
2899void DrawTargetWebgl::Fill(const Path* aPath, const Pattern& aPattern,
2900 const DrawOptions& aOptions) {
2901 if (!aPath || aPath->GetBackendType() != BackendType::SKIA) {
2902 return;
2903 }
2904
2905 const SkPath& skiaPath = static_cast<const PathSkia*>(aPath)->GetPath();
2906 SkRect skiaRect = SkRect::MakeEmpty();
2907 // Draw the path as a simple rectangle with a supported pattern when possible.
2908 if (skiaPath.isRect(&skiaRect)) {
2909 RectDouble rect = SkRectToRectDouble(skiaRect);
2910 RectDouble xformRect = TransformDouble(rect);
2911 if (aPattern.GetType() == PatternType::COLOR) {
2912 if (Maybe<Rect> clipped = RectClippedToViewport(xformRect)) {
2913 // If the pattern is transform-invariant and the rect clips to the
2914 // viewport, just clip drawing to the viewport to avoid transform
2915 // issues.
2916 DrawRect(*clipped, aPattern, aOptions, Nothing(), nullptr, false);
2917 return;
2918 }
2919 }
2920
2921 if (RectInsidePrecisionLimits(xformRect)) {
2922 if (SupportsPattern(aPattern)) {
2923 DrawRect(NarrowToFloat(rect), aPattern, aOptions);
2924 return;
2925 }
2926 if (aPattern.GetType() == PatternType::LINEAR_GRADIENT) {
2927 if (Maybe<SurfacePattern> surface =
2928 LinearGradientToSurface(xformRect, aPattern)) {
2929 if (DrawRect(NarrowToFloat(rect), *surface, aOptions, Nothing(),
2930 nullptr, true, true, true)) {
2931 return;
2932 }
2933 }
2934 }
2935 }
2936 }
2937
2938 DrawPath(aPath, aPattern, aOptions);
2939}
2940
2941void DrawTargetWebgl::FillCircle(const Point& aOrigin, float aRadius,
2942 const Pattern& aPattern,
2943 const DrawOptions& aOptions) {
2944 DrawCircle(aOrigin, aRadius, aPattern, aOptions);
2945}
2946
2947QuantizedPath::QuantizedPath(const WGR::Path& aPath) : mPath(aPath) {}
2948
2949QuantizedPath::QuantizedPath(QuantizedPath&& aPath) noexcept
2950 : mPath(aPath.mPath) {
2951 aPath.mPath.points = nullptr;
2952 aPath.mPath.num_points = 0;
2953 aPath.mPath.types = nullptr;
2954 aPath.mPath.num_types = 0;
2955}
2956
2957QuantizedPath::~QuantizedPath() {
2958 if (mPath.points || mPath.types) {
2959 WGR::wgr_path_release(mPath);
2960 }
2961}
2962
2963bool QuantizedPath::operator==(const QuantizedPath& aOther) const {
2964 return mPath.num_types == aOther.mPath.num_types &&
2965 mPath.num_points == aOther.mPath.num_points &&
2966 mPath.fill_mode == aOther.mPath.fill_mode &&
2967 !memcmp(mPath.types, aOther.mPath.types,
2968 mPath.num_types * sizeof(uint8_t)) &&
2969 !memcmp(mPath.points, aOther.mPath.points,
2970 mPath.num_points * sizeof(WGR::Point));
2971}
2972
2973// Generate a quantized path from the Skia path using WGR. The supplied
2974// transform will be applied to the path. The path is stored relative to its
2975// bounds origin to support translation later.
2976static Maybe<QuantizedPath> GenerateQuantizedPath(
2977 WGR::PathBuilder* aPathBuilder, const SkPath& aPath, const Rect& aBounds,
2978 const Matrix& aTransform) {
2979 if (!aPathBuilder) {
2980 return Nothing();
2981 }
2982
2983 WGR::wgr_builder_reset(aPathBuilder);
2984 WGR::wgr_builder_set_fill_mode(aPathBuilder,
2985 aPath.getFillType() == SkPathFillType::kWinding
2986 ? WGR::FillMode::Winding
2987 : WGR::FillMode::EvenOdd);
2988
2989 SkPath::RawIter iter(aPath);
2990 SkPoint params[4];
2991 SkPath::Verb currentVerb;
2992
2993 // printf_stderr("bounds: (%d, %d) %d x %d\n", aBounds.x, aBounds.y,
2994 // aBounds.width, aBounds.height);
2995 Matrix transform = aTransform;
2996 transform.PostTranslate(-aBounds.TopLeft());
2997 while ((currentVerb = iter.next(params)) != SkPath::kDone_Verb) {
2998 switch (currentVerb) {
2999 case SkPath::kMove_Verb: {
3000 Point p0 = transform.TransformPoint(SkPointToPoint(params[0]));
3001 WGR::wgr_builder_move_to(aPathBuilder, p0.x, p0.y);
3002 break;
3003 }
3004 case SkPath::kLine_Verb: {
3005 Point p1 = transform.TransformPoint(SkPointToPoint(params[1]));
3006 WGR::wgr_builder_line_to(aPathBuilder, p1.x, p1.y);
3007 break;
3008 }
3009 case SkPath::kCubic_Verb: {
3010 Point p1 = transform.TransformPoint(SkPointToPoint(params[1]));
3011 Point p2 = transform.TransformPoint(SkPointToPoint(params[2]));
3012 Point p3 = transform.TransformPoint(SkPointToPoint(params[3]));
3013 // printf_stderr("cubic (%f, %f), (%f, %f), (%f, %f)\n", p1.x, p1.y,
3014 // p2.x, p2.y, p3.x, p3.y);
3015 WGR::wgr_builder_curve_to(aPathBuilder, p1.x, p1.y, p2.x, p2.y, p3.x,
3016 p3.y);
3017 break;
3018 }
3019 case SkPath::kQuad_Verb: {
3020 Point p1 = transform.TransformPoint(SkPointToPoint(params[1]));
3021 Point p2 = transform.TransformPoint(SkPointToPoint(params[2]));
3022 // printf_stderr("quad (%f, %f), (%f, %f)\n", p1.x, p1.y, p2.x, p2.y);
3023 WGR::wgr_builder_quad_to(aPathBuilder, p1.x, p1.y, p2.x, p2.y);
3024 break;
3025 }
3026 case SkPath::kConic_Verb: {
3027 Point p0 = transform.TransformPoint(SkPointToPoint(params[0]));
3028 Point p1 = transform.TransformPoint(SkPointToPoint(params[1]));
3029 Point p2 = transform.TransformPoint(SkPointToPoint(params[2]));
3030 float w = iter.conicWeight();
3031 std::vector<Point> quads;
3032 int numQuads = ConvertConicToQuads(p0, p1, p2, w, quads);
3033 for (int i = 0; i < numQuads; i++) {
3034 Point q1 = quads[2 * i + 1];
3035 Point q2 = quads[2 * i + 2];
3036 // printf_stderr("conic quad (%f, %f), (%f, %f)\n", q1.x, q1.y, q2.x,
3037 // q2.y);
3038 WGR::wgr_builder_quad_to(aPathBuilder, q1.x, q1.y, q2.x, q2.y);
3039 }
3040 break;
3041 }
3042 case SkPath::kClose_Verb:
3043 // printf_stderr("close\n");
3044 WGR::wgr_builder_close(aPathBuilder);
3045 break;
3046 default:
3047 MOZ_ASSERT(false);
3048 // Unexpected verb found in path!
3049 return Nothing();
3050 }
3051 }
3052
3053 WGR::Path p = WGR::wgr_builder_get_path(aPathBuilder);
3054 if (!p.num_points || !p.num_types) {
3055 WGR::wgr_path_release(p);
3056 return Nothing();
3057 }
3058 return Some(QuantizedPath(p));
3059}
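// Note on the conic case above (illustrative, not part of the original
// source): judging from the indexing, ConvertConicToQuads appears to fill
// quads with 2 * numQuads + 1 points starting at the conic's start point, so
// each quad i reuses the previous end point and only emits its control point
// quads[2 * i + 1] and its end point quads[2 * i + 2].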
3060
3061// Get the output vertex buffer using WGR from an input quantized path.
3062static Maybe<WGR::VertexBuffer> GeneratePathVertexBuffer(
3063 const QuantizedPath& aPath, const IntRect& aClipRect,
3064 bool aRasterizationTruncates, WGR::OutputVertex* aBuffer,
3065 size_t aBufferCapacity) {
3066 WGR::VertexBuffer vb = WGR::wgr_path_rasterize_to_tri_list(
3067 &aPath.mPath, aClipRect.x, aClipRect.y, aClipRect.width, aClipRect.height,
3068 true, false, aRasterizationTruncates, aBuffer, aBufferCapacity);
3069 if (!vb.len || (aBuffer && vb.len > aBufferCapacity)) {
3070 WGR::wgr_vertex_buffer_release(vb);
3071 return Nothing();
3072 }
3073 return Some(vb);
3074}
3075
3076static inline AAStroke::LineJoin ToAAStrokeLineJoin(JoinStyle aJoin) {
3077 switch (aJoin) {
3078 case JoinStyle::BEVEL:
3079 return AAStroke::LineJoin::Bevel;
3080 case JoinStyle::ROUND:
3081 return AAStroke::LineJoin::Round;
3082 case JoinStyle::MITER:
3083 case JoinStyle::MITER_OR_BEVEL:
3084 return AAStroke::LineJoin::Miter;
3085 }
3086 return AAStroke::LineJoin::Miter;
3087}
3088
3089static inline AAStroke::LineCap ToAAStrokeLineCap(CapStyle aCap) {
3090 switch (aCap) {
3091 case CapStyle::BUTT:
3092 return AAStroke::LineCap::Butt;
3093 case CapStyle::ROUND:
3094 return AAStroke::LineCap::Round;
3095 case CapStyle::SQUARE:
3096 return AAStroke::LineCap::Square;
3097 }
3098 return AAStroke::LineCap::Butt;
3099}
3100
3101static inline Point WGRPointToPoint(const WGR::Point& aPoint) {
3102 // WGR points are 28.4 fixed-point where (0.0, 0.0) is assumed to be a pixel
3103 // center, as opposed to (0.5, 0.5) in canvas device space. WGR thus shifts
3104 // each point by (-0.5, -0.5). To undo this, transform from fixed-point back
3105 // to floating-point, and reverse the pixel shift by adding back (0.5, 0.5).
3106 return Point(IntPoint(aPoint.x, aPoint.y)) * (1.0f / 16.0f) +
3107 Point(0.5f, 0.5f);
3108}
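// Illustrative check (not part of the original source) of the 28.4
// fixed-point conversion above: a WGR point of (24, 40) maps to
// (24 / 16 + 0.5, 40 / 16 + 0.5) = (2.0, 3.0) in canvas device space.
static_assert(24 / 16.0f + 0.5f == 2.0f && 40 / 16.0f + 0.5f == 3.0f,
              "28.4 fixed-point example");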
3109
3110// Generates a vertex buffer for a stroked path using aa-stroke.
3111static Maybe<AAStroke::VertexBuffer> GenerateStrokeVertexBuffer(
3112 const QuantizedPath& aPath, const StrokeOptions* aStrokeOptions,
3113 float aScale, WGR::OutputVertex* aBuffer, size_t aBufferCapacity) {
3114 AAStroke::StrokeStyle style = {aStrokeOptions->mLineWidth * aScale,
3115 ToAAStrokeLineCap(aStrokeOptions->mLineCap),
3116 ToAAStrokeLineJoin(aStrokeOptions->mLineJoin),
3117 aStrokeOptions->mMiterLimit};
3118 if (style.width <= 0.0f || !std::isfinite(style.width) ||
3119 style.miter_limit <= 0.0f || !std::isfinite(style.miter_limit)) {
3120 return Nothing();
3121 }
3122 AAStroke::Stroker* s = AAStroke::aa_stroke_new(
3123 &style, (AAStroke::OutputVertex*)aBuffer, aBufferCapacity);
3124 bool valid = true;
3125 size_t curPoint = 0;
3126 for (size_t curType = 0; valid && curType < aPath.mPath.num_types;) {
3127 // Verify that we are at the start of a sub-path.
3128 if ((aPath.mPath.types[curType] & WGR::PathPointTypePathTypeMask) !=
3129 WGR::PathPointTypeStart) {
3130 valid = false;
3131 break;
3132 }
3133 // Find where the next sub-path starts so we can locate the end.
3134 size_t endType = curType + 1;
3135 for (; endType < aPath.mPath.num_types; endType++) {
3136 if ((aPath.mPath.types[endType] & WGR::PathPointTypePathTypeMask) ==
3137 WGR::PathPointTypeStart) {
3138 break;
3139 }
3140 }
3141 // Check if the path is closed. This is a flag modifying the last type.
3142 bool closed =
3143 (aPath.mPath.types[endType - 1] & WGR::PathPointTypeCloseSubpath) != 0;
3144 for (; curType < endType; curType++) {
3145 // If this is the last type and the sub-path is not closed, determine if
3146 // this segment should be capped.
3147 bool end = curType + 1 == endType && !closed;
3148 switch (aPath.mPath.types[curType] & WGR::PathPointTypePathTypeMask) {
3149 case WGR::PathPointTypeStart: {
3150 if (curPoint + 1 > aPath.mPath.num_points) {
3151 valid = false;
3152 break;
3153 }
3154 Point p1 = WGRPointToPoint(aPath.mPath.points[curPoint]);
3155 AAStroke::aa_stroke_move_to(s, p1.x, p1.y, closed);
3156 if (end) {
3157 AAStroke::aa_stroke_line_to(s, p1.x, p1.y, true);
3158 }
3159 curPoint++;
3160 break;
3161 }
3162 case WGR::PathPointTypeLine: {
3163 if (curPoint + 1 > aPath.mPath.num_points) {
3164 valid = false;
3165 break;
3166 }
3167 Point p1 = WGRPointToPoint(aPath.mPath.points[curPoint]);
3168 AAStroke::aa_stroke_line_to(s, p1.x, p1.y, end);
3169 curPoint++;
3170 break;
3171 }
3172 case WGR::PathPointTypeBezier: {
3173 if (curPoint + 3 > aPath.mPath.num_points) {
3174 valid = false;
3175 break;
3176 }
3177 Point p1 = WGRPointToPoint(aPath.mPath.points[curPoint]);
3178 Point p2 = WGRPointToPoint(aPath.mPath.points[curPoint + 1]);
3179 Point p3 = WGRPointToPoint(aPath.mPath.points[curPoint + 2]);
3180 AAStroke::aa_stroke_curve_to(s, p1.x, p1.y, p2.x, p2.y, p3.x, p3.y,
3181 end);
3182 curPoint += 3;
3183 break;
3184 }
3185 default:
3186 MOZ_ASSERT(false, "Unknown WGR path point type");
3187 valid = false;
3188 break;
3189 }
3190 }
3191 // Close the sub-path if necessary.
3192 if (valid && closed) {
3193 AAStroke::aa_stroke_close(s);
3194 }
3195 }
3196 Maybe<AAStroke::VertexBuffer> result;
3197 if (valid) {
3198 AAStroke::VertexBuffer vb = AAStroke::aa_stroke_finish(s);
3199 if (!vb.len || (aBuffer && vb.len > aBufferCapacity)) {
3200 AAStroke::aa_stroke_vertex_buffer_release(vb);
3201 } else {
3202 result = Some(vb);
3203 }
3204 }
3205 AAStroke::aa_stroke_release(s);
3206 return result;
3207}
3208
3209// Search the path cache for any entries stored in the path vertex buffer and
3210// remove them.
3211void PathCache::ClearVertexRanges() {
3212 for (auto& chain : mChains) {
3213 PathCacheEntry* entry = chain.getFirst();
3214 while (entry) {
3215 PathCacheEntry* next = entry->getNext();
3216 if (entry->GetVertexRange().IsValid()) {
3217 entry->Unlink();
3218 }
3219 entry = next;
3220 }
3221 }
3222}
3223
3224inline bool DrawTargetWebgl::ShouldAccelPath(
3225 const DrawOptions& aOptions, const StrokeOptions* aStrokeOptions) {
3226 return mWebglValid && SupportsDrawOptions(aOptions) && PrepareContext();
3227}
3228
3229// For now, we only directly support stroking solid color patterns to limit
3230// artifacts from blending of overlapping geometry generated by AAStroke. Other
3231// types of patterns may be partially supported by rendering to a temporary
3232// mask.
3233static inline AAStrokeMode SupportsAAStroke(const Pattern& aPattern,
3234 const DrawOptions& aOptions,
3235 const StrokeOptions& aStrokeOptions,
3236 bool aAllowStrokeAlpha) {
3237 if (aStrokeOptions.mDashPattern) {
3238 return AAStrokeMode::Unsupported;
3239 }
3240 switch (aOptions.mCompositionOp) {
3241 case CompositionOp::OP_SOURCE:
3242 return AAStrokeMode::Geometry;
3243 case CompositionOp::OP_OVER:
3244 if (aPattern.GetType() == PatternType::COLOR) {
3245 return static_cast<const ColorPattern&>(aPattern).mColor.a *
3246 aOptions.mAlpha <
3247 1.0f &&
3248 !aAllowStrokeAlpha
3249 ? AAStrokeMode::Mask
3250 : AAStrokeMode::Geometry;
3251 }
3252 return AAStrokeMode::Unsupported;
3253 default:
3254 return AAStrokeMode::Unsupported;
3255 }
3256}
3257
3258// Render an AA-Stroke'd vertex range into an R8 mask texture for subsequent
3259// drawing.
3260already_AddRefed<TextureHandle> SharedContextWebgl::DrawStrokeMask(
3261 const PathVertexRange& aVertexRange, const IntSize& aSize) {
3262 // Allocate a new texture handle to store the rendered mask.
3263 RefPtr<TextureHandle> handle =
3264 AllocateTextureHandle(SurfaceFormat::A8, aSize, true, true);
3265 if (!handle) {
3266 return nullptr;
3267 }
3268
3269 IntRect texBounds = handle->GetBounds();
3270 BackingTexture* backing = handle->GetBackingTexture();
3271 if (!backing->IsInitialized()) {
3272 // If the backing texture is uninitialized, it needs its sampling parameters
3273 // set for later use.
3274 mWebgl->BindTexture(LOCAL_GL_TEXTURE_2D, backing->GetWebGLTexture());
3275 mWebgl->TexStorage(LOCAL_GL_TEXTURE_2D, 1, LOCAL_GL_R8,
3276 {uint32_t(backing->GetSize().width),
3277 uint32_t(backing->GetSize().height), 1});
3278 InitTexParameters(backing->GetWebGLTexture());
3279 ClearLastTexture();
3280 }
3281
3282 // Set up a scratch framebuffer to render to the appropriate sub-texture of
3283 // the backing texture.
3284 if (!mScratchFramebuffer) {
3285 mScratchFramebuffer = mWebgl->CreateFramebuffer();
3286 }
3287 mWebgl->BindFramebuffer(LOCAL_GL_FRAMEBUFFER, mScratchFramebuffer);
3288 webgl::FbAttachInfo attachInfo;
3289 attachInfo.tex = backing->GetWebGLTexture();
3290 mWebgl->FramebufferAttach(LOCAL_GL_FRAMEBUFFER, LOCAL_GL_COLOR_ATTACHMENT0,
3291 LOCAL_GL_TEXTURE_2D, attachInfo);
3292 mWebgl->Viewport(texBounds.x, texBounds.y, texBounds.width, texBounds.height);
3293 EnableScissor(texBounds);
3294 if (!backing->IsInitialized()) {
3295 backing->MarkInitialized();
3296 // WebGL implicitly clears the backing texture the first time it is used.
3297 } else {
3298 // Ensure the mask background is clear.
3299 mWebgl->ClearColor(0.0f, 0.0f, 0.0f, 0.0f);
3300 mWebgl->Clear(LOCAL_GL_COLOR_BUFFER_BIT);
3301 }
3302
3303 // Reset any blending when drawing the mask.
3304 SetBlendState(CompositionOp::OP_OVER);
3305
3306 // Set up the solid color shader to draw a simple opaque mask.
3307 if (mLastProgram != mSolidProgram) {
3308 mWebgl->UseProgram(mSolidProgram);
3309 mLastProgram = mSolidProgram;
3310 }
3311 Array<float, 2> viewportData = {float(texBounds.width),
3312 float(texBounds.height)};
3313 MaybeUniformData(LOCAL_GL_FLOAT_VEC2, mSolidProgramViewport, viewportData,
3314 mSolidProgramUniformState.mViewport);
3315 Array<float, 1> aaData = {0.0f};
3316 MaybeUniformData(LOCAL_GL_FLOAT, mSolidProgramAA, aaData,
3317 mSolidProgramUniformState.mAA);
3318 Array<float, 4> clipData = {-0.5f, -0.5f, float(texBounds.width) + 0.5f,
3319 float(texBounds.height) + 0.5f};
3320 MaybeUniformData(LOCAL_GL_FLOAT_VEC4, mSolidProgramClipBounds, clipData,
3321 mSolidProgramUniformState.mClipBounds);
3322 Array<float, 4> colorData = {1.0f, 1.0f, 1.0f, 1.0f};
3323 MaybeUniformData(LOCAL_GL_FLOAT_VEC4, mSolidProgramColor, colorData,
3324 mSolidProgramUniformState.mColor);
3325 Array<float, 6> xformData = {1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f};
3326 MaybeUniformData(LOCAL_GL_FLOAT_VEC2, mSolidProgramTransform, xformData,
3327 mSolidProgramUniformState.mTransform);
3328
3329 // Ensure the current clip mask is ignored.
3330 RefPtr<WebGLTexture> prevClipMask = mLastClipMask;
3331 SetNoClipMask();
3332
3333 // Draw the mask using the supplied path vertex range.
3334 DrawTriangles(aVertexRange);
3335
3336 // Restore the previous framebuffer state.
3337 mWebgl->BindFramebuffer(LOCAL_GL_FRAMEBUFFER, mCurrentTarget->mFramebuffer);
3338 mWebgl->Viewport(0, 0, mViewportSize.width, mViewportSize.height);
3339 if (prevClipMask) {
3340 SetClipMask(prevClipMask);
3341 }
3342
3343 return handle.forget();
3344}
3345
3346bool SharedContextWebgl::DrawPathAccel(
3347 const Path* aPath, const Pattern& aPattern, const DrawOptions& aOptions,
3348 const StrokeOptions* aStrokeOptions, bool aAllowStrokeAlpha,
3349 const ShadowOptions* aShadow, bool aCacheable, const Matrix* aPathXform) {
3350 // Get the transformed bounds for the path and conservatively check if the
3351 // bounds overlap the canvas.
3352 const PathSkia* pathSkia = static_cast<const PathSkia*>(aPath);
3353 const Matrix& currentTransform = mCurrentTarget->GetTransform();
3354 Matrix pathXform = currentTransform;
3355 // If there is a path-specific transform that shouldn't be applied to the
3356 // pattern, then generate a matrix that should only be used with the Skia
3357 // path.
3358 if (aPathXform) {
3359 pathXform.PreMultiply(*aPathXform);
3360 }
3361 Rect bounds = pathSkia->GetFastBounds(pathXform, aStrokeOptions);
3362 // If the path is empty, then there is nothing to draw.
3363 if (bounds.IsEmpty()) {
3364 return true;
3365 }
3366 // Avoid integer conversion errors with abnormally large paths.
3367 if (!RectInsidePrecisionLimits(bounds)) {
3368 return false;
3369 }
3370 IntRect viewport(IntPoint(), mViewportSize);
3371 if (aShadow) {
3372 // Inflate the bounds to account for the blur radius.
3373 bounds += aShadow->mOffset;
3374 int32_t blurRadius = aShadow->BlurRadius();
3375 bounds.Inflate(blurRadius);
3376 viewport.Inflate(blurRadius);
3377 }
3378 Point realOrigin = bounds.TopLeft();
3379 if (aCacheable) {
3380 // Quantize the path origin to increase the reuse of cache entries.
3381 bounds.Scale(4.0f);
3382 bounds.Round();
3383 bounds.Scale(0.25f);
3384 }
3385 Point quantizedOrigin = bounds.TopLeft();
3386 // If the path doesn't intersect the viewport, then there is nothing to draw.
3387 IntRect intBounds = RoundedOut(bounds).Intersect(viewport);
3388 if (intBounds.IsEmpty()) {
3389 return true;
3390 }
3391 // Nudge the bounds to account for the quantization rounding.
3392 Rect quantBounds = Rect(intBounds) + (realOrigin - quantizedOrigin);
3393 // If the pattern is a solid color, then this will be used along with a path
3394 // mask to render the path, as opposed to baking the pattern into the cached
3395 // path texture.
3396 Maybe<DeviceColor> color =
3397 aOptions.mCompositionOp == CompositionOp::OP_CLEAR
3398 ? Some(DeviceColor(1, 1, 1, 1))
3399 : (aPattern.GetType() == PatternType::COLOR
3400 ? Some(static_cast<const ColorPattern&>(aPattern).mColor)
3401 : Nothing());
3402 AAStrokeMode aaStrokeMode =
3403 aStrokeOptions && mPathAAStroke
3404 ? SupportsAAStroke(aPattern, aOptions, *aStrokeOptions,
3405 aAllowStrokeAlpha)
3406 : AAStrokeMode::Unsupported;
3407 // Look for an existing path cache entry, if possible, or otherwise create
3408 // one. If the draw request is not cacheable, then don't create an entry.
3409 RefPtr<PathCacheEntry> entry;
3410 RefPtr<TextureHandle> handle;
3411 if (aCacheable) {
3412 if (!mPathCache) {
3413 mPathCache = MakeUnique<PathCache>();
3414 }
3415 // Use a quantized, relative (to its bounds origin) version of the path as
3416 // a cache key to help limit cache bloat.
3417 Maybe<QuantizedPath> qp = GenerateQuantizedPath(
3418 mWGRPathBuilder, pathSkia->GetPath(), quantBounds, pathXform);
3419 if (!qp) {
3420 return false;
3421 }
3422 entry = mPathCache->FindOrInsertEntry(
3423 std::move(*qp), color ? nullptr : &aPattern, aStrokeOptions,
3424 aaStrokeMode, currentTransform, intBounds, quantizedOrigin,
3425 aShadow ? aShadow->mSigma : -1.0f);
3426 if (!entry) {
3427 return false;
3428 }
3429 handle = entry->GetHandle();
3430 }
3431
3432 // If there is a shadow, it needs to draw with the shadow color rather than
3433 // the path color.
3434 Maybe<DeviceColor> shadowColor = color;
3435 if (aShadow && aOptions.mCompositionOp != CompositionOp::OP_CLEAR) {
3436 shadowColor = Some(aShadow->mColor);
3437 if (color) {
3438 shadowColor->a *= color->a;
3439 }
3440 }
3441 SamplingFilter filter =
3442 aShadow ? SamplingFilter::GOOD : GetSamplingFilter(aPattern);
3443 if (handle && handle->IsValid()) {
3444 // If the entry has a valid texture handle still, use it. However, the
3445 // entry texture is assumed to be located relative to its previous bounds.
3446 // We need to offset the pattern by the difference between its new unclipped
3447 // origin and its previous unclipped origin. Then when we finally
3448 // draw a rectangle at the expected new bounds, it will overlap the portion
3449 // of the old entry texture we actually need to sample from.
3450 Point offset =
3451 (realOrigin - entry->GetOrigin()) + entry->GetBounds().TopLeft();
3452 SurfacePattern pathPattern(nullptr, ExtendMode::CLAMP,
3453 Matrix::Translation(offset), filter);
3454 return DrawRectAccel(quantBounds, pathPattern, aOptions, shadowColor,
3455 &handle, false, true, true);
3456 }
3457
3458 if (mPathVertexCapacity > 0 && !handle && entry && !aShadow &&
3459 aOptions.mAntialiasMode != AntialiasMode::NONE &&
3460 SupportsPattern(aPattern) &&
3461 entry->GetPath().mPath.num_types <= mPathMaxComplexity) {
3462 if (entry->GetVertexRange().IsValid()) {
3463 // If there is valid cached vertex data in the path vertex buffer, then
3464 // just draw that. We must draw at integer pixel boundaries (using
3465 // intBounds instead of quantBounds) due to WGR's reliance on pixel center
3466 // location.
3467 mCurrentTarget->mProfile.OnCacheHit();
3468 return DrawRectAccel(Rect(intBounds.TopLeft(), Size(1, 1)), aPattern,
3469 aOptions, Nothing(), nullptr, false, true, true,
3470 false, nullptr, &entry->GetVertexRange());
3471 }
3472
3473 // printf_stderr("Generating... verbs %d, points %d\n",
3474 // int(pathSkia->GetPath().countVerbs()),
3475 // int(pathSkia->GetPath().countPoints()));
3476 WGR::OutputVertex* outputBuffer = nullptr;
3477 size_t outputBufferCapacity = 0;
3478 if (mWGROutputBuffer) {
3479 outputBuffer = mWGROutputBuffer.get();
3480 outputBufferCapacity = mPathVertexCapacity / sizeof(WGR::OutputVertex);
3481 }
3482 Maybe<WGR::VertexBuffer> wgrVB;
3483 Maybe<AAStroke::VertexBuffer> strokeVB;
3484 if (!aStrokeOptions) {
3485 if (aPath == mUnitCirclePath) {
3486 auto scaleFactors = pathXform.ScaleFactors();
3487 if (scaleFactors.AreScalesSame()) {
3488 Point center = pathXform.GetTranslation() - quantBounds.TopLeft();
3489 float radius = scaleFactors.xScale;
3490 AAStroke::VertexBuffer vb = AAStroke::aa_stroke_filled_circle(
3491 center.x, center.y, radius, (AAStroke::OutputVertex*)outputBuffer,
3492 outputBufferCapacity);
3493 if (!vb.len || (outputBuffer && vb.len > outputBufferCapacity)) {
3494 AAStroke::aa_stroke_vertex_buffer_release(vb);
3495 } else {
3496 strokeVB = Some(vb);
3497 }
3498 }
3499 }
3500 if (!strokeVB) {
3501 wgrVB = GeneratePathVertexBuffer(
3502 entry->GetPath(), IntRect(-intBounds.TopLeft(), mViewportSize),
3503 mRasterizationTruncates, outputBuffer, outputBufferCapacity);
3504 }
3505 } else {
3506 if (aaStrokeMode != AAStrokeMode::Unsupported) {
3507 auto scaleFactors = currentTransform.ScaleFactors();
3508 if (scaleFactors.AreScalesSame()) {
3509 strokeVB = GenerateStrokeVertexBuffer(
3510 entry->GetPath(), aStrokeOptions, scaleFactors.xScale,
3511 outputBuffer, outputBufferCapacity);
3512 }
3513 }
3514 if (!strokeVB && mPathWGRStroke) {
3515 // If stroking, then generate a path to fill the stroked region. This
3516 // path will need to be quantized again because it differs from the
3517 // path used for the cache entry, but this allows us to avoid
3518 // generating a fill path on a cache hit.
3519 Maybe<Rect> cullRect;
3520 Matrix invTransform = currentTransform;
3521 if (invTransform.Invert()) {
3522 // Transform the stroking clip rect from device space to local
3523 // space.
3524 Rect invRect = invTransform.TransformBounds(Rect(mClipRect));
3525 invRect.RoundOut();
3526 cullRect = Some(invRect);
3527 }
3528 SkPath fillPath;
3529 if (pathSkia->GetFillPath(*aStrokeOptions, pathXform, fillPath,
3530 cullRect)) {
3531 // printf_stderr(" stroke fill... verbs %d, points %d\n",
3532 // int(fillPath.countVerbs()),
3533 // int(fillPath.countPoints()));
3534 if (Maybe<QuantizedPath> qp = GenerateQuantizedPath(
3535 mWGRPathBuilder, fillPath, quantBounds, pathXform)) {
3536 wgrVB = GeneratePathVertexBuffer(
3537 *qp, IntRect(-intBounds.TopLeft(), mViewportSize),
3538 mRasterizationTruncates, outputBuffer, outputBufferCapacity);
3539 }
3540 }
3541 }
3542 }
3543 if (wgrVB || strokeVB) {
3544 const uint8_t* vbData =
3545 wgrVB ? (const uint8_t*)wgrVB->data : (const uint8_t*)strokeVB->data;
3546 if (outputBuffer && !vbData) {
3547 vbData = (const uint8_t*)outputBuffer;
3548 }
3549 size_t vbLen = wgrVB ? wgrVB->len : strokeVB->len;
3550 uint32_t vertexBytes = uint32_t(
3551 std::min(vbLen * sizeof(WGR::OutputVertex), size_t(UINT32_MAX)));
3552 // printf_stderr(" ... %d verts, %d bytes\n", int(vbLen),
3553 // int(vertexBytes));
3554 if (vertexBytes > mPathVertexCapacity - mPathVertexOffset &&
3555 vertexBytes <= mPathVertexCapacity - sizeof(kRectVertexData)) {
3556 // If the vertex data is too large to fit in the remaining path vertex
3557 // buffer, then orphan the contents of the vertex buffer to make room
3558 // for it.
3559 if (mPathCache) {
3560 mPathCache->ClearVertexRanges();
3561 }
3562 ResetPathVertexBuffer(false);
3563 }
3564 if (vertexBytes <= mPathVertexCapacity - mPathVertexOffset) {
3565 // If there is actually room to fit the vertex data in the vertex buffer
3566 // after orphaning as necessary, then upload the data to the next
3567 // available offset in the buffer.
3568 PathVertexRange vertexRange(
3569 uint32_t(mPathVertexOffset / sizeof(WGR::OutputVertex)),
3570 uint32_t(vbLen));
3571 // printf_stderr(" ... offset %d\n", mPathVertexOffset);
3572 // Normal glBufferSubData interleaved with draw calls causes performance
3573 // issues on Mali, so use our special unsynchronized version. This is
3574 // safe as we never update regions referenced by pending draw calls.
3575 mWebgl->BufferSubData(LOCAL_GL_ARRAY_BUFFER, mPathVertexOffset,
3576 vertexBytes, vbData,
3577 /* unsynchronized */ true);
3578 mPathVertexOffset += vertexBytes;
3579 if (wgrVB) {
3580 WGR::wgr_vertex_buffer_release(wgrVB.ref());
3581 } else {
3582 AAStroke::aa_stroke_vertex_buffer_release(strokeVB.ref());
3583 }
3584 if (strokeVB && aaStrokeMode == AAStrokeMode::Mask) {
3585 // Attempt to generate a stroke mask for the path.
3586 if (RefPtr<TextureHandle> handle =
3587 DrawStrokeMask(vertexRange, intBounds.Size())) {
3588 // Finally, draw the rendered stroke mask.
3589 if (entry) {
3590 entry->Link(handle);
3591 }
3592 mCurrentTarget->mProfile.OnCacheMiss();
3593 SurfacePattern maskPattern(
3594 nullptr, ExtendMode::CLAMP,
3595 Matrix::Translation(quantBounds.TopLeft()),
3596 SamplingFilter::GOOD);
3597 return DrawRectAccel(quantBounds, maskPattern, aOptions, color,
3598 &handle, false, true, true);
3599 }
3600 } else {
3601 // Remember the vertex range in the cache entry so that it can be
3602 // reused later.
3603 if (entry) {
3604 entry->SetVertexRange(vertexRange);
3605 }
3606
3607 // Finally, draw the uploaded vertex data.
3608 mCurrentTarget->mProfile.OnCacheMiss();
3609 return DrawRectAccel(Rect(intBounds.TopLeft(), Size(1, 1)), aPattern,
3610 aOptions, Nothing(), nullptr, false, true, true,
3611 false, nullptr, &vertexRange);
3612 }
3613 } else {
3614 if (wgrVB) {
3615 WGR::wgr_vertex_buffer_release(wgrVB.ref());
3616 } else {
3617 AAStroke::aa_stroke_vertex_buffer_release(strokeVB.ref());
3618 }
3619 }
3620 // If we failed to draw the vertex data for some reason, then fall through
3621 // to the texture rasterization path.
3622 }
3623 }
3624
3625 // If a stroke path covers too much screen area, it is likely that most of it
3626 // is empty space in the interior. This usually imposes too high a cost versus
3627 // just rasterizing without acceleration. Note that AA-Stroke generally
3628 // produces more acceptable amounts of geometry for larger paths, so we do
3629 // this heuristic after we attempt AA-Stroke.
3630 if (aStrokeOptions &&
3631 intBounds.width * intBounds.height >
3632 (mViewportSize.width / 2) * (mViewportSize.height / 2)) {
3633 return false;
3634 }
3635
3636 // If there isn't a valid texture handle, then we need to rasterize the
3637 // path in a software canvas and upload this to a texture. Solid color
3638 // patterns will be rendered as a path mask that can then be modulated
3639 // with any color. Other pattern types have to rasterize the pattern
3640 // directly into the cached texture.
3641 handle = nullptr;
3642 RefPtr<DrawTargetSkia> pathDT = new DrawTargetSkia;
3643 if (pathDT->Init(intBounds.Size(), color || aShadow
3644 ? SurfaceFormat::A8
3645 : SurfaceFormat::B8G8R8A8)) {
3646 Point offset = -quantBounds.TopLeft();
3647 if (aShadow) {
3648 // Ensure the shadow is drawn at the requested offset.
3649 offset += aShadow->mOffset;
3650 }
3651 DrawOptions drawOptions(1.0f, CompositionOp::OP_OVER,
3652 aOptions.mAntialiasMode);
3653 static const ColorPattern maskPattern(DeviceColor(1.0f, 1.0f, 1.0f, 1.0f));
3654 const Pattern& cachePattern = color ? maskPattern : aPattern;
3655 // If the source pattern is a DrawTargetWebgl snapshot, we may shift
3656 // targets when drawing the path, so back up the old target.
3657 DrawTargetWebgl* oldTarget = mCurrentTarget;
3658 {
3659 RefPtr<const Path> path;
3660 if (!aPathXform || (color && !aStrokeOptions)) {
3661 // If the pattern is transform invariant or there is no pathXform, then
3662 // it is safe to use the path directly. Solid colors are transform
3663 // invariant, except when there are stroke options such as line width or
3664 // dashes that should not be scaled by pathXform.
3665 path = aPath;
3666 pathDT->SetTransform(pathXform * Matrix::Translation(offset));
3667 } else {
3668 // If there is a pathXform, then pre-apply that to the path to avoid
3669 // altering the pattern.
3670 RefPtr<PathBuilder> builder =
3671 aPath->TransformedCopyToBuilder(*aPathXform);
3672 path = builder->Finish();
3673 pathDT->SetTransform(currentTransform * Matrix::Translation(offset));
3674 }
3675 if (aStrokeOptions) {
3676 pathDT->Stroke(path, cachePattern, *aStrokeOptions, drawOptions);
3677 } else {
3678 pathDT->Fill(path, cachePattern, drawOptions);
3679 }
3680 }
3681 if (aShadow && aShadow->mSigma > 0.0f) {
3682 // Blur the shadow if required.
3683 uint8_t* data = nullptr;
3684 IntSize size;
3685 int32_t stride = 0;
3686 SurfaceFormat format = SurfaceFormat::UNKNOWN;
3687 if (pathDT->LockBits(&data, &size, &stride, &format)) {
3688 AlphaBoxBlur blur(Rect(pathDT->GetRect()), stride, aShadow->mSigma,
3689 aShadow->mSigma);
3690 blur.Blur(data);
3691 pathDT->ReleaseBits(data);
3692 }
3693 }
3694 RefPtr<SourceSurface> pathSurface = pathDT->Snapshot();
3695 if (pathSurface) {
3696 // If the target changed, try to restore it.
3697 if (mCurrentTarget != oldTarget && !oldTarget->PrepareContext()) {
3698 return false;
3699 }
3700 SurfacePattern pathPattern(pathSurface, ExtendMode::CLAMP,
3701 Matrix::Translation(quantBounds.TopLeft()),
3702 filter);
3703 // Try and upload the rasterized path to a texture. If there is a
3704 // valid texture handle after this, then link it to the entry.
3705 // Otherwise, we might have to fall back to software drawing the
3706 // path, so unlink it from the entry.
3707 if (DrawRectAccel(quantBounds, pathPattern, aOptions, shadowColor,
3708 &handle, false, true) &&
3709 handle) {
3710 if (entry) {
3711 entry->Link(handle);
3712 }
3713 } else if (entry) {
3714 entry->Unlink();
3715 }
3716 return true;
3717 }
3718 }
3719
3720 return false;
3721}
3722
3723void DrawTargetWebgl::DrawPath(const Path* aPath, const Pattern& aPattern,
3724 const DrawOptions& aOptions,
3725 const StrokeOptions* aStrokeOptions,
3726 bool aAllowStrokeAlpha) {
3727 // If there is a WebGL context, then try to cache the path to avoid slow
3728 // fallbacks.
3729 if (ShouldAccelPath(aOptions, aStrokeOptions) &&
3730 mSharedContext->DrawPathAccel(aPath, aPattern, aOptions, aStrokeOptions,
3731 aAllowStrokeAlpha)) {
3732 return;
3733 }
3734
3735 // There was no path cache entry available to use, so fall back to drawing the
3736 // path with Skia.
3737 MarkSkiaChanged(aOptions);
3738 if (aStrokeOptions) {
3739 mSkia->Stroke(aPath, aPattern, *aStrokeOptions, aOptions);
3740 } else {
3741 mSkia->Fill(aPath, aPattern, aOptions);
3742 }
3743}
3744
3745// DrawCircleAccel is a more specialized version of DrawPathAccel that attempts
3746// to cache a unit circle.
3747bool SharedContextWebgl::DrawCircleAccel(const Point& aCenter, float aRadius,
3748 const Pattern& aPattern,
3749 const DrawOptions& aOptions,
3750 const StrokeOptions* aStrokeOptions) {
3751 // Cache a unit circle and transform it to avoid creating a path repeatedly.
3752 if (!mUnitCirclePath) {
3753 mUnitCirclePath = MakePathForCircle(*mCurrentTarget, Point(0, 0), 1);
3754 }
3755 // Scale and translate the circle to the desired shape.
3756 Matrix circleXform(aRadius, 0, 0, aRadius, aCenter.x, aCenter.y);
3757 return DrawPathAccel(mUnitCirclePath, aPattern, aOptions, aStrokeOptions,
3758 true, nullptr, true, &circleXform);
3759}
3760
3761void DrawTargetWebgl::DrawCircle(const Point& aOrigin, float aRadius,
3762 const Pattern& aPattern,
3763 const DrawOptions& aOptions,
3764 const StrokeOptions* aStrokeOptions) {
3765 if (ShouldAccelPath(aOptions, aStrokeOptions) &&
3766 mSharedContext->DrawCircleAccel(aOrigin, aRadius, aPattern, aOptions,
3767 aStrokeOptions)) {
3768 return;
3769 }
3770
3771 MarkSkiaChanged(aOptions);
3772 if (aStrokeOptions) {
3773 mSkia->StrokeCircle(aOrigin, aRadius, aPattern, *aStrokeOptions, aOptions);
3774 } else {
3775 mSkia->FillCircle(aOrigin, aRadius, aPattern, aOptions);
3776 }
3777}
3778
3779void DrawTargetWebgl::DrawSurface(SourceSurface* aSurface, const Rect& aDest,
3780 const Rect& aSource,
3781 const DrawSurfaceOptions& aSurfOptions,
3782 const DrawOptions& aOptions) {
3783 Matrix matrix = Matrix::Scaling(aDest.width / aSource.width,
3784 aDest.height / aSource.height);
3785 matrix.PreTranslate(-aSource.x, -aSource.y);
3786 matrix.PostTranslate(aDest.x, aDest.y);
3787 SurfacePattern pattern(aSurface, ExtendMode::CLAMP, matrix,
3788 aSurfOptions.mSamplingFilter);
3789 DrawRect(aDest, pattern, aOptions);
3790}
3791
3792void DrawTargetWebgl::Mask(const Pattern& aSource, const Pattern& aMask,
3793 const DrawOptions& aOptions) {
3794 if (!SupportsDrawOptions(aOptions) ||
3795 aMask.GetType() != PatternType::SURFACE ||
3796 aSource.GetType() != PatternType::COLOR) {
3797 MarkSkiaChanged(aOptions);
3798 mSkia->Mask(aSource, aMask, aOptions);
3799 return;
3800 }
3801 auto sourceColor = static_cast<const ColorPattern&>(aSource).mColor;
3802 auto maskPattern = static_cast<const SurfacePattern&>(aMask);
3803 DrawRect(Rect(IntRect(IntPoint(), maskPattern.mSurface->GetSize())),
3804 maskPattern, aOptions, Some(sourceColor));
3805}
3806
3807void DrawTargetWebgl::MaskSurface(const Pattern& aSource, SourceSurface* aMask,
3808 Point aOffset, const DrawOptions& aOptions) {
3809 if (!SupportsDrawOptions(aOptions) ||
3810 aSource.GetType() != PatternType::COLOR) {
3811 MarkSkiaChanged(aOptions);
3812 mSkia->MaskSurface(aSource, aMask, aOffset, aOptions);
3813 } else {
3814 auto sourceColor = static_cast<const ColorPattern&>(aSource).mColor;
3815 SurfacePattern pattern(aMask, ExtendMode::CLAMP,
3816 Matrix::Translation(aOffset));
3817 DrawRect(Rect(aOffset, Size(aMask->GetSize())), pattern, aOptions,
3818 Some(sourceColor));
3819 }
3820}
3821
3822// Extract the surface's alpha values into an A8 surface.
3823static already_AddRefed<DataSourceSurface> ExtractAlpha(SourceSurface* aSurface,
3824 bool aAllowSubpixelAA) {
3825 RefPtr<DataSourceSurface> surfaceData = aSurface->GetDataSurface();
3826 if (!surfaceData) {
3827 return nullptr;
3828 }
3829 DataSourceSurface::ScopedMap srcMap(surfaceData, DataSourceSurface::READ);
3830 if (!srcMap.IsMapped()) {
3831 return nullptr;
3832 }
3833 IntSize size = surfaceData->GetSize();
3834 RefPtr<DataSourceSurface> alpha =
3835 Factory::CreateDataSourceSurface(size, SurfaceFormat::A8, false);
3836 if (!alpha) {
3837 return nullptr;
3838 }
3839 DataSourceSurface::ScopedMap dstMap(alpha, DataSourceSurface::WRITE);
3840 if (!dstMap.IsMapped()) {
3841 return nullptr;
3842 }
3843 // For subpixel masks, ignore the alpha and instead sample one of the color
3844 // channels as if they were alpha.
3845 SwizzleData(
3846 srcMap.GetData(), srcMap.GetStride(),
3847 aAllowSubpixelAA ? SurfaceFormat::A8R8G8B8 : surfaceData->GetFormat(),
3848 dstMap.GetData(), dstMap.GetStride(), SurfaceFormat::A8, size);
3849 return alpha.forget();
3850}
3851
3852void DrawTargetWebgl::DrawShadow(const Path* aPath, const Pattern& aPattern,
3853 const ShadowOptions& aShadow,
3854 const DrawOptions& aOptions,
3855 const StrokeOptions* aStrokeOptions) {
3856 if (!aPath || aPath->GetBackendType() != BackendType::SKIA) {
3857 return;
3858 }
3859
3860 // If there is a WebGL context, then try to cache the path to avoid slow
3861 // fallbacks.
3862 if (ShouldAccelPath(aOptions, aStrokeOptions) &&
3863 mSharedContext->DrawPathAccel(aPath, aPattern, aOptions, aStrokeOptions,
3864 false, &aShadow)) {
3865 return;
3866 }
3867
3868 // There was no path cache entry available to use, so fall back to drawing the
3869 // path with Skia.
3870 MarkSkiaChanged(aOptions);
3871 mSkia->DrawShadow(aPath, aPattern, aShadow, aOptions, aStrokeOptions);
3872}
3873
3874void DrawTargetWebgl::DrawSurfaceWithShadow(SourceSurface* aSurface,
3875 const Point& aDest,
3876 const ShadowOptions& aShadow,
3877 CompositionOp aOperator) {
3878 DrawOptions options(1.0f, aOperator);
3879 if (ShouldAccelPath(options, nullptr)) {
3880 SurfacePattern pattern(aSurface, ExtendMode::CLAMP,
3881 Matrix::Translation(aDest));
3882 SkPath skiaPath;
3883 skiaPath.addRect(RectToSkRect(Rect(aSurface->GetRect()) + aDest));
3884 RefPtr<PathSkia> path = new PathSkia(skiaPath, FillRule::FILL_WINDING);
3885 AutoRestoreTransform restore(this);
3886 SetTransform(Matrix());
3887 if (mSharedContext->DrawPathAccel(path, pattern, options, nullptr, false,
3888 &aShadow, false)) {
3889 DrawRect(Rect(aSurface->GetRect()) + aDest, pattern, options);
3890 return;
3891 }
3892 }
3893
3894 MarkSkiaChanged(options);
3895 mSkia->DrawSurfaceWithShadow(aSurface, aDest, aShadow, aOperator);
3896}
3897
3898already_AddRefed<PathBuilder> DrawTargetWebgl::CreatePathBuilder(
3899 FillRule aFillRule) const {
3900 return mSkia->CreatePathBuilder(aFillRule);
3901}
3902
3903void DrawTargetWebgl::SetTransform(const Matrix& aTransform) {
3904 DrawTarget::SetTransform(aTransform);
3905 mSkia->SetTransform(aTransform);
3906}
3907
3908void DrawTargetWebgl::StrokeRect(const Rect& aRect, const Pattern& aPattern,
3909 const StrokeOptions& aStrokeOptions,
3910 const DrawOptions& aOptions) {
3911 if (!mWebglValid) {
3912 MarkSkiaChanged(aOptions);
3913 mSkia->StrokeRect(aRect, aPattern, aStrokeOptions, aOptions);
3914 } else {
3915 // If the stroke options are unsupported, then transform the rect to a path
3916 // so it can be cached.
3917 SkPath skiaPath;
3918 skiaPath.addRect(RectToSkRect(aRect));
3919 RefPtr<PathSkia> path = new PathSkia(skiaPath, FillRule::FILL_WINDING);
3920 DrawPath(path, aPattern, aOptions, &aStrokeOptions, true);
3921 }
3922}
3923
3924static inline bool IsThinLine(const Matrix& aTransform,
3925 const StrokeOptions& aStrokeOptions) {
3926 auto scale = aTransform.ScaleFactors();
3927 return std::max(scale.xScale, scale.yScale) * aStrokeOptions.mLineWidth <= 1;
3928}
3929
3930bool DrawTargetWebgl::StrokeLineAccel(const Point& aStart, const Point& aEnd,
3931 const Pattern& aPattern,
3932 const StrokeOptions& aStrokeOptions,
3933 const DrawOptions& aOptions,
3934 bool aClosed) {
3935 // Approximating a wide line as a rectangle works only with certain cap styles
3936 // in the general case (butt or square). However, if the line width is
3937 // sufficiently thin, we can ignore the round cap (or treat it like a square
3938 // cap for zero-length lines) without causing objectionable artifacts.
3939 // Lines may sometimes be used in closed paths that immediately reverse back,
3940 // in which case we need to use mLineJoin instead of mLineCap to determine the
3941 // actual cap used.
3942 CapStyle capStyle =
3943 aClosed ? (aStrokeOptions.mLineJoin == JoinStyle::ROUND ? CapStyle::ROUND
3944 : CapStyle::BUTT)
3945 : aStrokeOptions.mLineCap;
3946 if (mWebglValid && SupportsPattern(aPattern) &&
3947 (capStyle != CapStyle::ROUND ||
3948 IsThinLine(GetTransform(), aStrokeOptions)) &&
3949 aStrokeOptions.mDashPattern == nullptr && aStrokeOptions.mLineWidth > 0) {
3950 // Treat the line as a rectangle whose center-line is the supplied line and
3951 // for which the height is the supplied line width. Generate a matrix that
3952 // maps the X axis to the orientation of the line and the Y axis to the
3953 // normal vector to the line. This only works if the line caps are squared,
3954 // as rounded rectangles are currently not supported for round line caps.
3955 Point start = aStart;
3956 Point dirX = aEnd - aStart;
3957 Point dirY;
3958 float dirLen = dirX.Length();
3959 float scale = aStrokeOptions.mLineWidth;
3960 if (dirLen == 0.0f) {
3961 // If the line is zero-length, then only a cap is rendered.
3962 switch (capStyle) {
3963 case CapStyle::BUTT:
3964 // The cap doesn't extend beyond the line so nothing is drawn.
3965 return true;
3966 case CapStyle::ROUND:
3967 case CapStyle::SQUARE:
3968 // Draw a unit square centered at the single point.
3969 dirX = Point(scale, 0.0f);
3970 dirY = Point(0.0f, scale);
3971 // Offset the start by half a unit.
3972 start.x -= 0.5f * scale;
3973 break;
3974 }
3975 } else {
3976 // Make the scale map to a single unit length.
3977 scale /= dirLen;
3978 dirY = Point(-dirX.y, dirX.x) * scale;
3979 if (capStyle == CapStyle::SQUARE) {
3980 // Offset the start by half a unit.
3981 start -= (dirX * scale) * 0.5f;
3982 // Ensure the extent also accounts for the start and end cap.
3983 dirX += dirX * scale;
3984 }
3985 }
3986 Matrix lineXform(dirX.x, dirX.y, dirY.x, dirY.y, start.x - 0.5f * dirY.x,
3987 start.y - 0.5f * dirY.y);
3988 if (PrepareContext() &&
3989 mSharedContext->DrawRectAccel(Rect(0, 0, 1, 1), aPattern, aOptions,
3990 Nothing(), nullptr, true, true, true,
3991 false, nullptr, nullptr, &lineXform)) {
3992 return true;
3993 }
3994 }
3995 return false;
3996}
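// Illustrative numbers (not part of the original source) for the mapping
// above: stroking a line from (0, 0) to (10, 0) with line width 2 and butt
// caps gives dirX = (10, 0), scale = 2 / 10 = 0.2 and dirY = (0, 2), so
// lineXform = Matrix(10, 0, 0, 2, 0, -1) maps the unit rect (0, 0, 1, 1)
// onto the 10 x 2 rectangle spanning (0, -1) to (10, 1) that is centered on
// the stroked line.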
3997
3998void DrawTargetWebgl::StrokeLine(const Point& aStart, const Point& aEnd,
3999 const Pattern& aPattern,
4000 const StrokeOptions& aStrokeOptions,
4001 const DrawOptions& aOptions) {
4002 if (!mWebglValid) {
4003 MarkSkiaChanged(aOptions);
4004 mSkia->StrokeLine(aStart, aEnd, aPattern, aStrokeOptions, aOptions);
4005 } else if (!StrokeLineAccel(aStart, aEnd, aPattern, aStrokeOptions,
4006 aOptions)) {
4007 // If the stroke options are unsupported, then transform the line to a path
4008 // so it can be cached.
4009 SkPath skiaPath;
4010 skiaPath.moveTo(PointToSkPoint(aStart));
4011 skiaPath.lineTo(PointToSkPoint(aEnd));
4012 RefPtr<PathSkia> path = new PathSkia(skiaPath, FillRule::FILL_WINDING);
4013 DrawPath(path, aPattern, aOptions, &aStrokeOptions, true);
4014 }
4015}
4016
4017void DrawTargetWebgl::Stroke(const Path* aPath, const Pattern& aPattern,
4018 const StrokeOptions& aStrokeOptions,
4019 const DrawOptions& aOptions) {
4020 if (!aPath || aPath->GetBackendType() != BackendType::SKIA) {
4021 return;
4022 }
4023 const auto& skiaPath = static_cast<const PathSkia*>(aPath)->GetPath();
4024 if (!mWebglValid) {
4025 MarkSkiaChanged(aOptions);
4026 mSkia->Stroke(aPath, aPattern, aStrokeOptions, aOptions);
4027 return;
4028 }
4029
4030 // Avoid using Skia's isLine here because some paths erroneously include a
4031 // closePath at the end, causing isLine to not detect the line. In that case
4032 // we just draw a line in reverse right over the original line.
4033 int numVerbs = skiaPath.countVerbs();
4034 bool allowStrokeAlpha = false;
4035 if (numVerbs >= 2 && numVerbs <= 3) {
4036 uint8_t verbs[3];
4037 skiaPath.getVerbs(verbs, numVerbs);
4038 if (verbs[0] == SkPath::kMove_Verb && verbs[1] == SkPath::kLine_Verb &&
4039 (numVerbs < 3 || verbs[2] == SkPath::kClose_Verb)) {
4040 bool closed = numVerbs >= 3;
4041 Point start = SkPointToPoint(skiaPath.getPoint(0));
4042 Point end = SkPointToPoint(skiaPath.getPoint(1));
4043 if (StrokeLineAccel(start, end, aPattern, aStrokeOptions, aOptions,
4044 closed)) {
4045 if (closed) {
4046 StrokeLineAccel(end, start, aPattern, aStrokeOptions, aOptions, true);
4047 }
4048 return;
4049 }
4050 // If accelerated line drawing failed, just treat it as a path.
4051 allowStrokeAlpha = true;
4052 }
4053 }
4054
4055 DrawPath(aPath, aPattern, aOptions, &aStrokeOptions, allowStrokeAlpha);
4056}
4057
4058void DrawTargetWebgl::StrokeCircle(const Point& aOrigin, float aRadius,
4059 const Pattern& aPattern,
4060 const StrokeOptions& aStrokeOptions,
4061 const DrawOptions& aOptions) {
4062 DrawCircle(aOrigin, aRadius, aPattern, aOptions, &aStrokeOptions);
4063}
4064
4065bool DrawTargetWebgl::ShouldUseSubpixelAA(ScaledFont* aFont,
4066 const DrawOptions& aOptions) {
4067 AntialiasMode aaMode = aFont->GetDefaultAAMode();
4068 if (aOptions.mAntialiasMode != AntialiasMode::DEFAULT) {
4069 aaMode = aOptions.mAntialiasMode;
4070 }
4071 return GetPermitSubpixelAA() &&
4072 (aaMode == AntialiasMode::DEFAULT ||
4073 aaMode == AntialiasMode::SUBPIXEL) &&
4074 aOptions.mCompositionOp == CompositionOp::OP_OVER;
4075}
4076
4077void DrawTargetWebgl::StrokeGlyphs(ScaledFont* aFont,
4078 const GlyphBuffer& aBuffer,
4079 const Pattern& aPattern,
4080 const StrokeOptions& aStrokeOptions,
4081 const DrawOptions& aOptions) {
4082 if (!aFont || !aBuffer.mNumGlyphs) {
4083 return;
4084 }
4085
4086 bool useSubpixelAA = ShouldUseSubpixelAA(aFont, aOptions);
4087
4088 if (mWebglValid && SupportsDrawOptions(aOptions) &&
4089 aPattern.GetType() == PatternType::COLOR && PrepareContext() &&
4090 mSharedContext->DrawGlyphsAccel(aFont, aBuffer, aPattern, aOptions,
4091 &aStrokeOptions, useSubpixelAA)) {
4092 return;
4093 }
4094
4095 if (useSubpixelAA) {
4096 // Subpixel AA does not support layering because the subpixel masks can't
4097 // blend with the over op.
4098 MarkSkiaChanged();
4099 } else {
4100 MarkSkiaChanged(aOptions);
4101 }
4102 mSkia->StrokeGlyphs(aFont, aBuffer, aPattern, aStrokeOptions, aOptions);
4103}
4104
4105// Depending on whether we enable subpixel position for a given font, Skia may
4106// round transformed coordinates differently on each axis. By default, text is
4107// subpixel quantized horizontally and snapped to a whole integer vertical
4108// baseline. Axis-flip transforms instead snap to horizontal boundaries while
4109// subpixel quantizing along the vertical. For other types of transforms, Skia
4110// just applies subpixel quantization to both axes.
4111// We must carefully duplicate the amount of quantization Skia applies, as a
4112// boundary value such as 0.49 may round to 0.5 with subpixel quantization,
4113// but if Skia actually snapped it to a whole integer instead, it would round
4114// down to 0. If a subsequent glyph with offset 0.51 came in, we might
4115// mistakenly round it down to 0.5 as well, whereas Skia would round it up to
4116// 1. Thus we would alias 0.49 and 0.51 to the same cache entry, while Skia
4117// would actually snap each offset to 0 or 1 respectively, resulting in
4118// mismatched hinting.
4119static inline IntPoint QuantizeScale(ScaledFont* aFont,
4120 const Matrix& aTransform) {
4121 if (!aFont->UseSubpixelPosition()) {
4122 return {1, 1};
4123 }
4124 if (aTransform._12 == 0) {
4125 // Glyphs are rendered subpixel horizontally, so snap vertically.
4126 return {4, 1};
4127 }
4128 if (aTransform._11 == 0) {
4129 // Glyphs are rendered subpixel vertically, so snap horizontally.
4130 return {1, 4};
4131 }
4132 // The transform isn't aligned, so don't snap.
4133 return {4, 4};
4134}
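The mismatch described in the comment above QuantizeScale can be reproduced with plain arithmetic. A standalone sketch (hypothetical helpers, not code from this tree) contrasting 1/4-pixel quantization with whole-pixel snapping for the boundary offsets 0.49 and 0.51:

    #include <cmath>
    #include <cstdio>

    // Quantize an offset to the nearest 1/4 of a pixel (quantize scale 4).
    static float QuantizeToQuarter(float v) { return std::round(v * 4.0f) / 4.0f; }
    // Snap an offset to a whole pixel, as happens on a snapped axis.
    static float SnapToWhole(float v) { return std::round(v); }

    int main() {
      // Both 0.49 and 0.51 quantize to 0.5 at 1/4-pixel precision...
      std::printf("%g %g\n", QuantizeToQuarter(0.49f), QuantizeToQuarter(0.51f));  // 0.5 0.5
      // ...but whole-pixel snapping separates them into 0 and 1, so the cache
      // must apply the same per-axis snapping that the rasterizer does.
      std::printf("%g %g\n", SnapToWhole(0.49f), SnapToWhole(0.51f));  // 0 1
    }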
4135
4136// Skia only supports subpixel positioning to the nearest 1/4 fraction. It
4137// would be wasteful to attempt to cache text runs with positioning that is
4138// any more precise than this. To prevent this cache bloat, we quantize the
4139// transformed glyph positions to the nearest 1/4. The scaling factor for
4140// the quantization is baked into the transform, so that if subpixel rounding
4141// is used on a given axis, then the axis will be multiplied by 4 before
4142// rounding. Since the quantized position is not used for rasterization, it
4143// is safe to modify the transform in this way.
4144static inline IntPoint QuantizePosition(const Matrix& aTransform,
4145 const IntPoint& aOffset,
4146 const Point& aPosition) {
4147 return RoundedToInt(aTransform.TransformPoint(aPosition)) - aOffset;
4148}
4149
4150// Get a quantized starting offset for the glyph buffer. We want this offset
4151// to encapsulate the transform and buffer offset while still preserving the
4152// relative subpixel positions of the glyphs this offset is subtracted from.
4153static inline IntPoint QuantizeOffset(const Matrix& aTransform,
4154 const IntPoint& aQuantizeScale,
4155 const GlyphBuffer& aBuffer) {
4156 IntPoint offset =
4157 RoundedToInt(aTransform.TransformPoint(aBuffer.mGlyphs[0].mPosition));
4158 offset.x.value &= ~(aQuantizeScale.x.value - 1);
4159 offset.y.value &= ~(aQuantizeScale.y.value - 1);
4160 return offset;
4161}
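The masking in QuantizeOffset assumes the per-axis quantize scale is a power of two, so clearing the low bits rounds the transformed offset down to a multiple of that scale. A small standalone illustration (not from the tree):

    #include <cstdio>

    int main() {
      const int scale = 4;  // quantize scale on an axis using subpixel rounding
      for (int offset = 12; offset <= 16; ++offset) {
        // offset & ~(scale - 1) keeps the offset aligned to a multiple of scale.
        std::printf("%d -> %d\n", offset, offset & ~(scale - 1));  // 12,12,12,12,16
      }
    }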
4162
4163// Hashes a glyph buffer to a single hash value that can be used for quick
4164// comparisons. Each glyph position is transformed and quantized before
4165// hashing.
4166HashNumber GlyphCacheEntry::HashGlyphs(const GlyphBuffer& aBuffer,
4167 const Matrix& aTransform,
4168 const IntPoint& aQuantizeScale) {
4169 HashNumber hash = 0;
4170 IntPoint offset = QuantizeOffset(aTransform, aQuantizeScale, aBuffer);
4171 for (size_t i = 0; i < aBuffer.mNumGlyphs; i++) {
4172 const Glyph& glyph = aBuffer.mGlyphs[i];
4173 hash = AddToHash(hash, glyph.mIndex);
4174 IntPoint pos = QuantizePosition(aTransform, offset, glyph.mPosition);
4175 hash = AddToHash(hash, pos.x);
4176 hash = AddToHash(hash, pos.y);
4177 }
4178 return hash;
4179}
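A reduced sketch of what HashGlyphs does: fold each glyph index and its quantized position into a single running hash. The combiner below is a generic stand-in for illustration, not the actual mozilla::AddToHash:

    #include <cstdint>
    #include <cstdio>

    // Generic 32-bit hash combiner (illustrative only).
    static uint32_t CombineHash(uint32_t hash, uint32_t value) {
      return (hash ^ value) * 0x9E3779B9u;
    }

    int main() {
      struct MiniGlyph { uint32_t index; int32_t x, y; };
      MiniGlyph glyphs[] = {{42, 12, 3}, {43, 20, 3}};
      uint32_t hash = 0;
      for (const MiniGlyph& g : glyphs) {
        hash = CombineHash(hash, g.index);
        hash = CombineHash(hash, uint32_t(g.x));
        hash = CombineHash(hash, uint32_t(g.y));
      }
      std::printf("hash=%08x\n", hash);  // a single key for the whole run
    }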
4180
4181// Determines if an existing glyph cache entry matches an incoming text run.
4182inline bool GlyphCacheEntry::MatchesGlyphs(
4183 const GlyphBuffer& aBuffer, const DeviceColor& aColor,
4184 const Matrix& aTransform, const IntPoint& aQuantizeOffset,
4185 const IntPoint& aBoundsOffset, const IntRect& aClipRect, HashNumber aHash,
4186 const StrokeOptions* aStrokeOptions) {
4187 // First check if the hash matches to quickly reject the text run before any
4188 // more expensive checking. If it matches, then check if the color and
4189 // transform are the same.
4190 if (aHash != mHash || aBuffer.mNumGlyphs != mBuffer.mNumGlyphs ||
4191 aColor != mColor || !HasMatchingScale(aTransform, mTransform)) {
4192 return false;
4193 }
4194 // Finally check if all glyphs and their quantized positions match.
4195 for (size_t i = 0; i < aBuffer.mNumGlyphs; i++) {
4196 const Glyph& dst = mBuffer.mGlyphs[i];
4197 const Glyph& src = aBuffer.mGlyphs[i];
4198 if (dst.mIndex != src.mIndex ||
4199 dst.mPosition != Point(QuantizePosition(aTransform, aQuantizeOffset,
4200 src.mPosition))) {
4201 return false;
4202 }
4203 }
4204 // Check that stroke options actually match.
4205 if (aStrokeOptions) {
4206 // If stroking, verify that the entry is also stroked with the same options.
4207 if (!(mStrokeOptions && *aStrokeOptions == *mStrokeOptions)) {
4208 return false;
4209 }
4210 } else if (mStrokeOptions) {
4211 // If not stroking, check if the entry is stroked. If so, don't match.
4212 return false;
4213 }
4214 // Verify that the full bounds, once translated and clipped, are equal to the
4215 // clipped bounds.
4216 return (mFullBounds + aBoundsOffset)
4217 .Intersect(aClipRect)
4218 .IsEqualEdges(GetBounds() + aBoundsOffset);
4219}
4220
4221GlyphCacheEntry::GlyphCacheEntry(const GlyphBuffer& aBuffer,
4222 const DeviceColor& aColor,
4223 const Matrix& aTransform,
4224 const IntPoint& aQuantizeScale,
4225 const IntRect& aBounds,
4226 const IntRect& aFullBounds, HashNumber aHash,
4227 StoredStrokeOptions* aStrokeOptions)
4228 : CacheEntryImpl<GlyphCacheEntry>(aTransform, aBounds, aHash),
4229 mColor(aColor),
4230 mFullBounds(aFullBounds),
4231 mStrokeOptions(aStrokeOptions) {
4232 // Store a copy of the glyph buffer with positions already quantized for fast
4233 // comparison later.
4234 Glyph* glyphs = new Glyph[aBuffer.mNumGlyphs];
4235 IntPoint offset = QuantizeOffset(aTransform, aQuantizeScale, aBuffer);
4236 // Make the bounds relative to the offset so we can add a new offset later.
4237 IntPoint boundsOffset(offset.x / aQuantizeScale.x,
4238 offset.y / aQuantizeScale.y);
4239 mBounds -= boundsOffset;
4240 mFullBounds -= boundsOffset;
4241 for (size_t i = 0; i < aBuffer.mNumGlyphs; i++) {
4242 Glyph& dst = glyphs[i];
4243 const Glyph& src = aBuffer.mGlyphs[i];
4244 dst.mIndex = src.mIndex;
4245 dst.mPosition = Point(QuantizePosition(aTransform, offset, src.mPosition));
4246 }
4247 mBuffer.mGlyphs = glyphs;
4248 mBuffer.mNumGlyphs = aBuffer.mNumGlyphs;
4249}
4250
4251GlyphCacheEntry::~GlyphCacheEntry() { delete[] mBuffer.mGlyphs; }
4252
4253// Attempt to find a matching entry in the glyph cache. The caller should check
4254// whether the contained texture handle is valid to determine if it will need to
4255// render the text run or just reuse the cached texture.
4256already_AddRefed<GlyphCacheEntry> GlyphCache::FindEntry(
4257 const GlyphBuffer& aBuffer, const DeviceColor& aColor,
4258 const Matrix& aTransform, const IntPoint& aQuantizeScale,
4259 const IntRect& aClipRect, HashNumber aHash,
4260 const StrokeOptions* aStrokeOptions) {
4261 IntPoint offset = QuantizeOffset(aTransform, aQuantizeScale, aBuffer);
4262 IntPoint boundsOffset(offset.x / aQuantizeScale.x,
4263 offset.y / aQuantizeScale.y);
4264 for (const RefPtr<GlyphCacheEntry>& entry : GetChain(aHash)) {
4265 if (entry->MatchesGlyphs(aBuffer, aColor, aTransform, offset, boundsOffset,
4266 aClipRect, aHash, aStrokeOptions)) {
4267 return do_AddRef(entry);
4268 }
4269 }
4270 return nullptr;
4271}
4272
4273// Insert a new entry in the glyph cache.
4274already_AddRefed<GlyphCacheEntry> GlyphCache::InsertEntry(
4275 const GlyphBuffer& aBuffer, const DeviceColor& aColor,
4276 const Matrix& aTransform, const IntPoint& aQuantizeScale,
4277 const IntRect& aBounds, const IntRect& aFullBounds, HashNumber aHash,
4278 const StrokeOptions* aStrokeOptions) {
4279 StoredStrokeOptions* strokeOptions = nullptr;
4280 if (aStrokeOptions) {
4281 strokeOptions = aStrokeOptions->Clone();
4282 if (!strokeOptions) {
4283 return nullptr;
4284 }
4285 }
4286 RefPtr<GlyphCacheEntry> entry =
4287 new GlyphCacheEntry(aBuffer, aColor, aTransform, aQuantizeScale, aBounds,
4288 aFullBounds, aHash, strokeOptions);
4289 Insert(entry);
4290 return entry.forget();
4291}
4292
4293GlyphCache::GlyphCache(ScaledFont* aFont) : mFont(aFont) {}
4294
4295static void ReleaseGlyphCache(void* aPtr) {
4296 delete static_cast<GlyphCache*>(aPtr);
    1: Calling implicit destructor for 'GlyphCache'
    2: Calling '~CacheImpl'
4297}
4298
4299// Whether all glyphs in the buffer match the last whitespace glyph queried.
4300bool GlyphCache::IsWhitespace(const GlyphBuffer& aBuffer) const {
4301 if (!mLastWhitespace) {
4302 return false;
4303 }
4304 uint32_t whitespace = *mLastWhitespace;
4305 for (size_t i = 0; i < aBuffer.mNumGlyphs; ++i) {
4306 if (aBuffer.mGlyphs[i].mIndex != whitespace) {
4307 return false;
4308 }
4309 }
4310 return true;
4311}
4312
4313// Remember the last whitespace glyph seen.
4314void GlyphCache::SetLastWhitespace(const GlyphBuffer& aBuffer) {
4315 mLastWhitespace = Some(aBuffer.mGlyphs[0].mIndex);
4316}
4317
4318void DrawTargetWebgl::SetPermitSubpixelAA(bool aPermitSubpixelAA) {
4319 DrawTarget::SetPermitSubpixelAA(aPermitSubpixelAA);
4320 mSkia->SetPermitSubpixelAA(aPermitSubpixelAA);
4321}
4322
4323// Check for any color glyphs contained within a rasterized BGRA8 text result.
4324static bool CheckForColorGlyphs(const RefPtr<SourceSurface>& aSurface) {
4325 if (aSurface->GetFormat() != SurfaceFormat::B8G8R8A8) {
4326 return false;
4327 }
4328 RefPtr<DataSourceSurface> dataSurf = aSurface->GetDataSurface();
4329 if (!dataSurf) {
4330 return true;
4331 }
4332 DataSourceSurface::ScopedMap map(dataSurf, DataSourceSurface::READ);
4333 if (!map.IsMapped()) {
4334 return true;
4335 }
4336 IntSize size = dataSurf->GetSize();
4337 const uint8_t* data = map.GetData();
4338 int32_t stride = map.GetStride();
4339 for (int y = 0; y < size.height; y++) {
4340 const uint32_t* x = (const uint32_t*)data;
4341 const uint32_t* end = x + size.width;
4342 for (; x < end; x++) {
4343 // Verify if all components are the same as for premultiplied grayscale.
4344 uint32_t color = *x;
4345 uint32_t gray = color & 0xFF;
4346 gray |= gray << 8;
4347 gray |= gray << 16;
4348 if (color != gray) return true;
4349 }
4350 data += stride;
4351 }
4352 return false;
4353}
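The grayscale test above relies on byte replication: two shifted ORs expand the low byte into a pixel whose four bytes are all equal, which is then compared with the original value. A standalone sketch of just that check (hypothetical helper name):

    #include <cstdint>
    #include <cstdio>

    // True if all four bytes of a BGRA8 pixel are equal, as they are for a
    // premultiplied grayscale (alpha-only) mask texel.
    static bool IsGrayscalePixel(uint32_t color) {
      uint32_t gray = color & 0xFF;
      gray |= gray << 8;   // low byte replicated into 16 bits
      gray |= gray << 16;  // then into all 32 bits
      return color == gray;
    }

    int main() {
      std::printf("%d\n", IsGrayscalePixel(0x7F7F7F7Fu));  // 1: grayscale mask texel
      std::printf("%d\n", IsGrayscalePixel(0xFF2040C0u));  // 0: a color glyph texel
    }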
4354
4355// Quantize the preblend color used to key the cache, as only the high bits are
4356// used to determine the amount of preblending. This avoids excessive cache use.
4357// This roughly matches the quantization used in WebRender and Skia.
4358static DeviceColor QuantizePreblendColor(const DeviceColor& aColor,
4359 bool aUseSubpixelAA) {
4360 int32_t r = int32_t(aColor.r * 255.0f + 0.5f);
4361 int32_t g = int32_t(aColor.g * 255.0f + 0.5f);
4362 int32_t b = int32_t(aColor.b * 255.0f + 0.5f);
4363 // Skia only uses the high 3 bits of each color component to cache preblend
4364 // ramp tables.
4365 constexpr int32_t lumBits = 3;
4366 constexpr int32_t floorMask = ((1 << lumBits) - 1) << (8 - lumBits);
4367 if (!aUseSubpixelAA) {
4368 // If not using subpixel AA, then quantize only the luminance, stored in the
4369 // G channel.
4370 g = (r * 54 + g * 183 + b * 19) >> 8;
4371 g &= floorMask;
4372 r = g;
4373 b = g;
4374 } else {
4375 r &= floorMask;
4376 g &= floorMask;
4377 b &= floorMask;
4378 }
4379 return DeviceColor{r / 255.0f, g / 255.0f, b / 255.0f, 1.0f};
4380}
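As a worked example of the quantization above (standalone sketch with an arbitrary input color): the integer weights 54, 183, and 19 sum to 256, so the shift by 8 yields an approximate luminance, which is then floored to its top 3 bits.

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t r = 200, g = 100, b = 50;
      int32_t lum = (r * 54 + g * 183 + b * 19) >> 8;                       // 117
      constexpr int32_t lumBits = 3;
      constexpr int32_t floorMask = ((1 << lumBits) - 1) << (8 - lumBits);  // 0xE0
      std::printf("lum=%d quantized=%d\n", lum, lum & floorMask);           // 117 -> 96
    }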
4381
4382// Draws glyphs to the WebGL target by trying to generate a cached texture for
4383// the text run that can be subsequently reused to quickly render the text run
4384// without using any software surfaces.
4385bool SharedContextWebgl::DrawGlyphsAccel(ScaledFont* aFont,
4386 const GlyphBuffer& aBuffer,
4387 const Pattern& aPattern,
4388 const DrawOptions& aOptions,
4389 const StrokeOptions* aStrokeOptions,
4390 bool aUseSubpixelAA) {
4391 // Look for an existing glyph cache on the font. If not there, create it.
4392 GlyphCache* cache =
4393 static_cast<GlyphCache*>(aFont->GetUserData(&mGlyphCacheKey));
4394 if (!cache) {
4395 cache = new GlyphCache(aFont);
4396 aFont->AddUserData(&mGlyphCacheKey, cache, ReleaseGlyphCache);
4397 mGlyphCaches.insertFront(cache);
4398 }
4399
4400 // Check if the buffer contains non-renderable whitespace characters before
4401 // any other expensive checks.
4402 if (cache->IsWhitespace(aBuffer)) {
4403 return true;
4404 }
4405
4406// Whether the font may use bitmaps. If so, we need to render the glyphs with
4407// color, as grayscale bitmaps will use the color while color emoji will not,
4408// and there is no easy way to know ahead of time. We currently have to check
4409// the rasterized result to see if there are any color glyphs. To render
4410// subpixel masks, we need to know that the rasterized result actually
4411// represents a subpixel mask rather than trying to interpret it as a normal
4412// RGBA result such as for color emoji.
4413 bool useBitmaps = !aStrokeOptions && aFont->MayUseBitmaps() &&
4414 aOptions.mCompositionOp != CompositionOp::OP_CLEAR;
4415 // Hash the incoming text run and look for a matching entry.
4416 DeviceColor color = aOptions.mCompositionOp == CompositionOp::OP_CLEAR
4417 ? DeviceColor(1, 1, 1, 1)
4418 : static_cast<const ColorPattern&>(aPattern).mColor;
4419#if defined(XP_MACOSX)
4420 // macOS uses gamma-aware blending with font smoothing from subpixel AA.
4421 // If font smoothing is requested, even if there is no subpixel AA, gamma-
4422 // aware blending might be used and differing amounts of dilation might be
4423 // applied.
4424 bool usePreblend = aUseSubpixelAA ||
4425 (aFont->GetType() == FontType::MAC &&
4426 static_cast<ScaledFontMac*>(aFont)->UseFontSmoothing());
4427#elif defined(XP_WIN)
4428 // Windows uses gamma-aware blending via ClearType with grayscale and subpixel
4429 // AA.
4430 bool usePreblend =
4431 aUseSubpixelAA || aOptions.mAntialiasMode != AntialiasMode::NONE;
4432#else
4433 // FreeType backends currently don't use any preblending.
4434 bool usePreblend = false;
4435#endif
4436
4437 // If the font has bitmaps, use the color directly. Otherwise, the texture
4438 // holds a grayscale mask, so encode the key's subpixel state in the color.
4439 const Matrix& currentTransform = mCurrentTarget->GetTransform();
4440 IntPoint quantizeScale = QuantizeScale(aFont, currentTransform);
4441 Matrix quantizeTransform = currentTransform;
4442 quantizeTransform.PostScale(quantizeScale.x, quantizeScale.y);
4443 HashNumber hash =
4444 GlyphCacheEntry::HashGlyphs(aBuffer, quantizeTransform, quantizeScale);
4445 DeviceColor colorOrMask =
4446 useBitmaps ? color
4447 : (usePreblend ? QuantizePreblendColor(color, aUseSubpixelAA)
4448 : DeviceColor::Mask(aUseSubpixelAA ? 1 : 0, 1));
4449 IntRect clipRect(IntPoint(), mViewportSize);
4450 RefPtr<GlyphCacheEntry> entry =
4451 cache->FindEntry(aBuffer, colorOrMask, quantizeTransform, quantizeScale,
4452 clipRect, hash, aStrokeOptions);
4453 if (!entry) {
4454 // For small text runs, bounds computations can be expensive relative to the
4455 // cost of looking up a cache result. Avoid doing local bounds computations
4456 // until actually inserting the entry into the cache.
4457 Maybe<Rect> bounds = mCurrentTarget->mSkia->GetGlyphLocalBounds(
4458 aFont, aBuffer, aPattern, aStrokeOptions, aOptions);
4459 if (!bounds) {
4460 // Assume the buffer is full of whitespace characters that should be
4461 // remembered for subsequent lookups.
4462 cache->SetLastWhitespace(aBuffer);
4463 return true;
4464 }
4465 // Transform the local bounds into device space so that we know how big
4466 // the cached texture will be.
4467 Rect xformBounds = currentTransform.TransformBounds(*bounds);
4468 // Check if the transform flattens out the bounds before rounding.
4469 if (xformBounds.IsEmpty()) {
4470 return true;
4471 }
4472 IntRect fullBounds = RoundedOut(xformBounds);
4473 IntRect clipBounds = fullBounds.Intersect(clipRect);
4474 // Check if the bounds are completely clipped out.
4475 if (clipBounds.IsEmpty()) {
4476 return true;
4477 }
4478 entry = cache->InsertEntry(aBuffer, colorOrMask, quantizeTransform,
4479 quantizeScale, clipBounds, fullBounds, hash,
4480 aStrokeOptions);
4481 if (!entry) {
4482 return false;
4483 }
4484 }
4485
4486 // The bounds of the entry may have a different transform offset from the
4487 // bounds of the currently drawn text run. The entry bounds are relative to
4488 // the entry's quantized offset already, so just move the bounds to the new
4489 // offset.
4490 IntRect intBounds = entry->GetBounds();
4491 IntPoint newOffset =
4492 QuantizeOffset(quantizeTransform, quantizeScale, aBuffer);
4493 intBounds +=
4494 IntPoint(newOffset.x / quantizeScale.x, newOffset.y / quantizeScale.y);
4495 // Ensure there is a clear border around the text. This must be applied only
4496 // after clipping so that we always have some border texels for filtering.
4497 intBounds.Inflate(2);
4498
4499 RefPtr<TextureHandle> handle = entry->GetHandle();
4500 if (handle && handle->IsValid()) {
4501 // If there is an entry with a valid cached texture handle, then try
4502 // to draw with that. If that for some reason failed, then fall back
4503 // to using the Skia target as that means we were prevented from
4504 // drawing to the WebGL context based on something other than the
4505 // texture.
4506 SurfacePattern pattern(nullptr, ExtendMode::CLAMP,
4507 Matrix::Translation(intBounds.TopLeft()));
4508 if (DrawRectAccel(Rect(intBounds), pattern, aOptions,
4509 useBitmaps ? Nothing() : Some(color), &handle, false,
4510 true, true)) {
4511 return true;
4512 }
4513 } else {
4514 handle = nullptr;
4515
4516 // If we get here, either there wasn't a cached texture handle or it
4517 // wasn't valid. Render the text run into a temporary target.
4518 RefPtr<DrawTargetSkia> textDT = new DrawTargetSkia;
4519 if (textDT->Init(intBounds.Size(),
4520 useBitmaps || usePreblend || aUseSubpixelAA
4521 ? SurfaceFormat::B8G8R8A8
4522 : SurfaceFormat::A8)) {
4523 textDT->SetTransform(currentTransform *
4524 Matrix::Translation(-intBounds.TopLeft()));
4525 textDT->SetPermitSubpixelAA(aUseSubpixelAA);
4526 DrawOptions drawOptions(1.0f, CompositionOp::OP_OVER,
4527 aOptions.mAntialiasMode);
4528 // If bitmaps might be used, then we have to supply the color, as color
4529 // emoji may ignore it while grayscale bitmaps may use it, with no way to
4530 // know ahead of time. If we are using preblending in some form, then the
4531 // output also will depend on the supplied color. Otherwise, assume the
4532 // output will be a mask and just render it white to determine intensity.
4533 if (!useBitmaps && usePreblend) {
4534 textDT->DrawGlyphMask(aFont, aBuffer, color, aStrokeOptions,
4535 drawOptions);
4536 } else {
4537 ColorPattern colorPattern(useBitmaps ? color : DeviceColor(1, 1, 1, 1));
4538 if (aStrokeOptions) {
4539 textDT->StrokeGlyphs(aFont, aBuffer, colorPattern, *aStrokeOptions,
4540 drawOptions);
4541 } else {
4542 textDT->FillGlyphs(aFont, aBuffer, colorPattern, drawOptions);
4543 }
4544 }
4545 RefPtr<SourceSurface> textSurface = textDT->Snapshot();
4546 if (textSurface) {
4547 // If we don't expect the text surface to contain color glyphs
4548 // such as from subpixel AA, then do one final check to see if
4549 // any ended up in the result. If not, extract the alpha values
4550 // from the surface so we can render it as a mask.
4551 if (textSurface->GetFormat() != SurfaceFormat::A8 &&
4552 !CheckForColorGlyphs(textSurface)) {
4553 textSurface = ExtractAlpha(textSurface, !useBitmaps);
4554 if (!textSurface) {
4555 // Failed extracting alpha for the text surface...
4556 return false;
4557 }
4558 }
4559 // Attempt to upload the rendered text surface into a texture
4560 // handle and draw it.
4561 SurfacePattern pattern(textSurface, ExtendMode::CLAMP,
4562 Matrix::Translation(intBounds.TopLeft()));
4563 if (DrawRectAccel(Rect(intBounds), pattern, aOptions,
4564 useBitmaps ? Nothing() : Some(color), &handle, false,
4565 true) &&
4566 handle) {
4567 // If drawing succeeded, then the text surface was uploaded to
4568 // a texture handle. Assign it to the glyph cache entry.
4569 entry->Link(handle);
4570 } else {
4571 // If drawing failed, remove the entry from the cache.
4572 entry->Unlink();
4573 }
4574 return true;
4575 }
4576 }
4577 }
4578 return false;
4579}
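One decision worth restating from the function above is the surface format chosen for the temporary text target: any path that can produce per-channel output (color bitmaps, preblending, or subpixel AA) needs BGRA8, while a plain mask fits in A8. A minimal standalone restatement (hypothetical enum and helper names):

    #include <cstdio>

    enum class TextFormat { A8, B8G8R8A8 };

    static TextFormat PickTextFormat(bool useBitmaps, bool usePreblend,
                                     bool useSubpixelAA) {
      // Per-channel output requires a full BGRA8 target; a plain mask fits in A8.
      return (useBitmaps || usePreblend || useSubpixelAA) ? TextFormat::B8G8R8A8
                                                          : TextFormat::A8;
    }

    int main() {
      std::printf("%d\n", PickTextFormat(false, false, false) == TextFormat::A8);       // 1
      std::printf("%d\n", PickTextFormat(false, true, false) == TextFormat::B8G8R8A8);  // 1
    }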
4580
4581void DrawTargetWebgl::FillGlyphs(ScaledFont* aFont, const GlyphBuffer& aBuffer,
4582 const Pattern& aPattern,
4583 const DrawOptions& aOptions) {
4584 if (!aFont || !aBuffer.mNumGlyphs) {
4585 return;
4586 }
4587
4588 bool useSubpixelAA = ShouldUseSubpixelAA(aFont, aOptions);
4589
4590 if (mWebglValid && SupportsDrawOptions(aOptions) &&
4591 aPattern.GetType() == PatternType::COLOR && PrepareContext() &&
4592 mSharedContext->DrawGlyphsAccel(aFont, aBuffer, aPattern, aOptions,
4593 nullptr, useSubpixelAA)) {
4594 return;
4595 }
4596
4597 // If not able to cache the text run to a texture, then just fall back to
4598 // drawing with the Skia target.
4599 if (useSubpixelAA) {
4600 // Subpixel AA does not support layering because the subpixel masks can't
4601 // blend with the over op.
4602 MarkSkiaChanged();
4603 } else {
4604 MarkSkiaChanged(aOptions);
4605 }
4606 mSkia->FillGlyphs(aFont, aBuffer, aPattern, aOptions);
4607}
4608
4609// Attempts to read the contents of the WebGL context into the Skia target.
4610bool DrawTargetWebgl::ReadIntoSkia() {
4611 if (mSkiaValid) {
4612 return false;
4613 }
4614 bool didReadback = false;
4615 if (mWebglValid) {
4616 uint8_t* data = nullptr;
4617 IntSize size;
4618 int32_t stride;
4619 SurfaceFormat format;
4620 if (mIsClear) {
4621 // If the WebGL target is still clear, then just clear the Skia target.
4622 mSkia->DetachAllSnapshots();
4623 mSkiaNoClip->FillRect(Rect(mSkiaNoClip->GetRect()), GetClearPattern(),
4624 DrawOptions(1.0f, CompositionOp::OP_SOURCE));
4625 } else {
4626 // If there's no existing snapshot and we can successfully map the Skia
4627 // target for reading, then try to read into that.
4628 if (!mSnapshot && mSkia->LockBits(&data, &size, &stride, &format)) {
4629 (void)ReadInto(data, stride);
4630 mSkia->ReleaseBits(data);
4631 } else if (RefPtr<SourceSurface> snapshot = Snapshot()) {
4632 // Otherwise, fall back to getting a snapshot from WebGL if available
4633 // and then copying that to Skia.
4634 mSkia->CopySurface(snapshot, GetRect(), IntPoint(0, 0));
4635 }
4636 didReadback = true;
4637 }
4638 }
4639 mSkiaValid = true;
4640 // The Skia data is flat after reading, so disable any layering.
4641 mSkiaLayer = false;
4642 return didReadback;
4643}
4644
4645// Reads data from the WebGL context and blends it with the current Skia layer.
4646void DrawTargetWebgl::FlattenSkia() {
4647 if (!mSkiaValid || !mSkiaLayer) {
4648 return;
4649 }
4650 mSkiaLayer = false;
4651 if (mSkiaLayerClear) {
4652 // If the WebGL target is clear, then there is nothing to blend.
4653 return;
4654 }
4655 if (RefPtr<DataSourceSurface> base = ReadSnapshot()) {
4656 mSkia->DetachAllSnapshots();
4657 mSkiaNoClip->DrawSurface(base, Rect(GetRect()), Rect(GetRect()),
4658 DrawSurfaceOptions(SamplingFilter::POINT),
4659 DrawOptions(1.f, CompositionOp::OP_DEST_OVER));
4660 }
4661}
4662
4663// Attempts to draw the contents of the Skia target into the WebGL context.
4664bool DrawTargetWebgl::FlushFromSkia() {
4665 // If the WebGL context has been lost, then mark it as invalid and fail.
4666 if (mSharedContext->IsContextLost()) {
4667 mWebglValid = false;
4668 return false;
4669 }
4670 // The WebGL target is already valid, so there is nothing to do.
4671 if (mWebglValid) {
4672 return true;
4673 }
4674 // Ensure that DrawRect doesn't recursively call into FlushFromSkia. If
4675 // the Skia target isn't valid, then it doesn't matter what is in the
4676 // WebGL target either, so only try to blend if there is a valid Skia target.
4677 mWebglValid = true;
4678 if (mSkiaValid) {
4679 AutoRestoreContext restore(this);
4680
4681 // If the Skia target is clear, then there is no need to use a snapshot.
4682 // Directly clear the WebGL target instead.
4683 if (mIsClear) {
4684 if (!DrawRect(Rect(GetRect()), GetClearPattern(),
4685 DrawOptions(1.0f, CompositionOp::OP_SOURCE), Nothing(),
4686 nullptr, false, false, true)) {
4687 mWebglValid = false;
4688 return false;
4689 }
4690 return true;
4691 }
4692
4693 RefPtr<SourceSurface> skiaSnapshot = mSkia->Snapshot();
4694 if (!skiaSnapshot) {
4695 // There's a valid Skia target to draw to, but for some reason there is
4696 // no available snapshot, so just keep using the Skia target.
4697 mWebglValid = false;
4698 return false;
4699 }
4700
4701 // If there is no layer, then just upload it directly.
4702 if (!mSkiaLayer) {
4703 if (PrepareContext(false) && MarkChanged()) {
4704 if (RefPtr<DataSourceSurface> data = skiaSnapshot->GetDataSurface()) {
4705 mSharedContext->UploadSurface(data, mFormat, GetRect(), IntPoint(),
4706 false, false, mTex);
4707 return true;
4708 }
4709 }
4710 // Failed to upload the Skia snapshot.
4711 mWebglValid = false;
4712 return false;
4713 }
4714
4715 SurfacePattern pattern(skiaSnapshot, ExtendMode::CLAMP);
4716 // If there is a layer, blend the snapshot with the WebGL context.
4717 if (!DrawRect(Rect(GetRect()), pattern,
4718 DrawOptions(1.0f, CompositionOp::OP_OVER), Nothing(),
4719 &mSnapshotTexture, false, false, true, true)) {
4720 // If accelerated drawing failed for some reason, then leave the Skia
4721 // target unchanged.
4722 mWebglValid = false;
4723 return false;
4724 }
4725 }
4726 return true;
4727}
4728
4729void DrawTargetWebgl::UsageProfile::BeginFrame() {
4730 // Reset the usage profile counters for the new frame.
4731 mFallbacks = 0;
4732 mLayers = 0;
4733 mCacheMisses = 0;
4734 mCacheHits = 0;
4735 mUncachedDraws = 0;
4736 mReadbacks = 0;
4737}
4738
4739void DrawTargetWebgl::UsageProfile::EndFrame() {
4740 bool failed = false;
4741 // If we hit a complete fallback to software rendering, or if cache misses
4742 // made up more than the cutoff ratio of all requests, then we consider the
4743 // frame to have failed performance profiling.
4744 float cacheRatio =
4745 StaticPrefs::gfx_canvas_accelerated_profile_cache_miss_ratio();
4746 if (mFallbacks > 0 ||
4747 float(mCacheMisses + mReadbacks + mLayers) >
4748 cacheRatio * float(mCacheMisses + mCacheHits + mUncachedDraws +
4749 mReadbacks + mLayers)) {
4750 failed = true;
4751 }
4752 if (failed) {
4753 ++mFailedFrames;
4754 }
4755 ++mFrameCount;
4756}
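The failure test above weighs the expensive events (misses, readbacks, layers) against a pref-controlled fraction of all requests. A worked example with made-up counters and an assumed cutoff ratio of 0.33:

    #include <cstdio>

    int main() {
      const float cacheRatio = 0.33f;  // assumed stand-in for the pref value
      unsigned fallbacks = 0, misses = 30, readbacks = 5, layers = 5,
               hits = 50, uncached = 10;
      float expensive = float(misses + readbacks + layers);                // 40
      float total = float(misses + hits + uncached + readbacks + layers);  // 100
      bool failed = fallbacks > 0 || expensive > cacheRatio * total;       // 40 > 33
      std::printf("failed=%d\n", failed);                                  // 1
    }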
4757
4758bool DrawTargetWebgl::UsageProfile::RequiresRefresh() const {
4759 // If we've rendered at least the required number of frames for a profile and
4760 // more than the cutoff ratio of frames did not meet performance criteria,
4761 // then we should stop using an accelerated canvas.
4762 uint32_t profileFrames = StaticPrefs::gfx_canvas_accelerated_profile_frames();
4763 if (!profileFrames || mFrameCount < profileFrames) {
4764 return false;
4765 }
4766 float failRatio =
4767 StaticPrefs::gfx_canvas_accelerated_profile_fallback_ratio();
4768 return mFailedFrames > failRatio * mFrameCount;
4769}
4770
4771void SharedContextWebgl::CachePrefs() {
4772 uint32_t capacity = StaticPrefs::gfx_canvas_accelerated_gpu_path_size() << 20;
4773 if (capacity != mPathVertexCapacity) {
4774 mPathVertexCapacity = capacity;
4775 if (mPathCache) {
4776 mPathCache->ClearVertexRanges();
4777 }
4778 if (mPathVertexBuffer) {
4779 ResetPathVertexBuffer();
4780 }
4781 }
4782
4783 mPathMaxComplexity =
4784 StaticPrefs::gfx_canvas_accelerated_gpu_path_complexity();
4785
4786 mPathAAStroke = StaticPrefs::gfx_canvas_accelerated_aa_stroke_enabled();
4787 mPathWGRStroke = StaticPrefs::gfx_canvas_accelerated_stroke_to_fill_path();
4788}
4789
4790// For use within CanvasRenderingContext2D, called on BorrowDrawTarget.
4791void DrawTargetWebgl::BeginFrame(bool aInvalidContents) {
4792 // If still rendering into the Skia target, switch back to the WebGL
4793 // context.
4794 if (!mWebglValid) {
4795 if (aInvalidContents) {
4796 // If nothing needs to persist, just mark the WebGL context valid.
4797 mWebglValid = true;
4798 // Even if the Skia framebuffer is marked clear, since the WebGL
4799 // context is not valid, its contents may be out-of-date and not
4800 // necessarily clear.
4801 mIsClear = false;
4802 } else {
4803 FlushFromSkia();
4804 }
4805 }
4806 // Check if we need to clear out any caches because of memory pressure.
4807 mSharedContext->ClearCachesIfNecessary();
4808 // Cache any prefs for the frame.
4809 mSharedContext->CachePrefs();
4810 mProfile.BeginFrame();
4811}
4812
4813// For use within CanvasRenderingContext2D, called on ReturnDrawTarget.
4814void DrawTargetWebgl::EndFrame() {
4815 if (StaticPrefs::gfx_canvas_accelerated_debug()) {
4816 // Draw a green rectangle in the upper right corner to indicate
4817 // acceleration.
4818 IntRect corner = IntRect(mSize.width - 16, 0, 16, 16).Intersect(GetRect());
4819 DrawRect(Rect(corner), ColorPattern(DeviceColor(0.0f, 1.0f, 0.0f, 1.0f)),
4820 DrawOptions(), Nothing(), nullptr, false, false);
4821 }
4822 mProfile.EndFrame();
4823 // Ensure we're not somehow using more than the allowed texture memory.
4824 mSharedContext->PruneTextureMemory();
4825 // Signal that we're done rendering the frame in case no present occurs.
4826 mSharedContext->mWebgl->EndOfFrame();
4827 // Check if we need to clear out any caches because of memory pressure.
4828 mSharedContext->ClearCachesIfNecessary();
4829}
4830
4831bool DrawTargetWebgl::CopyToSwapChain(
4832 layers::TextureType aTextureType, layers::RemoteTextureId aId,
4833 layers::RemoteTextureOwnerId aOwnerId,
4834 layers::RemoteTextureOwnerClient* aOwnerClient) {
4835 if (!mWebglValid && !FlushFromSkia()) {
4836 return false;
4837 }
4838
4839 // Copy and swizzle the WebGL framebuffer to the swap chain front buffer.
4840 webgl::SwapChainOptions options;
4841 options.bgra = true;
4842 // Allow async present to be toggled on for accelerated Canvas2D
4843 // independent of WebGL via pref.
4844 options.forceAsyncPresent =
4845 StaticPrefs::gfx_canvas_accelerated_async_present();
4846 options.remoteTextureId = aId;
4847 options.remoteTextureOwnerId = aOwnerId;
4848 return mSharedContext->mWebgl->CopyToSwapChain(mFramebuffer, aTextureType,
4849 options, aOwnerClient);
4850}
4851
4852already_AddRefed<DrawTarget> DrawTargetWebgl::CreateSimilarDrawTarget(
4853 const IntSize& aSize, SurfaceFormat aFormat) const {
4854 return mSkia->CreateSimilarDrawTarget(aSize, aFormat);
4855}
4856
4857bool DrawTargetWebgl::CanCreateSimilarDrawTarget(const IntSize& aSize,
4858 SurfaceFormat aFormat) const {
4859 return mSkia->CanCreateSimilarDrawTarget(aSize, aFormat);
4860}
4861
4862RefPtr<DrawTarget> DrawTargetWebgl::CreateClippedDrawTarget(
4863 const Rect& aBounds, SurfaceFormat aFormat) {
4864 return mSkia->CreateClippedDrawTarget(aBounds, aFormat);
4865}
4866
4867already_AddRefed<SourceSurface> DrawTargetWebgl::CreateSourceSurfaceFromData(
4868 unsigned char* aData, const IntSize& aSize, int32_t aStride,
4869 SurfaceFormat aFormat) const {
4870 return mSkia->CreateSourceSurfaceFromData(aData, aSize, aStride, aFormat);
4871}
4872
4873already_AddRefed<SourceSurface>
4874DrawTargetWebgl::CreateSourceSurfaceFromNativeSurface(
4875 const NativeSurface& aSurface) const {
4876 return mSkia->CreateSourceSurfaceFromNativeSurface(aSurface);
4877}
4878
4879already_AddRefed<SourceSurface> DrawTargetWebgl::OptimizeSourceSurface(
4880 SourceSurface* aSurface) const {
4881 if (aSurface->GetType() == SurfaceType::WEBGL) {
4882 return do_AddRef(aSurface);
4883 }
4884 return mSkia->OptimizeSourceSurface(aSurface);
4885}
4886
4887already_AddRefed<SourceSurface>
4888DrawTargetWebgl::OptimizeSourceSurfaceForUnknownAlpha(
4889 SourceSurface* aSurface) const {
4890 return mSkia->OptimizeSourceSurfaceForUnknownAlpha(aSurface);
4891}
4892
4893already_AddRefed<GradientStops> DrawTargetWebgl::CreateGradientStops(
4894 GradientStop* aStops, uint32_t aNumStops, ExtendMode aExtendMode) const {
4895 return mSkia->CreateGradientStops(aStops, aNumStops, aExtendMode);
4896}
4897
4898already_AddRefed<FilterNode> DrawTargetWebgl::CreateFilter(FilterType aType) {
4899 return mSkia->CreateFilter(aType);
4900}
4901
4902void DrawTargetWebgl::DrawFilter(FilterNode* aNode, const Rect& aSourceRect,
4903 const Point& aDestPoint,
4904 const DrawOptions& aOptions) {
4905 MarkSkiaChanged(aOptions);
4906 mSkia->DrawFilter(aNode, aSourceRect, aDestPoint, aOptions);
4907}
4908
4909bool DrawTargetWebgl::Draw3DTransformedSurface(SourceSurface* aSurface,
4910 const Matrix4x4& aMatrix) {
4911 MarkSkiaChanged();
4912 return mSkia->Draw3DTransformedSurface(aSurface, aMatrix);
4913}
4914
4915void DrawTargetWebgl::PushLayer(bool aOpaque, Float aOpacity,
4916 SourceSurface* aMask,
4917 const Matrix& aMaskTransform,
4918 const IntRect& aBounds, bool aCopyBackground) {
4919 PushLayerWithBlend(aOpaque, aOpacity, aMask, aMaskTransform, aBounds,
4920 aCopyBackground, CompositionOp::OP_OVER);
4921}
4922
4923void DrawTargetWebgl::PushLayerWithBlend(bool aOpaque, Float aOpacity,
4924 SourceSurface* aMask,
4925 const Matrix& aMaskTransform,
4926 const IntRect& aBounds,
4927 bool aCopyBackground,
4928 CompositionOp aCompositionOp) {
4929 MarkSkiaChanged(DrawOptions(aOpacity, aCompositionOp));
4930 mSkia->PushLayerWithBlend(aOpaque, aOpacity, aMask, aMaskTransform, aBounds,
4931 aCopyBackground, aCompositionOp);
4932 ++mLayerDepth;
4933 SetPermitSubpixelAA(mSkia->GetPermitSubpixelAA());
4934}
4935
4936void DrawTargetWebgl::PopLayer() {
4937 MOZ_ASSERT(mSkiaValid);
4938 MOZ_ASSERT(mLayerDepth > 0);
4939 --mLayerDepth;
4940 mSkia->PopLayer();
4941 SetPermitSubpixelAA(mSkia->GetPermitSubpixelAA());
4942}
4943
4944} // namespace mozilla::gfx

/var/lib/jenkins/workspace/firefox-scan-build/dom/canvas/DrawTargetWebglInternal.h

1/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2/* vim: set ts=8 sts=2 et sw=2 tw=80: */
3/* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7#ifndef _MOZILLA_GFX_DRAWTARGETWEBGL_INTERNAL_H
8#define _MOZILLA_GFX_DRAWTARGETWEBGL_INTERNAL_H
9
10#include "DrawTargetWebgl.h"
11
12#include "mozilla/HashFunctions.h"
13#include "mozilla/gfx/Etagere.h"
14#include "mozilla/gfx/PathSkia.h"
15#include "mozilla/gfx/WPFGpuRaster.h"
16
17namespace mozilla::gfx {
18
19// CacheEntry is a generic interface for various items that need to be cached to
20// a texture.
21class CacheEntry : public RefCounted<CacheEntry> {
22 public:
23 MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(CacheEntry)
24
25 CacheEntry(const Matrix& aTransform, const IntRect& aBounds, HashNumber aHash)
26 : mTransform(aTransform), mBounds(aBounds), mHash(aHash) {}
27 virtual ~CacheEntry() = default;
28
29 void Link(const RefPtr<TextureHandle>& aHandle);
30 void Unlink();
31
32 const RefPtr<TextureHandle>& GetHandle() const { return mHandle; }
33
34 const Matrix& GetTransform() const { return mTransform; }
35 const IntRect& GetBounds() const { return mBounds; }
36 HashNumber GetHash() const { return mHash; }
37
38 virtual bool IsValid() const { return true; }
39
40 protected:
41 virtual void RemoveFromList() = 0;
42
43 // The handle of the rendered cache item.
44 RefPtr<TextureHandle> mHandle;
45 // The transform that was used to render the entry. This is necessary as
46 // the geometry might only be correctly rendered in device space after
47 // the transform is applied, so in general we can't cache untransformed
48 // geometry.
49 Matrix mTransform;
50 // The device space bounds of the rendered geometry.
51 IntRect mBounds;
52 // A hash of the geometry that may be used for quickly rejecting entries.
53 HashNumber mHash;
54};
55
56// CacheEntryImpl provides type-dependent boilerplate code for implementations
57// of CacheEntry.
58template <typename T>
59class CacheEntryImpl : public CacheEntry, public LinkedListElement<RefPtr<T>> {
60 typedef LinkedListElement<RefPtr<T>> ListType;
61
62 public:
63 CacheEntryImpl(const Matrix& aTransform, const IntRect& aBounds,
64 HashNumber aHash)
65 : CacheEntry(aTransform, aBounds, aHash) {}
66
67 void RemoveFromList() override {
68 if (ListType::isInList()) {
    10: Taking true branch
69 ListType::remove();
    11: Calling 'LinkedListElement::remove'
    26: Returning; memory was released
70 }
71 }
72};
73
74// CacheImpl manages a list of CacheEntry.
75template <typename T, bool BIG>
76class CacheImpl {
77 protected:
78 typedef LinkedList<RefPtr<T>> ListType;
79
80 // Whether the cache should be small and space-efficient or prioritize speed.
81 static constexpr size_t kNumChains = BIG ? 499 : 71;
82
83 public:
84 ~CacheImpl() {
85 for (auto& chain : mChains) {
86 while (RefPtr<T> entry = chain.popLast()) {
    3: Calling 'RefPtr::operator bool'
    5: Returning from 'RefPtr::operator bool'
    6: Loop condition is true. Entering loop body
87 entry->Unlink();
    7: Calling 'CacheEntry::Unlink'
    28: Returning; memory was released
88 }
    29: Calling '~RefPtr'
89 }
90 }
91
92 protected:
93 ListType& GetChain(HashNumber aHash) { return mChains[aHash % kNumChains]; }
94
95 void Insert(T* aEntry) { GetChain(aEntry->GetHash()).insertFront(aEntry); }
96
97 ListType mChains[kNumChains];
98};
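The cache is a fixed-size array of hash chains: a hash selects a bucket by modulo and the chain in that bucket is searched linearly, as FindEntry does in the .cpp listing above. A standalone illustration of the bucket selection using the smaller chain count:

    #include <cstdio>

    int main() {
      const unsigned kNumChains = 71;  // the non-BIG chain count above
      unsigned hashes[] = {12345u, 12345u + 71u, 99999u};
      for (unsigned h : hashes) {
        // Entries whose hashes differ by a multiple of kNumChains share a chain.
        std::printf("hash %u -> chain %u\n", h, h % kNumChains);
      }
    }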
99
100// BackingTexture provides information about the shared or standalone texture
101// that is backing a texture handle.
102class BackingTexture {
103 public:
104 BackingTexture(const IntSize& aSize, SurfaceFormat aFormat,
105 const RefPtr<WebGLTexture>& aTexture);
106
107 SurfaceFormat GetFormat() const { return mFormat; }
108 IntSize GetSize() const { return mSize; }
109
110 static inline size_t UsedBytes(SurfaceFormat aFormat, const IntSize& aSize) {
111 return size_t(BytesPerPixel(aFormat)) * size_t(aSize.width) *
112 size_t(aSize.height);
113 }
114
115 size_t UsedBytes() const { return UsedBytes(GetFormat(), GetSize()); }
116
117 const RefPtr<WebGLTexture>& GetWebGLTexture() const { return mTexture; }
118
119 bool IsInitialized() const { return mFlags & INITIALIZED; }
120 void MarkInitialized() { mFlags |= INITIALIZED; }
121
122 bool IsRenderable() const { return mFlags & RENDERABLE; }
123 void MarkRenderable() { mFlags |= RENDERABLE; }
124
125 protected:
126 IntSize mSize;
127 SurfaceFormat mFormat;
128 RefPtr<WebGLTexture> mTexture;
129
130 private:
131 enum Flags : uint8_t {
132 INITIALIZED = 1 << 0,
133 RENDERABLE = 1 << 1,
134 };
135
136 uint8_t mFlags = 0;
137};
138
139// TextureHandle is an abstract base class for supplying textures to drawing
140// commands that may be backed by different resource types (such as a shared
141// or standalone texture). It may be further linked to use-specific metadata
142// such as for shadow drawing or for cached entries in the glyph cache.
143class TextureHandle : public RefCounted<TextureHandle>,
144 public LinkedListElement<RefPtr<TextureHandle>> {
145 public:
146 MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(TextureHandle)
147
148 enum Type { SHARED, STANDALONE };
149
150 virtual Type GetType() const = 0;
151 virtual IntRect GetBounds() const = 0;
152 IntSize GetSize() const { return GetBounds().Size(); }
153 virtual SurfaceFormat GetFormat() const = 0;
154
155 virtual BackingTexture* GetBackingTexture() = 0;
156
157 size_t UsedBytes() const {
158 return BackingTexture::UsedBytes(GetFormat(), GetSize());
159 }
160
161 virtual void UpdateSize(const IntSize& aSize) {}
162
163 virtual void Cleanup(SharedContextWebgl& aContext) {}
164
165 virtual ~TextureHandle() {}
166
167 bool IsValid() const { return mValid; }
168 void Invalidate() { mValid = false; }
169
170 void ClearSurface() { mSurface = nullptr; }
171 void SetSurface(const RefPtr<SourceSurface>& aSurface) {
172 mSurface = aSurface;
173 }
174 already_AddRefed<SourceSurface> GetSurface() const {
175 RefPtr<SourceSurface> surface(mSurface);
176 return surface.forget();
177 }
178
179 float GetSigma() const { return mSigma; }
180 void SetSigma(float aSigma) { mSigma = aSigma; }
181 bool IsShadow() const { return mSigma >= 0.0f; }
182
183 void SetSamplingOffset(const IntPoint& aSamplingOffset) {
184 mSamplingOffset = aSamplingOffset;
185 }
186 const IntPoint& GetSamplingOffset() const { return mSamplingOffset; }
187 IntRect GetSamplingRect() const {
188 return IntRect(GetSamplingOffset(), GetSize());
189 }
190
191 const RefPtr<CacheEntry>& GetCacheEntry() const { return mCacheEntry; }
192 void SetCacheEntry(const RefPtr<CacheEntry>& aEntry) { mCacheEntry = aEntry; }
193
194 // Considered used if there is a corresponding surface or cache entry.
195 bool IsUsed() const {
196 return !mSurface.IsDead() || (mCacheEntry && mCacheEntry->IsValid());
197 }
198
199 private:
200 bool mValid = true;
201 // If applicable, weak pointer to the SourceSurface that is linked to this
202 // TextureHandle.
203 ThreadSafeWeakPtr<SourceSurface> mSurface;
204 // If this TextureHandle stores a cached shadow, then we need to remember the
205 // blur sigma used to produce the shadow.
206 float mSigma = -1.0f;
207 // If the originating surface requested a sampling rect, then we need to know
208 // the offset of the subrect within the surface for texture coordinates.
209 IntPoint mSamplingOffset;
210 // If applicable, the CacheEntry that is linked to this TextureHandle.
211 RefPtr<CacheEntry> mCacheEntry;
212};
213
214class SharedTextureHandle;
215
216// SharedTexture is a large slab texture that is subdivided (by using a
217// TexturePacker) to hold many small SharedTextureHandles. This avoids needing
218// to allocate many WebGL textures for every single small Canvas 2D texture.
219class SharedTexture : public RefCounted<SharedTexture>, public BackingTexture {
220 public:
221 MOZ_DECLARE_REFCOUNTED_TYPENAME(SharedTexture)
222
223 SharedTexture(const IntSize& aSize, SurfaceFormat aFormat,
224 const RefPtr<WebGLTexture>& aTexture);
225 ~SharedTexture();
226
227 already_AddRefed<SharedTextureHandle> Allocate(const IntSize& aSize);
228 bool Free(SharedTextureHandle& aHandle);
229
230 bool HasAllocatedHandles() const {
231 return mAtlasAllocator && Etagere::etagere_atlas_allocator_allocated_space(
232 mAtlasAllocator) > 0;
233 }
234
235 private:
236 Etagere::AtlasAllocator* mAtlasAllocator = nullptr;
237};
238
239// SharedTextureHandle is an allocated region within a large SharedTexture page
240// that owns it.
241class SharedTextureHandle : public TextureHandle {
242 friend class SharedTexture;
243
244 public:
245 MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(SharedTextureHandle, override)
246
247 SharedTextureHandle(Etagere::AllocationId aId, const IntRect& aBounds,
248 SharedTexture* aTexture);
249
250 Type GetType() const override { return Type::SHARED; }
251
252 IntRect GetBounds() const override { return mBounds; }
253
254 SurfaceFormat GetFormat() const override { return mTexture->GetFormat(); }
255
256 BackingTexture* GetBackingTexture() override { return mTexture.get(); }
257
258 void Cleanup(SharedContextWebgl& aContext) override;
259
260 const RefPtr<SharedTexture>& GetOwner() const { return mTexture; }
261
262 private:
263 Etagere::AllocationId mAllocationId = Etagere::INVALID_ALLOCATION_ID;
264 IntRect mBounds;
265 RefPtr<SharedTexture> mTexture;
266};
267
268// StandaloneTexture is a texture that cannot be effectively shared within
269// a SharedTexture page, so it is better to assign it its own WebGL
270// texture.
271class StandaloneTexture : public TextureHandle, public BackingTexture {
272 public:
273 MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(StandaloneTexture, override)
274
275 StandaloneTexture(const IntSize& aSize, SurfaceFormat aFormat,
276 const RefPtr<WebGLTexture>& aTexture);
277
278 Type GetType() const override { return Type::STANDALONE; }
279
280 IntRect GetBounds() const override {
281 return IntRect(IntPoint(0, 0), BackingTexture::GetSize());
282 }
283
284 SurfaceFormat GetFormat() const override {
285 return BackingTexture::GetFormat();
286 }
287
288 using BackingTexture::UsedBytes;
289
290 BackingTexture* GetBackingTexture() override { return this; }
291
292 void UpdateSize(const IntSize& aSize) override { mSize = aSize; }
293
294 void Cleanup(SharedContextWebgl& aContext) override;
295};
296
297// GlyphCacheEntry stores rendering metadata for a rendered text run, as well
298// as the handle to the texture it was rendered into, so that it can be located
299// for reuse under similar rendering circumstances.
300class GlyphCacheEntry : public CacheEntryImpl<GlyphCacheEntry> {
301 public:
302 MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(GlyphCacheEntry, override)
303
304 GlyphCacheEntry(const GlyphBuffer& aBuffer, const DeviceColor& aColor,
305 const Matrix& aTransform, const IntPoint& aQuantizeScale,
306 const IntRect& aBounds, const IntRect& aFullBounds,
307 HashNumber aHash,
308 StoredStrokeOptions* aStrokeOptions = nullptr);
309 ~GlyphCacheEntry();
310
311 const GlyphBuffer& GetGlyphBuffer() const { return mBuffer; }
312
313 bool MatchesGlyphs(const GlyphBuffer& aBuffer, const DeviceColor& aColor,
314 const Matrix& aTransform, const IntPoint& aQuantizeOffset,
315 const IntPoint& aBoundsOffset, const IntRect& aClipRect,
316 HashNumber aHash, const StrokeOptions* aStrokeOptions);
317
318 static HashNumber HashGlyphs(const GlyphBuffer& aBuffer,
319 const Matrix& aTransform,
320 const IntPoint& aQuantizeScale);
321
322 private:
323 // The glyph keys used to render the text run.
324 GlyphBuffer mBuffer = {nullptr, 0};
325 // The color of the text run.
326 DeviceColor mColor;
327 // The full bounds of the text run without any clipping applied.
328 IntRect mFullBounds;
329 // Stroke options for the text run.
330 UniquePtr<StoredStrokeOptions> mStrokeOptions;
331};
332
333// GlyphCache maintains a list of GlyphCacheEntry objects representing previously
334// rendered text runs. The cache is searched to see if a given incoming text
335// run has already been rendered to a texture, and if so, just reuses it.
336// Otherwise, the text run will be rendered to a new texture handle and
337// inserted into a new GlyphCacheEntry to represent it.
338class GlyphCache : public LinkedListElement<GlyphCache>,
339 public CacheImpl<GlyphCacheEntry, false> {
340 public:
341 explicit GlyphCache(ScaledFont* aFont);
342
343 ScaledFont* GetFont() const { return mFont; }
344
345 already_AddRefed<GlyphCacheEntry> FindEntry(const GlyphBuffer& aBuffer,
346 const DeviceColor& aColor,
347 const Matrix& aTransform,
348 const IntPoint& aQuantizeScale,
349 const IntRect& aClipRect,
350 HashNumber aHash,
351 const StrokeOptions* aOptions);
352
353 already_AddRefed<GlyphCacheEntry> InsertEntry(
354 const GlyphBuffer& aBuffer, const DeviceColor& aColor,
355 const Matrix& aTransform, const IntPoint& aQuantizeScale,
356 const IntRect& aBounds, const IntRect& aFullBounds, HashNumber aHash,
357 const StrokeOptions* aOptions);
358
359 bool IsWhitespace(const GlyphBuffer& aBuffer) const;
360 void SetLastWhitespace(const GlyphBuffer& aBuffer);
361
362 private:
363 // Weak pointer to the owning font
364 ScaledFont* mFont;
365 // The last whitespace queried from this cache
366 Maybe<uint32_t> mLastWhitespace;
367};
368
369struct QuantizedPath {
370 explicit QuantizedPath(const WGR::Path& aPath);
371 // Ensure the path can only be moved, but not copied.
372 QuantizedPath(QuantizedPath&&) noexcept;
373 QuantizedPath(const QuantizedPath&) = delete;
374 ~QuantizedPath();
375
376 bool operator==(const QuantizedPath&) const;
377
378 WGR::Path mPath;
379};
380
381struct PathVertexRange {
382 uint32_t mOffset;
383 uint32_t mLength;
384
385 PathVertexRange() : mOffset(0), mLength(0) {}
386 PathVertexRange(uint32_t aOffset, uint32_t aLength)
387 : mOffset(aOffset), mLength(aLength) {}
388
389 bool IsValid() const { return mLength > 0; }
390};
391
392enum class AAStrokeMode {
393 Unsupported,
394 Geometry,
395 Mask,
396};
397
398// PathCacheEntry stores a rasterized version of a supplied path with a given
399// pattern.
400class PathCacheEntry : public CacheEntryImpl<PathCacheEntry> {
401 public:
402 MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(PathCacheEntry, override)
403
404 PathCacheEntry(QuantizedPath&& aPath, Pattern* aPattern,
405 StoredStrokeOptions* aStrokeOptions, AAStrokeMode aStrokeMode,
406 const Matrix& aTransform, const IntRect& aBounds,
407 const Point& aOrigin, HashNumber aHash, float aSigma = -1.0f);
408
409 bool MatchesPath(const QuantizedPath& aPath, const Pattern* aPattern,
410 const StrokeOptions* aStrokeOptions,
411 AAStrokeMode aStrokeMode, const Matrix& aTransform,
412 const IntRect& aBounds, const Point& aOrigin,
413 HashNumber aHash, float aSigma);
414
415 static HashNumber HashPath(const QuantizedPath& aPath,
416 const Pattern* aPattern, const Matrix& aTransform,
417 const IntRect& aBounds, const Point& aOrigin);
418
419 const QuantizedPath& GetPath() const { return mPath; }
420
421 const Point& GetOrigin() const { return mOrigin; }
422
423 // Valid if either this is a mask (no pattern) or there is a valid pattern.
424 bool IsValid() const override { return !mPattern || mPattern->IsValid(); }
425
426 const PathVertexRange& GetVertexRange() const { return mVertexRange; }
427 void SetVertexRange(const PathVertexRange& aRange) { mVertexRange = aRange; }
428
429 private:
430 // The actual path geometry supplied
431 QuantizedPath mPath;
432 // The transformed origin of the path
433 Point mOrigin;
434 // The pattern used to rasterize the path, if not a mask
435 UniquePtr<Pattern> mPattern;
436 // The StrokeOptions used for stroked paths, if applicable
437 UniquePtr<StoredStrokeOptions> mStrokeOptions;
438 // The AAStroke mode used for rendering a stroked path.
439 AAStrokeMode mAAStrokeMode = AAStrokeMode::Unsupported;
440 // The shadow blur sigma
441 float mSigma;
442 // If the path has cached geometry in the vertex buffer.
443 PathVertexRange mVertexRange;
444};
445
446class PathCache : public CacheImpl<PathCacheEntry, true> {
447 public:
448 PathCache() = default;
449
450 already_AddRefed<PathCacheEntry> FindOrInsertEntry(
451 QuantizedPath aPath, const Pattern* aPattern,
452 const StrokeOptions* aStrokeOptions, AAStrokeMode aStrokeMode,
453 const Matrix& aTransform, const IntRect& aBounds, const Point& aOrigin,
454 float aSigma = -1.0f);
455
456 void ClearVertexRanges();
457};
458
459} // namespace mozilla::gfx
460
461#endif // _MOZILLA_GFX_DRAWTARGETWEBGL_INTERNAL_H

/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/RefPtr.h

1/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2/* vim: set ts=8 sts=2 et sw=2 tw=80: */
3/* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7#ifndef mozilla_RefPtr_h
8#define mozilla_RefPtr_h
9
10#include "mozilla/AlreadyAddRefed.h"
11#include "mozilla/Assertions.h"
12#include "mozilla/Attributes.h"
13#include "mozilla/DbgMacro.h"
14
15#include <type_traits>
16
17/*****************************************************************************/
18
19// template <class T> class RefPtrGetterAddRefs;
20
21class nsQueryReferent;
22class nsCOMPtr_helper;
23class nsISupports;
24
25namespace mozilla {
26template <class T>
27class MovingNotNull;
28template <class T>
29class NotNull;
30template <class T>
31class OwningNonNull;
32template <class T>
33class StaticLocalRefPtr;
34template <class T>
35class StaticRefPtr;
36
37// Traditionally, RefPtr supports automatic refcounting of any pointer type
38// with AddRef() and Release() methods that follow the traditional semantics.
39//
40// This traits class can be specialized to operate on other pointer types. For
41// example, we specialize this trait for opaque FFI types that represent
42// refcounted objects in Rust.
43//
44// Given the use of ConstRemovingRefPtrTraits below, U should not be a const-
45// qualified type.
46template <class U>
47struct RefPtrTraits {
48 static void AddRef(U* aPtr) { aPtr->AddRef(); }
49 static void Release(U* aPtr) { aPtr->Release(); }
50};
51
52} // namespace mozilla
53
54template <class T>
55class MOZ_IS_REFPTR RefPtr {
56 private:
57 void assign_with_AddRef(T* aRawPtr) {
58 if (aRawPtr) {
59 ConstRemovingRefPtrTraits<T>::AddRef(aRawPtr);
60 }
61 assign_assuming_AddRef(aRawPtr);
62 }
63
64 void assign_assuming_AddRef(T* aNewPtr) {
65 T* oldPtr = mRawPtr;
66 mRawPtr = aNewPtr;
67 if (oldPtr) {
68 ConstRemovingRefPtrTraits<T>::Release(oldPtr);
69 }
70 }
71
72 private:
73 T* MOZ_OWNING_REF mRawPtr;
74
75 public:
76 typedef T element_type;
77
78 ~RefPtr() {
79 if (mRawPtr) {
    29.1: Field 'mRawPtr' is non-null
    30: Taking true branch
80 ConstRemovingRefPtrTraits<T>::Release(mRawPtr);
    31: Use of memory after it is freed
81 }
82 }
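For context, the warning reported at line 80 belongs to the general class of unbalanced-refcount bugs: an object is released to a refcount of zero while another owner still holds a pointer to it, so the second release touches freed memory. The sketch below is a deliberately contrived standalone example of that pattern, not a reduction of the RefPtr/LinkedList path flagged above:

    #include <cstdio>

    // A deliberately broken miniature refcounted object, for illustration only.
    struct MiniRefcounted {
      int mRefCnt = 1;
      void AddRef() { ++mRefCnt; }
      void Release() {
        if (--mRefCnt == 0) {
          delete this;  // object is gone after this point
        }
      }
    };

    int main() {
      MiniRefcounted* obj = new MiniRefcounted();  // refcount 1, one owner
      MiniRefcounted* alias = obj;                 // second pointer, no AddRef()
      (void)alias;                                 // silence unused-variable warnings
      obj->Release();                              // refcount hits 0, object freed
      // alias->Release();  // this would be the use-after-free the checker flags
      std::printf("done\n");
    }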
83
84 // Constructors
85
86 RefPtr()
87 : mRawPtr(nullptr)
88 // default constructor
89 {}
90
91 RefPtr(const RefPtr<T>& aSmartPtr)
92 : mRawPtr(aSmartPtr.mRawPtr)
93 // copy-constructor
94 {
95 if (mRawPtr) {
96 ConstRemovingRefPtrTraits<T>::AddRef(mRawPtr);
97 }
98 }
99
100 RefPtr(RefPtr<T>&& aRefPtr) noexcept : mRawPtr(aRefPtr.mRawPtr) {
101 aRefPtr.mRawPtr = nullptr;
102 }
103
104 // construct from a raw pointer (of the right type)
105
106 MOZ_IMPLICIT RefPtr(T* aRawPtr) : mRawPtr(aRawPtr) {
107 if (mRawPtr) {
108 ConstRemovingRefPtrTraits<T>::AddRef(mRawPtr);
109 }
110 }
111
112 MOZ_IMPLICIT RefPtr(decltype(nullptr)) : mRawPtr(nullptr) {}
113
114 template <typename I,
115 typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
116 MOZ_IMPLICIT RefPtr(already_AddRefed<I>& aSmartPtr)
117 : mRawPtr(aSmartPtr.take())
118 // construct from |already_AddRefed|
119 {}
120
121 template <typename I,
122 typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
123 MOZ_IMPLICIT RefPtr(already_AddRefed<I>&& aSmartPtr)
124 : mRawPtr(aSmartPtr.take())
125 // construct from |otherRefPtr.forget()|
126 {}
127
128 template <typename I,
129 typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
130 MOZ_IMPLICIT RefPtr(const RefPtr<I>& aSmartPtr)
131 : mRawPtr(aSmartPtr.get())
132 // copy-construct from a smart pointer with a related pointer type
133 {
134 if (mRawPtr) {
135 ConstRemovingRefPtrTraits<T>::AddRef(mRawPtr);
136 }
137 }
138
139 template <typename I,
140 typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
141 MOZ_IMPLICIT RefPtr(RefPtr<I>&& aSmartPtr)
142 : mRawPtr(aSmartPtr.forget().take())
143 // construct from |Move(RefPtr<SomeSubclassOfT>)|.
144 {}
145
146 template <typename I,
147 typename = std::enable_if_t<!std::is_same_v<I, RefPtr<T>> &&
148 std::is_convertible_v<I, RefPtr<T>>>>
149 MOZ_IMPLICIT RefPtr(const mozilla::NotNull<I>& aSmartPtr)
150 : mRawPtr(RefPtr<T>(aSmartPtr.get()).forget().take())
151 // construct from |mozilla::NotNull|.
152 {}
153
154 template <typename I,
155 typename = std::enable_if_t<!std::is_same_v<I, RefPtr<T>> &&
156 std::is_convertible_v<I, RefPtr<T>>>>
157 MOZ_IMPLICIT RefPtr(mozilla::MovingNotNull<I>&& aSmartPtr)
158 : mRawPtr(RefPtr<T>(std::move(aSmartPtr).unwrapBasePtr()).forget().take())
159 // construct from |mozilla::MovingNotNull|.
160 {}
161
162 MOZ_IMPLICIT RefPtr(const nsQueryReferent& aHelper);
163 MOZ_IMPLICIT RefPtr(const nsCOMPtr_helper& aHelper);
164
165 // Defined in OwningNonNull.h
166 template <class U>
167 MOZ_IMPLICIT RefPtr(const mozilla::OwningNonNull<U>& aOther);
168
169 // Defined in StaticLocalPtr.h
170 template <class U>
171 MOZ_IMPLICIT RefPtr(const mozilla::StaticLocalRefPtr<U>& aOther);
172
173 // Defined in StaticPtr.h
174 template <class U>
175 MOZ_IMPLICIT RefPtr(const mozilla::StaticRefPtr<U>& aOther);
176
177 // Assignment operators
178
179 RefPtr<T>& operator=(decltype(nullptr)) {
180 assign_assuming_AddRef(nullptr);
181 return *this;
182 }
183
184 RefPtr<T>& operator=(const RefPtr<T>& aRhs)
185 // copy assignment operator
186 {
187 assign_with_AddRef(aRhs.mRawPtr);
188 return *this;
189 }
190
191 template <typename I>
192 RefPtr<T>& operator=(const RefPtr<I>& aRhs)
193 // assign from a RefPtr of a related pointer type
194 {
195 assign_with_AddRef(aRhs.get());
196 return *this;
197 }
198
199 RefPtr<T>& operator=(T* aRhs)
200 // assign from a raw pointer (of the right type)
201 {
202 assign_with_AddRef(aRhs);
203 return *this;
204 }
205
206 template <typename I>
207 RefPtr<T>& operator=(already_AddRefed<I>& aRhs)
208 // assign from |already_AddRefed|
209 {
210 assign_assuming_AddRef(aRhs.take());
211 return *this;
212 }
213
214 template <typename I>
215 RefPtr<T>& operator=(already_AddRefed<I>&& aRhs)
216 // assign from |otherRefPtr.forget()|
217 {
218 assign_assuming_AddRef(aRhs.take());
219 return *this;
220 }
221
222 RefPtr<T>& operator=(const nsQueryReferent& aQueryReferent);
223 RefPtr<T>& operator=(const nsCOMPtr_helper& aHelper);
224
225 template <typename I,
226 typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
227 RefPtr<T>& operator=(RefPtr<I>&& aRefPtr) noexcept {
228 assign_assuming_AddRef(aRefPtr.forget().take());
229 return *this;
230 }
231
232 template <typename I,
233 typename = std::enable_if_t<std::is_convertible_v<I, RefPtr<T>>>>
234 RefPtr<T>& operator=(const mozilla::NotNull<I>& aSmartPtr)
235 // assign from |mozilla::NotNull|.
236 {
237 assign_assuming_AddRef(RefPtr<T>(aSmartPtr.get()).forget().take());
238 return *this;
239 }
240
241 template <typename I,
242 typename = std::enable_if_t<std::is_convertible_v<I, RefPtr<T>>>>
243 RefPtr<T>& operator=(mozilla::MovingNotNull<I>&& aSmartPtr)
244 // assign from |mozilla::MovingNotNull|.
245 {
246 assign_assuming_AddRef(
247 RefPtr<T>(std::move(aSmartPtr).unwrapBasePtr()).forget().take());
248 return *this;
249 }
250
251 // Defined in OwningNonNull.h
252 template <class U>
253 RefPtr<T>& operator=(const mozilla::OwningNonNull<U>& aOther);
254
255 // Defined in StaticLocalPtr.h
256 template <class U>
257 RefPtr<T>& operator=(const mozilla::StaticLocalRefPtr<U>& aOther);
258
259 // Defined in StaticPtr.h
260 template <class U>
261 RefPtr<T>& operator=(const mozilla::StaticRefPtr<U>& aOther);
262
263 // Other pointer operators
264
265 void swap(RefPtr<T>& aRhs)
266 // ...exchange ownership with |aRhs|; can save a pair of refcount operations
267 {
268 T* temp = aRhs.mRawPtr;
269 aRhs.mRawPtr = mRawPtr;
270 mRawPtr = temp;
271 }
272
273 void swap(T*& aRhs)
274 // ...exchange ownership with |aRhs|; can save a pair of refcount operations
275 {
276 T* temp = aRhs;
277 aRhs = mRawPtr;
278 mRawPtr = temp;
279 }
280
281 already_AddRefed<T> MOZ_MAY_CALL_AFTER_MUST_RETURN forget()
282 // return the value of mRawPtr and null out mRawPtr. Useful for
283 // already_AddRefed return values.
284 {
285 T* temp = nullptr;
286 swap(temp);
287 return already_AddRefed<T>(temp);
288 }
289
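The comment above describes the intended pattern: forget() hands the caller an already-AddRefed pointer without an extra AddRef/Release pair. A small sketch, where Widget stands for any class with AddRef()/Release() and CreateWidget is a hypothetical factory:

    already_AddRefed<Widget> CreateWidget() {
      RefPtr<Widget> widget = new Widget();
      // ... initialize |widget| ...
      return widget.forget();  // transfer the single reference to the caller
    }

    // The caller re-takes ownership via the already_AddRefed constructor.
    RefPtr<Widget> w = CreateWidget();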
290 template <typename I>
291 void forget(I** aRhs)
292 // Set the target of aRhs to the value of mRawPtr and null out mRawPtr.
293 // Useful to avoid unnecessary AddRef/Release pairs with "out"
294 // parameters where aRhs may be a T** or an I** where I is a base class
295 // of T.
296 {
297 MOZ_ASSERT(aRhs, "Null pointer passed to forget!");
298 *aRhs = mRawPtr;
299 mRawPtr = nullptr;
300 }
301
302 void forget(nsISupports** aRhs) {
303 MOZ_ASSERT(aRhs, "Null pointer passed to forget!");
304 *aRhs = ToSupports(mRawPtr);
305 mRawPtr = nullptr;
306 }
307
308 T* get() const
309 /*
310 Prefer the implicit conversion provided automatically by |operator T*()
311 const|. Use |get()| to resolve ambiguity or to get a castable pointer.
312 */
313 {
314 return const_cast<T*>(mRawPtr);
315 }
316
317 operator T*() const&
318 /*
319 ...makes a |RefPtr| act like its underlying raw pointer type whenever it
320 is used in a context where a raw pointer is expected. It is this operator
321 that makes a |RefPtr| substitutable for a raw pointer.
322
323 Prefer the implicit use of this operator to calling |get()|, except where
324 necessary to resolve ambiguity.
325 */
326 {
327 return get();
328 }
329
330 // Don't allow implicit conversion of temporary RefPtr to raw pointer,
331 // because the refcount might be one and the pointer will immediately become
332 // invalid.
333 operator T*() const&& = delete;
334
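To make the comment above concrete, the deleted rvalue overload rejects code where the temporary would Release() its only reference before the raw pointer could be used (ReturnsRefPtrByValue is an assumed function returning RefPtr<Widget> by value):

    RefPtr<Widget> ReturnsRefPtrByValue();  // hypothetical

    // Widget* raw = ReturnsRefPtrByValue();      // rejected: uses the deleted
    //                                            // |operator T*() const&&|
    RefPtr<Widget> kept = ReturnsRefPtrByValue(); // fine: ownership is retained
    Widget* borrowed = kept;                      // fine: |kept| is an lvalue and
                                                  // still owns a reference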
335 // These are needed to avoid the deleted operator above. XXX Why is operator!
336 // needed separately? Shouldn't the compiler prefer using the non-deleted
337 // operator bool instead of the deleted operator T*?
338 explicit operator bool() const { return !!mRawPtr; }
4
Assuming field 'mRawPtr' is non-null
339 bool operator!() const { return !mRawPtr; }
340
341 T* operator->() const MOZ_NO_ADDREF_RELEASE_ON_RETURN {
342 MOZ_ASSERT(mRawPtr != nullptr,
343 "You can't dereference a NULL RefPtr with operator->().");
344 return get();
345 }
346
347 template <typename R, typename... Args>
348 class Proxy {
349 typedef R (T::*member_function)(Args...);
350 T* mRawPtr;
351 member_function mFunction;
352
353 public:
354 Proxy(T* aRawPtr, member_function aFunction)
355 : mRawPtr(aRawPtr), mFunction(aFunction) {}
356 template <typename... ActualArgs>
357 R operator()(ActualArgs&&... aArgs) {
358 return ((*mRawPtr).*mFunction)(std::forward<ActualArgs>(aArgs)...);
359 }
360 };
361
362 template <typename R, typename... Args>
363 Proxy<R, Args...> operator->*(R (T::*aFptr)(Args...)) const {
364 MOZ_ASSERT(mRawPtr != nullptr,
365 "You can't dereference a NULL RefPtr with operator->*().");
366 return Proxy<R, Args...>(get(), aFptr);
367 }
368
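The Proxy returned by operator->* above simply forwards a member-function call through the wrapped pointer. A brief sketch, assuming a hypothetical member |void Widget::Paint(int)| and the CreateWidget factory from earlier:

    RefPtr<Widget> w = CreateWidget();
    void (Widget::*paint)(int) = &Widget::Paint;
    (w->*paint)(42);  // builds a Proxy and forwards to (w.get()->*paint)(42)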
369 RefPtr<T>* get_address()
370 // This is not intended to be used by clients. See |address_of|
371 // below.
372 {
373 return this;
374 }
375
376 const RefPtr<T>* get_address() const
377 // This is not intended to be used by clients. See |address_of|
378 // below.
379 {
380 return this;
381 }
382
383 public:
384 T& operator*() const {
385 MOZ_ASSERT(mRawPtr != nullptr,
386 "You can't dereference a NULL RefPtr with operator*().");
387 return *get();
388 }
389
390 T** StartAssignment() {
391 assign_assuming_AddRef(nullptr);
392 return reinterpret_cast<T**>(&mRawPtr);
393 }
394
395 private:
396 // This helper class makes |RefPtr<const T>| possible by casting away
397 // the constness from the pointer when calling AddRef() and Release().
398 //
399 // This is necessary because AddRef() and Release() implementations can't
400 // generally be expected to be const themselves (without heavy use of |mutable|
401 // and |const_cast| in their own implementations).
402 //
403 // This should be sound because while |RefPtr<const T>| provides a
404 // const view of an object, the object itself should not be const (it
405 // would have to be allocated as |new const T| or similar to be const).
406 template <class U>
407 struct ConstRemovingRefPtrTraits {
408 static void AddRef(U* aPtr) { mozilla::RefPtrTraits<U>::AddRef(aPtr); }
409 static void Release(U* aPtr) { mozilla::RefPtrTraits<U>::Release(aPtr); }
410 };
411 template <class U>
412 struct ConstRemovingRefPtrTraits<const U> {
413 static void AddRef(const U* aPtr) {
414 mozilla::RefPtrTraits<U>::AddRef(const_cast<U*>(aPtr));
415 }
416 static void Release(const U* aPtr) {
417 mozilla::RefPtrTraits<U>::Release(const_cast<U*>(aPtr));
418 }
419 };
420};
421
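The ConstRemovingRefPtrTraits machinery above is what lets RefPtr<const T> share the non-const refcounting implementation; a short sketch, reusing the hypothetical Widget from earlier:

    RefPtr<Widget> w = CreateWidget();
    RefPtr<const Widget> view = w;  // AddRef() is still called on the non-const Widget
    // Only const members of Widget are callable through |view|; releasing |view|
    // likewise const_casts before calling Release().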
422class nsCycleCollectionTraversalCallback;
423template <typename T>
424void CycleCollectionNoteChild(nsCycleCollectionTraversalCallback& aCallback,
425 T* aChild, const char* aName, uint32_t aFlags);
426
427template <typename T>
428inline void ImplCycleCollectionUnlink(RefPtr<T>& aField) {
429 aField = nullptr;
430}
431
432template <typename T>
433inline void ImplCycleCollectionTraverse(
434 nsCycleCollectionTraversalCallback& aCallback, const RefPtr<T>& aField,
435 const char* aName, uint32_t aFlags = 0) {
436 CycleCollectionNoteChild(aCallback, aField.get(), aName, aFlags);
437}
438
439template <class T>
440inline RefPtr<T>* address_of(RefPtr<T>& aPtr) {
441 return aPtr.get_address();
442}
443
444template <class T>
445inline const RefPtr<T>* address_of(const RefPtr<T>& aPtr) {
446 return aPtr.get_address();
447}
448
449template <class T>
450class RefPtrGetterAddRefs
451/*
452 ...
453
454 This class is designed to be used for anonymous temporary objects in the
455 argument list of calls that return COM interface pointers, e.g.,
456
457 RefPtr<IFoo> fooP;
458 ...->GetAddRefedPointer(getter_AddRefs(fooP))
459
460 DO NOT USE THIS TYPE DIRECTLY IN YOUR CODE. Use |getter_AddRefs()| instead.
461
462 When initialized with a |RefPtr|, as in the example above, it returns
463 a |void**|, a |T**|, or an |nsISupports**| as needed, that the
464 outer call (|GetAddRefedPointer| in this case) can fill in.
465
466 This type should be a nested class inside |RefPtr<T>|.
467*/
468{
469 public:
470 explicit RefPtrGetterAddRefs(RefPtr<T>& aSmartPtr)
471 : mTargetSmartPtr(aSmartPtr) {
472 // nothing else to do
473 }
474
475 operator void**() {
476 return reinterpret_cast<void**>(mTargetSmartPtr.StartAssignment());
477 }
478
479 operator T**() { return mTargetSmartPtr.StartAssignment(); }
480
481 T*& operator*() { return *(mTargetSmartPtr.StartAssignment()); }
482
483 private:
484 RefPtr<T>& mTargetSmartPtr;
485};
486
487template <class T>
488inline RefPtrGetterAddRefs<T> getter_AddRefs(RefPtr<T>& aSmartPtr)
489/*
490 Used around a |RefPtr| when its address is passed as an out parameter;
491 this wrapper makes the helper class |RefPtrGetterAddRefs<T>| invisible to callers.
492*/
493{
494 return RefPtrGetterAddRefs<T>(aSmartPtr);
495}
496
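Tying the two helpers above together, the usual call-site pattern looks roughly like this (GetWidget is an assumed out-parameter getter, shown only to illustrate the flow):

    nsresult GetWidget(Widget** aResult);  // hypothetical; AddRefs *aResult

    RefPtr<Widget> widget;
    // getter_AddRefs(widget) yields a temporary RefPtrGetterAddRefs<Widget> whose
    // operator T**() hands GetWidget a slot inside |widget| to fill in.
    nsresult rv = GetWidget(getter_AddRefs(widget));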
497// Comparing two |RefPtr|s
498
499template <class T, class U>
500inline bool operator==(const RefPtr<T>& aLhs, const RefPtr<U>& aRhs) {
501 return static_cast<const T*>(aLhs.get()) == static_cast<const U*>(aRhs.get());
502}
503
504template <class T, class U>
505inline bool operator!=(const RefPtr<T>& aLhs, const RefPtr<U>& aRhs) {
506 return static_cast<const T*>(aLhs.get()) != static_cast<const U*>(aRhs.get());
507}
508
509// Comparing a |RefPtr| to a raw pointer
510
511template <class T, class U>
512inline bool operator==(const RefPtr<T>& aLhs, const U* aRhs) {
513 return static_cast<const T*>(aLhs.get()) == static_cast<const U*>(aRhs);
514}
515
516template <class T, class U>
517inline bool operator==(const U* aLhs, const RefPtr<T>& aRhs) {
518 return static_cast<const U*>(aLhs) == static_cast<const T*>(aRhs.get());
519}
520
521template <class T, class U>
522inline bool operator!=(const RefPtr<T>& aLhs, const U* aRhs) {
523 return static_cast<const T*>(aLhs.get()) != static_cast<const U*>(aRhs);
524}
525
526template <class T, class U>
527inline bool operator!=(const U* aLhs, const RefPtr<T>& aRhs) {
528 return static_cast<const U*>(aLhs) != static_cast<const T*>(aRhs.get());
529}
530
531template <class T, class U>
532inline bool operator==(const RefPtr<T>& aLhs, U* aRhs) {
533 return static_cast<const T*>(aLhs.get()) == const_cast<const U*>(aRhs);
534}
535
536template <class T, class U>
537inline bool operator==(U* aLhs, const RefPtr<T>& aRhs) {
538 return const_cast<const U*>(aLhs) == static_cast<const T*>(aRhs.get());
539}
540
541template <class T, class U>
542inline bool operator!=(const RefPtr<T>& aLhs, U* aRhs) {
543 return static_cast<const T*>(aLhs.get()) != const_cast<const U*>(aRhs);
544}
545
546template <class T, class U>
547inline bool operator!=(U* aLhs, const RefPtr<T>& aRhs) {
548 return const_cast<const U*>(aLhs) != static_cast<const T*>(aRhs.get());
549}
550
551// Comparing a |RefPtr| to |nullptr|
552
553template <class T>
554inline bool operator==(const RefPtr<T>& aLhs, decltype(nullptr)) {
555 return aLhs.get() == nullptr;
556}
557
558template <class T>
559inline bool operator==(decltype(nullptr), const RefPtr<T>& aRhs) {
560 return nullptr == aRhs.get();
561}
562
563template <class T>
564inline bool operator!=(const RefPtr<T>& aLhs, decltype(nullptr)) {
565 return aLhs.get() != nullptr;
566}
567
568template <class T>
569inline bool operator!=(decltype(nullptr), const RefPtr<T>& aRhs) {
570 return nullptr != aRhs.get();
571}
572
573// MOZ_DBG support
574
575template <class T>
576std::ostream& operator<<(std::ostream& aOut, const RefPtr<T>& aObj) {
577 return mozilla::DebugValue(aOut, aObj.get());
578}
579
580/*****************************************************************************/
581
582template <class T>
583inline already_AddRefed<T> do_AddRef(T* aObj) {
584 RefPtr<T> ref(aObj);
585 return ref.forget();
586}
587
588template <class T>
589inline already_AddRefed<T> do_AddRef(const RefPtr<T>& aObj) {
590 RefPtr<T> ref(aObj);
591 return ref.forget();
592}
593
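do_AddRef is the usual way to hand an extra reference to an API that takes already_AddRefed when starting from a raw or borrowed pointer; a short sketch (ConsumeWidget is an assumed sink that takes ownership of one reference):

    void ConsumeWidget(already_AddRefed<Widget> aWidget);  // hypothetical

    void PassAlong(Widget* aBorrowed) {
      // do_AddRef takes its own reference and transfers it to the callee.
      ConsumeWidget(do_AddRef(aBorrowed));
    }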
594namespace mozilla {
595
596template <typename T>
597class AlignmentFinder;
598
599// Provide a specialization of AlignmentFinder to allow MOZ_ALIGNOF(RefPtr<T>)
600// with an incomplete T.
601template <typename T>
602class AlignmentFinder<RefPtr<T>> {
603 public:
604 static const size_t alignment = alignof(T*);
605};
606
607/**
608 * Helper function to be able to conveniently write things like:
609 *
610 * already_AddRefed<T>
611 * f(...)
612 * {
613 * return MakeAndAddRef<T>(...);
614 * }
615 */
616template <typename T, typename... Args>
617already_AddRefed<T> MakeAndAddRef(Args&&... aArgs) {
618 RefPtr<T> p(new T(std::forward<Args>(aArgs)...));
619 return p.forget();
620}
621
622/**
623 * Helper function to be able to conveniently write things like:
624 *
625 * auto runnable =
626 * MakeRefPtr<ErrorCallbackRunnable<nsIDOMGetUserMediaSuccessCallback>>(
627 * mOnSuccess, mOnFailure, *error, mWindowID);
628 */
629template <typename T, typename... Args>
630RefPtr<T> MakeRefPtr(Args&&... aArgs) {
631 RefPtr<T> p(new T(std::forward<Args>(aArgs)...));
632 return p;
633}
634
635} // namespace mozilla
636
637/**
638 * Deduction guide to allow simple `RefPtr` definitions from an
639 * already_AddRefed<T> without repeating the type, e.g.:
640 *
641 * RefPtr ptr = MakeAndAddRef<SomeType>(...);
642 */
643template <typename T>
644RefPtr(already_AddRefed<T>) -> RefPtr<T>;
645
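Putting the helpers and the deduction guide together (SomeType stands for any refcounted class with a matching constructor):

    already_AddRefed<SomeType> MakeSomeType(int aArg) {
      // AddRefs exactly once inside MakeAndAddRef.
      return mozilla::MakeAndAddRef<SomeType>(aArg);
    }

    RefPtr ptr = MakeSomeType(42);                   // deduced as RefPtr<SomeType>
    auto direct = mozilla::MakeRefPtr<SomeType>(7);  // also RefPtr<SomeType>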
646#endif /* mozilla_RefPtr_h */

/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/LinkedList.h

1/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2/* vim: set ts=8 sts=2 et sw=2 tw=80: */
3/* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7/* A type-safe doubly-linked list class. */
8
9/*
10 * The classes LinkedList<T> and LinkedListElement<T> together form a
11 * convenient, type-safe doubly-linked list implementation.
12 *
13 * The class T which will be inserted into the linked list must inherit from
14 * LinkedListElement<T>. A given object may be in only one linked list at a
15 * time.
16 *
17 * A LinkedListElement automatically removes itself from the list upon
18 * destruction, and a LinkedList will fatally assert in debug builds if it's
19 * non-empty when it's destructed.
20 *
21 * For example, you might use LinkedList in a simple observer list class as
22 * follows.
23 *
24 * class Observer : public LinkedListElement<Observer>
25 * {
26 * public:
27 * void observe(char* aTopic) { ... }
28 * };
29 *
30 * class ObserverContainer
31 * {
32 * private:
33 * LinkedList<Observer> list;
34 *
35 * public:
36 * void addObserver(Observer* aObserver)
37 * {
38 * // Will assert if |aObserver| is part of another list.
39 * list.insertBack(aObserver);
40 * }
41 *
42 * void removeObserver(Observer* aObserver)
43 * {
44 * // Will assert if |aObserver| is not part of some list.
45 * aObserver->remove();
46 * // Or, will assert if |aObserver| is not part of |list| specifically.
47 * // aObserver->removeFrom(list);
48 * }
49 *
50 * void notifyObservers(char* aTopic)
51 * {
52 * for (Observer* o = list.getFirst(); o != nullptr; o = o->getNext()) {
53 * o->observe(aTopic);
54 * }
55 * }
56 * };
57 *
58 * Additionally, the class AutoCleanLinkedList<T> is a LinkedList<T> that will
59 * remove and delete each element still within itself upon destruction. Note
60 * that because each element is deleted, elements must have been allocated
61 * using |new|.
62 */
63
64#ifndef mozilla_LinkedList_h
65#define mozilla_LinkedList_h
66
67#include <algorithm>
68#include <utility>
69
70#include "mozilla/Assertions.h"
71#include "mozilla/Attributes.h"
72#include "mozilla/MemoryReporting.h"
73#include "mozilla/RefPtr.h"
74
75#ifdef __cplusplus
76
77namespace mozilla {
78
79template <typename T>
80class LinkedListElement;
81
82namespace detail {
83
84/**
85 * LinkedList supports refcounted elements using this adapter class. Clients
86 * using LinkedList<RefPtr<T>> will get a data structure that holds a strong
87 * reference to T as long as T is in the list.
88 */
89template <typename T>
90struct LinkedListElementTraits {
91 typedef T* RawType;
92 typedef const T* ConstRawType;
93 typedef T* ClientType;
94 typedef const T* ConstClientType;
95
96 // These static methods are called when an element is added to or removed from
97 // a linked list. They can be used to keep track of ownership in lists that are
98 // supposed to own their elements. If elements are transferred from one list
99 // to another, no enter or exit calls happen since the elements still belong
100 // to a list.
101 static void enterList(LinkedListElement<T>* elt) {}
102 static void exitList(LinkedListElement<T>* elt) {}
103
104 // This method is called when AutoCleanLinkedList cleans itself
105 // during destruction. It can be used to call delete on elements if
106 // the list is the sole owner.
107 static void cleanElement(LinkedListElement<T>* elt) { delete elt->asT(); }
108};
109
110template <typename T>
111struct LinkedListElementTraits<RefPtr<T>> {
112 typedef T* RawType;
113 typedef const T* ConstRawType;
114 typedef RefPtr<T> ClientType;
115 typedef RefPtr<const T> ConstClientType;
116
117 static void enterList(LinkedListElement<RefPtr<T>>* elt) {
118 elt->asT()->AddRef();
119 }
120 static void exitList(LinkedListElement<RefPtr<T>>* elt) {
121 elt->asT()->Release();
15
Calling 'RefCounted::Release'
24
Returning; memory was released
122 }
123 static void cleanElement(LinkedListElement<RefPtr<T>>* elt) {}
124};
125
126} /* namespace detail */
127
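The specialization above is what makes LinkedList<RefPtr<T>> an owning list: enterList()/exitList() bracket every insertion and removal with AddRef()/Release(). A rough sketch of an element type used that way (Observer and its refcounting are assumed, not part of this header):

    // The element inherits from LinkedListElement<RefPtr<Observer>>,
    // not LinkedListElement<Observer>.
    class Observer : public mozilla::LinkedListElement<RefPtr<Observer>> {
     public:
      void AddRef() { ++mRefCnt; }
      void Release() {
        if (--mRefCnt == 0) {
          delete this;
        }
      }
     private:
      int mRefCnt = 0;
    };

    mozilla::LinkedList<RefPtr<Observer>> observers;
    observers.insertBack(new Observer());           // enterList() AddRefs the element
    RefPtr<Observer> first = observers.popFirst();  // exitList() Releases; |first| keeps it alive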
128template <typename T>
129class LinkedList;
130
131template <typename T>
132class LinkedListElement {
133 typedef typename detail::LinkedListElementTraits<T> Traits;
134 typedef typename Traits::RawType RawType;
135 typedef typename Traits::ConstRawType ConstRawType;
136 typedef typename Traits::ClientType ClientType;
137 typedef typename Traits::ConstClientType ConstClientType;
138
139 /*
140 * It's convenient that we return nullptr when getNext() or getPrevious()
141 * hits the end of the list, but doing so costs an extra word of storage in
142 * each linked list node (to keep track of whether |this| is the sentinel
143 * node) and a branch on this value in getNext/getPrevious.
144 *
145 * We could get rid of the extra word of storage by shoving the "is
146 * sentinel" bit into one of the pointers, although this would, of course,
147 * have performance implications of its own.
148 *
149 * But the goal here isn't to win an award for the fastest or slimmest
150 * linked list; rather, we want a *convenient* linked list. So we won't
151 * waste time guessing which micro-optimization strategy is best.
152 *
153 *
154 * Speaking of unnecessary work, it's worth addressing here why we wrote
155 * mozilla::LinkedList in the first place, instead of using std::list.
156 *
157 * The key difference between mozilla::LinkedList and std::list is that
158 * mozilla::LinkedList stores the mPrev/mNext pointers in the object itself,
159 * while std::list stores the mPrev/mNext pointers in a list element which
160 * itself points to the object being stored.
161 *
162 * mozilla::LinkedList's approach makes it harder to store an object in more
163 * than one list. But the upside is that you can call next() / prev() /
164 * remove() directly on the object. With std::list, you'd need to store a
165 * pointer to its iterator in the object in order to accomplish this. Not
166 * only would this waste space, but you'd have to remember to update that
167 * pointer every time you added or removed the object from a list.
168 *
169 * In-place, constant-time removal is a killer feature of doubly-linked
170 * lists, and supporting this painlessly was a key design criterion.
171 */
172
173 private:
174 LinkedListElement* mNext;
175 LinkedListElement* mPrev;
176 const bool mIsSentinel;
177
178 public:
179 LinkedListElement() : mNext(this), mPrev(this), mIsSentinel(false) {}
180
181 /*
182 * Moves |aOther| into |*this|. If |aOther| is already in a list, then
183 * |aOther| is removed from the list and replaced by |*this|.
184 */
185 LinkedListElement(LinkedListElement<T>&& aOther)
186 : mIsSentinel(aOther.mIsSentinel) {
187 adjustLinkForMove(std::move(aOther));
188 }
189
190 LinkedListElement& operator=(LinkedListElement<T>&& aOther) {
191 MOZ_ASSERT(mIsSentinel == aOther.mIsSentinel, "Mismatch NodeKind!");
192 MOZ_ASSERT(!isInList(),
193 "Assigning to an element in a list messes up that list!");
194 adjustLinkForMove(std::move(aOther));
195 return *this;
196 }
197
198 ~LinkedListElement() {
199 if (!mIsSentinel && isInList()) {
200 remove();
201 }
202 }
203
204 /*
205 * Get the next element in the list, or nullptr if this is the last element
206 * in the list.
207 */
208 RawType getNext() { return mNext->asT(); }
209 ConstRawType getNext() const { return mNext->asT(); }
210
211 /*
212 * Get the previous element in the list, or nullptr if this is the first
213 * element in the list.
214 */
215 RawType getPrevious() { return mPrev->asT(); }
216 ConstRawType getPrevious() const { return mPrev->asT(); }
217
218 /*
219 * Insert aElem after this element in the list. |this| must be part of a
220 * linked list when you call setNext(); otherwise, this method will assert.
221 */
222 void setNext(RawType aElem) {
223 MOZ_ASSERT(isInList());
224 setNextUnsafe(aElem);
225 }
226
227 /*
228 * Insert aElem before this element in the list. |this| must be part of a
229 * linked list when you call setPrevious(); otherwise, this method will
230 * assert.
231 */
232 void setPrevious(RawType aElem) {
233 MOZ_ASSERT(isInList());
234 setPreviousUnsafe(aElem);
235 }
236
237 /*
238 * Remove this element from the list which contains it. If this element is
239 * not currently part of a linked list, this method asserts.
240 */
241 void remove() {
242 MOZ_ASSERT(isInList());
Taking false branch
13
Loop condition is false. Exiting loop
243
244 mPrev->mNext = mNext;
245 mNext->mPrev = mPrev;
246 mNext = this;
247 mPrev = this;
248
249 Traits::exitList(this);
14
Calling 'LinkedListElementTraits::exitList'
25
Returning; memory was released via 1st parameter
250 }
251
252 /*
253 * Remove this element from the list containing it. Returns a pointer to the
254 * element that follows this element (before it was removed). This method
255 * asserts if the element does not belong to a list. Note: In a refcounted
256 * list, |this| may be destroyed.
257 */
258 RawType removeAndGetNext() {
259 RawType r = getNext();
260 remove();
261 return r;
262 }
263
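Because exitList() drops a reference in a refcounted list, the note above matters in practice: if the list holds the only reference, remove() can destroy |this|, which is the shape of the use-after-free flagged at the top of this report. A defensive sketch for iterating and removing in a LinkedList<RefPtr<T>>, reusing the hypothetical Observer list from earlier:

    // Keep each element alive across remove() by taking a strong reference first.
    RefPtr<Observer> obs = observers.getFirst();
    while (obs) {
      RefPtr<Observer> next = obs->getNext();
      obs->remove();  // exitList() may drop the list's reference; |obs| still owns one
      obs = std::move(next);
    }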
264 /*
265 * Remove this element from the list containing it. Returns a pointer to the
266 * previous element in the containing list (before the removal). This method
267 * asserts if the element does not belong to a list. Note: In a refcounted
268 * list, |this| may be destroyed.
269 */
270 RawType removeAndGetPrevious() {
271 RawType r = getPrevious();
272 remove();
273 return r;
274 }
275
276 /*
277 * Identical to remove(), but also asserts in debug builds that this element
278 * is in aList.
279 */
280 void removeFrom(const LinkedList<T>& aList) {
281 aList.assertContains(asT());
282 remove();
283 }
284
285 /*
286 * Return true if |this| is part of a linked list, and false otherwise.
287 */
288 bool isInList() const {
289 MOZ_ASSERT((mNext == this) == (mPrev == this));
290 return mNext != this;
291 }
292
293 private:
294 friend class LinkedList<T>;
295 friend struct detail::LinkedListElementTraits<T>;
296
297 enum class NodeKind { Normal, Sentinel };
298
299 explicit LinkedListElement(NodeKind nodeKind)
300 : mNext(this), mPrev(this), mIsSentinel(nodeKind == NodeKind::Sentinel) {}
301
302 /*
303 * Return |this| cast to T* if we're a normal node, or return nullptr if
304 * we're a sentinel node.
305 */
306 RawType asT() { return mIsSentinel ? nullptr : static_cast<RawType>(this); }
307 ConstRawType asT() const {
308 return mIsSentinel ? nullptr : static_cast<ConstRawType>(this);
309 }
310
311 /*
312 * Insert aElem after this element, but don't check that this element is in
313 * the list. This is called by LinkedList::insertFront().
314 */
315 void setNextUnsafe(RawType aElem) {
316 LinkedListElement* listElem = static_cast<LinkedListElement*>(aElem);
317 MOZ_RELEASE_ASSERT(!listElem->isInList());
318
319 listElem->mNext = this->mNext;
320 listElem->mPrev = this;
321 this->mNext->mPrev = listElem;
322 this->mNext = listElem;
323
324 Traits::enterList(aElem);
325 }
326
327 /*
328 * Insert aElem before this element, but don't check that this element is in
329 * the list. This is called by LinkedList::insertBack().
330 */
331 void setPreviousUnsafe(RawType aElem) {
332 LinkedListElement<T>* listElem = static_cast<LinkedListElement<T>*>(aElem);
333 MOZ_RELEASE_ASSERT(!listElem->isInList());
334
335 listElem->mNext = this;
336 listElem->mPrev = this->mPrev;
337 this->mPrev->mNext = listElem;
338 this->mPrev = listElem;
339
340 Traits::enterList(aElem);
341 }
342
343 /*
344 * Transfers the elements [aBegin, aEnd) before the "this" list element.
345 */
346 void transferBeforeUnsafe(LinkedListElement<T>& aBegin,
347 LinkedListElement<T>& aEnd) {
348 MOZ_RELEASE_ASSERT(!aBegin.mIsSentinel);
349 if (!aBegin.isInList() || !aEnd.isInList()) {
350 return;
351 }
352
353 auto otherPrev = aBegin.mPrev;
354
355 aBegin.mPrev = this->mPrev;
356 this->mPrev->mNext = &aBegin;
357 this->mPrev = aEnd.mPrev;
358 aEnd.mPrev->mNext = this;
359
360 // Patch the gap in the source list
361 otherPrev->mNext = &aEnd;
362 aEnd.mPrev = otherPrev;
363 }
364
365 /*
366 * Adjust mNext and mPrev for implementing move constructor and move
367 * assignment.
368 */
369 void adjustLinkForMove(LinkedListElement<T>&& aOther) {
370 if (!aOther.isInList()) {
371 mNext = this;
372 mPrev = this;
373 return;
374 }
375
376 if (!mIsSentinel) {
377 Traits::enterList(this);
378 }
379
380 MOZ_ASSERT(aOther.mNext->mPrev == &aOther);
381 MOZ_ASSERT(aOther.mPrev->mNext == &aOther);
382
383 /*
384 * Initialize |this| with |aOther|'s mPrev/mNext pointers, and adjust those
385 * elements to point to this one.
386 */
387 mNext = aOther.mNext;
388 mPrev = aOther.mPrev;
389
390 mNext->mPrev = this;
391 mPrev->mNext = this;
392
393 /*
394 * Adjust |aOther| so it doesn't think it's in a list. This makes it
395 * safely destructable.
396 */
397 aOther.mNext = &aOther;
398 aOther.mPrev = &aOther;
399
400 if (!mIsSentinel) {
401 Traits::exitList(&aOther);
402 }
403 }
404
405 LinkedListElement& operator=(const LinkedListElement<T>& aOther) = delete;
406 LinkedListElement(const LinkedListElement<T>& aOther) = delete;
407};
408
409template <typename T>
410class LinkedList {
411 private:
412 using Traits = typename detail::LinkedListElementTraits<T>;
413 using RawType = typename Traits::RawType;
414 using ConstRawType = typename Traits::ConstRawType;
415 using ClientType = typename Traits::ClientType;
416 using ConstClientType = typename Traits::ConstClientType;
417 using ElementType = LinkedListElement<T>*;
418 using ConstElementType = const LinkedListElement<T>*;
419
420 LinkedListElement<T> sentinel;
421
422 public:
423 template <typename Type, typename Element>
424 class Iterator {
425 Type mCurrent;
426
427 public:
428 using iterator_category = std::forward_iterator_tag;
429 using value_type = T;
430 using difference_type = std::ptrdiff_t;
431 using pointer = T*;
432 using reference = T&;
433
434 explicit Iterator(Type aCurrent) : mCurrent(aCurrent) {}
435
436 Type operator*() const { return mCurrent; }
437
438 const Iterator& operator++() {
439 mCurrent = static_cast<Element>(mCurrent)->getNext();
440 return *this;
441 }
442
443 bool operator!=(const Iterator& aOther) const {
444 return mCurrent != aOther.mCurrent;
445 }
446 };
447
448 using const_iterator = Iterator<ConstRawType, ConstElementType>;
449 using iterator = Iterator<RawType, ElementType>;
450
451 LinkedList() : sentinel(LinkedListElement<T>::NodeKind::Sentinel) {}
452
453 LinkedList(LinkedList<T>&& aOther) : sentinel(std::move(aOther.sentinel)) {}
454
455 LinkedList& operator=(LinkedList<T>&& aOther) {
456 MOZ_ASSERT(isEmpty(),
457 "Assigning to a non-empty list leaks elements in that list!");
458 sentinel = std::move(aOther.sentinel);
459 return *this;
460 }
461
462 ~LinkedList() {
463# ifdef DEBUG
464 if (!isEmpty()) {
465 MOZ_CRASH_UNSAFE_PRINTF(
466 "%s has a buggy user: "
467 "it should have removed all this list's elements before "
468 "the list's destruction",
469 __PRETTY_FUNCTION__);
470 }
471# endif
472 }
473
474 /*
475 * Add aElem to the front of the list.
476 */
477 void insertFront(RawType aElem) {
478 /* Bypass setNext()'s this->isInList() assertion. */
479 sentinel.setNextUnsafe(aElem);
480 }
481
482 /*
483 * Add aElem to the back of the list.
484 */
485 void insertBack(RawType aElem) { sentinel.setPreviousUnsafe(aElem); }
486
487 /*
488 * Move all elements from another list to the back
489 */
490 void extendBack(LinkedList<T>&& aOther) {
491 MOZ_RELEASE_ASSERT(this != &aOther);
492 if (aOther.isEmpty()) {
493 return;
494 }
495 sentinel.transferBeforeUnsafe(**aOther.begin(), aOther.sentinel);
496 }
497
498 /*
499 * Move elements from another list to the specified position
500 */
501 void splice(size_t aDestinationPos, LinkedList<T>& aListFrom,
502 size_t aSourceStart, size_t aSourceLen) {
503 MOZ_RELEASE_ASSERT(this != &aListFrom);
504 if (aListFrom.isEmpty() || !aSourceLen) {
505 return;
506 }
507
508 const auto safeForward = [](LinkedList<T>& aList,
509 LinkedListElement<T>& aBegin,
510 size_t aPos) -> LinkedListElement<T>& {
511 auto* iter = &aBegin;
512 for (size_t i = 0; i < aPos; ++i, (iter = iter->mNext)) {
513 if (iter->mIsSentinel) {
514 break;
515 }
516 }
517 return *iter;
518 };
519
520 auto& sourceBegin =
521 safeForward(aListFrom, *aListFrom.sentinel.mNext, aSourceStart);
522 if (sourceBegin.mIsSentinel) {
523 return;
524 }
525 auto& sourceEnd = safeForward(aListFrom, sourceBegin, aSourceLen);
526 auto& destination = safeForward(*this, *sentinel.mNext, aDestinationPos);
527
528 destination.transferBeforeUnsafe(sourceBegin, sourceEnd);
529 }
530
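For concreteness, the splice() indices are element positions, and walking past the end of either list stops at its sentinel. A small usage sketch with the hypothetical Observer lists from earlier:

    mozilla::LinkedList<RefPtr<Observer>> a, b;
    // ... fill |a| and |b| ...
    // Move two elements of |b|, starting at its first element, so they end up
    // immediately before the element currently at position 1 of |a|.
    a.splice(/* aDestinationPos */ 1, b, /* aSourceStart */ 0, /* aSourceLen */ 2);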
531 /*
532 * Get the first element of the list, or nullptr if the list is empty.
533 */
534 RawType getFirst() { return sentinel.getNext(); }
535 ConstRawType getFirst() const { return sentinel.getNext(); }
536
537 /*
538 * Get the last element of the list, or nullptr if the list is empty.
539 */
540 RawType getLast() { return sentinel.getPrevious(); }
541 ConstRawType getLast() const { return sentinel.getPrevious(); }
542
543 /*
544 * Get and remove the first element of the list. If the list is empty,
545 * return nullptr.
546 */
547 ClientType popFirst() {
548 ClientType ret = sentinel.getNext();
549 if (ret) {
550 static_cast<LinkedListElement<T>*>(RawType(ret))->remove();
551 }
552 return ret;
553 }
554
555 /*
556 * Get and remove the last element of the list. If the list is empty,
557 * return nullptr.
558 */
559 ClientType popLast() {
560 ClientType ret = sentinel.getPrevious();
561 if (ret) {
562 static_cast<LinkedListElement<T>*>(RawType(ret))->remove();
563 }
564 return ret;
565 }
566
567 /*
568 * Return true if the list is empty, or false otherwise.
569 */
570 bool isEmpty() const { return !sentinel.isInList(); }
571
572 /**
573 * Returns whether the given element is in the list.
574 */
575 bool contains(ConstRawType aElm) const {
576 return std::find(begin(), end(), aElm) != end();
577 }
578
579 /*
580 * Remove all the elements from the list.
581 *
582 * This runs in time linear to the list's length, because we have to mark
583 * each element as not in the list.
584 */
585 void clear() {
586 while (popFirst()) {
587 }
588 }
589
590 /**
591 * Return the number of elements in the list.
592 */
593 size_t length() const { return std::distance(begin(), end()); }
594
595 /*
596 * Allow range-based iteration:
597 *
598 * for (MyElementType* elt : myList) { ... }
599 */
600 Iterator<RawType, ElementType> begin() {
601 return Iterator<RawType, ElementType>(getFirst());
602 }
603 Iterator<ConstRawType, ConstElementType> begin() const {
604 return Iterator<ConstRawType, ConstElementType>(getFirst());
605 }
606 Iterator<RawType, ElementType> end() {
607 return Iterator<RawType, ElementType>(nullptr);
608 }
609 Iterator<ConstRawType, ConstElementType> end() const {
610 return Iterator<ConstRawType, ConstElementType>(nullptr);
611 }
612
613 /*
614 * Measures the memory consumption of the list excluding |this|. Note that
615 * it only measures the list elements themselves. If the list elements
616 * contain pointers to other memory blocks, those blocks must be measured
617 * separately during a subsequent iteration over the list.
618 */
619 size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
620 size_t n = 0;
621 ConstRawType t = getFirst();
622 while (t) {
623 n += aMallocSizeOf(t);
624 t = static_cast<const LinkedListElement<T>*>(t)->getNext();
625 }
626 return n;
627 }
628
629 /*
630 * Like sizeOfExcludingThis(), but measures |this| as well.
631 */
632 size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
633 return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf);
634 }
635
636 /*
637 * In a debug build, make sure that the list is sane (no cycles, consistent
638 * mNext/mPrev pointers, only one sentinel). Has no effect in release builds.
639 */
640 void debugAssertIsSane() const {
641# ifdef DEBUG
642 const LinkedListElement<T>* slow;
643 const LinkedListElement<T>* fast1;
644 const LinkedListElement<T>* fast2;
645
646 /*
647 * Check for cycles in the forward singly-linked list using the
648 * tortoise/hare algorithm.
649 */
650 for (slow = sentinel.mNext, fast1 = sentinel.mNext->mNext,
651 fast2 = sentinel.mNext->mNext->mNext;
652 slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel;
653 slow = slow->mNext, fast1 = fast2->mNext, fast2 = fast1->mNext) {
654 MOZ_ASSERT(slow != fast1);
655 MOZ_ASSERT(slow != fast2);
656 }
657
658 /* Check for cycles in the backward singly-linked list. */
659 for (slow = sentinel.mPrev, fast1 = sentinel.mPrev->mPrev,
660 fast2 = sentinel.mPrev->mPrev->mPrev;
661 slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel;
662 slow = slow->mPrev, fast1 = fast2->mPrev, fast2 = fast1->mPrev) {
663 MOZ_ASSERT(slow != fast1);
664 MOZ_ASSERT(slow != fast2);
665 }
666
667 /*
668 * Check that |sentinel| is the only node in the list with
669 * mIsSentinel == true.
670 */
671 for (const LinkedListElement<T>* elem = sentinel.mNext; elem != &sentinel;
672 elem = elem->mNext) {
673 MOZ_ASSERT(!elem->mIsSentinel);
674 }
675
676 /* Check that the mNext/mPrev pointers match up. */
677 const LinkedListElement<T>* prev = &sentinel;
678 const LinkedListElement<T>* cur = sentinel.mNext;
679 do {
680 MOZ_ASSERT(cur->mPrev == prev)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(cur->mPrev == prev)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(cur->mPrev == prev))), 0)
)) { do { } while (false); MOZ_ReportAssertionFailure("cur->mPrev == prev"
, "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/LinkedList.h"
, 680); AnnotateMozCrashReason("MOZ_ASSERT" "(" "cur->mPrev == prev"
")"); do { *((volatile int*)__null) = 680; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
681 MOZ_ASSERT(prev->mNext == cur)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(prev->mNext == cur)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(prev->mNext == cur))), 0)
)) { do { } while (false); MOZ_ReportAssertionFailure("prev->mNext == cur"
, "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/LinkedList.h"
, 681); AnnotateMozCrashReason("MOZ_ASSERT" "(" "prev->mNext == cur"
")"); do { *((volatile int*)__null) = 681; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
682
683 prev = cur;
684 cur = cur->mNext;
685 } while (cur != &sentinel);
686# endif /* ifdef DEBUG */
687 }
688
689 private:
690 friend class LinkedListElement<T>;
691
692 void assertContains(const RawType aValue) const {
693# ifdef DEBUG1
694 for (ConstRawType elem = getFirst(); elem; elem = elem->getNext()) {
695 if (elem == aValue) {
696 return;
697 }
698 }
699 MOZ_CRASH("element wasn't found in this list!")do { do { } while (false); MOZ_ReportCrash("" "element wasn't found in this list!"
, "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/LinkedList.h"
, 699); AnnotateMozCrashReason("MOZ_CRASH(" "element wasn't found in this list!"
")"); do { *((volatile int*)__null) = 699; __attribute__((nomerge
)) ::abort(); } while (false); } while (false)
;
700# endif
701 }
702
703 LinkedList& operator=(const LinkedList<T>& aOther) = delete;
704 LinkedList(const LinkedList<T>& aOther) = delete;
705};
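The slow/fast1/fast2 loops in debugAssertIsSane() above are a three-pointer variant of the classic tortoise-and-hare cycle check: the slow pointer advances one link per iteration while the fast pointers together advance two, so they can only collide if the chain loops back on itself. A minimal, self-contained sketch of the same idea (illustrative only, not part of LinkedList.h; Node and hasCycle are made-up names):

struct Node {
  Node* next = nullptr;
};

// Returns true if the chain starting at |head| contains a cycle.
bool hasCycle(const Node* head) {
  const Node* slow = head;
  const Node* fast = head;
  while (fast && fast->next) {
    slow = slow->next;        // one step
    fast = fast->next->next;  // two steps
    if (slow == fast) {
      return true;            // the pointers can only meet inside a loop
    }
  }
  return false;               // fast reached the end, so the chain is acyclic
}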
706
707template <typename T>
708size_t RangeSizeEstimate(const LinkedList<T>&) {
709 return 0;
710}
711
712template <typename T>
713inline void ImplCycleCollectionUnlink(LinkedList<RefPtr<T>>& aField) {
714 aField.clear();
715}
716
717template <typename T>
718inline void ImplCycleCollectionTraverse(
719 nsCycleCollectionTraversalCallback& aCallback,
720 LinkedList<RefPtr<T>>& aField, const char* aName, uint32_t aFlags = 0) {
721 typedef typename detail::LinkedListElementTraits<T> Traits;
722 typedef typename Traits::RawType RawType;
723 for (RawType element : aField) {
724 // RefPtr is stored as a raw pointer in LinkedList.
725 // So instead of creating a new RefPtr from the raw
726 // pointer (which is not allowed), we simply call
 727     // CycleCollectionNoteChild on the raw pointer.
728 CycleCollectionNoteChild(aCallback, element, aName, aFlags);
729 }
730}
731
732template <typename T>
733class AutoCleanLinkedList : public LinkedList<T> {
734 private:
735 using Traits = detail::LinkedListElementTraits<T>;
736 using ClientType = typename detail::LinkedListElementTraits<T>::ClientType;
737
738 public:
739 AutoCleanLinkedList() = default;
740 AutoCleanLinkedList(AutoCleanLinkedList&&) = default;
741 ~AutoCleanLinkedList() { clear(); }
742
743 AutoCleanLinkedList& operator=(AutoCleanLinkedList&& aOther) = default;
744
745 void clear() {
746 while (ClientType element = this->popFirst()) {
747 Traits::cleanElement(element);
748 }
749 }
750};
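A hedged usage sketch of AutoCleanLinkedList (the Job class and Example function are illustrative, not from the tree): the destructor calls clear(), which pops every element and hands it to Traits::cleanElement, so heap-allocated elements inserted into the list are released automatically when the list goes out of scope.

#include "mozilla/LinkedList.h"

class Job : public mozilla::LinkedListElement<Job> {
 public:
  explicit Job(int aId) : mId(aId) {}
  int mId;
};

void Example() {
  mozilla::AutoCleanLinkedList<Job> jobs;
  jobs.insertBack(new Job(1));
  jobs.insertBack(new Job(2));
  // No manual cleanup: when |jobs| is destroyed it calls clear(), which pops
  // each element and lets the element traits release it.
}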
751
752} /* namespace mozilla */
753
754#endif /* __cplusplus */
755
756#endif /* mozilla_LinkedList_h */

/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/RefCounted.h

1/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2/* vim: set ts=8 sts=2 et sw=2 tw=80: */
3/* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7/* CRTP refcounting templates. Do not use unless you are an Expert. */
8
9#ifndef mozilla_RefCounted_h
10#define mozilla_RefCounted_h
11
12#include <utility>
13#include <type_traits>
14
15#include "mozilla/AlreadyAddRefed.h"
16#include "mozilla/Assertions.h"
17#include "mozilla/Atomics.h"
18#include "mozilla/Attributes.h"
19#include "mozilla/RefCountType.h"
20
21#ifdef __wasi__
22# include "mozilla/WasiAtomic.h"
23#else
24# include <atomic>
25#endif // __wasi__
26
27#if defined(MOZ_SUPPORT_LEAKCHECKING1) && defined(NS_BUILD_REFCNT_LOGGING1)
28# define MOZ_REFCOUNTED_LEAK_CHECKING
29#endif
30
31namespace mozilla {
32
33/**
34 * RefCounted<T> is a sort of a "mixin" for a class T. RefCounted
35 * manages, well, refcounting for T, and because RefCounted is
36 * parameterized on T, RefCounted<T> can call T's destructor directly.
37 * This means T doesn't need to have a virtual dtor and so doesn't
38 * need a vtable.
39 *
40 * RefCounted<T> is created with refcount == 0. Newly-allocated
41 * RefCounted<T> must immediately be assigned to a RefPtr to make the
42 * refcount > 0. It's an error to allocate and free a bare
43 * RefCounted<T>, i.e. outside of the RefPtr machinery. Attempts to
44 * do so will abort DEBUG builds.
45 *
46 * Live RefCounted<T> have refcount > 0. The lifetime (refcounts) of
47 * live RefCounted<T> are controlled by RefPtr<T> and
48 * RefPtr<super/subclass of T>.  Upon a transition from refcount==1
49 * to 0, the RefCounted<T> "dies" and is destroyed. The "destroyed"
50 * state is represented in DEBUG builds by refcount==0xffffdead. This
51 * state distinguishes use-before-ref (refcount==0) from
52 * use-after-destroy (refcount==0xffffdead).
53 *
54 * Note that when deriving from RefCounted or AtomicRefCounted, you
55 * should add MOZ_DECLARE_REFCOUNTED_TYPENAME(ClassName) to the public
56 * section of your class, where ClassName is the name of your class.
57 */
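As a hedged illustration of the comment above (MyCache and Example are made-up names, not types from the tree), a typical user of RefCounted looks like this:

#include "mozilla/RefCounted.h"
#include "mozilla/RefPtr.h"

class MyCache : public mozilla::RefCounted<MyCache> {
 public:
  MOZ_DECLARE_REFCOUNTED_TYPENAME(MyCache)
  MyCache() = default;
};

void Example() {
  // The object starts with refcount == 0, so the result of |new| must go
  // straight into a RefPtr, which bumps the count to 1.
  RefPtr<MyCache> cache = new MyCache();
  // When the last RefPtr is destroyed the count returns to 0 and the object
  // deletes itself; in DEBUG builds the count is set to 0xffffdead first.
}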
58namespace detail {
59const MozRefCountType DEAD = 0xffffdead;
60
61#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
62// When this header is included in SpiderMonkey code, NS_LogAddRef and
63// NS_LogRelease are not available. To work around this, we call these
64// functions through a function pointer set by SetLeakCheckingFunctions.
65// Note: these are globals because GCC on Linux reports undefined-reference
66// errors when they're static members of the RefCountLogger class.
67using LogAddRefFunc = void (*)(void* aPtr, MozRefCountType aNewRefCnt,
68 const char* aTypeName, uint32_t aClassSize);
69using LogReleaseFunc = void (*)(void* aPtr, MozRefCountType aNewRefCnt,
70 const char* aTypeName);
71extern MFBT_DATA__attribute__((weak)) __attribute__((visibility("default"))) LogAddRefFunc gLogAddRefFunc;
72extern MFBT_DATA__attribute__((weak)) __attribute__((visibility("default"))) LogReleaseFunc gLogReleaseFunc;
73extern MFBT_DATA__attribute__((weak)) __attribute__((visibility("default"))) size_t gNumStaticCtors;
74extern MFBT_DATA__attribute__((weak)) __attribute__((visibility("default"))) const char* gLastStaticCtorTypeName;
75#endif
76
77// When building code that gets compiled into Gecko, try to use the
78// trace-refcount leak logging facilities.
79class RefCountLogger {
80 public:
81 // Called by `RefCounted`-like classes to log a successful AddRef call in the
82 // Gecko leak-logging system. This call is a no-op outside of Gecko. Should be
 83   // called after incrementing the reference count.
84 template <class T>
85 static void logAddRef(const T* aPointer, MozRefCountType aRefCount) {
86#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
87 const void* pointer = aPointer;
88 const char* typeName = aPointer->typeName();
89 uint32_t typeSize = aPointer->typeSize();
90 if (gLogAddRefFunc) {
91 gLogAddRefFunc(const_cast<void*>(pointer), aRefCount, typeName, typeSize);
92 } else {
93 gNumStaticCtors++;
94 gLastStaticCtorTypeName = typeName;
95 }
96#endif
97 }
98
99#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
100 static MFBT_API__attribute__((weak)) __attribute__((visibility("default"))) void SetLeakCheckingFunctions(LogAddRefFunc aLogAddRefFunc,
101 LogReleaseFunc aLogReleaseFunc);
102#endif
103
104 // Created by `RefCounted`-like classes to log a successful Release call in
105 // the Gecko leak-logging system. The constructor should be invoked before the
106 // refcount is decremented to avoid invoking `typeName()` with a zero
107 // reference count. This call is a no-op outside of Gecko.
108 class MOZ_STACK_CLASS ReleaseLogger final {
109 public:
110 template <class T>
111 explicit ReleaseLogger(const T* aPointer)
112#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
113 : mPointer(aPointer),
114 mTypeName(aPointer->typeName())
115#endif
116 {
117 }
118
119 void logRelease(MozRefCountType aRefCount) {
120#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
121 MOZ_ASSERT(aRefCount != DEAD)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(aRefCount != DEAD)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(aRefCount != DEAD))), 0))) {
do { } while (false); MOZ_ReportAssertionFailure("aRefCount != DEAD"
, "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/RefCounted.h"
, 121); AnnotateMozCrashReason("MOZ_ASSERT" "(" "aRefCount != DEAD"
")"); do { *((volatile int*)__null) = 121; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
122 if (gLogReleaseFunc) {
123 gLogReleaseFunc(const_cast<void*>(mPointer), aRefCount, mTypeName);
124 } else {
125 gNumStaticCtors++;
126 gLastStaticCtorTypeName = mTypeName;
127 }
128#endif
129 }
130
131#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
132 const void* mPointer;
133 const char* mTypeName;
134#endif
135 };
136};
137
138// This is used by WeakPtr.h as well as by this file.
139enum RefCountAtomicity { AtomicRefCount, NonAtomicRefCount };
140
141template <typename T, RefCountAtomicity Atomicity>
142class RC {
143 public:
144 explicit RC(T aCount) : mValue(aCount) {}
145
146 RC(const RC&) = delete;
147 RC& operator=(const RC&) = delete;
148 RC(RC&&) = delete;
149 RC& operator=(RC&&) = delete;
150
151 T operator++() { return ++mValue; }
152 T operator--() { return --mValue; }
153
154#ifdef DEBUG1
155 void operator=(const T& aValue) { mValue = aValue; }
156#endif
157
158 operator T() const { return mValue; }
159
160 private:
161 T mValue;
162};
163
164template <typename T>
165class RC<T, AtomicRefCount> {
166 public:
167 explicit RC(T aCount) : mValue(aCount) {}
168
169 RC(const RC&) = delete;
170 RC& operator=(const RC&) = delete;
171 RC(RC&&) = delete;
172 RC& operator=(RC&&) = delete;
173
174 T operator++() {
175 // Memory synchronization is not required when incrementing a
176 // reference count. The first increment of a reference count on a
177 // thread is not important, since the first use of the object on a
178 // thread can happen before it. What is important is the transfer
179 // of the pointer to that thread, which may happen prior to the
180 // first increment on that thread. The necessary memory
181 // synchronization is done by the mechanism that transfers the
182 // pointer between threads.
183 return mValue.fetch_add(1, std::memory_order_relaxed) + 1;
184 }
185
186 T operator--() {
187 // Since this may be the last release on this thread, we need
188 // release semantics so that prior writes on this thread are visible
189 // to the thread that destroys the object when it reads mValue with
190 // acquire semantics.
191 T result = mValue.fetch_sub(1, std::memory_order_release) - 1;
192 if (result == 0) {
193 // We're going to destroy the object on this thread, so we need
194 // acquire semantics to synchronize with the memory released by
195 // the last release on other threads, that is, to ensure that
196 // writes prior to that release are now visible on this thread.
197#if defined(MOZ_TSAN) || defined(__wasi__)
198 // TSan doesn't understand std::atomic_thread_fence, so in order
199 // to avoid a false positive for every time a refcounted object
200 // is deleted, we replace the fence with an atomic operation.
201 mValue.load(std::memory_order_acquire);
202#else
203 std::atomic_thread_fence(std::memory_order_acquire);
204#endif
205 }
206 return result;
207 }
208
209#ifdef DEBUG1
210 // This method is only called in debug builds, so we're not too concerned
211 // about its performance.
212 void operator=(const T& aValue) {
213 mValue.store(aValue, std::memory_order_seq_cst);
214 }
215#endif
216
217 operator T() const {
218 // Use acquire semantics since we're not sure what the caller is
219 // doing.
220 return mValue.load(std::memory_order_acquire);
221 }
222
223 T IncrementIfNonzero() {
224 // This can be a relaxed load as any write of 0 that we observe will leave
225 // the field in a permanently zero (or `DEAD`) state (so a "stale" read of 0
226 // is fine), and any other value is confirmed by the CAS below.
227 //
228    // This roughly matches Rust's Arc::upgrade implementation as of Rust 1.49.0.
229 T prev = mValue.load(std::memory_order_relaxed);
230 while (prev != 0) {
231 MOZ_ASSERT(prev != detail::DEAD,do { static_assert( mozilla::detail::AssertionConditionType<
decltype(prev != detail::DEAD)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(prev != detail::DEAD))), 0))
) { do { } while (false); MOZ_ReportAssertionFailure("prev != detail::DEAD"
" (" "Cannot IncrementIfNonzero if marked as dead!" ")", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/RefCounted.h"
, 232); AnnotateMozCrashReason("MOZ_ASSERT" "(" "prev != detail::DEAD"
") (" "Cannot IncrementIfNonzero if marked as dead!" ")"); do
{ *((volatile int*)__null) = 232; __attribute__((nomerge)) ::
abort(); } while (false); } } while (false)
232 "Cannot IncrementIfNonzero if marked as dead!")do { static_assert( mozilla::detail::AssertionConditionType<
decltype(prev != detail::DEAD)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(prev != detail::DEAD))), 0))
) { do { } while (false); MOZ_ReportAssertionFailure("prev != detail::DEAD"
" (" "Cannot IncrementIfNonzero if marked as dead!" ")", "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/RefCounted.h"
, 232); AnnotateMozCrashReason("MOZ_ASSERT" "(" "prev != detail::DEAD"
") (" "Cannot IncrementIfNonzero if marked as dead!" ")"); do
{ *((volatile int*)__null) = 232; __attribute__((nomerge)) ::
abort(); } while (false); } } while (false)
;
233 // TODO: It may be possible to use relaxed success ordering here?
234 if (mValue.compare_exchange_weak(prev, prev + 1,
235 std::memory_order_acquire,
236 std::memory_order_relaxed)) {
237 return prev + 1;
238 }
239 }
240 return 0;
241 }
242
243 private:
244 std::atomic<T> mValue;
245};
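The memory-ordering comments in the atomic specialization above describe a standard pattern: a relaxed increment, a release decrement paired with an acquire fence when the count hits zero, and a CAS loop for "increment only if still alive". Below is a standalone sketch of that pattern using plain std::atomic (ToyRefCount is an illustrative name, not the Mozilla class):

#include <atomic>
#include <cstdint>

struct ToyRefCount {
  std::atomic<uint32_t> mValue{0};

  // Increment: relaxed suffices; the handoff of the pointer between threads
  // already provides the needed synchronization.
  uint32_t AddRef() { return mValue.fetch_add(1, std::memory_order_relaxed) + 1; }

  // Decrement: release publishes this thread's prior writes; if we reach zero,
  // an acquire fence makes other threads' last writes visible before teardown.
  uint32_t Release() {
    uint32_t result = mValue.fetch_sub(1, std::memory_order_release) - 1;
    if (result == 0) {
      std::atomic_thread_fence(std::memory_order_acquire);
      // ... destroy the object here ...
    }
    return result;
  }

  // Increment only if the count is still nonzero (cf. IncrementIfNonzero).
  uint32_t IncrementIfNonzero() {
    uint32_t prev = mValue.load(std::memory_order_relaxed);
    while (prev != 0) {
      if (mValue.compare_exchange_weak(prev, prev + 1,
                                       std::memory_order_acquire,
                                       std::memory_order_relaxed)) {
        return prev + 1;
      }
    }
    return 0;
  }
};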
246
247template <typename T, RefCountAtomicity Atomicity>
248class RefCounted {
249 protected:
250 RefCounted() : mRefCnt(0) {}
251#ifdef DEBUG1
252 ~RefCounted() { MOZ_ASSERT(mRefCnt == detail::DEAD)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(mRefCnt == detail::DEAD)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(mRefCnt == detail::DEAD))), 0
))) { do { } while (false); MOZ_ReportAssertionFailure("mRefCnt == detail::DEAD"
, "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/RefCounted.h"
, 252); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mRefCnt == detail::DEAD"
")"); do { *((volatile int*)__null) = 252; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
; }
253#endif
254
255 public:
256 // Compatibility with RefPtr.
257 void AddRef() const {
258 // Note: this method must be thread safe for AtomicRefCounted.
259 MOZ_ASSERT(int32_t(mRefCnt) >= 0)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(int32_t(mRefCnt) >= 0)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(int32_t(mRefCnt) >= 0))),
0))) { do { } while (false); MOZ_ReportAssertionFailure("int32_t(mRefCnt) >= 0"
, "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/RefCounted.h"
, 259); AnnotateMozCrashReason("MOZ_ASSERT" "(" "int32_t(mRefCnt) >= 0"
")"); do { *((volatile int*)__null) = 259; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
260 MozRefCountType cnt = ++mRefCnt;
261 detail::RefCountLogger::logAddRef(static_cast<const T*>(this), cnt);
262 }
263
264 void Release() const {
265 // Note: this method must be thread safe for AtomicRefCounted.
266 MOZ_ASSERT(int32_t(mRefCnt) > 0)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(int32_t(mRefCnt) > 0)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(int32_t(mRefCnt) > 0))), 0
))) { do { } while (false); MOZ_ReportAssertionFailure("int32_t(mRefCnt) > 0"
, "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/RefCounted.h"
, 266); AnnotateMozCrashReason("MOZ_ASSERT" "(" "int32_t(mRefCnt) > 0"
")"); do { *((volatile int*)__null) = 266; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
16
Assuming the condition is true
17
Taking false branch
18
Loop condition is false. Exiting loop
267 detail::RefCountLogger::ReleaseLogger logger(static_cast<const T*>(this));
268 MozRefCountType cnt = --mRefCnt;
269 // Note: it's not safe to touch |this| after decrementing the refcount,
270    // except as noted below.
271 logger.logRelease(cnt);
272 if (0 == cnt) {
19
Assuming 'cnt' is equal to 0
20
Taking true branch
273 // Because we have atomically decremented the refcount above, only
274 // one thread can get a 0 count here, so as long as we can assume that
275 // everything else in the system is accessing this object through
276 // RefPtrs, it's safe to access |this| here.
277#ifdef DEBUG1
278 mRefCnt = detail::DEAD;
279#endif
280 delete static_cast<const T*>(this);
21
Calling 'operator delete'
23
Returning from 'operator delete'
281 }
282 }
283
284 using HasThreadSafeRefCnt =
285 std::integral_constant<bool, Atomicity == AtomicRefCount>;
286
287 // Compatibility with wtf::RefPtr.
288 void ref() { AddRef(); }
289 void deref() { Release(); }
290 MozRefCountType refCount() const { return mRefCnt; }
291 bool hasOneRef() const {
292 MOZ_ASSERT(mRefCnt > 0)do { static_assert( mozilla::detail::AssertionConditionType<
decltype(mRefCnt > 0)>::isValid, "invalid assertion condition"
); if ((__builtin_expect(!!(!(!!(mRefCnt > 0))), 0))) { do
{ } while (false); MOZ_ReportAssertionFailure("mRefCnt > 0"
, "/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/RefCounted.h"
, 292); AnnotateMozCrashReason("MOZ_ASSERT" "(" "mRefCnt > 0"
")"); do { *((volatile int*)__null) = 292; __attribute__((nomerge
)) ::abort(); } while (false); } } while (false)
;
293 return mRefCnt == 1;
294 }
295
296 private:
297 mutable RC<MozRefCountType, Atomicity> mRefCnt;
298};
299
300#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
301// Passing override for the optional argument marks the typeName and
302// typeSize functions defined by this macro as overrides.
303# define MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(T, ...)virtual const char* typeName() const ... { return "T"; } virtual
size_t typeSize() const ... { return sizeof(*this); }
\
304 virtual const char* typeName() const __VA_ARGS__ { return #T; } \
305 virtual size_t typeSize() const __VA_ARGS__ { return sizeof(*this); }
306#else
307# define MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(T, ...)virtual const char* typeName() const ... { return "T"; } virtual
size_t typeSize() const ... { return sizeof(*this); }
308#endif
309
310// Note that this macro is expanded unconditionally because it declares only
311// two small inline functions which will hopefully get eliminated by the linker
312// in non-leak-checking builds.
313#define MOZ_DECLARE_REFCOUNTED_TYPENAME(T)const char* typeName() const { return "T"; } size_t typeSize(
) const { return sizeof(*this); }
\
314 const char* typeName() const { return #T; } \
315 size_t typeSize() const { return sizeof(*this); }
316
317} // namespace detail
318
319template <typename T>
320class RefCounted : public detail::RefCounted<T, detail::NonAtomicRefCount> {
321 public:
322 ~RefCounted() {
323 static_assert(std::is_base_of<RefCounted, T>::value,
324 "T must derive from RefCounted<T>");
325 }
326};
327
328namespace external {
329
330/**
331 * AtomicRefCounted<T> is like RefCounted<T>, with an atomically updated
332 * reference counter.
333 *
334 * NOTE: Please do not use this class, use NS_INLINE_DECL_THREADSAFE_REFCOUNTING
335 * instead.
336 */
337template <typename T>
338class AtomicRefCounted
339 : public mozilla::detail::RefCounted<T, mozilla::detail::AtomicRefCount> {
340 public:
341 ~AtomicRefCounted() {
342 static_assert(std::is_base_of<AtomicRefCounted, T>::value,
343 "T must derive from AtomicRefCounted<T>");
344 }
345};
346
347} // namespace external
348
349} // namespace mozilla
350
351#endif // mozilla_RefCounted_h

/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/mozilla/cxxalloc.h

1/* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5#ifndef mozilla_cxxalloc_h
6#define mozilla_cxxalloc_h
7
8/*
9 * We implement the default operators new/delete as part of
10 * libmozalloc, replacing their definitions in libstdc++. The
11 * operator new* definitions in libmozalloc will never return a NULL
12 * pointer.
13 *
14 * Each operator new immediately below returns a pointer to memory
15 * that can be delete'd by any of
16 *
17 * (1) the matching infallible operator delete immediately below
18 * (2) the matching system |operator delete(void*, std::nothrow)|
19 * (3) the matching system |operator delete(void*) noexcept(false)|
20 *
21 * NB: these are declared |noexcept(false)|, though they will never
22 * throw that exception. This declaration is consistent with the rule
23 * that |::operator new() noexcept(false)| will never return NULL.
24 *
25 * NB: mozilla::fallible can be used instead of std::nothrow.
26 */
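A hedged sketch of what the comment above implies for callers (Widget and Example are illustrative names): the default operator new is infallible and never returns null, while the nothrow form is fallible and must be checked.

#include <new>

struct Widget { int value = 0; };

void Example() {
  // Infallible: the libmozalloc operator new aborts on OOM rather than
  // returning null, so a null check here is neither needed nor useful.
  Widget* a = new Widget();

  // Fallible: the nothrow form may return null, so the caller must check.
  Widget* b = new (std::nothrow) Widget();
  if (!b) {
    // handle allocation failure
  }

  delete a;
  delete b;  // deleting null is a no-op, so this is safe either way
}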
27
28#ifndef MOZALLOC_EXPORT_NEW__attribute__((always_inline)) inline
29# define MOZALLOC_EXPORT_NEW__attribute__((always_inline)) inline MFBT_API__attribute__((weak)) __attribute__((visibility("default")))
30#endif
31
32MOZALLOC_EXPORT_NEW__attribute__((always_inline)) inline void* operator new(size_t size) noexcept(false) {
33 return moz_xmalloc(size);
34}
35
36MOZALLOC_EXPORT_NEW__attribute__((always_inline)) inline void* operator new(size_t size,
37 const std::nothrow_t&) noexcept(true) {
38 return malloc_implmalloc(size);
39}
40
41MOZALLOC_EXPORT_NEW__attribute__((always_inline)) inline void* operator new[](size_t size) noexcept(false) {
42 return moz_xmalloc(size);
43}
44
45MOZALLOC_EXPORT_NEW__attribute__((always_inline)) inline void* operator new[](size_t size,
46 const std::nothrow_t&) noexcept(true) {
47 return malloc_implmalloc(size);
48}
49
50MOZALLOC_EXPORT_NEW__attribute__((always_inline)) inline void operator delete(void* ptr) noexcept(true) {
51 return free_implfree(ptr);
22
Memory is released
52}
53
54MOZALLOC_EXPORT_NEW__attribute__((always_inline)) inline void operator delete(void* ptr,
55 const std::nothrow_t&) noexcept(true) {
56 return free_implfree(ptr);
57}
58
59MOZALLOC_EXPORT_NEW__attribute__((always_inline)) inline void operator delete[](void* ptr) noexcept(true) {
60 return free_implfree(ptr);
61}
62
63MOZALLOC_EXPORT_NEW__attribute__((always_inline)) inline void operator delete[](
64 void* ptr, const std::nothrow_t&) noexcept(true) {
65 return free_implfree(ptr);
66}
67
68#if defined(XP_WIN)
69// We provide the global sized delete overloads unconditionally because the
70// MSVC runtime headers do, despite compiling with /Zc:sizedDealloc-
71MOZALLOC_EXPORT_NEW__attribute__((always_inline)) inline void operator delete(void* ptr,
72 size_t /*size*/) noexcept(true) {
73 return free_implfree(ptr);
74}
75
76MOZALLOC_EXPORT_NEW__attribute__((always_inline)) inline void operator delete[](void* ptr,
77 size_t /*size*/) noexcept(true) {
78 return free_implfree(ptr);
79}
80#endif
81
82#endif /* mozilla_cxxalloc_h */