Bug Summary

File: /root/firefox-clang/gfx/cairo/cairo/src/cairo-tor-scan-converter.c
Warning: line 1176, column 6
Access to field 'dy' results in a dereference of a null pointer (loaded from variable 'e')
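
The reported path runs through can_do_full_row() (events 21-28 in the listing below): the analyzer assumes active->head.next is NULL while the min_height recomputation loop scans the list, then re-reads the same pointer in the sentinel-terminated for loop and dereferences e->dy before e can ever equal &active->tail. Since active_list_reset() installs head/tail sentinels, head.next should never actually be NULL; the warning comes from the two loops using different termination conventions. A minimal sketch of the flagged pattern, with hypothetical names:

    struct node { struct node *next; int dy; };

    static int walk (struct node *head, struct node *tail)
    {
        struct node *n;

        /* Loop 1 treats the list as NULL-terminated... */
        for (n = head->next; n != NULL; n = n->next)
            ;

        /* ...loop 2 treats it as sentinel-terminated, so if the analyzer
         * assumed head->next == NULL above, n->dy dereferences NULL before
         * the n != tail comparison can ever fail. */
        for (n = head->next; n != tail; n = n->next)
            if (n->dy)
                return 1;
        return 0;
    }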

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name cairo-tor-scan-converter.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -ffp-contract=off -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/root/firefox-clang/obj-x86_64-pc-linux-gnu/gfx/cairo/cairo/src -fcoverage-compilation-dir=/root/firefox-clang/obj-x86_64-pc-linux-gnu/gfx/cairo/cairo/src -resource-dir /usr/lib/llvm-21/lib/clang/21 -include /root/firefox-clang/config/gcc_hidden.h -include /root/firefox-clang/obj-x86_64-pc-linux-gnu/mozilla-config.h -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/system_wrappers -U _FORTIFY_SOURCE -D _FORTIFY_SOURCE=2 -D _GLIBCXX_ASSERTIONS -D DEBUG=1 -D HAVE_FT_LOAD_SFNT_TABLE -D PACKAGE_VERSION="moz" -D PACKAGE_BUGREPORT="http://bugzilla.mozilla.org/" -D CAIRO_HAS_PTHREAD -D _GNU_SOURCE -D MOZ_TREE_PIXMAN -D SIZEOF_VOID_P=__SIZEOF_POINTER__ -D SIZEOF_INT=__SIZEOF_INT__ -D SIZEOF_LONG=__SIZEOF_LONG__ -D SIZEOF_LONG_LONG=__SIZEOF_LONG_LONG__ -D HAVE_UINT64_T -D HAVE_CXX11_ATOMIC_PRIMITIVES -D MOZ_HAS_MOZGLUE -D MOZILLA_INTERNAL_API -D IMPL_LIBXUL -D MOZ_SUPPORT_LEAKCHECKING -D STATIC_EXPORTABLE_JS_API -I /root/firefox-clang/gfx/cairo/cairo/src -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/gfx/cairo/cairo/src -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include/nspr -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include/nss -D MOZILLA_CLIENT -I /usr/include/freetype2 -I /usr/include/libpng16 -I /usr/include/freetype2 -I /usr/include/libpng16 -internal-isystem /usr/lib/llvm-21/lib/clang/21/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-error=tautological-type-limit-compare -Wno-range-loop-analysis -Wno-error=deprecated-declarations -Wno-error=array-bounds -Wno-error=free-nonheap-object -Wno-error=atomic-alignment -Wno-error=deprecated-builtins -Wno-psabi -Wno-error=builtin-macro-redefined -Wno-unknown-warning-option -Wno-enum-compare -Wno-int-to-pointer-cast -Wno-int-conversion -Wno-incompatible-pointer-types -Wno-sign-compare -Wno-type-limits -Wno-missing-field-initializers -Wno-conversion -Wno-narrowing -Wno-switch -Wno-unused -Wno-unused-variable -Wno-error=uninitialized -Wno-absolute-value -Wno-deprecated-register -Wno-incompatible-pointer-types -Wno-macro-redefined -Wno-shift-negative-value -Wno-tautological-compare -Wno-tautological-constant-out-of-range-compare -Wno-unreachable-code -ferror-limit 19 -fstrict-flex-arrays=1 -stack-protector 2 -fstack-clash-protection 
-ftrivial-auto-var-init=pattern -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2025-06-26-231904-1820671-1 -x c /root/firefox-clang/gfx/cairo/cairo/src/cairo-tor-scan-converter.c
1/* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
2/* glitter-paths - polygon scan converter
3 *
4 * Copyright (c) 2008 M Joonas Pihlaja
5 * Copyright (c) 2007 David Turner
6 *
7 * Permission is hereby granted, free of charge, to any person
8 * obtaining a copy of this software and associated documentation
9 * files (the "Software"), to deal in the Software without
10 * restriction, including without limitation the rights to use,
11 * copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following
14 * conditions:
15 *
16 * The above copyright notice and this permission notice shall be
17 * included in all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
21 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
23 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
25 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28/* This is the Glitter paths scan converter incorporated into cairo.
29 * The source is from commit 734c53237a867a773640bd5b64816249fa1730f8
30 * of
31 *
32 * https://gitweb.freedesktop.org/?p=users/joonas/glitter-paths
33 */
34/* Glitter-paths is a stand alone polygon rasteriser derived from
35 * David Turner's reimplementation of Tor Andersson's 15x17
36 * supersampling rasteriser from the Apparition graphics library. The
37 * main new feature here is cheaply choosing per-scan line between
38 * doing fully analytical coverage computation for an entire row at a
39 * time vs. using a supersampling approach.
40 *
41 * David Turner's code can be found at
42 *
43 * http://david.freetype.org/rasterizer-shootout/raster-comparison-20070813.tar.bz2
44 *
45 * In particular this file incorporates large parts of ftgrays_tor10.h
46 * from raster-comparison-20070813.tar.bz2
47 */
48/* Overview
49 *
50 * A scan converter's basic purpose is to take polygon edges and convert
51 * them into an RLE compressed A8 mask. This one works in two phases:
52 * gathering edges and generating spans.
53 *
54 * 1) As the user feeds the scan converter edges they are vertically
55 * clipped and bucketed into a _polygon_ data structure. The edges
56 * are also snapped from the user's coordinates to the subpixel grid
57 * coordinates used during scan conversion.
58 *
59 * user
60 * |
61 * | edges
62 * V
63 * polygon buckets
64 *
65 * 2) Generating spans works by performing a vertical sweep of pixel
66 * rows from top to bottom and maintaining an _active_list_ of edges
67 * that intersect the row. From the active list the fill rule
68 * determines which edges are the left and right edges of the start of
69 * each span, and their contribution is then accumulated into a pixel
70 * coverage list (_cell_list_) as coverage deltas. Once the coverage
71 * deltas of all edges are known we can form spans of constant pixel
72 * coverage by summing the deltas during a traversal of the cell list.
73 * At the end of a pixel row the cell list is sent to a coverage
74 * blitter for rendering to some target surface.
75 *
76 * The pixel coverages are computed by either supersampling the row
77 * and box filtering a mono rasterisation, or by computing the exact
78 * coverages of edges in the active list. The supersampling method is
79 * used whenever some edge starts or stops within the row or there are
80 * edge intersections in the row.
81 *
82 * polygon bucket for \
83 * current pixel row |
84 * | |
85 * | activate new edges | Repeat GRID_Y times if we
86 * V \ are supersampling this row,
87 * active list / or just once if we're computing
88 * | | analytical coverage.
89 * | coverage deltas |
90 * V |
91 * pixel coverage list /
92 * |
93 * V
94 * coverage blitter
95 */
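/* Illustrative sketch (not part of the original file): the per-row
 * control flow described above, using this file's real helpers
 * (can_do_full_row, full_row, sub_row) plus hypothetical
 * activate_edges()/blit() placeholders for the bucket and blitter
 * stages. Compiled out with #if 0 on purpose. */
#if 0
    for (row = ymin; row < ymax; row++) {
        activate_edges (active, polygon, row);   /* hypothetical: pull this row's bucket */
        if (can_do_full_row (active))
            full_row (active, coverages, mask);  /* one analytical pass */
        else
            for (sub = 0; sub < GRID_Y; sub++)   /* GRID_Y supersampled passes */
                sub_row (active, coverages, mask);
        blit (coverages, row);                   /* hypothetical: flush coverage deltas */
    }
#endif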
96#include "cairoint.h"
97#include "cairo-spans-private.h"
98#include "cairo-error-private.h"
99
100#include <stdlib.h>
101#include <string.h>
102#include <limits.h>
103#include <setjmp.h>
104
105/*-------------------------------------------------------------------------
106 * cairo specific config
107 */
108#define I static
109
110/* Prefer cairo's status type. */
111#define GLITTER_HAVE_STATUS_T 1
112#define GLITTER_STATUS_SUCCESS CAIRO_STATUS_SUCCESS
113#define GLITTER_STATUS_NO_MEMORY CAIRO_STATUS_NO_MEMORY
114typedef cairo_status_t glitter_status_t;
115
116/* The input coordinate scale and the rasterisation grid scales. */
117#define GLITTER_INPUT_BITS CAIRO_FIXED_FRAC_BITS
118#define GRID_X_BITS CAIRO_FIXED_FRAC_BITS
119#define GRID_Y 15
120
121/* Set glitter up to use a cairo span renderer to do the coverage
122 * blitting. */
123struct pool;
124struct cell_list;
125
126/*-------------------------------------------------------------------------
127 * glitter-paths.h
128 */
129
130/* "Input scaled" numbers are fixed precision reals with multiplier
131 * 2**GLITTER_INPUT_BITS. Input coordinates are given to glitter as
132 * pixel scaled numbers. These get converted to the internal grid
133 * scaled numbers as soon as possible. Internal overflow is possible
134 * if GRID_X/Y inside glitter-paths.c is larger than
135 * 1<<GLITTER_INPUT_BITS. */
136#ifndef GLITTER_INPUT_BITS
137# define GLITTER_INPUT_BITS 8
138#endif
139#define GLITTER_INPUT_SCALE (1<<GLITTER_INPUT_BITS)
140typedef int glitter_input_scaled_t;
141
142#if !GLITTER_HAVE_STATUS_T
143typedef enum {
144 GLITTER_STATUS_SUCCESS = 0,
145 GLITTER_STATUS_NO_MEMORY
146} glitter_status_t;
147#endif
148
149#ifndef I
150# define I /*static*/
151#endif
152
153/* Opaque type for scan converting. */
154typedef struct glitter_scan_converter glitter_scan_converter_t;
155
156/* Reset a scan converter to accept polygon edges and set the clip box
157 * in pixels. Allocates O(ymax-ymin) bytes of memory. The clip box
158 * is set to integer pixel coordinates xmin <= x < xmax, ymin <= y <
159 * ymax. */
160I glitter_status_t
161glitter_scan_converter_reset(
162 glitter_scan_converter_t *converter,
163 int xmin, int ymin,
164 int xmax, int ymax);
165
166/* Render the polygon in the scan converter to the given A8 format
167 * image raster. Only the pixels accessible as pixels[y*stride+x] for
168 * x,y inside the clip box are written to, where xmin <= x < xmax,
169 * ymin <= y < ymax. The image is assumed to be clear on input.
170 *
171 * If nonzero_fill is true then the interior of the polygon is
172 * computed with the non-zero fill rule. Otherwise the even-odd fill
173 * rule is used.
174 *
175 * The scan converter must be reset or destroyed after this call. */
176
177/*-------------------------------------------------------------------------
178 * glitter-paths.c: Implementation internal types
179 */
180#include <stdlib.h>
181#include <string.h>
182#include <limits.h>
183
184/* All polygon coordinates are snapped onto a subsample grid. "Grid
185 * scaled" numbers are fixed precision reals with multiplier GRID_X or
186 * GRID_Y. */
187typedef int grid_scaled_t;
188typedef int grid_scaled_x_t;
189typedef int grid_scaled_y_t;
190
191/* Default x/y scale factors.
192 * You can either define GRID_X/Y_BITS to get a power-of-two scale
193 * or define GRID_X/Y separately. */
194#if !defined(GRID_X) && !defined(GRID_X_BITS)
195# define GRID_X_BITS 8
196#endif
197#if !defined(GRID_Y) && !defined(GRID_Y_BITS)
198# define GRID_Y 15
199#endif
200
201/* Use GRID_X/Y_BITS to define GRID_X/Y if they're available. */
202#ifdef GRID_X_BITS
203# define GRID_X (1 << GRID_X_BITS)
204#endif
205#ifdef GRID_Y_BITS
206# define GRID_Y (1 << GRID_Y_BITS)
207#endif
208
209/* The GRID_X_TO_INT_FRAC macro splits a grid scaled coordinate into
210 * integer and fractional parts. The integer part is floored. */
211#if defined(GRID_X_TO_INT_FRAC)
212 /* do nothing */
213#elif defined(GRID_X_BITS)
214# define GRID_X_TO_INT_FRAC(x, i, f) \
215 _GRID_TO_INT_FRAC_shift(x, i, f, GRID_X_BITS)
216#else
217# define GRID_X_TO_INT_FRAC(x, i, f) \
218 _GRID_TO_INT_FRAC_general(x, i, f, GRID_X)
220
221#define _GRID_TO_INT_FRAC_general(t, i, f, m) do { \
222 (i) = (t) / (m); \
223 (f) = (t) % (m); \
224 if ((f) < 0) { \
225 --(i); \
226 (f) += (m); \
227 } \
228} while (0)
229
230#define _GRID_TO_INT_FRAC_shift(t, i, f, b) do { \
231 (f) = (t) & ((1 << (b)) - 1); \
232 (i) = (t) >> (b); \
233} while (0)
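
/* Illustration (not in the original source): a worked example of the
 * floored split. With m = GRID_X = 256 and t = -300, C's truncating
 * division gives i = -1, f = -44; the f < 0 fixup yields i = -2,
 * f = 212, and indeed -2*256 + 212 == -300. The shift variant computes
 * the same result directly for power-of-two scales: (-300) >> 8 == -2
 * and (-300) & 255 == 212. */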
234
235/* A grid area is a real in [0,1] scaled by 2*GRID_X*GRID_Y. We want
236 * to be able to represent exactly areas of subpixel trapezoids whose
237 * vertices are given in grid scaled coordinates. The scale factor
238 * comes from needing to accurately represent the area 0.5*dx*dy of a
239 * triangle with base dx and height dy in grid scaled numbers. */
240#define GRID_XY (2*GRID_X*GRID_Y) /* Unit area on the grid. */
241
242/* GRID_AREA_TO_ALPHA(area): map [0,GRID_XY] to [0,255]. */
243#if GRID_XY == 510
244# define GRID_AREA_TO_ALPHA(c) (((c)+1) >> 1)
245#elif GRID_XY == 255
246# define GRID_AREA_TO_ALPHA(c) (c)
247#elif GRID_XY == 64
248# define GRID_AREA_TO_ALPHA(c) (((c) << 2) | -(((c) & 0x40) >> 6))
249#elif GRID_XY == 128
250# define GRID_AREA_TO_ALPHA(c) ((((c) << 1) | -((c) >> 7)) & 255)
251#elif GRID_XY == 256
252# define GRID_AREA_TO_ALPHA(c) (((c) | -((c) >> 8)) & 255)
253#elif GRID_XY == 15
254# define GRID_AREA_TO_ALPHA(c) (((c) << 4) + (c))
255#elif GRID_XY == 2*256*15
256# define GRID_AREA_TO_ALPHA(c) (((c) + ((c)<<4) + 256) >> 9)
257#else
258# define GRID_AREA_TO_ALPHA(c) (((c)*255 + GRID_XY/2) / GRID_XY)
259#endif
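
/* Illustration (not in the original source): with this tree's defaults
 * (GRID_X = 256, GRID_Y = 15), GRID_XY is 2*256*15 == 7680, so the
 * (((c) + ((c)<<4) + 256) >> 9) branch is selected; it computes
 * c*17/512 with rounding, which equals c*255/7680 exactly. Endpoint
 * check: c = 0 maps to 256>>9 == 0, and c = 7680 maps to
 * (7680 + 122880 + 256)>>9 == 255. */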
260
261#define UNROLL3(x) x x x
262
263struct quorem {
264 int32_t quo;
265 int64_t rem;
266};
267
268/* Header for a chunk of memory in a memory pool. */
269struct _pool_chunk {
270 /* # bytes used in this chunk. */
271 size_t size;
272
273 /* # bytes total in this chunk */
274 size_t capacity;
275
276 /* Pointer to the previous chunk or %NULL if this is the sentinel
277 * chunk in the pool header. */
278 struct _pool_chunk *prev_chunk;
279
280 /* Actual data starts here. Well aligned even for 64 bit types. */
281 int64_t data;
282};
283
284/* The int64_t data member of _pool_chunk just exists to enforce alignment,
285 * it shouldn't be included in the allocated size for the struct. */
286#define SIZEOF_POOL_CHUNK (sizeof(struct _pool_chunk) - sizeof(int64_t))
287
288/* A memory pool. This is supposed to be embedded on the stack or
289 * within some other structure. It may optionally be followed by an
290 * embedded array from which requests are fulfilled until
291 * malloc needs to be called to allocate a first real chunk. */
292struct pool {
293 /* Chunk we're allocating from. */
294 struct _pool_chunk *current;
295
296 jmp_buf *jmp;
297
298 /* Free list of previously allocated chunks. All have >= default
299 * capacity. */
300 struct _pool_chunk *first_free;
301
302 /* The default capacity of a chunk. */
303 size_t default_capacity;
304
305 /* Header for the sentinel chunk. Directly following the pool
306 * struct should be some space for embedded elements from which
307 * the sentinel chunk allocates from. This is expressed as a char
308 * array so that the 'int64_t data' member of _pool_chunk isn't
309 * included. This way embedding struct pool in other structs works
310 * without wasting space. */
311 char sentinel[SIZEOF_POOL_CHUNK];
312};
313
314/* A polygon edge. */
315struct edge {
316 /* Next in y-bucket or active list. */
317 struct edge *next, *prev;
318
319 /* The clipped y of the top of the edge. */
320 grid_scaled_y_t ytop;
321
322 /* Number of subsample rows remaining to scan convert of this
323 * edge. */
324 grid_scaled_y_t height_left;
325
326 /* Original sign of the edge: +1 for downwards, -1 for upwards
327 * edges. */
328 int dir;
329 int cell;
330
331 /* Current x coordinate while the edge is on the active
332 * list. Initialised to the x coordinate of the top of the
333 * edge. The quotient is in grid_scaled_x_t units and the
334 * remainder is mod dy in grid_scaled_y_t units.*/
335 struct quorem x;
336
337 /* Advance of the current x when moving down a subsample line. */
338 struct quorem dxdy;
339
340 /* Advance of the current x when moving down a full pixel
341 * row. Only initialised when the height of the edge is large
342 * enough that there's a chance the edge could be stepped by a
343 * full row's worth of subsample rows at a time. */
344 struct quorem dxdy_full;
345
346 /* y2-y1 after orienting the edge downwards. */
347 int64_t dy;
348};
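
/* Illustration (not in the original source): the quorem pairs implement
 * an exact integer DDA. For dx = 10 over dy = 3 sub-rows, dxdy is
 * {quo = 3, rem = 1}; starting from x.rem = 0, three steps advance x by
 * 3, 3 and 4 (the remainder reaches dy and carries), totalling exactly
 * 10 with no accumulated rounding error. */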
349
350#define EDGE_Y_BUCKET_INDEX(y, ymin) (((y) - (ymin))/GRID_Y)
351
352/* A collection of sorted and vertically clipped edges of the polygon.
353 * Edges are moved from the polygon to an active list while scan
354 * converting. */
355struct polygon {
356 /* The vertical clip extents. */
357 grid_scaled_y_t ymin, ymax;
358
359 /* Array of edges all starting in the same bucket. An edge is put
360 * into bucket EDGE_BUCKET_INDEX(edge->ytop, polygon->ymin) when
361 * it is added to the polygon. */
362 struct edge **y_buckets;
363 struct edge *y_buckets_embedded[64];
364
365 struct {
366 struct pool base[1];
367 struct edge embedded[32];
368 } edge_pool;
369};
370
371/* A cell records the effect on pixel coverage of polygon edges
372 * passing through a pixel. It contains two accumulators of pixel
373 * coverage.
374 *
375 * Consider the effects of a polygon edge on the coverage of a pixel
376 * it intersects and that of the following one. The coverage of the
377 * following pixel is the height of the edge multiplied by the width
378 * of the pixel, and the coverage of the pixel itself is the area of
379 * the trapezoid formed by the edge and the right side of the pixel.
380 *
381 * +-----------------------+-----------------------+
382 * | | |
383 * | | |
384 * |_______________________|_______________________|
385 * | \...................|.......................|\
386 * | \..................|.......................| |
387 * | \.................|.......................| |
388 * | \....covered.....|.......................| |
389 * | \....area.......|.......................| } covered height
390 * | \..............|.......................| |
391 * |uncovered\.............|.......................| |
392 * | area \............|.......................| |
393 * |___________\...........|.......................|/
394 * | | |
395 * | | |
396 * | | |
397 * +-----------------------+-----------------------+
398 *
399 * Since the coverage of the following pixel will always be a multiple
400 * of the width of the pixel, we can store the height of the covered
401 * area instead. The coverage of the pixel itself is the total
402 * coverage minus the area of the uncovered area to the left of the
403 * edge. As it's faster to compute the uncovered area we only store
404 * that and subtract it from the total coverage later when forming
405 * spans to blit.
406 *
407 * The heights and areas are signed, with left edges of the polygon
408 * having positive sign and right edges having negative sign. When
409 * two edges intersect they swap their left/rightness so their
410 * contribution above and below the intersection point must be
411 * computed separately. */
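
/* Illustration (not in the original source): a left edge (sign +1)
 * crossing a full row of one pixel at fractional offsets
 * fx1 = fx2 = 128 (GRID_X = 256, GRID_Y = 15) adds
 * covered_height += 15 and uncovered_area += (128+128)*15 == 3840.
 * At blit time cover = 15*2*256 == 7680, and the pixel's own area is
 * cover - uncovered_area == 3840, i.e. exactly half coverage, which
 * GRID_AREA_TO_ALPHA maps to 128. */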
412struct cell {
413 struct cell *next;
414 int x;
415 int16_t uncovered_area;
416 int16_t covered_height;
417};
418
419/* A cell list represents the scan line sparsely as cells ordered by
420 * ascending x. It is geared towards scanning the cells in order
421 * using an internal cursor. */
422struct cell_list {
423 /* Sentinel nodes */
424 struct cell head, tail;
425
426 /* Cursor state for iterating through the cell list. */
427 struct cell *cursor, *rewind;
428
429 /* Cells in the cell list are owned by the cell list and are
430 * allocated from this pool. */
431 struct {
432 struct pool base[1];
433 struct cell embedded[32];
434 } cell_pool;
435};
436
437struct cell_pair {
438 struct cell *cell1;
439 struct cell *cell2;
440};
441
442/* The active list contains edges in the current scan line ordered by
443 * the x-coordinate of the intercept of the edge and the scan line. */
444struct active_list {
445 /* Leftmost edge on the current scan line. */
446 struct edge head, tail;
447
448 /* A lower bound on the height of the active edges is used to
449 * estimate how soon some active edge ends. We can't advance the
450 * scan conversion by a full pixel row if an edge ends somewhere
451 * within it. */
452 grid_scaled_y_t min_height;
453 int is_vertical;
454};
455
456struct glitter_scan_converter {
457 struct polygon polygon[1];
458 struct active_list active[1];
459 struct cell_list coverages[1];
460
461 cairo_half_open_span_t *spans;
462 cairo_half_open_span_t spans_embedded[64];
463
464 /* Clip box. */
465 grid_scaled_x_t xmin, xmax;
466 grid_scaled_y_t ymin, ymax;
467};
468
469static struct _pool_chunk *
470_pool_chunk_init(
471 struct _pool_chunk *p,
472 struct _pool_chunk *prev_chunk,
473 size_t capacity)
474{
475 p->prev_chunk = prev_chunk;
476 p->size = 0;
477 p->capacity = capacity;
478 return p;
479}
480
481static struct _pool_chunk *
482_pool_chunk_create(struct pool *pool, size_t size)
483{
484 struct _pool_chunk *p;
485
486 p = _cairo_malloc (SIZEOF_POOL_CHUNK + size);
487 if (unlikely (NULL == p))
488 longjmp (*pool->jmp, _cairo_error (CAIRO_STATUS_NO_MEMORY));
489
490 return _pool_chunk_init(p, pool->current, size);
491}
492
493static void
494pool_init(struct pool *pool,
495 jmp_buf *jmp,
496 size_t default_capacity,
497 size_t embedded_capacity)
498{
499 pool->jmp = jmp;
500 pool->current = (void*) pool->sentinel;
501 pool->first_free = NULL;
502 pool->default_capacity = default_capacity;
503 _pool_chunk_init(pool->current, NULL, embedded_capacity);
504}
505
506static void
507pool_fini(struct pool *pool)
508{
509 struct _pool_chunk *p = pool->current;
510 do {
511 while (NULL != p) {
512 struct _pool_chunk *prev = p->prev_chunk;
513 if (p != (void *) pool->sentinel)
514 free(p);
515 p = prev;
516 }
517 p = pool->first_free;
518 pool->first_free = NULL;
519 } while (NULL != p);
520}
521
522/* Satisfy an allocation by first allocating a new large enough chunk
523 * and adding it to the head of the pool's chunk list. This function
524 * is called as a fallback if pool_alloc() couldn't do a quick
525 * allocation from the current chunk in the pool. */
526static void *
527_pool_alloc_from_new_chunk(
528 struct pool *pool,
529 size_t size)
530{
531 struct _pool_chunk *chunk;
532 void *obj;
533 size_t capacity;
534
535 /* If the allocation is smaller than the default chunk size then
536 * try getting a chunk off the free list. Force alloc of a new
537 * chunk for large requests. */
538 capacity = size;
539 chunk = NULL;
540 if (size < pool->default_capacity) {
541 capacity = pool->default_capacity;
542 chunk = pool->first_free;
543 if (chunk) {
544 pool->first_free = chunk->prev_chunk;
545 _pool_chunk_init(chunk, pool->current, chunk->capacity);
546 }
547 }
548
549 if (NULL == chunk)
550 chunk = _pool_chunk_create (pool, capacity);
551 pool->current = chunk;
552
553 obj = ((unsigned char*)&chunk->data + chunk->size);
554 chunk->size += size;
555 return obj;
556}
557
558/* Allocate size bytes from the pool. The first allocated address
559 * returned from a pool is aligned to 8 bytes. Subsequent
560 * addresses will maintain alignment as long as multiples of 8 are
561 * allocated. Returns the address of a new memory area or %NULL on
562 * allocation failures. The pool retains ownership of the returned
563 * memory. */
564inline static void *
565pool_alloc (struct pool *pool, size_t size)
566{
567 struct _pool_chunk *chunk = pool->current;
568
569 if (size <= chunk->capacity - chunk->size) {
570 void *obj = ((unsigned char*)&chunk->data + chunk->size);
571 chunk->size += size;
572 return obj;
573 } else {
574 return _pool_alloc_from_new_chunk(pool, size);
575 }
576}
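
/* Illustrative usage sketch (hypothetical driver code, not part of this
 * file): allocation is a pointer bump in the common case, failure
 * longjmps to the caller's jmp_buf, and pool_reset() bulk-recycles
 * every chunk. Compiled out with #if 0 on purpose. */
#if 0
    jmp_buf jmp;
    struct { struct pool base[1]; struct cell embedded[32]; } p;

    if (setjmp (jmp))
        return CAIRO_STATUS_NO_MEMORY;  /* reached if a chunk malloc fails */

    pool_init (p.base, &jmp, 256*sizeof(struct cell), sizeof(p.embedded));
    struct cell *c = pool_alloc (p.base, sizeof(struct cell));
    pool_reset (p.base);                /* relinquish everything at once */
    pool_fini (p.base);
#endif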
577
578/* Relinquish all pool_alloced memory back to the pool. */
579static void
580pool_reset (struct pool *pool)
581{
582 /* Transfer all used chunks to the chunk free list. */
583 struct _pool_chunk *chunk = pool->current;
584 if (chunk != (void *) pool->sentinel) {
585 while (chunk->prev_chunk != (void *) pool->sentinel) {
586 chunk = chunk->prev_chunk;
587 }
588 chunk->prev_chunk = pool->first_free;
589 pool->first_free = pool->current;
590 }
591 /* Reset the sentinel as the current chunk. */
592 pool->current = (void *) pool->sentinel;
593 pool->current->size = 0;
594}
595
596/* Rewinds the cell list's cursor to the beginning. After rewinding
597 * we're good to cell_list_find() the cell at any x coordinate. */
598inline static void
599cell_list_rewind (struct cell_list *cells)
600{
601 cells->cursor = &cells->head;
602}
603
604inline static void
605cell_list_maybe_rewind (struct cell_list *cells, int x)
606{
607 if (x < cells->cursor->x) {
608 cells->cursor = cells->rewind;
609 if (x < cells->cursor->x)
610 cells->cursor = &cells->head;
611 }
612}
613
614inline static void
615cell_list_set_rewind (struct cell_list *cells)
616{
617 cells->rewind = cells->cursor;
618}
619
620static void
621cell_list_init(struct cell_list *cells, jmp_buf *jmp)
622{
623 pool_init(cells->cell_pool.base, jmp,
624 256*sizeof(struct cell),
625 sizeof(cells->cell_pool.embedded));
626 cells->tail.next = NULL;
627 cells->tail.x = INT_MAX;
628 cells->head.x = INT_MIN;
629 cells->head.next = &cells->tail;
630 cell_list_rewind (cells);
631}
632
633static void
634cell_list_fini(struct cell_list *cells)
635{
636 pool_fini (cells->cell_pool.base);
637}
638
639/* Empty the cell list. This is called at the start of every pixel
640 * row. */
641inline static void
642cell_list_reset (struct cell_list *cells)
643{
644 cell_list_rewind (cells);
645 cells->head.next = &cells->tail;
646 pool_reset (cells->cell_pool.base);
647}
648
649inline static struct cell *
650cell_list_alloc (struct cell_list *cells,
651 struct cell *tail,
652 int x)
653{
654 struct cell *cell;
655
656 cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
657 cell->next = tail->next;
658 tail->next = cell;
659 cell->x = x;
660 *(uint32_t *)&cell->uncovered_area = 0;
661
662 return cell;
663}
664
665/* Find a cell at the given x-coordinate. Returns %NULL if a new cell
666 * needed to be allocated but couldn't be. Cells must be found with
667 * non-decreasing x-coordinate until the cell list is rewound using
668 * cell_list_rewind(). Ownership of the returned cell is retained by
669 * the cell list. */
670inline static struct cell *
671cell_list_find (struct cell_list *cells, int x)
672{
673 struct cell *tail = cells->cursor;
674
675 if (tail->x == x)
676 return tail;
677
678 while (1) {
679 UNROLL3({
680 if (tail->next->x > x)
681 break;
682 tail = tail->next;
683 });
684 }
685
686 if (tail->x != x)
687 tail = cell_list_alloc (cells, tail, x);
688 return cells->cursor = tail;
689
690}
691
692/* Find two cells at x1 and x2. This is exactly equivalent
693 * to
694 *
695 * pair.cell1 = cell_list_find(cells, x1);
696 * pair.cell2 = cell_list_find(cells, x2);
697 *
698 * except with less function call overhead. */
699inline static struct cell_pair
700cell_list_find_pair(struct cell_list *cells, int x1, int x2)
701{
702 struct cell_pair pair;
703
704 pair.cell1 = cells->cursor;
705 while (1) {
706 UNROLL3({
707 if (pair.cell1->next->x > x1)
708 break;
709 pair.cell1 = pair.cell1->next;
710 });
711 }
712 if (pair.cell1->x != x1)
713 pair.cell1 = cell_list_alloc (cells, pair.cell1, x1);
714
715 pair.cell2 = pair.cell1;
716 while (1) {
717 UNROLL3({
718 if (pair.cell2->next->x > x2)
719 break;
720 pair.cell2 = pair.cell2->next;
721 });
722 }
723 if (pair.cell2->x != x2)
724 pair.cell2 = cell_list_alloc (cells, pair.cell2, x2);
725
726 cells->cursor = pair.cell2;
727 return pair;
728}
729
730/* Add a subpixel span covering [x1, x2) to the coverage cells. */
731inline static void
732cell_list_add_subspan(struct cell_list *cells,
733 grid_scaled_x_t x1,
734 grid_scaled_x_t x2)
735{
736 int ix1, fx1;
737 int ix2, fx2;
738
739 if (x1 == x2)
740 return;
741
742 GRID_X_TO_INT_FRAC(x1, ix1, fx1);
743 GRID_X_TO_INT_FRAC(x2, ix2, fx2);
744
745 if (ix1 != ix2) {
746 struct cell_pair p;
747 p = cell_list_find_pair(cells, ix1, ix2);
748 p.cell1->uncovered_area += 2*fx1;
749 ++p.cell1->covered_height;
750 p.cell2->uncovered_area -= 2*fx2;
751 --p.cell2->covered_height;
752 } else {
753 struct cell *cell = cell_list_find(cells, ix1);
754 cell->uncovered_area += 2*(fx1-fx2);
755 }
756}
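
/* Illustration (not in the original source): with GRID_X = 256, a
 * subspan from x1 = 300 to x2 = 800 splits as ix1 = 1, fx1 = 44 and
 * ix2 = 3, fx2 = 32. Cell 1 receives uncovered_area += 88 and
 * covered_height += 1; cell 3 receives the matching -64 and -1 deltas,
 * so summing cells left to right during blitting reconstructs the
 * span. */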
757
758inline static void full_step (struct edge *e)
759{
760 if (e->dy == 0)
761 return;
762
763 e->x.quo += e->dxdy_full.quo;
764 e->x.rem += e->dxdy_full.rem;
765 if (e->x.rem < 0) {
766 e->x.quo--;
767 e->x.rem += e->dy;
768 } else if (e->x.rem >= e->dy) {
769 ++e->x.quo;
770 e->x.rem -= e->dy;
771 }
772
773 e->cell = e->x.quo + (e->x.rem >= e->dy/2);
774}
775
776
777/* Adds the analytical coverage of an edge crossing the current pixel
778 * row to the coverage cells and advances the edge's x position to the
779 * following row.
780 *
781 * This function is only called when we know that during this pixel row:
782 *
783 * 1) The relative order of all edges on the active list doesn't
784 * change. In particular, no edges intersect within this row to pixel
785 * precision.
786 *
787 * 2) No new edges start in this row.
788 *
789 * 3) No existing edges end mid-row.
790 *
791 * This function depends on being called with all edges from the
792 * active list in the order they appear on the list (i.e. with
793 * non-decreasing x-coordinate.) */
794static void
795cell_list_render_edge(struct cell_list *cells,
796 struct edge *edge,
797 int sign)
798{
799 struct quorem x1, x2;
800 grid_scaled_x_t fx1, fx2;
801 int ix1, ix2;
802
803 x1 = edge->x;
804 full_step (edge);
805 x2 = edge->x;
806
807 /* Step back from the sample location (half-subrow) to the pixel origin */
808 if (edge->dy) {
809 x1.quo -= edge->dxdy.quo / 2;
810 x1.rem -= edge->dxdy.rem / 2;
811 if (x1.rem < 0) {
812 --x1.quo;
813 x1.rem += edge->dy;
814 } else if (x1.rem >= edge->dy) {
815 ++x1.quo;
816 x1.rem -= edge->dy;
817 }
818
819 x2.quo -= edge->dxdy.quo / 2;
820 x2.rem -= edge->dxdy.rem / 2;
821 if (x2.rem < 0) {
822 --x2.quo;
823 x2.rem += edge->dy;
824 } else if (x2.rem >= edge->dy) {
825 ++x2.quo;
826 x2.rem -= edge->dy;
827 }
828 }
829
830 GRID_X_TO_INT_FRAC(x1.quo, ix1, fx1);
831 GRID_X_TO_INT_FRAC(x2.quo, ix2, fx2);
832
833 cell_list_maybe_rewind(cells, MIN(ix1, ix2));
834
835 /* Edge is entirely within a column? */
836 if (ix1 == ix2) {
837 /* We always know that ix1 is >= the cell list cursor in this
838 * case due to the no-intersections precondition. */
839 struct cell *cell = cell_list_find(cells, ix1);
840 cell->covered_height += sign*GRID_Y;
841 cell->uncovered_area += sign*(fx1 + fx2)*GRID_Y;
842 return;
843 }
844
845 /* Orient the edge left-to-right. */
846 if (ix2 < ix1) {
847 struct quorem tx;
848 int t;
849
850 t = ix1;
851 ix1 = ix2;
852 ix2 = t;
853
854 t = fx1;
855 fx1 = fx2;
856 fx2 = t;
857
858 tx = x1;
859 x1 = x2;
860 x2 = tx;
861 }
862
863 /* Add coverage for all pixels [ix1,ix2] on this row crossed
864 * by the edge. */
865 {
866 struct cell_pair pair;
867 struct quorem y;
868 int64_t tmp, dx;
869 int y_last;
870
871 dx = (x2.quo - x1.quo) * edge->dy + (x2.rem - x1.rem);
872
873 tmp = (ix1 + 1) * GRID_X * edge->dy;
874 tmp -= x1.quo * edge->dy + x1.rem;
875 tmp *= GRID_Y;
876
877 y.quo = tmp / dx;
878 y.rem = tmp % dx;
879
880 /* When rendering a previous edge on the active list we may
881 * advance the cell list cursor past the leftmost pixel of the
882 * current edge even though the two edges don't intersect.
883 * e.g. consider two edges going down and rightwards:
884 *
885 * --\_+---\_+-----+-----+----
886 * \_ \_ | |
887 * | \_ | \_ | |
888 * | \_| \_| |
889 * | \_ \_ |
890 * ----+-----+-\---+-\---+----
891 *
892 * The left edge touches cells past the starting cell of the
893 * right edge. Fortunately such cases are rare.
894 */
895
896 pair = cell_list_find_pair(cells, ix1, ix1+1);
897 pair.cell1->uncovered_area += sign*y.quo*(GRID_X + fx1);
898 pair.cell1->covered_height += sign*y.quo;
899 y_last = y.quo;
900
901 if (ix1+1 < ix2) {
902 struct cell *cell = pair.cell2;
903 struct quorem dydx_full;
904
905 dydx_full.quo = GRID_Y * GRID_X * edge->dy / dx;
906 dydx_full.rem = GRID_Y * GRID_X * edge->dy % dx;
907
908 ++ix1;
909 do {
910 y.quo += dydx_full.quo;
911 y.rem += dydx_full.rem;
912 if (y.rem >= dx) {
913 y.quo++;
914 y.rem -= dx;
915 }
916
917 cell->uncovered_area += sign*(y.quo - y_last)*GRID_X;
918 cell->covered_height += sign*(y.quo - y_last);
919 y_last = y.quo;
920
921 ++ix1;
922 cell = cell_list_find(cells, ix1);
923 } while (ix1 != ix2);
924
925 pair.cell2 = cell;
926 }
927 pair.cell2->uncovered_area += sign*(GRID_Y - y_last)*fx2;
928 pair.cell2->covered_height += sign*(GRID_Y - y_last);
929 }
930}
931
932static void
933polygon_init (struct polygon *polygon, jmp_buf *jmp)
934{
935 polygon->ymin = polygon->ymax = 0;
936 polygon->y_buckets = polygon->y_buckets_embedded;
937 pool_init (polygon->edge_pool.base, jmp,
938 8192 - sizeof (struct _pool_chunk),
939 sizeof (polygon->edge_pool.embedded));
940}
941
942static void
943polygon_fini (struct polygon *polygon)
944{
945 if (polygon->y_buckets != polygon->y_buckets_embedded)
946 free (polygon->y_buckets);
947
948 pool_fini (polygon->edge_pool.base);
949}
950
951/* Empties the polygon of all edges. The polygon is then prepared to
952 * receive new edges and clip them to the vertical range
953 * [ymin,ymax). */
954static glitter_status_t
955polygon_reset (struct polygon *polygon,
956 grid_scaled_y_t ymin,
957 grid_scaled_y_t ymax)
958{
959 unsigned h = ymax - ymin;
960 unsigned num_buckets = EDGE_Y_BUCKET_INDEX(ymax + GRID_Y-1, ymin);
961
962 pool_reset(polygon->edge_pool.base);
963
964 if (unlikely (h > 0x7FFFFFFFU - GRID_Y))
965 goto bail_no_mem; /* even if you could, you wouldn't want to. */
966
967 if (polygon->y_buckets != polygon->y_buckets_embedded)
968 free (polygon->y_buckets);
969
970 polygon->y_buckets = polygon->y_buckets_embedded;
971 if (num_buckets > ARRAY_LENGTH (polygon->y_buckets_embedded)) {
972 polygon->y_buckets = _cairo_malloc_ab (num_buckets,
973 sizeof (struct edge *));
974 if (unlikely (NULL == polygon->y_buckets))
975 goto bail_no_mem;
976 }
977 memset (polygon->y_buckets, 0, num_buckets * sizeof (struct edge *));
978
979 polygon->ymin = ymin;
980 polygon->ymax = ymax;
981 return GLITTER_STATUS_SUCCESS;
982
983bail_no_mem:
984 polygon->ymin = 0;
985 polygon->ymax = 0;
986 return GLITTER_STATUS_NO_MEMORY;
987}
988
989static void
990_polygon_insert_edge_into_its_y_bucket(struct polygon *polygon,
991 struct edge *e)
992{
993 unsigned ix = EDGE_Y_BUCKET_INDEX(e->ytop, polygon->ymin);
994 struct edge **ptail = &polygon->y_buckets[ix];
995 e->next = *ptail;
996 *ptail = e;
997}
998
999static void
1000active_list_reset (struct active_list *active)
1001{
1002 active->head.height_left = INT_MAX;
1003 active->head.dy = 0;
1004 active->head.cell = INT_MIN;
1005 active->head.prev = NULL;
1006 active->head.next = &active->tail;
1007 active->tail.prev = &active->head;
1008 active->tail.next = NULL;
1009 active->tail.cell = INT_MAX;
1010 active->tail.height_left = INT_MAX;
1011 active->tail.dy = 0;
1012 active->min_height = 0;
1013 active->is_vertical = 1;
1014}
1015
1016static void
1017active_list_init(struct active_list *active)
1018{
1019 active_list_reset(active);
1020}
1021
1022/*
1023 * Merge two sorted edge lists.
1024 * Input:
1025 * - head_a: The head of the first list.
1026 * - head_b: The head of the second list; head_b cannot be NULL.
1027 * Output:
1028 * Returns the head of the merged list.
1029 *
1030 * Implementation notes:
1031 * To make it fast (in particular, to reduce to an insertion sort whenever
1032 * one of the two input lists only has a single element) we iterate through
1033 * a list until its head becomes greater than the head of the other list,
1034 * then we switch their roles. As soon as one of the two lists is empty, we
1035 * just attach the other one to the current list and exit.
1036 * Writes to memory are only needed to "switch" lists (as it also requires
1037 * attaching to the output list the list which we will be iterating next) and
1038 * to attach the last non-empty list.
1039 */
1040static struct edge *
1041merge_sorted_edges (struct edge *head_a, struct edge *head_b)
1042{
1043 struct edge *head, **next, *prev;
1044 int32_t x;
1045
1046 prev = head_a->prev;
1047 next = &head;
1048 if (head_a->cell <= head_b->cell) {
1049 head = head_a;
1050 } else {
1051 head = head_b;
1052 head_b->prev = prev;
1053 goto start_with_b;
1054 }
1055
1056 do {
1057 x = head_b->cell;
1058 while (head_a != NULL && head_a->cell <= x) {
1059 prev = head_a;
1060 next = &head_a->next;
1061 head_a = head_a->next;
1062 }
1063
1064 head_b->prev = prev;
1065 *next = head_b;
1066 if (head_a == NULL)
1067 return head;
1068
1069start_with_b:
1070 x = head_a->cell;
1071 while (head_b != NULL && head_b->cell <= x) {
1072 prev = head_b;
1073 next = &head_b->next;
1074 head_b = head_b->next;
1075 }
1076
1077 head_a->prev = prev;
1078 *next = head_a;
1079 if (head_b == NULL)
1080 return head;
1081 } while (1);
1082}
1083
1084/*
1085 * Sort (part of) a list.
1086 * Input:
1087 * - list: The list to be sorted; list cannot be NULL.
1088 * - limit: Recursion limit.
1089 * Output:
1090 * - head_out: The head of the sorted list containing the first 2^(level+1) elements of the
1091 * input list; if the input list has fewer elements, head_out will be a sorted list
1092 * containing all the elements of the input list.
1093 * Returns the head of the list of unprocessed elements (NULL if the sorted list contains
1094 * all the elements of the input list).
1095 *
1096 * Implementation notes:
1097 * Special case single element list, unroll/inline the sorting of the first two elements.
1098 * Some tail recursion is used since we iterate on the bottom-up solution of the problem
1099 * (we start with a small sorted list and keep merging other lists of the same size to it).
1100 */
1101static struct edge *
1102sort_edges (struct edge *list,
1103 unsigned int level,
1104 struct edge **head_out)
1105{
1106 struct edge *head_other, *remaining;
1107 unsigned int i;
1108
1109 head_other = list->next;
1110
1111 if (head_other == NULL) {
1112 *head_out = list;
1113 return NULL;
1114 }
1115
1116 remaining = head_other->next;
1117 if (list->cell <= head_other->cell) {
1118 *head_out = list;
1119 head_other->next = NULL;
1120 } else {
1121 *head_out = head_other;
1122 head_other->prev = list->prev;
1123 head_other->next = list;
1124 list->prev = head_other;
1125 list->next = NULL;
1126 }
1127
1128 for (i = 0; i < level && remaining; i++) {
1129 remaining = sort_edges (remaining, i, &head_other);
1130 *head_out = merge_sorted_edges (*head_out, head_other);
1131 }
1132
1133 return remaining;
1134}
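
/* Illustration (not in the original source): called with level ==
 * UINT_MAX from merge_unsorted_edges() below, the recursion consumes an
 * 8-edge list as sorted runs of 2, 2 and 4 elements, merging each run
 * into the growing head, so every edge takes part in O(log n) merges
 * without an explicit stack of lists. */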
1135
1136 static struct edge *
1137merge_unsorted_edges (struct edge *head, struct edge *unsorted)
1138{
1139 sort_edges (unsorted, UINT_MAX, &unsorted);
1140 return merge_sorted_edges (head, unsorted);
1141}
1142
1143/* Test if the edges on the active list can be safely advanced by a
1144 * full row without intersections or any edges ending. */
1145inline static int
1146can_do_full_row (struct active_list *active)
1147{
1148 const struct edge *e;
1149 int prev_x = INT_MIN;
1150
1151 /* Recomputes the minimum height of all edges on the active
1152 * list if we have been dropping edges. */
1153 if (active->min_height <= 0) {
  [21] Assuming field 'min_height' is <= 0
  [22] Taking true branch
1154 int min_height = INT_MAX;
1155 int is_vertical = 1;
1156
1157 e = active->head.next;
1158 while (NULL != e) {
  [23] Assuming 'e' is equal to NULL
  [24] Loop condition is false. Execution continues on line 1165
1159 if (e->height_left < min_height)
1160 min_height = e->height_left;
1161 is_vertical &= e->dy == 0;
1162 e = e->next;
1163 }
1164
1165 active->is_vertical = is_vertical;
1166 active->min_height = min_height;
1167 }
1168
1169 if (active->min_height < GRID_Y)
  [24.1] Field 'min_height' is >= GRID_Y
  [25] Taking false branch
1170 return 0;
1171
1172 /* Check for intersections as no edges end during the next row. */
1173 for (e = active->head.next; e != &active->tail; e = e->next) {
  [26] Null pointer value stored to 'e'
  [27] Loop condition is true. Entering loop body
1174 int cell;
1175
1176 if (e->dy) {
  [28] Access to field 'dy' results in a dereference of a null pointer (loaded from variable 'e')
1177 struct quorem x = e->x;
1178 x.quo += e->dxdy_full.quo;
1179 x.rem += e->dxdy_full.rem;
1180 if (x.rem < 0) {
1181 x.quo--;
1182 x.rem += e->dy;
1183 } else if (x.rem >= e->dy) {
1184 x.quo++;
1185 x.rem -= e->dy;
1186 }
1187 cell = x.quo + (x.rem >= e->dy/2);
1188 } else
1189 cell = e->cell;
1190
1191 if (cell < prev_x)
1192 return 0;
1193
1194 prev_x = cell;
1195 }
1196
1197 return 1;
1198}
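
/* Note on the report above: the first loop walks the active list as if
 * it were NULL-terminated while the second stops at the &active->tail
 * sentinel, and active_list_reset() guarantees head.next is never NULL.
 * The analyzer disregards that invariant, assumes e == NULL at line
 * 1158, and then dereferences it at line 1176. One hedged way to make
 * both loops agree (assuming the sentinel invariant is the intended
 * one) is to terminate the recomputation loop at the sentinel too: */
#if 0
    for (e = active->head.next; e != &active->tail; e = e->next) {
        if (e->height_left < min_height)
            min_height = e->height_left;
        is_vertical &= e->dy == 0;
    }
#endif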
1199
1200/* Merges edges on the given subpixel row from the polygon to the
1201 * active_list. */
1202inline static void
1203active_list_merge_edges_from_bucket(struct active_list *active,
1204 struct edge *edges)
1205{
1206 active->head.next = merge_unsorted_edges (active->head.next, edges);
1207}
1208
1209inline static int
1210polygon_fill_buckets (struct active_list *active,
1211 struct edge *edge,
1212 int y,
1213 struct edge **buckets)
1214{
1215 grid_scaled_y_t min_height = active->min_height;
1216 int is_vertical = active->is_vertical;
1217 int max_suby = 0;
1218
1219 while (edge) {
  [13] Loop condition is false. Execution continues on line 1235
1220 struct edge *next = edge->next;
1221 int suby = edge->ytop - y;
1222 if (buckets[suby])
1223 buckets[suby]->prev = edge;
1224 edge->next = buckets[suby];
1225 edge->prev = NULL;
1226 buckets[suby] = edge;
1227 if (edge->height_left < min_height)
1228 min_height = edge->height_left;
1229 is_vertical &= edge->dy == 0;
1230 edge = next;
1231 if (suby > max_suby)
1232 max_suby = suby;
1233 }
1234
1235 active->is_vertical = is_vertical;
1236 active->min_height = min_height;
1237
1238 return max_suby;
  [14] Returning without writing to 'active->head.next'
1239}
1240
1241static void step (struct edge *edge)
1242{
1243 if (edge->dy == 0)
1244 return;
1245
1246 edge->x.quo += edge->dxdy.quo;
1247 edge->x.rem += edge->dxdy.rem;
1248 if (edge->x.rem < 0) {
1249 --edge->x.quo;
1250 edge->x.rem += edge->dy;
1251 } else if (edge->x.rem >= edge->dy) {
1252 ++edge->x.quo;
1253 edge->x.rem -= edge->dy;
1254 }
1255
1256 edge->cell = edge->x.quo + (edge->x.rem >= edge->dy/2);
1257}
1258
1259inline static void
1260sub_row (struct active_list *active,
1261 struct cell_list *coverages,
1262 unsigned int mask)
1263{
1264 struct edge *edge = active->head.next;
1265 int xstart = INT_MIN, prev_x = INT_MIN;
1266 int winding = 0;
1267
1268 cell_list_rewind (coverages);
1269
1270 while (&active->tail != edge) {
1271 struct edge *next = edge->next;
1272 int xend = edge->cell;
1273
1274 if (--edge->height_left) {
1275 step (edge);
1276
1277 if (edge->cell < prev_x) {
1278 struct edge *pos = edge->prev;
1279 pos->next = next;
1280 next->prev = pos;
1281 do {
1282 pos = pos->prev;
1283 } while (edge->cell < pos->cell);
1284 pos->next->prev = edge;
1285 edge->next = pos->next;
1286 edge->prev = pos;
1287 pos->next = edge;
1288 } else
1289 prev_x = edge->cell;
1290 active->min_height = -1;
1291 } else {
1292 edge->prev->next = next;
1293 next->prev = edge->prev;
1294 }
1295
1296 winding += edge->dir;
1297 if ((winding & mask) == 0) {
1298 if (next->cell != xend) {
1299 cell_list_add_subspan (coverages, xstart, xend);
1300 xstart = INT_MIN;
1301 }
1302 } else if (xstart == INT_MIN)
1303 xstart = xend;
1304
1305 edge = next;
1306 }
1307}
1308
1309inline static void dec (struct active_list *a, struct edge *e, int h)
1310{
1311 e->height_left -= h;
1312 if (e->height_left == 0) {
1313 e->prev->next = e->next;
1314 e->next->prev = e->prev;
1315 a->min_height = -1;
1316 }
1317}
1318
1319static void
1320full_row (struct active_list *active,
1321 struct cell_list *coverages,
1322 unsigned int mask)
1323{
1324 struct edge *left = active->head.next;
1325
1326 while (&active->tail != left) {
1327 struct edge *right;
1328 int winding;
1329
1330 dec (active, left, GRID_Y);
1331
1332 winding = left->dir;
1333 right = left->next;
1334 do {
1335 dec (active, right, GRID_Y);
1336
1337 winding += right->dir;
1338 if ((winding & mask) == 0 && right->next->cell != right->cell)
1339 break;
1340
1341 full_step (right);
1342
1343 right = right->next;
1344 } while (1);
1345
1346 cell_list_set_rewind (coverages);
1347 cell_list_render_edge (coverages, left, +1);
1348 cell_list_render_edge (coverages, right, -1);
1349
1350 left = right->next;
1351 }
1352}
1353
1354static void
1355_glitter_scan_converter_init(glitter_scan_converter_t *converter, jmp_buf *jmp)
1356{
1357 polygon_init(converter->polygon, jmp);
1358 active_list_init(converter->active);
1359 cell_list_init(converter->coverages, jmp);
1360 converter->xmin=0;
1361 converter->ymin=0;
1362 converter->xmax=0;
1363 converter->ymax=0;
1364}
1365
1366static void
1367_glitter_scan_converter_fini(glitter_scan_converter_t *self)
1368{
1369 if (self->spans != self->spans_embedded)
1370 free (self->spans);
1371
1372 polygon_fini(self->polygon);
1373 cell_list_fini(self->coverages);
1374
1375 self->xmin=0;
1376 self->ymin=0;
1377 self->xmax=0;
1378 self->ymax=0;
1379}
1380
1381static grid_scaled_t
1382int_to_grid_scaled(int i, int scale)
1383{
1384 /* Clamp to max/min representable scaled number. */
1385 if (i >= 0) {
1386 if (i >= INT_MAX/scale)
1387 i = INT_MAX/scale;
1388 }
1389 else {
1390 if (i <= INT_MIN/scale)
1391 i = INT_MIN/scale;
1392 }
1393 return i*scale;
1394}
1395
1396#define int_to_grid_scaled_x(x) int_to_grid_scaled((x), GRID_X)
1397#define int_to_grid_scaled_y(x) int_to_grid_scaled((x), GRID_Y)
1398
1399I glitter_status_t
1400glitter_scan_converter_reset(
1401 glitter_scan_converter_t *converter,
1402 int xmin, int ymin,
1403 int xmax, int ymax)
1404{
1405 glitter_status_t status;
1406 int max_num_spans;
1407
1408 converter->xmin = 0; converter->xmax = 0;
1409 converter->ymin = 0; converter->ymax = 0;
1410
1411 max_num_spans = xmax - xmin + 1;
1412
1413 if (max_num_spans > ARRAY_LENGTH(converter->spans_embedded)) {
1414 converter->spans = _cairo_malloc_ab (max_num_spans,
1415 sizeof (cairo_half_open_span_t));
1416 if (unlikely (converter->spans == NULL))
1417 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
1418 } else
1419 converter->spans = converter->spans_embedded;
1420
1421 xmin = int_to_grid_scaled_x(xmin);
1422 ymin = int_to_grid_scaled_y(ymin);
1423 xmax = int_to_grid_scaled_x(xmax);
1424 ymax = int_to_grid_scaled_y(ymax);
1425
1426 active_list_reset(converter->active);
1427 cell_list_reset(converter->coverages);
1428 status = polygon_reset(converter->polygon, ymin, ymax);
1429 if (status)
1430 return status;
1431
1432 converter->xmin = xmin;
1433 converter->xmax = xmax;
1434 converter->ymin = ymin;
1435 converter->ymax = ymax;
1436 return GLITTER_STATUS_SUCCESS;
1437}
1438
1439/* INPUT_TO_GRID_X/Y (in_coord, out_grid_scaled, grid_scale)
1440 * These macros convert an input coordinate in the client's
1441 * device space to the rasterisation grid.
1442 */
1443/* Gah.. this bit of ugly defines INPUT_TO_GRID_X/Y so as to use
1444 * shifts if possible, and something saneish if not.
1445 */
1446#if !defined(INPUT_TO_GRID_Y) && defined(GRID_Y_BITS) && GRID_Y_BITS <= GLITTER_INPUT_BITS
1447# define INPUT_TO_GRID_Y(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_Y_BITS)
1448#else
1449# define INPUT_TO_GRID_Y(in, out) INPUT_TO_GRID_general(in, out, GRID_Y)
1450#endif
1451
1452#if !defined(INPUT_TO_GRID_X) && defined(GRID_X_BITS) && GRID_X_BITS <= GLITTER_INPUT_BITS
1453# define INPUT_TO_GRID_X(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_X_BITS)
1454#else
1455# define INPUT_TO_GRID_X(in, out) INPUT_TO_GRID_general(in, out, GRID_X)
1456#endif
1457
1458#define INPUT_TO_GRID_general(in, out, grid_scale) do { \
1459 long long tmp__ = (long long)(grid_scale) * (in); \
1460 tmp__ += 1 << (GLITTER_INPUT_BITS-1); \
1461 tmp__ >>= GLITTER_INPUT_BITS; \
1462 (out) = tmp__; \
1463} while (0)
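
/* Illustration (not in the original source): with GLITTER_INPUT_BITS =
 * 8 and GRID_Y = 15, an input y of 384 (1.5 pixels) gives
 * tmp__ = 15*384 + 128 == 5888, and 5888 >> 8 == 23, i.e. 22.5 sub-rows
 * rounded to 23. The X conversion reduces to a plain shift because
 * GRID_X_BITS == GLITTER_INPUT_BITS. */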
1464
1465inline static void
1466polygon_add_edge (struct polygon *polygon,
1467 const cairo_edge_t *edge)
1468{
1469 struct edge *e;
1470 grid_scaled_y_t ytop, ybot;
1471 const cairo_point_t *p1, *p2;
1472
1473 INPUT_TO_GRID_Y (edge->top, ytop);
1474 if (ytop < polygon->ymin)
1475 ytop = polygon->ymin;
1476
1477 INPUT_TO_GRID_Y (edge->bottom, ybot);
1478 if (ybot > polygon->ymax)
1479 ybot = polygon->ymax;
1480
1481 if (ybot <= ytop)
1482 return;
1483
1484 e = pool_alloc (polygon->edge_pool.base, sizeof (struct edge));
1485
1486 e->ytop = ytop;
1487 e->height_left = ybot - ytop;
1488 if (edge->line.p2.y > edge->line.p1.y) {
1489 e->dir = edge->dir;
1490 p1 = &edge->line.p1;
1491 p2 = &edge->line.p2;
1492 } else {
1493 e->dir = -edge->dir;
1494 p1 = &edge->line.p2;
1495 p2 = &edge->line.p1;
1496 }
1497
1498 if (p2->x == p1->x) {
1499 e->cell = p1->x;
1500 e->x.quo = p1->x;
1501 e->x.rem = 0;
1502 e->dxdy.quo = e->dxdy.rem = 0;
1503 e->dxdy_full.quo = e->dxdy_full.rem = 0;
1504 e->dy = 0;
1505 } else {
1506 int64_t Ex, Ey, tmp;
1507
1508 Ex = (int64_t)(p2->x - p1->x) * GRID_X;
1509 Ey = (int64_t)(p2->y - p1->y) * GRID_Y * (2 << GLITTER_INPUT_BITS);
1510
1511 e->dxdy.quo = Ex * (2 << GLITTER_INPUT_BITS) / Ey;
1512 e->dxdy.rem = Ex * (2 << GLITTER_INPUT_BITS) % Ey;
1513
1514 tmp = (int64_t)(2*ytop + 1) << GLITTER_INPUT_BITS;
1515 tmp -= (int64_t)p1->y * GRID_Y * 2;
1516 tmp *= Ex;
1517 e->x.quo = tmp / Ey;
1518 e->x.rem = tmp % Ey;
1519
1520#if GRID_X_BITS == GLITTER_INPUT_BITS
1521 e->x.quo += p1->x;
1522#else
1523 tmp = (int64_t)p1->x * GRID_X;
1524 e->x.quo += tmp >> GLITTER_INPUT_BITS;
1525 e->x.rem += ((tmp & ((1 << GLITTER_INPUT_BITS) - 1)) * Ey) / (1 << GLITTER_INPUT_BITS);
1526#endif
1527
1528 if (e->x.rem < 0) {
1529 e->x.quo--;
1530 e->x.rem += Ey;
1531 } else if (e->x.rem >= Ey) {
1532 e->x.quo++;
1533 e->x.rem -= Ey;
1534 }
1535
1536	if (e->height_left >= GRID_Y) {
1537	    tmp = Ex * (2 * GRID_Y << GLITTER_INPUT_BITS);
1538 e->dxdy_full.quo = tmp / Ey;
1539 e->dxdy_full.rem = tmp % Ey;
1540 } else
1541 e->dxdy_full.quo = e->dxdy_full.rem = 0;
1542
1543 e->cell = e->x.quo + (e->x.rem >= Ey/2);
1544 e->dy = Ey;
1545 }
1546
1547 _polygon_insert_edge_into_its_y_bucket (polygon, e);
1548}
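
The else-branch above keeps the slope dx/dy as an exact quotient/remainder pair and normalizes the remainder into [0, Ey), so stepping the edge in y accumulates no error. A minimal sketch of that bookkeeping under illustrative names (not the file's own types):

#include <stdint.h>
#include <stdio.h>

struct slope {
    int64_t quo;  /* integer part of dx/dy */
    int64_t rem;  /* remainder, kept in [0, dy) */
    int64_t dy;
};

static struct slope
make_slope (int64_t dx, int64_t dy)
{
    struct slope s = { dx / dy, dx % dy, dy };
    if (s.rem < 0) {       /* C division truncates toward zero; renormalize */
        s.quo--;
        s.rem += dy;
    }
    return s;
}

int
main (void)
{
    struct slope s = make_slope (-7, 3);
    /* -7/3 is stored as -3 + 2/3 */
    printf ("%lld + %lld/%lld\n",
            (long long)s.quo, (long long)s.rem, (long long)s.dy);
    return 0;
}
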
1549
1550/* Add a new polygon edge from pixel (x1,y1) to (x2,y2) to the scan
1551 * converter. The coordinates represent pixel positions scaled by
1552 * 2**GLITTER_PIXEL_BITS. If this function fails then the scan
1553 * converter should be reset or destroyed. Dir must be +1 or -1,
1554 * with the latter reversing the orientation of the edge. */
1555I void
1556glitter_scan_converter_add_edge (glitter_scan_converter_t *converter,
1557 const cairo_edge_t *edge)
1558{
1559 polygon_add_edge (converter->polygon, edge);
1560}
1561
1562static void
1563step_edges (struct active_list *active, int count)
1564{
1565 struct edge *edge;
1566
1567    count *= GRID_Y;
1568 for (edge = active->head.next; edge != &active->tail; edge = edge->next) {
1569 edge->height_left -= count;
1570 if (! edge->height_left) {
1571 edge->prev->next = edge->next;
1572 edge->next->prev = edge->prev;
1573 active->min_height = -1;
1574 }
1575 }
1576}
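
step_edges unlinks an exhausted edge in place; because the active list carries head and tail sentinels, the unlink needs no NULL checks or special cases. A toy sketch with illustrative types:

#include <stdio.h>

struct node { int id; struct node *prev, *next; };

int
main (void)
{
    struct node head, tail, a = { 1 }, b = { 2 };

    head.next = &a; a.prev = &head;
    a.next = &b;    b.prev = &a;
    b.next = &tail; tail.prev = &b;

    /* unlink 'a' exactly as step_edges drops an edge with no height left */
    a.prev->next = a.next;
    a.next->prev = a.prev;

    for (struct node *n = head.next; n != &tail; n = n->next)
        printf ("node %d\n", n->id);  /* prints only node 2 */
    return 0;
}
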
1577
1578static glitter_status_t
1579blit_a8 (struct cell_list *cells,
1580 cairo_span_renderer_t *renderer,
1581 cairo_half_open_span_t *spans,
1582 int y, int height,
1583 int xmin, int xmax)
1584{
1585 struct cell *cell = cells->head.next;
1586 int prev_x = xmin, last_x = -1;
1587 int16_t cover = 0, last_cover = 0;
1588 unsigned num_spans;
1589
1590 if (cell == &cells->tail)
1591 return CAIRO_STATUS_SUCCESS;
1592
1593 /* Skip cells to the left of the clip region. */
1594 while (cell->x < xmin) {
1595 cover += cell->covered_height;
1596 cell = cell->next;
1597 }
1598    cover *= GRID_X*2;
1599
1600 /* Form the spans from the coverages and areas. */
1601 num_spans = 0;
1602 for (; cell->x < xmax; cell = cell->next) {
1603 int x = cell->x;
1604 int16_t area;
1605
1606 if (x > prev_x && cover != last_cover) {
1607 spans[num_spans].x = prev_x;
1608	    spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
1609 last_cover = cover;
1610 last_x = prev_x;
1611 ++num_spans;
1612 }
1613
1614	cover += cell->covered_height*GRID_X*2;
1615 area = cover - cell->uncovered_area;
1616
1617 if (area != last_cover) {
1618 spans[num_spans].x = x;
1619	    spans[num_spans].coverage = GRID_AREA_TO_ALPHA (area);
1620 last_cover = area;
1621 last_x = x;
1622 ++num_spans;
1623 }
1624
1625 prev_x = x+1;
1626 }
1627
1628 if (prev_x <= xmax && cover != last_cover) {
1629 spans[num_spans].x = prev_x;
1630	spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
1631 last_cover = cover;
1632 last_x = prev_x;
1633 ++num_spans;
1634 }
1635
1636 if (last_x < xmax && last_cover) {
1637 spans[num_spans].x = xmax;
1638 spans[num_spans].coverage = 0;
1639 ++num_spans;
1640 }
1641
1642 /* Dump them into the renderer. */
1643 return renderer->render_rows (renderer, y, height, spans, num_spans);
1644}
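
The GRID_AREA_TO_ALPHA expansion the analyzer printed above is (A + (A<<4) + 256) >> 9, i.e. (17*A + 256)/512. With the expanded grid constants (GRID_X == 1<<8, GRID_Y == 15), a fully covered cell has area 2*GRID_X*GRID_Y == 7680, which maps to 255. A quick standalone check of that ramp:

#include <stdio.h>

static int
grid_area_to_alpha (int area)
{
    return (area + (area << 4) + 256) >> 9;  /* ~ (17*area + 256) / 512 */
}

int
main (void)
{
    printf ("%d\n", grid_area_to_alpha (0));     /* empty cell  -> 0   */
    printf ("%d\n", grid_area_to_alpha (3840));  /* half cover  -> 128 */
    printf ("%d\n", grid_area_to_alpha (7680));  /* full cover  -> 255 */
    return 0;
}
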
1645
1646#define GRID_AREA_TO_A1(A) ((GRID_AREA_TO_ALPHA (A) > 127) ? 255 : 0)
1647static glitter_status_t
1648blit_a1 (struct cell_list *cells,
1649 cairo_span_renderer_t *renderer,
1650 cairo_half_open_span_t *spans,
1651 int y, int height,
1652 int xmin, int xmax)
1653{
1654 struct cell *cell = cells->head.next;
1655 int prev_x = xmin, last_x = -1;
1656 int16_t cover = 0;
1657 uint8_t coverage, last_cover = 0;
1658 unsigned num_spans;
1659
1660 if (cell == &cells->tail)
1661 return CAIRO_STATUS_SUCCESS;
1662
1663 /* Skip cells to the left of the clip region. */
1664 while (cell->x < xmin) {
1665 cover += cell->covered_height;
1666 cell = cell->next;
1667 }
1668    cover *= GRID_X*2;
1669
1670 /* Form the spans from the coverages and areas. */
1671 num_spans = 0;
1672 for (; cell->x < xmax; cell = cell->next) {
1673 int x = cell->x;
1674 int16_t area;
1675
1676	coverage = GRID_AREA_TO_A1 (cover);
1677 if (x > prev_x && coverage != last_cover) {
1678 last_x = spans[num_spans].x = prev_x;
1679 last_cover = spans[num_spans].coverage = coverage;
1680 ++num_spans;
1681 }
1682
1683	cover += cell->covered_height*GRID_X*2;
1684 area = cover - cell->uncovered_area;
1685
1686	coverage = GRID_AREA_TO_A1 (area);
1687 if (coverage != last_cover) {
1688 last_x = spans[num_spans].x = x;
1689 last_cover = spans[num_spans].coverage = coverage;
1690 ++num_spans;
1691 }
1692
1693 prev_x = x+1;
1694 }
1695
1696    coverage = GRID_AREA_TO_A1 (cover);
1697 if (prev_x <= xmax && coverage != last_cover) {
1698 last_x = spans[num_spans].x = prev_x;
1699 last_cover = spans[num_spans].coverage = coverage;
1700 ++num_spans;
1701 }
1702
1703 if (last_x < xmax && last_cover) {
1704 spans[num_spans].x = xmax;
1705 spans[num_spans].coverage = 0;
1706 ++num_spans;
1707 }
1708 if (num_spans == 1)
1709 return CAIRO_STATUS_SUCCESS;
1710
1711 /* Dump them into the renderer. */
1712 return renderer->render_rows (renderer, y, height, spans, num_spans);
1713}
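
blit_a1 accumulates coverage exactly like blit_a8 but snaps it through GRID_AREA_TO_A1, a midpoint threshold on the same alpha ramp. A small sketch of the threshold, reusing the ramp from the previous example:

#include <stdio.h>

static int
grid_area_to_a1 (int area)
{
    int alpha = (area + (area << 4) + 256) >> 9;
    return alpha > 127 ? 255 : 0;  /* 1-bit: fully on or fully off */
}

int
main (void)
{
    printf ("%d\n", grid_area_to_a1 (3840));  /* alpha 128 -> 255 */
    printf ("%d\n", grid_area_to_a1 (3800));  /* alpha 126 -> 0   */
    return 0;
}
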
1714
1715
1716I void
1717glitter_scan_converter_render(glitter_scan_converter_t *converter,
1718 unsigned int winding_mask,
1719 int antialias,
1720 cairo_span_renderer_t *renderer)
1721{
1722 int i, j;
1723    int ymax_i = converter->ymax / GRID_Y;
1724    int ymin_i = converter->ymin / GRID_Y;
1725 int xmin_i, xmax_i;
1726 int h = ymax_i - ymin_i;
1727 struct polygon *polygon = converter->polygon;
1728 struct cell_list *coverages = converter->coverages;
1729 struct active_list *active = converter->active;
1730    struct edge *buckets[GRID_Y] = { 0 };
1731
1732    xmin_i = converter->xmin / GRID_X;
1733    xmax_i = converter->xmax / GRID_X;
1734 if (xmin_i >= xmax_i)
      [8] Assuming 'xmin_i' is < 'xmax_i'
      [9] Taking false branch
1735 return;
1736
1737 /* Render each pixel row. */
1738 for (i = 0; i < h; i = j) {
      [10] Assuming 'i' is < 'h'
      [11] Loop condition is true. Entering loop body
1739 int do_full_row = 0;
1740
1741 j = i + 1;
1742
1743 /* Determine if we can ignore this row or use the full pixel
1744 * stepper. */
1745 if (polygon_fill_buckets (active,
      [12] Calling 'polygon_fill_buckets'
      [15] Returning from 'polygon_fill_buckets'
      [16] Taking true branch
1746			      polygon->y_buckets[i],
1747			      (i+ymin_i)*GRID_Y,
1748 buckets) == 0) {
1749 if (buckets[0]) {
      [17] Taking false branch
1750	    active_list_merge_edges_from_bucket (active, buckets[0]);
1751	    buckets[0] = NULL;
1752 }
1753
1754 if (active->head.next == &active->tail) {
      [18] Assuming the condition is false
      [19] Taking false branch
1755	    active->min_height = INT_MAX;
1756 active->is_vertical = 1;
1757 for (; j < h && ! polygon->y_buckets[j]; j++)
1758 ;
1759 continue;
1760 }
1761
1762 do_full_row = can_do_full_row (active);
      [20] Calling 'can_do_full_row'
1763 }
1764
1765 if (do_full_row) {
1766 /* Step by a full pixel row's worth. */
1767 full_row (active, coverages, winding_mask);
1768
1769 if (active->is_vertical) {
1770 while (j < h &&
1771		   polygon->y_buckets[j] == NULL &&
1772		   active->min_height >= 2*GRID_Y)
1773 {
1774		active->min_height -= GRID_Y;
1775 j++;
1776 }
1777 if (j != i + 1)
1778 step_edges (active, j - (i + 1));
1779 }
1780 } else {
1781 int sub;
1782
1783 /* Subsample this row. */
1784	    for (sub = 0; sub < GRID_Y; sub++) {
1785 if (buckets[sub]) {
1786 active_list_merge_edges_from_bucket (active, buckets[sub]);
1787		    buckets[sub] = NULL;
1788 }
1789 sub_row (active, coverages, winding_mask);
1790 }
1791 }
1792
1793 if (antialias)
1794 blit_a8 (coverages, renderer, converter->spans,
1795 i+ymin_i, j-i, xmin_i, xmax_i);
1796 else
1797 blit_a1 (coverages, renderer, converter->spans,
1798 i+ymin_i, j-i, xmin_i, xmax_i);
1799 cell_list_reset (coverages);
1800
1801	active->min_height -= GRID_Y;
1802 }
1803}
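
When a row's y-bucket is empty and no edges remain active, the render loop above advances j past every consecutive NULL y-bucket, so runs of blank rows are skipped wholesale instead of being subsampled. A toy sketch of that skip over hypothetical bucket data (simplified: the real loop also requires the active list to be empty before skipping):

#include <stdio.h>

int
main (void)
{
    /* 1 == bucket has edges, 0 == empty row (hypothetical data) */
    int y_buckets[8] = { 1, 0, 0, 0, 1, 0, 0, 0 };
    int h = 8, i, j;

    for (i = 0; i < h; i = j) {
        j = i + 1;
        if (!y_buckets[i]) {
            for (; j < h && !y_buckets[j]; j++)  /* swallow the blank run */
                ;
            printf ("rows %d..%d skipped\n", i, j - 1);
            continue;
        }
        printf ("row %d rendered\n", i);
    }
    return 0;
}
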
1804
1805struct _cairo_tor_scan_converter {
1806 cairo_scan_converter_t base;
1807
1808 glitter_scan_converter_t converter[1];
1809 cairo_fill_rule_t fill_rule;
1810 cairo_antialias_t antialias;
1811
1812 jmp_buf jmp;
1813};
1814
1815typedef struct _cairo_tor_scan_converter cairo_tor_scan_converter_t;
1816
1817static void
1818_cairo_tor_scan_converter_destroy (void *converter)
1819{
1820 cairo_tor_scan_converter_t *self = converter;
1821    if (self == NULL) {
1822 return;
1823 }
1824 _glitter_scan_converter_fini (self->converter);
1825 free(self);
1826}
1827
1828cairo_status_t
1829_cairo_tor_scan_converter_add_polygon (void *converter,
1830 const cairo_polygon_t *polygon)
1831{
1832 cairo_tor_scan_converter_t *self = converter;
1833 int i;
1834
1835#if 0
1836 FILE *file = fopen ("polygon.txt", "w");
1837 _cairo_debug_print_polygon (file, polygon);
1838 fclose (file);
1839#endif
1840
1841 for (i = 0; i < polygon->num_edges; i++)
1842 glitter_scan_converter_add_edge (self->converter, &polygon->edges[i]);
1843
1844 return CAIRO_STATUS_SUCCESS;
1845}
1846
1847static cairo_status_t
1848_cairo_tor_scan_converter_generate (void *converter,
1849 cairo_span_renderer_t *renderer)
1850{
1851 cairo_tor_scan_converter_t *self = converter;
1852 cairo_status_t status;
1853
1854    if ((status = setjmp (self->jmp)))
      [1] Value assigned to field 'next'
      [2] Assuming 'status' is 0
      [3] Taking false branch
1855 return _cairo_scan_converter_set_error (self, _cairo_error (status));
1856
1857    glitter_scan_converter_render (self->converter,
      [7] Calling 'glitter_scan_converter_render'
1858				   self->fill_rule == CAIRO_FILL_RULE_WINDING ? ~0 : 1,
      [4] Assuming field 'fill_rule' is not equal to CAIRO_FILL_RULE_WINDING
      [5] '?' condition is false
1859				   self->antialias != CAIRO_ANTIALIAS_NONE,
      [6] Assuming field 'antialias' is equal to CAIRO_ANTIALIAS_NONE
1860 renderer);
1861 return CAIRO_STATUS_SUCCESS;
1862}
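
_cairo_tor_scan_converter_generate relies on setjmp/longjmp for its error path: a failure deep inside the render (e.g. in the edge pool allocator) longjmps back to the setjmp above instead of threading status returns through every helper. A self-contained sketch of the pattern; the status codes and names are illustrative, not cairo's:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf jmp;

static void
deep_worker (int fail)
{
    if (fail)
        longjmp (jmp, 12);  /* unwind with an illustrative error code */
    puts ("worker finished");
}

static int
generate (int fail)
{
    int status = setjmp (jmp);
    if (status)
        return status;      /* we land back here after a longjmp */
    deep_worker (fail);
    return 0;
}

int
main (void)
{
    printf ("ok: %d\n", generate (0));
    printf ("fail: %d\n", generate (1));
    return 0;
}
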
1863
1864cairo_scan_converter_t *
1865_cairo_tor_scan_converter_create (int xmin,
1866 int ymin,
1867 int xmax,
1868 int ymax,
1869 cairo_fill_rule_t fill_rule,
1870 cairo_antialias_t antialias)
1871{
1872 cairo_tor_scan_converter_t *self;
1873 cairo_status_t status;
1874
1875    self = _cairo_malloc (sizeof(struct _cairo_tor_scan_converter));
1876    if (unlikely (self == NULL)) {
1877 status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
1878 goto bail_nomem;
1879 }
1880
1881 self->base.destroy = _cairo_tor_scan_converter_destroy;
1882 self->base.generate = _cairo_tor_scan_converter_generate;
1883
1884 _glitter_scan_converter_init (self->converter, &self->jmp);
1885 status = glitter_scan_converter_reset (self->converter,
1886 xmin, ymin, xmax, ymax);
1887    if (unlikely (status))
1888 goto bail;
1889
1890 self->fill_rule = fill_rule;
1891 self->antialias = antialias;
1892
1893 return &self->base;
1894
1895 bail:
1896 self->base.destroy(&self->base);
1897 bail_nomem:
1898 return _cairo_scan_converter_create_in_error (status);
1899}