Bug Summary

File: root/firefox-clang/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/list.h
Warning: line 190, column 18
Access to field 'next' results in a dereference of a null pointer (loaded from field 'prev')

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name lower_jumps.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -ffp-contract=off -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/root/firefox-clang/third_party/rust/glslopt -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/root/firefox-clang/third_party/rust/glslopt -resource-dir /usr/lib/llvm-21/lib/clang/21 -include /root/firefox-clang/config/gcc_hidden.h -include /root/firefox-clang/obj-x86_64-pc-linux-gnu/mozilla-config.h -I glsl-optimizer/include -I glsl-optimizer/src/mesa -I glsl-optimizer/src/mapi -I glsl-optimizer/src/compiler -I glsl-optimizer/src/compiler/glsl -I glsl-optimizer/src/gallium/auxiliary -I glsl-optimizer/src/gallium/include -I glsl-optimizer/src -I glsl-optimizer/src/util -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/stl_wrappers -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/system_wrappers -U _FORTIFY_SOURCE -D _FORTIFY_SOURCE=2 -D _GLIBCXX_ASSERTIONS -D DEBUG=1 -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include/nspr -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include/nss -D MOZILLA_CLIENT -D MOZILLA_CONFIG_H -D __STDC_FORMAT_MACROS -D _GNU_SOURCE -D HAVE_ENDIAN_H -D HAVE_PTHREAD -D HAVE_TIMESPEC_GET -D MOZ_INCLUDE_MOZALLOC_H -D mozilla_throw_gcc_h -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/x86_64-linux-gnu/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14/backward -internal-isystem /usr/lib/llvm-21/lib/clang/21/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-error=pessimizing-move -Wno-error=large-by-value-copy=128 -Wno-error=implicit-int-float-conversion -Wno-error=thread-safety-analysis -Wno-error=tautological-type-limit-compare -Wno-invalid-offsetof -Wno-range-loop-analysis -Wno-deprecated-anon-enum-enum-conversion -Wno-deprecated-enum-enum-conversion -Wno-deprecated-this-capture -Wno-inline-new-delete -Wno-error=deprecated-declarations -Wno-error=array-bounds -Wno-error=free-nonheap-object -Wno-error=atomic-alignment -Wno-error=deprecated-builtins -Wno-psabi -Wno-error=builtin-macro-redefined -Wno-vla-cxx-extension -Wno-unknown-warning-option -fdeprecated-macro -ferror-limit 19 -fstrict-flex-arrays=1 -stack-protector 2 -fstack-clash-protection -ftrivial-auto-var-init=pattern -fno-rtti 
-fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -fno-sized-deallocation -fno-aligned-allocation -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2025-06-27-100320-3286336-1 -x c++ glsl-optimizer/src/compiler/glsl/lower_jumps.cpp

glsl-optimizer/src/compiler/glsl/lower_jumps.cpp

1/*
2 * Copyright © 2010 Luca Barbieri
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24/**
25 * \file lower_jumps.cpp
26 *
27 * This pass lowers jumps (break, continue, and return) to if/else structures.
28 *
29 * It can be asked to:
30 * 1. Pull jumps out of ifs where possible
31 * 2. Remove all "continue"s, replacing them with an "execute flag"
32 * 3. Replace all "break" with a single conditional one at the end of the loop
33 * 4. Replace all "return"s with a single return at the end of the function,
34 * for the main function and/or other functions
35 *
36 * Applying this pass gives several benefits:
37 * 1. All functions can be inlined.
38 * 2. nv40 and other pre-DX10 chips without "continue" can be supported
39 * 3. nv30 and other pre-DX10 chips with no control flow at all are better
40 * supported
41 *
42 * Continues are lowered by adding a per-loop "execute flag", initialized to
43 * true, that when cleared inhibits all execution until the end of the loop.
44 *
45 * Breaks are lowered to continues, plus setting a "break flag" that is checked
46 * at the end of the loop, and triggers the unique "break".
47 *
48 * Returns are lowered to breaks/continues, plus adding a "return flag" that
49 * causes loops to break again out of their enclosing loops until all the
50 * loops are exited: then the "execute flag" logic will ignore everything
51 * until the end of the function.
52 *
53 * Note that "continue" and "return" can also be implemented by adding
54 * a dummy loop and using break.
55 * However, this is bad for hardware with limited nesting depth, and
56 * prevents further optimization, and thus is not currently performed.
57 */
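As an illustration of the transformation described above, here is a minimal sketch in plain C++ (not GLSL IR, and not taken from this file) of how a "continue" is rewritten with a per-iteration execute flag; breaks and returns are handled analogously through the break and return flags:

   #include <cstdio>

   /* Before lowering: the loop skips odd values with "continue". */
   static void before_lowering()
   {
      for (int i = 0; i < 4; ++i) {
         if (i % 2 == 1)
            continue;
         std::printf("%d\n", i);
      }
   }

   /* After lowering (conceptually): the continue becomes "clear the
    * execute flag", and the rest of the body is guarded by that flag,
    * mirroring what this pass does to the IR. */
   static void after_lowering()
   {
      for (int i = 0; i < 4; ++i) {
         bool execute_flag = true;
         if (i % 2 == 1)
            execute_flag = false;
         if (execute_flag)
            std::printf("%d\n", i);
      }
   }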
58
59#include "compiler/glsl_types.h"
60#include <string.h>
61#include "ir.h"
62
63/**
64 * Enum recording the result of analyzing how control flow might exit
65 * an IR node.
66 *
67 * Each possible value of jump_strength indicates a strictly stronger
68 * guarantee on control flow than the previous value.
69 *
70 * The ordering of strengths roughly reflects the way jumps are
71 * lowered: jumps with higher strength tend to be lowered to jumps of
72 * lower strength. Accordingly, strength is used as a heuristic to
73 * determine which lowering to perform first.
74 *
75 * This enum is also used by get_jump_strength() to categorize
76 * instructions as either break, continue, return, or other. When
77 * used in this fashion, strength_always_clears_execute_flag is not
78 * used.
79 *
80 * The control flow analysis made by this optimization pass makes two
81 * simplifying assumptions:
82 *
83 * - It ignores discard instructions, since they are lowered by a
84 * separate pass (lower_discard.cpp).
85 *
86 * - It assumes it is always possible for control to flow from a loop
87 * to the instruction immediately following it. Technically, this
88 * is not true (since all execution paths through the loop might
89 * jump back to the top, or return from the function).
90 *
91 * Both of these simplifying assumptions are safe, since they can never
92 * cause reachable code to be incorrectly classified as unreachable;
93 * they can only do the opposite.
94 */
95enum jump_strength
96{
97 /**
98 * Analysis has produced no guarantee on how control flow might
99 * exit this IR node. It might fall out the bottom (with or
100 * without clearing the execute flag, if present), or it might
101 * continue to the top of the innermost enclosing loop, break out
102 * of it, or return from the function.
103 */
104 strength_none,
105
106 /**
107 * The only way control can fall out the bottom of this node is
108 * through a code path that clears the execute flag. It might also
109 * continue to the top of the innermost enclosing loop, break out
110 * of it, or return from the function.
111 */
112 strength_always_clears_execute_flag,
113
114 /**
115 * Control cannot fall out the bottom of this node. It might
116 * continue to the top of the innermost enclosing loop, break out
117 * of it, or return from the function.
118 */
119 strength_continue,
120
121 /**
122 * Control cannot fall out the bottom of this node, or continue the
123 * top of the innermost enclosing loop. It can only break out of
124 * it or return from the function.
125 */
126 strength_break,
127
128 /**
129 * Control cannot fall out the bottom of this node, continue to the
130 * top of the innermost enclosing loop, or break out of it. It can
131 * only return from the function.
132 */
133 strength_return
134};
135
136namespace {
137
138struct block_record
139{
140 /* minimum jump strength (of lowered IR, not pre-lowering IR)
141 *
142 * If the block ends with a jump, must be the strength of the jump.
143 * (Otherwise, the jump would be dead and would have been deleted before.)
144 *
145 * If the block doesn't end with a jump, it can be different than strength_none if all paths before it lead to some jump
146 * (e.g. an if with a return in one branch, and a break in the other, while not lowering them)
147 * Note that identical jumps are usually unified though.
148 */
149 jump_strength min_strength;
150
151 /* can anything clear the execute flag? */
152 bool may_clear_execute_flag;
153
154 block_record()
155 {
156 this->min_strength = strength_none;
157 this->may_clear_execute_flag = false;
158 }
159};
160
161struct loop_record
162{
163 ir_function_signature* signature;
164 ir_loop* loop;
165
166 /* used to avoid lowering the break used to represent lowered breaks */
167 unsigned nesting_depth;
168 bool in_if_at_the_end_of_the_loop;
169
170 bool may_set_return_flag;
171
172 ir_variable* break_flag;
173 ir_variable* execute_flag; /* cleared to emulate continue */
174
175 loop_record(ir_function_signature* p_signature = 0, ir_loop* p_loop = 0)
176 {
177 this->signature = p_signature;
178 this->loop = p_loop;
179 this->nesting_depth = 0;
180 this->in_if_at_the_end_of_the_loop = false;
181 this->may_set_return_flag = false;
182 this->break_flag = 0;
183 this->execute_flag = 0;
184 }
185
186 ir_variable* get_execute_flag()
187 {
188 /* also supported for the "function loop" */
189 if(!this->execute_flag) {
190 exec_list& list = this->loop ? this->loop->body_instructions : signature->body;
191 this->execute_flag = new(this->signature) ir_variable(glsl_type::bool_type, "execute_flag", ir_var_temporary);
192 list.push_head(new(this->signature) ir_assignment(new(this->signature) ir_dereference_variable(execute_flag), new(this->signature) ir_constant(true)));
193 list.push_head(this->execute_flag);
194 }
195 return this->execute_flag;
196 }
197
198 ir_variable* get_break_flag()
199 {
200 assert(this->loop);
201 if(!this->break_flag) {
202 this->break_flag = new(this->signature) ir_variable(glsl_type::bool_type, "break_flag", ir_var_temporary);
203 this->loop->insert_before(this->break_flag);
204 this->loop->insert_before(new(this->signature) ir_assignment(new(this->signature) ir_dereference_variable(break_flag), new(this->signature) ir_constant(false)));
205 }
206 return this->break_flag;
207 }
208};
209
210struct function_record
211{
212 ir_function_signature* signature;
213 ir_variable* return_flag; /* used to break out of all loops and then jump to the return instruction */
214 ir_variable* return_value;
215 bool lower_return;
216 unsigned nesting_depth;
217
218 function_record(ir_function_signature* p_signature = 0,
219 bool lower_return = false)
220 {
221 this->signature = p_signature;
222 this->return_flag = 0;
223 this->return_value = 0;
224 this->nesting_depth = 0;
225 this->lower_return = lower_return;
226 }
227
228 ir_variable* get_return_flag()
229 {
230 if(!this->return_flag) {
231 this->return_flag = new(this->signature) ir_variable(glsl_type::bool_type, "return_flag", ir_var_temporary);
232 this->signature->body.push_head(new(this->signature) ir_assignment(new(this->signature) ir_dereference_variable(return_flag), new(this->signature) ir_constant(false)));
233 this->signature->body.push_head(this->return_flag);
234 }
235 return this->return_flag;
236 }
237
238 ir_variable* get_return_value()
239 {
240 if(!this->return_value) {
241 assert(!this->signature->return_type->is_void());
242 return_value = new(this->signature) ir_variable(this->signature->return_type, "return_value", ir_var_temporary);
243 this->signature->body.push_head(this->return_value);
244 }
245 return this->return_value;
246 }
247};
248
249struct ir_lower_jumps_visitor : public ir_control_flow_visitor {
250 /* Postconditions: on exit of any visit() function:
251 *
252 * ANALYSIS: this->block.min_strength,
253 * this->block.may_clear_execute_flag, and
254 * this->loop.may_set_return_flag are updated to reflect the
255 * characteristics of the visited statement.
256 *
257 * DEAD_CODE_ELIMINATION: If this->block.min_strength is not
258 * strength_none, the visited node is at the end of its exec_list.
259 * In other words, any unreachable statements that follow the
260 * visited statement in its exec_list have been removed.
261 *
262 * CONTAINED_JUMPS_LOWERED: If the visited statement contains other
263 * statements, then should_lower_jump() is false for all of the
264 * return, break, or continue statements it contains.
265 *
266 * Note that visiting a jump does not lower it. That is the
267 * responsibility of the statement (or function signature) that
268 * contains the jump.
269 */
270
271 using ir_control_flow_visitor::visit;
272
273 bool progress;
274
275 struct function_record function;
276 struct loop_record loop;
277 struct block_record block;
278
279 bool pull_out_jumps;
280 bool lower_continue;
281 bool lower_break;
282 bool lower_sub_return;
283 bool lower_main_return;
284
285 ir_lower_jumps_visitor()
286 : progress(false),
287 pull_out_jumps(false),
288 lower_continue(false),
289 lower_break(false),
290 lower_sub_return(false),
291 lower_main_return(false)
292 {
293 }
294
295 void truncate_after_instruction(exec_node *ir)
296 {
297 if (!ir)
298 return;
299
300 while (!ir->get_next()->is_tail_sentinel()) {
301 ((ir_instruction *)ir->get_next())->remove();
302 this->progress = true;
303 }
304 }
305
306 void move_outer_block_inside(ir_instruction *ir, exec_list *inner_block)
307 {
308 while (!ir->get_next()->is_tail_sentinel()) {
309 ir_instruction *move_ir = (ir_instruction *)ir->get_next();
310
311 move_ir->remove();
312 inner_block->push_tail(move_ir);
313 }
314 }
315
316 /**
317 * Insert the instructions necessary to lower a return statement,
318 * before the given return instruction.
319 */
320 void insert_lowered_return(ir_return *ir)
321 {
322 ir_variable* return_flag = this->function.get_return_flag();
323 if(!this->function.signature->return_type->is_void()) {
324 ir_variable* return_value = this->function.get_return_value();
325 ir->insert_before(
326 new(ir) ir_assignment(
327 new (ir) ir_dereference_variable(return_value),
328 ir->value));
329 }
330 ir->insert_before(
331 new(ir) ir_assignment(
332 new (ir) ir_dereference_variable(return_flag),
333 new (ir) ir_constant(true)));
334 this->loop.may_set_return_flag = true;
335 }
336
337 /**
338 * If the given instruction is a return, lower it to instructions
339 * that store the return value (if there is one), set the return
340 * flag, and then break.
341 *
342 * It is safe to pass NULL to this function.
343 */
344 void lower_return_unconditionally(ir_instruction *ir)
345 {
346 if (get_jump_strength(ir) != strength_return) {
347 return;
348 }
349 insert_lowered_return((ir_return*)ir);
350 ir->replace_with(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
351 }
352
353 /**
354 * Create the necessary instruction to replace a break instruction.
355 */
356 ir_instruction *create_lowered_break()
357 {
358 void *ctx = this->function.signature;
359 return new(ctx) ir_assignment(
360 new(ctx) ir_dereference_variable(this->loop.get_break_flag()),
361 new(ctx) ir_constant(true));
362 }
363
364 /**
365 * If the given instruction is a break, lower it to an instruction
366 * that sets the break flag, without consulting
367 * should_lower_jump().
368 *
369 * It is safe to pass NULL to this function.
370 */
371 void lower_break_unconditionally(ir_instruction *ir)
372 {
373 if (get_jump_strength(ir) != strength_break) {
  17: Calling 'ir_lower_jumps_visitor::get_jump_strength'
  21: Returning from 'ir_lower_jumps_visitor::get_jump_strength'
  22: Taking false branch
374 return;
375 }
376 ir->replace_with(create_lowered_break());
  23: Calling 'exec_node::replace_with'
377 }
378
379 /**
380 * If the block ends in a conditional or unconditional break, lower
381 * it, even though should_lower_jump() says it needn't be lowered.
382 */
383 void lower_final_breaks(exec_list *block)
384 {
385 ir_instruction *ir = (ir_instruction *) block->get_tail();
386 lower_break_unconditionally(ir);
  16: Calling 'ir_lower_jumps_visitor::lower_break_unconditionally'
387 ir_if *ir_if = ir->as_if();
388 if (ir_if) {
389 lower_break_unconditionally(
390 (ir_instruction *) ir_if->then_instructions.get_tail());
391 lower_break_unconditionally(
392 (ir_instruction *) ir_if->else_instructions.get_tail());
393 }
394 }
395
396 virtual void visit(class ir_loop_jump * ir)
397 {
398 /* Eliminate all instructions after each one, since they are
399 * unreachable. This satisfies the DEAD_CODE_ELIMINATION
400 * postcondition.
401 */
402 truncate_after_instruction(ir);
403
404 /* Set this->block.min_strength based on this instruction. This
405 * satisfies the ANALYSIS postcondition. It is not necessary to
406 * update this->block.may_clear_execute_flag or
407 * this->loop.may_set_return_flag, because an unlowered jump
408 * instruction can't change any flags.
409 */
410 this->block.min_strength = ir->is_break() ? strength_break : strength_continue;
411
412 /* The CONTAINED_JUMPS_LOWERED postcondition is already
413 * satisfied, because jump statements can't contain other
414 * statements.
415 */
416 }
417
418 virtual void visit(class ir_return * ir)
419 {
420 /* Eliminate all instructions after each one, since they are
421 * unreachable. This satisfies the DEAD_CODE_ELIMINATION
422 * postcondition.
423 */
424 truncate_after_instruction(ir);
425
426 /* Set this->block.min_strength based on this instruction. This
427 * satisfies the ANALYSIS postcondition. It is not necessary to
428 * update this->block.may_clear_execute_flag or
429 * this->loop.may_set_return_flag, because an unlowered return
430 * instruction can't change any flags.
431 */
432 this->block.min_strength = strength_return;
433
434 /* The CONTAINED_JUMPS_LOWERED postcondition is already
435 * satisfied, because jump statements can't contain other
436 * statements.
437 */
438 }
439
440 virtual void visit(class ir_discard * ir)
441 {
442 /* Nothing needs to be done. The ANALYSIS and
443 * DEAD_CODE_ELIMINATION postconditions are already satisfied,
444 * because discard statements are ignored by this optimization
445 * pass. The CONTAINED_JUMPS_LOWERED postcondition is already
446 * satisfied, because discard statements can't contain other
447 * statements.
448 */
449 (void) ir;
450 }
451
452 virtual void visit(class ir_precision_statement * ir)
453 {
454 /* Nothing needs to be done. */
455 }
456
457 virtual void visit(class ir_typedecl_statement * ir)
458 {
459 /* Nothing needs to be done. */
460 }
461
462 enum jump_strength get_jump_strength(ir_instruction* ir)
463 {
464 if(!ir)
  17.1: 'ir' is non-null
  18: Taking false branch
465 return strength_none;
466 else if(ir->ir_type == ir_type_loop_jump) {
  18.1: Field 'ir_type' is equal to ir_type_loop_jump
  19: Taking true branch
467 if(((ir_loop_jump*)ir)->is_break())
  20: Taking true branch
468 return strength_break;
469 else
470 return strength_continue;
471 } else if(ir->ir_type == ir_type_return)
472 return strength_return;
473 else
474 return strength_none;
475 }
476
477 bool should_lower_jump(ir_jump* ir)
478 {
479 unsigned strength = get_jump_strength(ir);
480 bool lower;
481 switch(strength)
482 {
483 case strength_none:
484 lower = false; /* don't change this, code relies on it */
485 break;
486 case strength_continue:
487 lower = lower_continue;
488 break;
489 case strength_break:
490 assert(this->loop.loop);
491 /* never lower "canonical break" */
492 if(ir->get_next()->is_tail_sentinel() && (this->loop.nesting_depth == 0
493 || (this->loop.nesting_depth == 1 && this->loop.in_if_at_the_end_of_the_loop)))
494 lower = false;
495 else
496 lower = lower_break;
497 break;
498 case strength_return:
499 /* never lower return at the end of a this->function */
500 if(this->function.nesting_depth == 0 && ir->get_next()->is_tail_sentinel())
501 lower = false;
502 else
503 lower = this->function.lower_return;
504 break;
505 }
506 return lower;
507 }
508
509 block_record visit_block(exec_list* list)
510 {
511 /* Note: since visiting a node may change that node's next
512 * pointer, we can't use visit_exec_list(), because
513 * visit_exec_list() caches the node's next pointer before
514 * visiting it. So we use foreach_in_list() instead.
515 *
516 * foreach_in_list() isn't safe if the node being visited gets
517 * removed, but fortunately this visitor doesn't do that.
518 */
519
520 block_record saved_block = this->block;
521 this->block = block_record();
522 foreach_in_list(ir_instruction, node, list) {
523 node->accept(this);
524 }
525 block_record ret = this->block;
526 this->block = saved_block;
527 return ret;
528 }
529
530 virtual void visit(ir_if *ir)
531 {
532 if(this->loop.nesting_depth == 0 && ir->get_next()->is_tail_sentinel())
533 this->loop.in_if_at_the_end_of_the_loop = true;
534
535 ++this->function.nesting_depth;
536 ++this->loop.nesting_depth;
537
538 block_record block_records[2];
539 ir_jump* jumps[2];
540
541 /* Recursively lower nested jumps. This satisfies the
542 * CONTAINED_JUMPS_LOWERED postcondition, except in the case of
543 * unconditional jumps at the end of ir->then_instructions and
544 * ir->else_instructions, which are handled below.
545 */
546 block_records[0] = visit_block(&ir->then_instructions);
547 block_records[1] = visit_block(&ir->else_instructions);
548
549retry: /* we get here if we put code after the if inside a branch */
550
551 /* Determine which of ir->then_instructions and
552 * ir->else_instructions end with an unconditional jump.
553 */
554 for(unsigned i = 0; i < 2; ++i) {
555 exec_list& list = i ? ir->else_instructions : ir->then_instructions;
556 jumps[i] = 0;
557 if(!list.is_empty() && get_jump_strength((ir_instruction*)list.get_tail()))
558 jumps[i] = (ir_jump*)list.get_tail();
559 }
560
561 /* Loop until we have satisfied the CONTAINED_JUMPS_LOWERED
562 * postcondition by lowering jumps in both then_instructions and
563 * else_instructions.
564 */
565 for(;;) {
566 /* Determine the types of the jumps that terminate
567 * ir->then_instructions and ir->else_instructions.
568 */
569 jump_strength jump_strengths[2];
570
571 for(unsigned i = 0; i < 2; ++i) {
572 if(jumps[i]) {
573 jump_strengths[i] = block_records[i].min_strength;
574 assert(jump_strengths[i] == get_jump_strength(jumps[i]));
575 } else
576 jump_strengths[i] = strength_none;
577 }
578
579 /* If both code paths end in a jump, and the jumps are the
580 * same, and we are pulling out jumps, replace them with a
581 * single jump that comes after the if instruction. The new
582 * jump will be visited next, and it will be lowered if
583 * necessary by the loop or conditional that encloses it.
584 */
585 if(pull_out_jumps && jump_strengths[0] == jump_strengths[1]) {
586 bool unify = true;
587 if(jump_strengths[0] == strength_continue)
588 ir->insert_after(new(ir) ir_loop_jump(ir_loop_jump::jump_continue));
589 else if(jump_strengths[0] == strength_break)
590 ir->insert_after(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
591 /* FINISHME: unify returns with identical expressions */
592 else if(jump_strengths[0] == strength_return && this->function.signature->return_type->is_void())
593 ir->insert_after(new(ir) ir_return(NULL));
594 else
595 unify = false;
596
597 if(unify) {
598 jumps[0]->remove();
599 jumps[1]->remove();
600 this->progress = true;
601
602 /* Update jumps[] to reflect the fact that the jumps
603 * are gone, and update block_records[] to reflect the
604 * fact that control can now flow to the next
605 * instruction.
606 */
607 jumps[0] = 0;
608 jumps[1] = 0;
609 block_records[0].min_strength = strength_none;
610 block_records[1].min_strength = strength_none;
611
612 /* The CONTAINED_JUMPS_LOWERED postcondition is now
613 * satisfied, so we can break out of the loop.
614 */
615 break;
616 }
617 }
618
619 /* lower a jump: if both need to be lowered, start with the strongest one, so that
620 * we might later unify the lowered version with the other one
621 */
622 bool should_lower[2];
623 for(unsigned i = 0; i < 2; ++i)
624 should_lower[i] = should_lower_jump(jumps[i]);
625
626 int lower;
627 if(should_lower[1] && should_lower[0])
628 lower = jump_strengths[1] > jump_strengths[0];
629 else if(should_lower[0])
630 lower = 0;
631 else if(should_lower[1])
632 lower = 1;
633 else
634 /* Neither code path ends in a jump that needs to be
635 * lowered, so the CONTAINED_JUMPS_LOWERED postcondition
636 * is satisfied and we can break out of the loop.
637 */
638 break;
639
640 if(jump_strengths[lower] == strength_return) {
641 /* To lower a return, we create a return flag (if the
642 * function doesn't have one already) and add instructions
643 * that: 1. store the return value (if this function has a
644 * non-void return) and 2. set the return flag
645 */
646 insert_lowered_return((ir_return*)jumps[lower]);
647 if(this->loop.loop) {
648 /* If we are in a loop, replace the return instruction
649 * with a break instruction, and then loop so that the
650 * break instruction can be lowered if necessary.
651 */
652 ir_loop_jump* lowered = 0;
653 lowered = new(ir) ir_loop_jump(ir_loop_jump::jump_break);
654 /* Note: we must update block_records and jumps to
655 * reflect the fact that the control path has been
656 * altered from a return to a break.
657 */
658 block_records[lower].min_strength = strength_break;
659 jumps[lower]->replace_with(lowered);
660 jumps[lower] = lowered;
661 } else {
662 /* If we are not in a loop, we then proceed as we would
663 * for a continue statement (set the execute flag to
664 * false to prevent the rest of the function from
665 * executing).
666 */
667 goto lower_continue;
668 }
669 this->progress = true;
670 } else if(jump_strengths[lower] == strength_break) {
671 /* To lower a break, we create a break flag (if the loop
672 * doesn't have one already) and add an instruction that
673 * sets it.
674 *
675 * Then we proceed as we would for a continue statement
676 * (set the execute flag to false to prevent the rest of
677 * the loop body from executing).
678 *
679 * The visit() function for the loop will ensure that the
680 * break flag is checked after executing the loop body.
681 */
682 jumps[lower]->insert_before(create_lowered_break());
683 goto lower_continue;
684 } else if(jump_strengths[lower] == strength_continue) {
685lower_continue:
686 /* To lower a continue, we create an execute flag (if the
687 * loop doesn't have one already) and replace the continue
688 * with an instruction that clears it.
689 *
690 * Note that this code path gets exercised when lowering
691 * return statements that are not inside a loop, so
692 * this->loop must be initialized even outside of loops.
693 */
694 ir_variable* execute_flag = this->loop.get_execute_flag();
695 jumps[lower]->replace_with(new(ir) ir_assignment(new (ir) ir_dereference_variable(execute_flag), new (ir) ir_constant(false)));
696 /* Note: we must update block_records and jumps to reflect
697 * the fact that the control path has been altered to an
698 * instruction that clears the execute flag.
699 */
700 jumps[lower] = 0;
701 block_records[lower].min_strength = strength_always_clears_execute_flag;
702 block_records[lower].may_clear_execute_flag = true;
703 this->progress = true;
704
705 /* Let the loop run again, in case the other branch of the
706 * if needs to be lowered too.
707 */
708 }
709 }
710
711 /* move a jump out if possible */
712 if(pull_out_jumps) {
713 /* If one of the branches ends in a jump, and control cannot
714 * fall out the bottom of the other branch, then we can move
715 * the jump after the if.
716 *
717 * Set move_out to the branch we are moving a jump out of.
718 */
719 int move_out = -1;
720 if(jumps[0] && block_records[1].min_strength >= strength_continue)
721 move_out = 0;
722 else if(jumps[1] && block_records[0].min_strength >= strength_continue)
723 move_out = 1;
724
725 if(move_out >= 0)
726 {
727 jumps[move_out]->remove();
728 ir->insert_after(jumps[move_out]);
729 /* Note: we must update block_records and jumps to reflect
730 * the fact that the jump has been moved out of the if.
731 */
732 jumps[move_out] = 0;
733 block_records[move_out].min_strength = strength_none;
734 this->progress = true;
735 }
736 }
737
738 /* Now satisfy the ANALYSIS postcondition by setting
739 * this->block.min_strength and
740 * this->block.may_clear_execute_flag based on the
741 * characteristics of the two branches.
742 */
743 if(block_records[0].min_strength < block_records[1].min_strength)
744 this->block.min_strength = block_records[0].min_strength;
745 else
746 this->block.min_strength = block_records[1].min_strength;
747 this->block.may_clear_execute_flag = this->block.may_clear_execute_flag || block_records[0].may_clear_execute_flag || block_records[1].may_clear_execute_flag;
748
749 /* Now we need to clean up the instructions that follow the
750 * if.
751 *
752 * If those instructions are unreachable, then satisfy the
753 * DEAD_CODE_ELIMINATION postcondition by eliminating them.
754 * Otherwise that postcondition is already satisfied.
755 */
756 if(this->block.min_strength)
757 truncate_after_instruction(ir);
758 else if(this->block.may_clear_execute_flag)
759 {
760 /* If the "if" instruction might clear the execute flag, then
761 * we need to guard any instructions that follow so that they
762 * are only executed if the execute flag is set.
763 *
764 * If one of the branches of the "if" always clears the
765 * execute flag, and the other branch never clears it, then
766 * this is easy: just move all the instructions following the
767 * "if" into the branch that never clears it.
768 */
769 int move_into = -1;
770 if(block_records[0].min_strength && !block_records[1].may_clear_execute_flag)
771 move_into = 1;
772 else if(block_records[1].min_strength && !block_records[0].may_clear_execute_flag)
773 move_into = 0;
774
775 if(move_into >= 0) {
776 assert(!block_records[move_into].min_strength && !block_records[move_into].may_clear_execute_flag); /* otherwise, we just truncated */
777
778 exec_list* list = move_into ? &ir->else_instructions : &ir->then_instructions;
779 exec_node* next = ir->get_next();
780 if(!next->is_tail_sentinel()) {
781 move_outer_block_inside(ir, list);
782
783 /* If any instructions moved, then we need to visit
784 * them (since they are now inside the "if"). Since
785 * block_records[move_into] is in its default state
786 * (see assertion above), we can safely replace
787 * block_records[move_into] with the result of this
788 * analysis.
789 */
790 exec_list list;
791 list.head_sentinel.next = next;
792 block_records[move_into] = visit_block(&list);
793
794 /*
795 * Then we need to re-start our jump lowering, since one
796 * of the instructions we moved might be a jump that
797 * needs to be lowered.
798 */
799 this->progress = true;
800 goto retry;
801 }
802 } else {
803 /* If we get here, then the simple case didn't apply; we
804 * need to actually guard the instructions that follow.
805 *
806 * To avoid creating unnecessarily-deep nesting, first
807 * look through the instructions that follow and unwrap
808 * any instructions that are already wrapped in the
809 * appropriate guard.
810 */
811 ir_instruction* ir_after;
812 for(ir_after = (ir_instruction*)ir->get_next(); !ir_after->is_tail_sentinel();)
813 {
814 ir_if* ir_if = ir_after->as_if();
815 if(ir_if && ir_if->else_instructions.is_empty()) {
816 ir_dereference_variable* ir_if_cond_deref = ir_if->condition->as_dereference_variable();
817 if(ir_if_cond_deref && ir_if_cond_deref->var == this->loop.execute_flag) {
818 ir_instruction* ir_next = (ir_instruction*)ir_after->get_next();
819 ir_after->insert_before(&ir_if->then_instructions);
820 ir_after->remove();
821 ir_after = ir_next;
822 continue;
823 }
824 }
825 ir_after = (ir_instruction*)ir_after->get_next();
826
827 /* only set this if we find any unprotected instruction */
828 this->progress = true;
829 }
830
831 /* Then, wrap all the instructions that follow in a single
832 * guard.
833 */
834 if(!ir->get_next()->is_tail_sentinel()) {
835 assert(this->loop.execute_flag);
836 ir_if* if_execute = new(ir) ir_if(new(ir) ir_dereference_variable(this->loop.execute_flag));
837 move_outer_block_inside(ir, &if_execute->then_instructions);
838 ir->insert_after(if_execute);
839 }
840 }
841 }
842 --this->loop.nesting_depth;
843 --this->function.nesting_depth;
844 }
845
846 virtual void visit(ir_loop *ir)
847 {
848 /* Visit the body of the loop, with a fresh data structure in
849 * this->loop so that the analysis we do here won't bleed into
850 * enclosing loops.
851 *
852 * We assume that all code after a loop is reachable from the
853 * loop (see comments on enum jump_strength), so the
854 * DEAD_CODE_ELIMINATION postcondition is automatically
855 * satisfied, as is the block.min_strength portion of the
856 * ANALYSIS postcondition.
857 *
858 * The block.may_clear_execute_flag portion of the ANALYSIS
859 * postcondition is automatically satisfied because execute
860 * flags do not propagate outside of loops.
861 *
862 * The loop.may_set_return_flag portion of the ANALYSIS
863 * postcondition is handled below.
864 */
865 ++this->function.nesting_depth;
866 loop_record saved_loop = this->loop;
867 this->loop = loop_record(this->function.signature, ir);
868
869 /* Recursively lower nested jumps. This satisfies the
870 * CONTAINED_JUMPS_LOWERED postcondition, except in the case of
871 * an unconditional continue or return at the bottom of the
872 * loop, which are handled below.
873 */
874 block_record body = visit_block(&ir->body_instructions);
875
876 /* If the loop ends in an unconditional continue, eliminate it
877 * because it is redundant.
878 */
879 ir_instruction *ir_last
880 = (ir_instruction *) ir->body_instructions.get_tail();
881 if (get_jump_strength(ir_last) == strength_continue) {
  1: Taking true branch
882 ir_last->remove();
  2: Calling 'exec_node::remove'
  6: Returning from 'exec_node::remove'
883 }
884
885 /* If the loop ends in an unconditional return, and we are
886 * lowering returns, lower it.
887 */
888 if (this->function.lower_return)
  7: Assuming field 'lower_return' is false
  8: Taking false branch
889 lower_return_unconditionally(ir_last);
890
891 if(body.min_strength >= strength_break) {
  9: Assuming field 'min_strength' is < strength_break
  10: Taking false branch
892 /* FINISHME: If the min_strength of the loop body is
893 * strength_break or strength_return, that means that it
894 * isn't a loop at all, since control flow always leaves the
895 * body of the loop via break or return. In principle the
896 * loop could be eliminated in this case. This optimization
897 * is not implemented yet.
898 */
899 }
900
901 if(this->loop.break_flag) {
  11: Assuming field 'break_flag' is non-null
  12: Taking true branch
902 /* We only get here if we are lowering breaks */
903 assert (lower_break);
  13: Assuming field 'lower_break' is true
  14: '?' condition is true
904
905 /* If a break flag was generated while visiting the body of
906 * the loop, then at least one break was lowered, so we need
907 * to generate an if statement at the end of the loop that
908 * does a "break" if the break flag is set. The break we
909 * generate won't violate the CONTAINED_JUMPS_LOWERED
910 * postcondition, because should_lower_jump() always returns
911 * false for a break that happens at the end of a loop.
912 *
913 * However, if the loop already ends in a conditional or
914 * unconditional break, then we need to lower that break,
915 * because it won't be at the end of the loop anymore.
916 */
917 lower_final_breaks(&ir->body_instructions);
  15: Calling 'ir_lower_jumps_visitor::lower_final_breaks'
918
919 ir_if* break_if = new(ir) ir_if(new(ir) ir_dereference_variable(this->loop.break_flag));
920 break_if->then_instructions.push_tail(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
921 ir->body_instructions.push_tail(break_if);
922 }
923
924 /* If the body of the loop may set the return flag, then at
925 * least one return was lowered to a break, so we need to ensure
926 * that the return flag is checked after the body of the loop is
927 * executed.
928 */
929 if(this->loop.may_set_return_flag) {
930 assert(this->function.return_flag);
931 /* Generate the if statement to check the return flag */
932 ir_if* return_if = new(ir) ir_if(new(ir) ir_dereference_variable(this->function.return_flag));
933 /* Note: we also need to propagate the knowledge that the
934 * return flag may get set to the outer context. This
935 * satisfies the loop.may_set_return_flag part of the
936 * ANALYSIS postcondition.
937 */
938 saved_loop.may_set_return_flag = true;
939 if(saved_loop.loop)
940 /* If this loop is nested inside another one, then the if
941 * statement that we generated should break out of that
942 * loop if the return flag is set. Caller will lower that
943 * break statement if necessary.
944 */
945 return_if->then_instructions.push_tail(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
946 else {
947 /* Otherwise, ensure that the instructions that follow are only
948 * executed if the return flag is clear. We can do that by moving
949 * those instructions into the else clause of the generated if
950 * statement.
951 */
952 move_outer_block_inside(ir, &return_if->else_instructions);
953
954 /* In case the loop is embedded inside an if add a new return to
955 * the return flag then branch and let a future pass tidy it up.
956 */
957 if (this->function.signature->return_type->is_void())
958 return_if->then_instructions.push_tail(new(ir) ir_return(NULL));
959 else {
960 assert(this->function.return_value);
961 ir_variable* return_value = this->function.return_value;
962 return_if->then_instructions.push_tail(
963 new(ir) ir_return(new(ir) ir_dereference_variable(return_value)));
964 }
965 }
966
967 ir->insert_after(return_if);
968 }
969
970 this->loop = saved_loop;
971 --this->function.nesting_depth;
972 }
973
974 virtual void visit(ir_function_signature *ir)
975 {
976 /* these are not strictly necessary */
977 assert(!this->function.signature);
978 assert(!this->loop.loop);
979
980 bool lower_return;
981 if (strcmp(ir->function_name(), "main") == 0)
982 lower_return = lower_main_return;
983 else
984 lower_return = lower_sub_return;
985
986 function_record saved_function = this->function;
987 loop_record saved_loop = this->loop;
988 this->function = function_record(ir, lower_return);
989 this->loop = loop_record(ir);
990
991 assert(!this->loop.loop);
992
993 /* Visit the body of the function to lower any jumps that occur
994 * in it, except possibly an unconditional return statement at
995 * the end of it.
996 */
997 visit_block(&ir->body);
998
999 /* If the body ended in an unconditional return of non-void,
1000 * then we don't need to lower it because it's the one canonical
1001 * return.
1002 *
1003 * If the body ended in a return of void, eliminate it because
1004 * it is redundant.
1005 */
1006 if (ir->return_type->is_void() &&
1007 get_jump_strength((ir_instruction *) ir->body.get_tail())) {
1008 ir_jump *jump = (ir_jump *) ir->body.get_tail();
1009 assert (jump->ir_type == ir_type_return);
1010 jump->remove();
1011 }
1012
1013 if(this->function.return_value)
1014 ir->body.push_tail(new(ir) ir_return(new (ir) ir_dereference_variable(this->function.return_value)));
1015
1016 this->loop = saved_loop;
1017 this->function = saved_function;
1018 }
1019
1020 virtual void visit(class ir_function * ir)
1021 {
1022 visit_block(&ir->signatures);
1023 }
1024};
1025
1026} /* anonymous namespace */
1027
1028bool
1029do_lower_jumps(exec_list *instructions, bool pull_out_jumps, bool lower_sub_return, bool lower_main_return, bool lower_continue, bool lower_break)
1030{
1031 ir_lower_jumps_visitor v;
1032 v.pull_out_jumps = pull_out_jumps;
1033 v.lower_continue = lower_continue;
1034 v.lower_break = lower_break;
1035 v.lower_sub_return = lower_sub_return;
1036 v.lower_main_return = lower_main_return;
1037
1038 bool progress_ever = false;
1039 do {
1040 v.progress = false;
1041 visit_exec_list(instructions, &v);
1042 progress_ever = v.progress || progress_ever;
1043 } while (v.progress);
1044
1045 return progress_ever;
1046}
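For orientation, a hypothetical call site for this entry point; the exec_list of shader instructions and the particular flag values are assumptions for illustration, not taken from this report:

   /* Hypothetical usage sketch. do_lower_jumps() already iterates to a
    * fixed point internally, so one call is enough. */
   extern exec_list *shader_instructions;   /* assumed to exist elsewhere */

   void lower_jumps_example()
   {
      bool changed = do_lower_jumps(shader_instructions,
                                    true,   /* pull_out_jumps */
                                    true,   /* lower_sub_return */
                                    false,  /* lower_main_return */
                                    true,   /* lower_continue */
                                    false); /* lower_break */
      (void) changed;
   }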

glsl-optimizer/src/compiler/glsl/list.h

1/*
2 * Copyright © 2008, 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24/**
25 * \file list.h
26 * \brief Doubly-linked list abstract container type.
27 *
28 * Each doubly-linked list has a sentinel head and tail node. These nodes
29 * contain no data. The head sentinel can be identified by its \c prev
30 * pointer being \c NULL. The tail sentinel can be identified by its
31 * \c next pointer being \c NULL.
32 *
33 * A list is empty if either the head sentinel's \c next pointer points to the
34 * tail sentinel or the tail sentinel's \c prev pointer points to the head
35 * sentinel. The head sentinel and tail sentinel nodes are allocated within the
36 * list structure.
37 *
38 * Do note that this means that the list nodes will contain pointers into the
39 * list structure itself and as a result you may not \c realloc() an \c
40 * exec_list or any structure in which an \c exec_list is embedded.
41 */
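A small sketch of the invariants described above, using functions defined later in this header; it is an illustration only, not part of the file:

   /* Sketch: properties guaranteed by the sentinel layout. */
   static void sentinel_invariants_example(struct exec_list *list)
   {
      exec_list_make_empty(list);

      /* The head sentinel is identified by prev == NULL, the tail
       * sentinel by next == NULL. */
      assert(list->head_sentinel.prev == NULL);
      assert(list->tail_sentinel.next == NULL);

      /* An empty list: the head sentinel's next points at the tail
       * sentinel, and the tail sentinel's prev points back at the head. */
      assert(exec_list_is_empty(list));
      assert(list->head_sentinel.next == &list->tail_sentinel);
      assert(list->tail_sentinel.prev == &list->head_sentinel);
   }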
42
43#ifndef LIST_CONTAINER_H
44#define LIST_CONTAINER_H
45
46#ifndef __cplusplus
47#include <stddef.h>
48#endif
49#include <assert.h>
50
51#include "util/ralloc.h"
52
53struct exec_node {
54 struct exec_node *next;
55 struct exec_node *prev;
56
57#ifdef __cplusplus
58 DECLARE_RZALLOC_CXX_OPERATORS(exec_node)
59
60 exec_node() : next(NULL), prev(NULL)
61 {
62 /* empty */
63 }
64
65 const exec_node *get_next() const;
66 exec_node *get_next();
67
68 const exec_node *get_prev() const;
69 exec_node *get_prev();
70
71 void remove();
72
73 /**
74 * Link a node with itself
75 *
76 * This creates a sort of degenerate list that is occasionally useful.
77 */
78 void self_link();
79
80 /**
81 * Insert a node in the list after the current node
82 */
83 void insert_after(exec_node *after);
84
85 /**
86 * Insert another list in the list after the current node
87 */
88 void insert_after(struct exec_list *after);
89
90 /**
91 * Insert a node in the list before the current node
92 */
93 void insert_before(exec_node *before);
94
95 /**
96 * Insert another list in the list before the current node
97 */
98 void insert_before(struct exec_list *before);
99
100 /**
101 * Replace the current node with the given node.
102 */
103 void replace_with(exec_node *replacement);
104
105 /**
106 * Is this the sentinel at the tail of the list?
107 */
108 bool is_tail_sentinel() const;
109
110 /**
111 * Is this the sentinel at the head of the list?
112 */
113 bool is_head_sentinel() const;
114#endif
115};
116
117static inline void
118exec_node_init(struct exec_node *n)
119{
120 n->next = NULL;
121 n->prev = NULL;
122}
123
124static inline const struct exec_node *
125exec_node_get_next_const(const struct exec_node *n)
126{
127 return n->next;
128}
129
130static inline struct exec_node *
131exec_node_get_next(struct exec_node *n)
132{
133 return n->next;
134}
135
136static inline const struct exec_node *
137exec_node_get_prev_const(const struct exec_node *n)
138{
139 return n->prev;
140}
141
142static inline struct exec_node *
143exec_node_get_prev(struct exec_node *n)
144{
145 return n->prev;
146}
147
148static inline void
149exec_node_remove(struct exec_node *n)
150{
151 n->next->prev = n->prev;
152 n->prev->next = n->next;
153 n->next = NULL;
154 n->prev = NULL;
  4: Null pointer value stored to field 'prev'
155}
156
157static inline void
158exec_node_self_link(struct exec_node *n)
159{
160 n->next = n;
161 n->prev = n;
162}
163
164static inline void
165exec_node_insert_after(struct exec_node *n, struct exec_node *after)
166{
167 after->next = n->next;
168 after->prev = n;
169
170 n->next->prev = after;
171 n->next = after;
172}
173
174static inline void
175exec_node_insert_node_before(struct exec_node *n, struct exec_node *before)
176{
177 before->next = n;
178 before->prev = n->prev;
179
180 n->prev->next = before;
181 n->prev = before;
182}
183
184static inline void
185exec_node_replace_with(struct exec_node *n, struct exec_node *replacement)
186{
187 replacement->prev = n->prev;
188 replacement->next = n->next;
189
190 n->prev->next = replacement;
  25: Access to field 'next' results in a dereference of a null pointer (loaded from field 'prev')
191 n->next->prev = replacement;
192}
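The diagnostic above describes calling exec_node_replace_with() on a node that has already been removed. A standalone sketch of that pattern (the function and parameter names are illustrative, not taken from the traced code); it only shows why a NULL 'prev' follows from remove(), not whether the traced path in lower_jumps.cpp can actually reach this state:

   /* Sketch of the flagged pattern: exec_node_remove() nulls both links
    * of the removed node, so a later exec_node_replace_with() on the same
    * node dereferences the NULL 'prev' at line 190 above.
    * exec_list_push_tail() is defined further down in this header. */
   static void replace_after_remove_example(struct exec_list *list,
                                            struct exec_node *node,
                                            struct exec_node *replacement)
   {
      exec_list_push_tail(list, node);     /* node is linked into the list */

      exec_node_remove(node);              /* node->prev = node->next = NULL */
      exec_node_replace_with(node, replacement); /* NULL->next: the reported dereference */
   }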
193
194static inline bool
195exec_node_is_tail_sentinel(const struct exec_node *n)
196{
197 return n->next == NULL;
198}
199
200static inline bool
201exec_node_is_head_sentinel(const struct exec_node *n)
202{
203 return n->prev == NULL;
204}
205
206#ifdef __cplusplus
207inline const exec_node *exec_node::get_next() const
208{
209 return exec_node_get_next_const(this);
210}
211
212inline exec_node *exec_node::get_next()
213{
214 return exec_node_get_next(this);
215}
216
217inline const exec_node *exec_node::get_prev() const
218{
219 return exec_node_get_prev_const(this);
220}
221
222inline exec_node *exec_node::get_prev()
223{
224 return exec_node_get_prev(this);
225}
226
227inline void exec_node::remove()
228{
229 exec_node_remove(this);
  3: Calling 'exec_node_remove'
  5: Returning from 'exec_node_remove'
230}
231
232inline void exec_node::self_link()
233{
234 exec_node_self_link(this);
235}
236
237inline void exec_node::insert_after(exec_node *after)
238{
239 exec_node_insert_after(this, after);
240}
241
242inline void exec_node::insert_before(exec_node *before)
243{
244 exec_node_insert_node_before(this, before);
245}
246
247inline void exec_node::replace_with(exec_node *replacement)
248{
249 exec_node_replace_with(this, replacement);
  24: Calling 'exec_node_replace_with'
250}
251
252inline bool exec_node::is_tail_sentinel() const
253{
254 return exec_node_is_tail_sentinel(this);
255}
256
257inline bool exec_node::is_head_sentinel() const
258{
259 return exec_node_is_head_sentinel(this);
260}
261#endif
262
263#ifdef __cplusplus
264/* This macro will not work correctly if `t' uses virtual inheritance. If you
265 * are using virtual inheritance, you deserve a slow and painful death. Enjoy!
266 */
267#define exec_list_offsetof(t, f, p) \
268 (((char *) &((t *) p)->f) - ((char *) p))
269#else
270#define exec_list_offsetof(t, f, p) offsetof(t, f)
271#endif
272
273/**
274 * Get a pointer to the structure containing an exec_node
275 *
276 * Given a pointer to an \c exec_node embedded in a structure, get a pointer to
277 * the containing structure.
278 *
279 * \param type Base type of the structure containing the node
280 * \param node Pointer to the \c exec_node
281 * \param field Name of the field in \c type that is the embedded \c exec_node
282 */
283#define exec_node_data(type, node, field) \
284 ((type *) (((uintptr_t) node) - exec_list_offsetof(type, field, node)))
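A brief usage sketch of the container-of pattern this macro implements; the structure and field names are illustrative, not from this header:

   /* Illustration only: recover the containing structure from a pointer
    * to its embedded exec_node. */
   struct my_instruction {
      int opcode;
      struct exec_node link;   /* embedded list node */
   };

   static struct my_instruction *
   instruction_from_link(struct exec_node *n)
   {
      return exec_node_data(struct my_instruction, n, link);
   }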
285
286#ifdef __cplusplus
287struct exec_node;
288#endif
289
290struct exec_list {
291 struct exec_node head_sentinel;
292 struct exec_node tail_sentinel;
293
294#ifdef __cplusplus
295 DECLARE_RALLOC_CXX_OPERATORS(exec_list)
296
297 exec_list()
298 {
299 make_empty();
300 }
301
302 void make_empty();
303
304 bool is_empty() const;
305
306 const exec_node *get_head() const;
307 exec_node *get_head();
308 const exec_node *get_head_raw() const;
309 exec_node *get_head_raw();
310
311 const exec_node *get_tail() const;
312 exec_node *get_tail();
313 const exec_node *get_tail_raw() const;
314 exec_node *get_tail_raw();
315
316 unsigned length() const;
317
318 void push_head(exec_node *n);
319 void push_tail(exec_node *n);
320 void push_degenerate_list_at_head(exec_node *n);
321
322 /**
323 * Remove the first node from a list and return it
324 *
325 * \return
326 * The first node in the list or \c NULL if the list is empty.
327 *
328 * \sa exec_list::get_head
329 */
330 exec_node *pop_head();
331
332 /**
333 * Move all of the nodes from this list to the target list
334 */
335 void move_nodes_to(exec_list *target);
336
337 /**
338 * Append all nodes from the source list to the end of the target list
339 */
340 void append_list(exec_list *source);
341
342 /**
343 * Prepend all nodes from the source list to the beginning of the target
344 * list
345 */
346 void prepend_list(exec_list *source);
347#endif
348};
349
350static inline void
351exec_list_make_empty(struct exec_list *list)
352{
353 list->head_sentinel.next = &list->tail_sentinel;
354 list->head_sentinel.prev = NULL;
355 list->tail_sentinel.next = NULL;
356 list->tail_sentinel.prev = &list->head_sentinel;
357}
358
359static inline bool
360exec_list_is_empty(const struct exec_list *list)
361{
362 /* There are three ways to test whether a list is empty or not.
363 *
364 * - Check to see if the head sentinel's \c next is the tail sentinel.
365 * - Check to see if the tail sentinel's \c prev is the head sentinel.
366 * - Check to see if the head is the sentinel node by testing whether its
367 * \c next pointer is \c NULL.
368 *
369 * The first two methods tend to generate better code on modern systems
370 * because they save a pointer dereference.
371 */
372 return list->head_sentinel.next == &list->tail_sentinel;
373}
374
375static inline bool
376exec_list_is_singular(const struct exec_list *list)
377{
378 return !exec_list_is_empty(list) &&
379 list->head_sentinel.next->next == &list->tail_sentinel;
380}
381
382static inline const struct exec_node *
383exec_list_get_head_const(const struct exec_list *list)
384{
385 return !exec_list_is_empty(list) ? list->head_sentinel.next : NULL;
386}
387
388static inline struct exec_node *
389exec_list_get_head(struct exec_list *list)
390{
391 return !exec_list_is_empty(list) ? list->head_sentinel.next : NULL;
392}
393
394static inline const struct exec_node *
395exec_list_get_head_raw_const(const struct exec_list *list)
396{
397 return list->head_sentinel.next;
398}
399
400static inline struct exec_node *
401exec_list_get_head_raw(struct exec_list *list)
402{
403 return list->head_sentinel.next;
404}
405
406static inline const struct exec_node *
407exec_list_get_tail_const(const struct exec_list *list)
408{
409 return !exec_list_is_empty(list) ? list->tail_sentinel.prev : NULL;
410}
411
412static inline struct exec_node *
413exec_list_get_tail(struct exec_list *list)
414{
415 return !exec_list_is_empty(list) ? list->tail_sentinel.prev : NULL;
416}
417
418static inline const struct exec_node *
419exec_list_get_tail_raw_const(const struct exec_list *list)
420{
421 return list->tail_sentinel.prev;
422}
423
424static inline struct exec_node *
425exec_list_get_tail_raw(struct exec_list *list)
426{
427 return list->tail_sentinel.prev;
428}
429
430static inline unsigned
431exec_list_length(const struct exec_list *list)
432{
433 unsigned size = 0;
434 struct exec_node *node;
435
436 for (node = list->head_sentinel.next; node->next != NULL; node = node->next) {
437 size++;
438 }
439
440 return size;
441}
442
443static inline void
444exec_list_push_head(struct exec_list *list, struct exec_node *n)
445{
446 n->next = list->head_sentinel.next;
447 n->prev = &list->head_sentinel;
448
449 n->next->prev = n;
450 list->head_sentinel.next = n;
451}
452
453static inline void
454exec_list_push_tail(struct exec_list *list, struct exec_node *n)
455{
456 n->next = &list->tail_sentinel;
457 n->prev = list->tail_sentinel.prev;
458
459 n->prev->next = n;
460 list->tail_sentinel.prev = n;
461}
462
463static inline void
464exec_list_push_degenerate_list_at_head(struct exec_list *list, struct exec_node *n)
465{
466 assert(n->prev->next == n);
467
468 n->prev->next = list->head_sentinel.next;
469 list->head_sentinel.next->prev = n->prev;
470 n->prev = &list->head_sentinel;
471 list->head_sentinel.next = n;
472}
473
474static inline struct exec_node *
475exec_list_pop_head(struct exec_list *list)
476{
477 struct exec_node *const n = exec_list_get_head(list);
478 if (n != NULL)
479 exec_node_remove(n);
480
481 return n;
482}
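/* Hypothetical drain loop: pop_head() unlinks and returns nodes until the
 * list is empty.  Recovering the containing structure from the returned
 * exec_node is the caller's job, either by a cast when the node is the
 * first member or via exec_node_data() otherwise; `my_node` is the assumed
 * illustration-only type from the earlier sketch. */
static void example_drain(struct exec_list *list)
{
   struct exec_node *n;
   while ((n = exec_list_pop_head(list)) != NULL) {
      struct my_node *item = (struct my_node *) n;   /* node is first member */
      (void) item;
   }
}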
483
484static inline void
485exec_list_move_nodes_to(struct exec_list *list, struct exec_list *target)
486{
487 if (exec_list_is_empty(list)) {
488 exec_list_make_empty(target);
489 } else {
490 target->head_sentinel.next = list->head_sentinel.next;
491 target->head_sentinel.prev = NULL;
492 target->tail_sentinel.next = NULL;
493 target->tail_sentinel.prev = list->tail_sentinel.prev;
494
495 target->head_sentinel.next->prev = &target->head_sentinel;
496 target->tail_sentinel.prev->next = &target->tail_sentinel;
497
498 exec_list_make_empty(list);
499 }
500}
501
502static inline void
503exec_list_append(struct exec_list *list, struct exec_list *source)
504{
505 if (exec_list_is_empty(source))
506 return;
507
508 /* Link the first node of the source with the last node of the target list.
509 */
510 list->tail_sentinel.prev->next = source->head_sentinel.next;
511 source->head_sentinel.next->prev = list->tail_sentinel.prev;
512
513 /* Make the tail of the source list be the tail of the target list.
514 */
515 list->tail_sentinel.prev = source->tail_sentinel.prev;
516 list->tail_sentinel.prev->next = &list->tail_sentinel;
517
518 /* Make the source list empty for good measure.
519 */
520 exec_list_make_empty(source);
521}
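/* Sketch of the splice semantics above (hypothetical usage): after
 * exec_list_append() every node formerly in `src` is reachable from `dst`
 * in order, and `src` is left as a valid empty list. */
static void example_append(struct exec_list *dst, struct exec_list *src)
{
   unsigned before = exec_list_length(dst) + exec_list_length(src);
   exec_list_append(dst, src);
   assert(exec_list_is_empty(src));
   assert(exec_list_length(dst) == before);
}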
522
523static inline void
524exec_node_insert_list_after(struct exec_node *n, struct exec_list *after)
525{
526 if (exec_list_is_empty(after))
527 return;
528
529 after->tail_sentinel.prev->next = n->next;
530 after->head_sentinel.next->prev = n;
531
532 n->next->prev = after->tail_sentinel.prev;
533 n->next = after->head_sentinel.next;
534
535 exec_list_make_empty(after);
536}
537
538static inline void
539exec_list_prepend(struct exec_list *list, struct exec_list *source)
540{
541 exec_list_append(source, list);
542 exec_list_move_nodes_to(source, list);
543}
544
545static inline void
546exec_node_insert_list_before(struct exec_node *n, struct exec_list *before)
547{
548 if (exec_list_is_empty(before))
549 return;
550
551 before->tail_sentinel.prev->next = n;
552 before->head_sentinel.next->prev = n->prev;
553
554 n->prev->next = before->head_sentinel.next;
555 n->prev = before->tail_sentinel.prev;
556
557 exec_list_make_empty(before);
558}
559
560static inline void
561exec_list_validate(const struct exec_list *list)
562{
563 const struct exec_node *node;
564
565 assert(list->head_sentinel.next->prev == &list->head_sentinel);
566 assert(list->head_sentinel.prev == NULL);
567 assert(list->tail_sentinel.next == NULL);
568 assert(list->tail_sentinel.prev->next == &list->tail_sentinel);
569
570 /* We could try to use one of the iterators below for this but they all
571 * either require C++ or assume the exec_node is embedded in a structure
572 * which is not the case for this function.
573 */
574 for (node = list->head_sentinel.next; node->next != NULL; node = node->next) {
575 assert(node->next->prev == node);
576 assert(node->prev->next == node);
577 }
578}
579
580#ifdef __cplusplus
581inline void exec_list::make_empty()
582{
583 exec_list_make_empty(this);
584}
585
586inline bool exec_list::is_empty() const
587{
588 return exec_list_is_empty(this);
589}
590
591inline const exec_node *exec_list::get_head() const
592{
593 return exec_list_get_head_const(this);
594}
595
596inline exec_node *exec_list::get_head()
597{
598 return exec_list_get_head(this);
599}
600
601inline const exec_node *exec_list::get_head_raw() const
602{
603 return exec_list_get_head_raw_const(this);
604}
605
606inline exec_node *exec_list::get_head_raw()
607{
608 return exec_list_get_head_raw(this);
609}
610
611inline const exec_node *exec_list::get_tail() const
612{
613 return exec_list_get_tail_const(this);
614}
615
616inline exec_node *exec_list::get_tail()
617{
618 return exec_list_get_tail(this);
619}
620
621inline const exec_node *exec_list::get_tail_raw() const
622{
623 return exec_list_get_tail_raw_const(this);
624}
625
626inline exec_node *exec_list::get_tail_raw()
627{
628 return exec_list_get_tail_raw(this);
629}
630
631inline unsigned exec_list::length() const
632{
633 return exec_list_length(this);
634}
635
636inline void exec_list::push_head(exec_node *n)
637{
638 exec_list_push_head(this, n);
639}
640
641inline void exec_list::push_tail(exec_node *n)
642{
643 exec_list_push_tail(this, n);
644}
645
646inline void exec_list::push_degenerate_list_at_head(exec_node *n)
647{
648 exec_list_push_degenerate_list_at_head(this, n);
649}
650
651inline exec_node *exec_list::pop_head()
652{
653 return exec_list_pop_head(this);
654}
655
656inline void exec_list::move_nodes_to(exec_list *target)
657{
658 exec_list_move_nodes_to(this, target);
659}
660
661inline void exec_list::append_list(exec_list *source)
662{
663 exec_list_append(this, source);
664}
665
666inline void exec_node::insert_after(exec_list *after)
667{
668 exec_node_insert_list_after(this, after);
669}
670
671inline void exec_list::prepend_list(exec_list *source)
672{
673 exec_list_prepend(this, source);
674}
675
676inline void exec_node::insert_before(exec_list *before)
677{
678 exec_node_insert_list_before(this, before);
679}
680#endif
681
682#define exec_node_typed_forward(__node, __type) \
683   (!exec_node_is_tail_sentinel(__node) ? (__type) (__node) : NULL)
684
685#define exec_node_typed_backward(__node, __type) \
686   (!exec_node_is_head_sentinel(__node) ? (__type) (__node) : NULL)
687
688#define foreach_in_list(__type, __inst, __list) \
689   for (__type *__inst = exec_node_typed_forward((__list)->head_sentinel.next, __type *); \
690        (__inst) != NULL; \
691        (__inst) = exec_node_typed_forward((__inst)->next, __type *))
692
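/* Hypothetical usage of foreach_in_list(): iterate the embedded nodes and
 * view each one as its containing type.  This cast-based form assumes the
 * exec_node is the first member of `my_node` (illustration-only type). */
static int example_sum(struct exec_list *list)
{
   int sum = 0;
   foreach_in_list(struct my_node, n, list) {
      sum += n->value;
   }
   return sum;
}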
693#define foreach_in_list_reverse(__type, __inst, __list) \
694   for (__type *__inst = exec_node_typed_backward((__list)->tail_sentinel.prev, __type *); \
695        (__inst) != NULL; \
696        (__inst) = exec_node_typed_backward((__inst)->prev, __type *))
697
698/**
699 * This version is safe even if the current node is removed.
700 */
701
702#define foreach_in_list_safe(__type, __node, __list) \
703   for (__type *__node = exec_node_typed_forward((__list)->head_sentinel.next, __type *), \
704               *__next = (__node) ? exec_node_typed_forward((__list)->head_sentinel.next->next, __type *) : NULL; \
705        (__node) != NULL; \
706        (__node) = __next, __next = __next ? exec_node_typed_forward(__next->next, __type *) : NULL)
707
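/* Hypothetical usage of foreach_in_list_safe(): because the next pointer is
 * captured before the loop body runs, the current node may be unlinked
 * without breaking the iteration.  `my_node` and the filtering predicate
 * are illustration-only assumptions. */
static void example_remove_negative(struct exec_list *list)
{
   foreach_in_list_safe(struct my_node, n, list) {
      if (n->value < 0)
         exec_node_remove(&n->link);
   }
}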
708#define foreach_in_list_reverse_safe(__type, __node, __list) \
709   for (__type *__node = exec_node_typed_backward((__list)->tail_sentinel.prev, __type *), \
710               *__prev = (__node) ? exec_node_typed_backward((__list)->tail_sentinel.prev->prev, __type *) : NULL; \
711        (__node) != NULL; \
712        (__node) = __prev, __prev = __prev ? exec_node_typed_backward(__prev->prev, __type *) : NULL)
713
714#define foreach_in_list_use_after(__type, __inst, __list) \
715   __type *__inst; \
716   for ((__inst) = exec_node_typed_forward((__list)->head_sentinel.next, __type *); \
717        (__inst) != NULL; \
718        (__inst) = exec_node_typed_forward((__inst)->next, __type *))
719
720/**
721 * Iterate through two lists at once. Stops at the end of the shorter list.
722 *
723 * This is safe against either current node being removed or replaced.
724 */
725#define foreach_two_lists(__node1, __list1, __node2, __list2) \
726   for (struct exec_node * __node1 = (__list1)->head_sentinel.next, \
727                         * __node2 = (__list2)->head_sentinel.next, \
728                         * __next1 = __node1->next, \
729                         * __next2 = __node2->next \
730        ; __next1 != NULL && __next2 != NULL \
731        ; __node1 = __next1, \
732          __node2 = __next2, \
733          __next1 = __next1->next, \
734          __next2 = __next2->next)
735
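/* Hypothetical usage of foreach_two_lists(): walk two lists in lock step,
 * pairing nodes element by element; iteration stops when the shorter list
 * runs out.  Both loop variables are plain `struct exec_node *`, and the
 * casts again assume the node is the first member of `my_node`. */
static void example_pairwise(struct exec_list *list_a, struct exec_list *list_b)
{
   foreach_two_lists(node_a, list_a, node_b, list_b) {
      struct my_node *a = (struct my_node *) node_a;
      struct my_node *b = (struct my_node *) node_b;
      a->value += b->value;
   }
}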
736#define exec_node_data_forward(type, node, field) \
737   (!exec_node_is_tail_sentinel(node) ? exec_node_data(type, node, field) : NULL)
738
739#define exec_node_data_backward(type, node, field) \
740   (!exec_node_is_head_sentinel(node) ? exec_node_data(type, node, field) : NULL)
741
742#define foreach_list_typed(__type, __node, __field, __list) \
743   for (__type * __node = \
744           exec_node_data_forward(__type, (__list)->head_sentinel.next, __field); \
745        (__node) != NULL; \
746        (__node) = exec_node_data_forward(__type, (__node)->__field.next, __field))
747
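/* Hypothetical usage of foreach_list_typed(): unlike the cast-based
 * foreach_in_list(), this form goes through exec_node_data(), so the
 * embedded exec_node may live at any offset in the containing struct.
 * `my_entry` and its `node` field are illustration-only assumptions. */
struct my_entry {
   int key;
   struct exec_node node;   /* deliberately not the first member */
};

static struct my_entry *
example_find(struct exec_list *list, int key)
{
   foreach_list_typed(struct my_entry, e, node, list) {
      if (e->key == key)
         return e;
   }
   return NULL;
}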
748#define foreach_list_typed_from(__type, __node, __field, __list, __start) \
749   for (__type * __node = exec_node_data_forward(__type, (__start), __field); \
750        (__node) != NULL; \
751        (__node) = exec_node_data_forward(__type, (__node)->__field.next, __field))
752
753#define foreach_list_typed_reverse(__type, __node, __field, __list) \
754   for (__type * __node = \
755           exec_node_data_backward(__type, (__list)->tail_sentinel.prev, __field); \
756        (__node) != NULL; \
757        (__node) = exec_node_data_backward(__type, (__node)->__field.prev, __field))
758
759#define foreach_list_typed_safe(__type, __node, __field, __list) \
760   for (__type * __node = \
761           exec_node_data_forward(__type, (__list)->head_sentinel.next, __field), \
762               * __next = (__node) ? \
763           exec_node_data_forward(__type, (__node)->__field.next, __field) : NULL; \
764        (__node) != NULL; \
765        (__node) = __next, __next = (__next && (__next)->__field.next) ? \
766           exec_node_data_forward(__type, (__next)->__field.next, __field) : NULL)
767
768#define foreach_list_typed_reverse_safe(__type, __node, __field, __list) \
769   for (__type * __node = \
770           exec_node_data_backward(__type, (__list)->tail_sentinel.prev, __field), \
771               * __prev = (__node) ? \
772           exec_node_data_backward(__type, (__node)->__field.prev, __field) : NULL; \
773        (__node) != NULL; \
774        (__node) = __prev, __prev = (__prev && (__prev)->__field.prev) ? \
775           exec_node_data_backward(__type, (__prev)->__field.prev, __field) : NULL)
776
777#endif /* LIST_CONTAINER_H */