Bug Summary

File: /root/firefox-clang/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/list.h
Warning: line 151, column 18
Access to field 'prev' results in a dereference of a null pointer (loaded from field 'next')

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ir_function_detect_recursion.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -ffp-contract=off -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/root/firefox-clang/third_party/rust/glslopt -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/root/firefox-clang/third_party/rust/glslopt -resource-dir /usr/lib/llvm-21/lib/clang/21 -include /root/firefox-clang/config/gcc_hidden.h -include /root/firefox-clang/obj-x86_64-pc-linux-gnu/mozilla-config.h -I glsl-optimizer/include -I glsl-optimizer/src/mesa -I glsl-optimizer/src/mapi -I glsl-optimizer/src/compiler -I glsl-optimizer/src/compiler/glsl -I glsl-optimizer/src/gallium/auxiliary -I glsl-optimizer/src/gallium/include -I glsl-optimizer/src -I glsl-optimizer/src/util -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/stl_wrappers -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/system_wrappers -U _FORTIFY_SOURCE -D _FORTIFY_SOURCE=2 -D _GLIBCXX_ASSERTIONS -D DEBUG=1 -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include/nspr -I 
/root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include/nss -D MOZILLA_CLIENT -D MOZILLA_CONFIG_H -D __STDC_FORMAT_MACROS -D _GNU_SOURCE -D HAVE_ENDIAN_H -D HAVE_PTHREAD -D HAVE_TIMESPEC_GET -D MOZ_INCLUDE_MOZALLOC_H -D mozilla_throw_gcc_h -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/x86_64-linux-gnu/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14/backward -internal-isystem /usr/lib/llvm-21/lib/clang/21/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-error=pessimizing-move -Wno-error=large-by-value-copy=128 -Wno-error=implicit-int-float-conversion -Wno-error=thread-safety-analysis -Wno-error=tautological-type-limit-compare -Wno-invalid-offsetof -Wno-range-loop-analysis -Wno-deprecated-anon-enum-enum-conversion -Wno-deprecated-enum-enum-conversion -Wno-deprecated-this-capture -Wno-inline-new-delete -Wno-error=deprecated-declarations -Wno-error=array-bounds -Wno-error=free-nonheap-object -Wno-error=atomic-alignment -Wno-error=deprecated-builtins -Wno-psabi -Wno-error=builtin-macro-redefined -Wno-vla-cxx-extension -Wno-unknown-warning-option -fdeprecated-macro -ferror-limit 19 -fstrict-flex-arrays=1 -stack-protector 2 -fstack-clash-protection -ftrivial-auto-var-init=pattern -fno-rtti -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -fno-sized-deallocation -fno-aligned-allocation -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2025-06-27-100320-3286336-1 -x c++ glsl-optimizer/src/compiler/glsl/ir_function_detect_recursion.cpp

glsl-optimizer/src/compiler/glsl/ir_function_detect_recursion.cpp

1/*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24/**
25 * \file ir_function_detect_recursion.cpp
26 * Determine whether a shader contains static recursion.
27 *
28 * Consider the (possibly disjoint) graph of function calls in a shader. If a
29 * program contains recursion, this graph will contain a cycle. If a function
30 * is part of a cycle, it will have a caller and it will have a callee (it
31 * calls another function).
32 *
33 * To detect recursion, the function call graph is constructed. The graph is
34 * repeatedly reduced by removing any function that either has no callees
35 * (leaf functions) or has no caller. Eventually the only functions that
36 * remain will be the functions in the cycles.
37 *
38 * The GLSL spec is a bit wishy-washy about recursion.
39 *
40 * From page 39 (page 45 of the PDF) of the GLSL 1.10 spec:
41 *
42 * "Behavior is undefined if recursion is used. Recursion means having any
43 * function appearing more than once at any one time in the run-time stack
44 * of function calls. That is, a function may not call itself either
45 * directly or indirectly. Compilers may give diagnostic messages when
46 * this is detectable at compile time, but not all such cases can be
47 * detected at compile time."
48 *
49 * From page 79 (page 85 of the PDF):
50 *
51 * "22) Should recursion be supported?
52 *
53 * DISCUSSION: Probably not necessary, but another example of limiting
54 * the language based on how it would directly map to hardware. One
55 * thought is that recursion would benefit ray tracing shaders. On the
56 * other hand, many recursion operations can also be implemented with the
57 * user managing the recursion through arrays. RenderMan doesn't support
58 * recursion. This could be added at a later date, if it proved to be
59 * necessary.
60 *
61 * RESOLVED on September 10, 2002: Implementations are not required to
62 * support recursion.
63 *
64 * CLOSED on September 10, 2002."
65 *
66 * From page 79 (page 85 of the PDF):
67 *
68 * "56) Is it an error for an implementation to support recursion if the
69 * specification says recursion is not supported?
70 *
71 * ADDED on September 10, 2002.
72 *
73 * DISCUSSION: This issue is related to Issue (22). If we say that
74 * recursion (or some other piece of functionality) is not supported, is
75 * it an error for an implementation to support it? Perhaps the
76 * specification should remain silent on these kind of things so that they
77 * could be gracefully added later as an extension or as part of the
78 * standard.
79 *
80 * RESOLUTION: Languages, in general, have programs that are not
81 * well-formed in ways a compiler cannot detect. Portability is only
82 * ensured for well-formed programs. Detecting recursion is an example of
83 * this. The language will say a well-formed program may not recurse, but
84 * compilers are not forced to detect that recursion may happen.
85 *
86 * CLOSED: November 29, 2002."
87 *
88 * In GLSL 1.10 the behavior of recursion is undefined. Compilers don't have
89 * to reject shaders (at compile-time or link-time) that contain recursion.
90 * Instead they could work, or crash, or kill a kitten.
91 *
92 * From page 44 (page 50 of the PDF) of the GLSL 1.20 spec:
93 *
94 * "Recursion is not allowed, not even statically. Static recursion is
95 * present if the static function call graph of the program contains
96 * cycles."
97 *
98 * This language clears things up a bit, but it still leaves a lot of
99 * questions unanswered.
100 *
101 * - Is the error generated at compile-time or link-time?
102 *
103 * - Is it an error to have a recursive function that is never statically
104 * called by main or any function called directly or indirectly by main?
105 * Technically speaking, such a function is not in the "static function
106 * call graph of the program" at all.
107 *
108 * \bug
109 * If a shader has multiple cycles, this algorithm may erroneously complain
110 * about functions that aren't in any cycle, but are in the part of the call
111 * tree that connects them. For example, if the call graph consists of a
112 * cycle between A and B, and a cycle between D and E, and B also calls C
113 * which calls D, then this algorithm will report C as a function which "has
114 * static recursion" even though it is not part of any cycle.
115 *
116 * A better algorithm for cycle detection that doesn't have this drawback can
117 * be found here:
118 *
119 * http://en.wikipedia.org/wiki/Tarjan%E2%80%99s_strongly_connected_components_algorithm
120 *
121 * \author Ian Romanick <ian.d.romanick@intel.com>
122 */
123#include "ir.h"
124#include "glsl_parser_extras.h"
125#include "linker.h"
126#include "util/hash_table.h"
127#include "program.h"
128
129namespace {
130
131struct call_node : public exec_node {
132 class function *func;
133};
134
135class function {
136public:
137 function(ir_function_signature *sig)
138 : sig(sig)
139 {
140 /* empty */
141 }
142
143 DECLARE_RALLOC_CXX_OPERATORS(function)private: static void _ralloc_destructor(void *p) { reinterpret_cast
<function *>(p)->function::~function(); } public: static
void* operator new(size_t size, void *mem_ctx) { void *p = ralloc_size
(mem_ctx, size); (static_cast <bool> (p != __null) ? void
(0) : __assert_fail ("p != NULL", __builtin_FILE (), __builtin_LINE
(), __extension__ __PRETTY_FUNCTION__)); if (!__has_trivial_destructor
(function)) ralloc_set_destructor(p, _ralloc_destructor); return
p; } static void operator delete(void *p) { if (!__has_trivial_destructor
(function)) ralloc_set_destructor(p, __null); ralloc_free(p);
}
144
145 ir_function_signature *sig;
146
147 /** List of functions called by this function. */
148 exec_list callees;
149
150 /** List of functions that call this function. */
151 exec_list callers;
152};
153
154class has_recursion_visitor : public ir_hierarchical_visitor {
155public:
156 has_recursion_visitor()
157 : current(NULL__null)
158 {
159 progress = false;
160 this->mem_ctx = ralloc_context(NULL__null);
161 this->function_hash = _mesa_pointer_hash_table_create(NULL__null);
162 }
163
164 ~has_recursion_visitor()
165 {
166 _mesa_hash_table_destroy(this->function_hash, NULL__null);
167 ralloc_free(this->mem_ctx);
168 }
169
170 function *get_function(ir_function_signature *sig)
171 {
172 function *f;
173 hash_entry *entry = _mesa_hash_table_search(this->function_hash, sig);
174 if (entry == NULL__null) {
175 f = new(mem_ctx) function(sig);
176 _mesa_hash_table_insert(this->function_hash, sig, f);
177 } else {
178 f = (function *) entry->data;
179 }
180
181 return f;
182 }
183
184 virtual ir_visitor_status visit_enter(ir_function_signature *sig)
185 {
186 this->current = this->get_function(sig);
187 return visit_continue;
188 }
189
190 virtual ir_visitor_status visit_leave(ir_function_signature *sig)
191 {
192 (void) sig;
193 this->current = NULL__null;
194 return visit_continue;
195 }
196
197 virtual ir_visitor_status visit_enter(ir_call *call)
198 {
199 /* At global scope this->current will be NULL. Since there is no way to
200 * call global scope, it can never be part of a cycle. Don't bother
201 * adding calls from global scope to the graph.
202 */
203 if (this->current == NULL__null)
204 return visit_continue;
205
206 function *const target = this->get_function(call->callee);
207
208 /* Create a link from the caller to the callee.
209 */
210 call_node *node = new(mem_ctx) call_node;
211 node->func = target;
212 this->current->callees.push_tail(node);
213
214 /* Create a link from the callee to the caller.
215 */
216 node = new(mem_ctx) call_node;
217 node->func = this->current;
218 target->callers.push_tail(node);
219 return visit_continue;
220 }
221
222 function *current;
223 struct hash_table *function_hash;
224 void *mem_ctx;
225 bool progress;
226};
227
228} /* anonymous namespace */
229
230static void
231destroy_links(exec_list *list, function *f)
232{
233 foreach_in_list_safe(call_node, node, list)for (call_node *node = (!exec_node_is_tail_sentinel((list)->
head_sentinel.next) ? (call_node *) ((list)->head_sentinel
.next) : __null), *__next = (node) ? (!exec_node_is_tail_sentinel
((list)->head_sentinel.next->next) ? (call_node *) ((list
)->head_sentinel.next->next) : __null) : __null; (node)
!= __null; (node) = __next, __next = __next ? (!exec_node_is_tail_sentinel
(__next->next) ? (call_node *) (__next->next) : __null)
: __null)
{
234 /* If this is the right function, remove it. Note that the loop cannot
235 * terminate now. There can be multiple links to a function if it is
236 * either called multiple times or calls multiple times.
237 */
238 if (node->func == f)
239 node->remove();
240 }
241}
242
243
244/**
245 * Remove a function if it has either no in or no out links
246 */
247static void
248remove_unlinked_functions(const void *key, void *data, void *closure)
249{
250 has_recursion_visitor *visitor = (has_recursion_visitor *) closure;
251 function *f = (function *) data;
252
253 if (f->callers.is_empty() || f->callees.is_empty()) {
5
Assuming the condition is true
254 while (!f->callers.is_empty()) {
6
Loop condition is true. Entering loop body
16
Loop condition is true. Entering loop body
255 struct call_node *n = (struct call_node *) f->callers.pop_head();
7
Calling 'exec_list::pop_head'
15
Returning from 'exec_list::pop_head'
17
Calling 'exec_list::pop_head'
256 destroy_links(& n->func->callees, f);
257 }
258
259 while (!f->callees.is_empty()) {
260 struct call_node *n = (struct call_node *) f->callees.pop_head();
261 destroy_links(& n->func->callers, f);
262 }
263
264 hash_entry *entry = _mesa_hash_table_search(visitor->function_hash, key);
265 _mesa_hash_table_remove(visitor->function_hash, entry);
266 visitor->progress = true;
267 }
268}
269
270
271static void
272emit_errors_unlinked(const void *key, void *data, void *closure)
273{
274 struct _mesa_glsl_parse_state *state =
275 (struct _mesa_glsl_parse_state *) closure;
276 function *f = (function *) data;
277 YYLTYPE loc;
278
279 (void) key;
280
281 char *proto = prototype_string(f->sig->return_type,
282 f->sig->function_name(),
283 &f->sig->parameters);
284
285 memset(&loc, 0, sizeof(loc));
286 _mesa_glsl_error(&loc, state,
287 "function `%s' has static recursion",
288 proto);
289 ralloc_free(proto);
290}
291
292
293static void
294emit_errors_linked(const void *key, void *data, void *closure)
295{
296 struct gl_shader_program *prog =
297 (struct gl_shader_program *) closure;
298 function *f = (function *) data;
299
300 (void) key;
301
302 char *proto = prototype_string(f->sig->return_type,
303 f->sig->function_name(),
304 &f->sig->parameters);
305
306 linker_error(prog, "function `%s' has static recursion.\n", proto);
307 ralloc_free(proto);
308}
309
310
311void
312detect_recursion_unlinked(struct _mesa_glsl_parse_state *state,
313 exec_list *instructions)
314{
315 has_recursion_visitor v;
316
317 /* Collect all of the information about which functions call which other
318 * functions.
319 */
320 v.run(instructions);
321
322 /* Remove from the set all of the functions that either have no caller or
323 * call no other functions. Repeat until no functions are removed.
324 */
325 do {
326 v.progress = false;
327 hash_table_call_foreach(v.function_hash, remove_unlinked_functions, & v);
328 } while (v.progress);
329
330
331 /* At this point any functions still in the hash must be part of a cycle.
332 */
333 hash_table_call_foreach(v.function_hash, emit_errors_unlinked, state);
334}
335
336
337void
338detect_recursion_linked(struct gl_shader_program *prog,
339 exec_list *instructions)
340{
341 has_recursion_visitor v;
342
343 /* Collect all of the information about which functions call which other
344 * functions.
345 */
346 v.run(instructions);
347
348 /* Remove from the set all of the functions that either have no caller or
349 * call no other functions. Repeat until no functions are removed.
350 */
351 do {
352 v.progress = false;
353 hash_table_call_foreach(v.function_hash, remove_unlinked_functions, & v);
1
Calling 'hash_table_call_foreach'
354 } while (v.progress);
355
356
357 /* At this point any functions still in the hash must be part of a cycle.
358 */
359 hash_table_call_foreach(v.function_hash, emit_errors_linked, prog);
360}

glsl-optimizer/src/util/hash_table.h

1/*
2 * Copyright © 2009,2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#ifndef _HASH_TABLE_H
29#define _HASH_TABLE_H
30
31#include <stdlib.h>
32#include <inttypes.h>
33#include <stdbool.h>
34#include "c99_compat.h"
35#include "fnv1a.h"
36#include "macros.h"
37
38#ifdef __cplusplus201703L
39extern "C" {
40#endif
41
42struct hash_entry {
43 uint32_t hash;
44 const void *key;
45 void *data;
46};
47
48struct hash_table {
49 struct hash_entry *table;
50 uint32_t (*key_hash_function)(const void *key);
51 bool (*key_equals_function)(const void *a, const void *b);
52 const void *deleted_key;
53 uint32_t size;
54 uint32_t rehash;
55 uint64_t size_magic;
56 uint64_t rehash_magic;
57 uint32_t max_entries;
58 uint32_t size_index;
59 uint32_t entries;
60 uint32_t deleted_entries;
61};
62
63struct hash_table *
64_mesa_hash_table_create(void *mem_ctx,
65 uint32_t (*key_hash_function)(const void *key),
66 bool (*key_equals_function)(const void *a,
67 const void *b));
68
69bool
70_mesa_hash_table_init(struct hash_table *ht,
71 void *mem_ctx,
72 uint32_t (*key_hash_function)(const void *key),
73 bool (*key_equals_function)(const void *a,
74 const void *b));
75
76struct hash_table *
77_mesa_hash_table_clone(struct hash_table *src, void *dst_mem_ctx);
78void _mesa_hash_table_destroy(struct hash_table *ht,
79 void (*delete_function)(struct hash_entry *entry));
80void _mesa_hash_table_clear(struct hash_table *ht,
81 void (*delete_function)(struct hash_entry *entry));
82void _mesa_hash_table_set_deleted_key(struct hash_table *ht,
83 const void *deleted_key);
84
85static inline uint32_t _mesa_hash_table_num_entries(struct hash_table *ht)
86{
87 return ht->entries;
88}
89
90struct hash_entry *
91_mesa_hash_table_insert(struct hash_table *ht, const void *key, void *data);
92struct hash_entry *
93_mesa_hash_table_insert_pre_hashed(struct hash_table *ht, uint32_t hash,
94 const void *key, void *data);
95struct hash_entry *
96_mesa_hash_table_search(struct hash_table *ht, const void *key);
97struct hash_entry *
98_mesa_hash_table_search_pre_hashed(struct hash_table *ht, uint32_t hash,
99 const void *key);
100void _mesa_hash_table_remove(struct hash_table *ht,
101 struct hash_entry *entry);
102void _mesa_hash_table_remove_key(struct hash_table *ht,
103 const void *key);
104
105struct hash_entry *_mesa_hash_table_next_entry(struct hash_table *ht,
106 struct hash_entry *entry);
107struct hash_entry *
108_mesa_hash_table_random_entry(struct hash_table *ht,
109 bool (*predicate)(struct hash_entry *entry));
110
111uint32_t _mesa_hash_data(const void *data, size_t size);
112
113uint32_t _mesa_hash_int(const void *key);
114uint32_t _mesa_hash_uint(const void *key);
115uint32_t _mesa_hash_u32(const void *key);
116uint32_t _mesa_hash_string(const void *key);
117uint32_t _mesa_hash_pointer(const void *pointer);
118
119bool _mesa_key_int_equal(const void *a, const void *b);
120bool _mesa_key_uint_equal(const void *a, const void *b);
121bool _mesa_key_u32_equal(const void *a, const void *b);
122bool _mesa_key_string_equal(const void *a, const void *b);
123bool _mesa_key_pointer_equal(const void *a, const void *b);
124
125struct hash_table *
126_mesa_pointer_hash_table_create(void *mem_ctx);
127
128/**
129 * This foreach function is safe against deletion (which just replaces
130 * an entry's data with the deleted marker), but not against insertion
131 * (which may rehash the table, making entry a dangling pointer).
132 */
133#define hash_table_foreach(ht, entry)for (struct hash_entry *entry = _mesa_hash_table_next_entry(ht
, __null); entry != __null; entry = _mesa_hash_table_next_entry
(ht, entry))
\
134 for (struct hash_entry *entry = _mesa_hash_table_next_entry(ht, NULL__null); \
135 entry != NULL__null; \
136 entry = _mesa_hash_table_next_entry(ht, entry))
137
138static inline void
139hash_table_call_foreach(struct hash_table *ht,
140 void (*callback)(const void *key,
141 void *data,
142 void *closure),
143 void *closure)
144{
145 hash_table_foreach(ht, entry)for (struct hash_entry *entry = _mesa_hash_table_next_entry(ht
, __null); entry != __null; entry = _mesa_hash_table_next_entry
(ht, entry))
2
Assuming the condition is true
3
Loop condition is true. Entering loop body
146 callback(entry->key, entry->data, closure);
4
Calling 'remove_unlinked_functions'
147}
148
149/**
150 * Hash table wrapper which supports 64-bit keys.
151 */
152struct hash_table_u64 {
153 struct hash_table *table;
154 void *freed_key_data;
155 void *deleted_key_data;
156};
157
158struct hash_table_u64 *
159_mesa_hash_table_u64_create(void *mem_ctx);
160
161void
162_mesa_hash_table_u64_destroy(struct hash_table_u64 *ht,
163 void (*delete_function)(struct hash_entry *entry));
164
165void
166_mesa_hash_table_u64_insert(struct hash_table_u64 *ht, uint64_t key,
167 void *data);
168
169void *
170_mesa_hash_table_u64_search(struct hash_table_u64 *ht, uint64_t key);
171
172void
173_mesa_hash_table_u64_remove(struct hash_table_u64 *ht, uint64_t key);
174
175void
176_mesa_hash_table_u64_clear(struct hash_table_u64 *ht,
177 void (*delete_function)(struct hash_entry *entry));
178
179#ifdef __cplusplus201703L
180} /* extern C */
181#endif
182
183#endif /* _HASH_TABLE_H */

glsl-optimizer/src/compiler/glsl/list.h

1/*
2 * Copyright © 2008, 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24/**
25 * \file list.h
26 * \brief Doubly-linked list abstract container type.
27 *
28 * Each doubly-linked list has a sentinel head and tail node. These nodes
29 * contain no data. The head sentinel can be identified by its \c prev
30 * pointer being \c NULL. The tail sentinel can be identified by its
31 * \c next pointer being \c NULL.
32 *
33 * A list is empty if either the head sentinel's \c next pointer points to the
34 * tail sentinel or the tail sentinel's \c prev pointer points to the head
35 * sentinel. The head sentinel and tail sentinel nodes are allocated within the
36 * list structure.
37 *
38 * Do note that this means that the list nodes will contain pointers into the
39 * list structure itself and as a result you may not \c realloc() an \c
40 * exec_list or any structure in which an \c exec_list is embedded.
41 */
42
43#ifndef LIST_CONTAINER_H
44#define LIST_CONTAINER_H
45
46#ifndef __cplusplus201703L
47#include <stddef.h>
48#endif
49#include <assert.h>
50
51#include "util/ralloc.h"
52
53struct exec_node {
54 struct exec_node *next;
55 struct exec_node *prev;
56
57#ifdef __cplusplus201703L
58 DECLARE_RZALLOC_CXX_OPERATORS(exec_node)private: static void _ralloc_destructor(void *p) { reinterpret_cast
<exec_node *>(p)->exec_node::~exec_node(); } public:
static void* operator new(size_t size, void *mem_ctx) { void
*p = rzalloc_size(mem_ctx, size); (static_cast <bool> (
p != __null) ? void (0) : __assert_fail ("p != NULL", __builtin_FILE
(), __builtin_LINE (), __extension__ __PRETTY_FUNCTION__)); if
(!__has_trivial_destructor(exec_node)) ralloc_set_destructor
(p, _ralloc_destructor); return p; } static void operator delete
(void *p) { if (!__has_trivial_destructor(exec_node)) ralloc_set_destructor
(p, __null); ralloc_free(p); }
59
60 exec_node() : next(NULL__null), prev(NULL__null)
61 {
62 /* empty */
63 }
64
65 const exec_node *get_next() const;
66 exec_node *get_next();
67
68 const exec_node *get_prev() const;
69 exec_node *get_prev();
70
71 void remove();
72
73 /**
74 * Link a node with itself
75 *
76 * This creates a sort of degenerate list that is occasionally useful.
77 */
78 void self_link();
79
80 /**
81 * Insert a node in the list after the current node
82 */
83 void insert_after(exec_node *after);
84
85 /**
86 * Insert another list in the list after the current node
87 */
88 void insert_after(struct exec_list *after);
89
90 /**
91 * Insert a node in the list before the current node
92 */
93 void insert_before(exec_node *before);
94
95 /**
96 * Insert another list in the list before the current node
97 */
98 void insert_before(struct exec_list *before);
99
100 /**
101 * Replace the current node with the given node.
102 */
103 void replace_with(exec_node *replacement);
104
105 /**
106 * Is this the sentinel at the tail of the list?
107 */
108 bool is_tail_sentinel() const;
109
110 /**
111 * Is this the sentinel at the head of the list?
112 */
113 bool is_head_sentinel() const;
114#endif
115};
116
117static inline void
118exec_node_init(struct exec_node *n)
119{
120 n->next = NULL__null;
121 n->prev = NULL__null;
122}
123
124static inline const struct exec_node *
125exec_node_get_next_const(const struct exec_node *n)
126{
127 return n->next;
128}
129
130static inline struct exec_node *
131exec_node_get_next(struct exec_node *n)
132{
133 return n->next;
134}
135
136static inline const struct exec_node *
137exec_node_get_prev_const(const struct exec_node *n)
138{
139 return n->prev;
140}
141
142static inline struct exec_node *
143exec_node_get_prev(struct exec_node *n)
144{
145 return n->prev;
146}
147
148static inline void
149exec_node_remove(struct exec_node *n)
150{
151 n->next->prev = n->prev;
21
Access to field 'prev' results in a dereference of a null pointer (loaded from field 'next')
152 n->prev->next = n->next;
153 n->next = NULL__null;
12
Null pointer value stored to field 'next'
154 n->prev = NULL__null;
155}
156
157static inline void
158exec_node_self_link(struct exec_node *n)
159{
160 n->next = n;
161 n->prev = n;
162}
163
164static inline void
165exec_node_insert_after(struct exec_node *n, struct exec_node *after)
166{
167 after->next = n->next;
168 after->prev = n;
169
170 n->next->prev = after;
171 n->next = after;
172}
173
174static inline void
175exec_node_insert_node_before(struct exec_node *n, struct exec_node *before)
176{
177 before->next = n;
178 before->prev = n->prev;
179
180 n->prev->next = before;
181 n->prev = before;
182}
183
184static inline void
185exec_node_replace_with(struct exec_node *n, struct exec_node *replacement)
186{
187 replacement->prev = n->prev;
188 replacement->next = n->next;
189
190 n->prev->next = replacement;
191 n->next->prev = replacement;
192}
193
194static inline bool
195exec_node_is_tail_sentinel(const struct exec_node *n)
196{
197 return n->next == NULL__null;
198}
199
200static inline bool
201exec_node_is_head_sentinel(const struct exec_node *n)
202{
203 return n->prev == NULL__null;
204}
205
206#ifdef __cplusplus201703L
207inline const exec_node *exec_node::get_next() const
208{
209 return exec_node_get_next_const(this);
210}
211
212inline exec_node *exec_node::get_next()
213{
214 return exec_node_get_next(this);
215}
216
217inline const exec_node *exec_node::get_prev() const
218{
219 return exec_node_get_prev_const(this);
220}
221
222inline exec_node *exec_node::get_prev()
223{
224 return exec_node_get_prev(this);
225}
226
227inline void exec_node::remove()
228{
229 exec_node_remove(this);
230}
231
232inline void exec_node::self_link()
233{
234 exec_node_self_link(this);
235}
236
237inline void exec_node::insert_after(exec_node *after)
238{
239 exec_node_insert_after(this, after);
240}
241
242inline void exec_node::insert_before(exec_node *before)
243{
244 exec_node_insert_node_before(this, before);
245}
246
247inline void exec_node::replace_with(exec_node *replacement)
248{
249 exec_node_replace_with(this, replacement);
250}
251
252inline bool exec_node::is_tail_sentinel() const
253{
254 return exec_node_is_tail_sentinel(this);
255}
256
257inline bool exec_node::is_head_sentinel() const
258{
259 return exec_node_is_head_sentinel(this);
260}
261#endif
262
263#ifdef __cplusplus201703L
264/* This macro will not work correctly if `t' uses virtual inheritance. If you
265 * are using virtual inheritance, you deserve a slow and painful death. Enjoy!
266 */
267#define exec_list_offsetof(t, f, p)(((char *) &((t *) p)->f) - ((char *) p)) \
268 (((char *) &((t *) p)->f) - ((char *) p))
269#else
270#define exec_list_offsetof(t, f, p)(((char *) &((t *) p)->f) - ((char *) p)) offsetof(t, f)__builtin_offsetof(t, f)
271#endif
272
273/**
274 * Get a pointer to the structure containing an exec_node
275 *
276 * Given a pointer to an \c exec_node embedded in a structure, get a pointer to
277 * the containing structure.
278 *
279 * \param type Base type of the structure containing the node
280 * \param node Pointer to the \c exec_node
281 * \param field Name of the field in \c type that is the embedded \c exec_node
282 */
283#define exec_node_data(type, node, field)((type *) (((uintptr_t) node) - (((char *) &((type *) node
)->field) - ((char *) node))))
\
284 ((type *) (((uintptr_t) node) - exec_list_offsetof(type, field, node)(((char *) &((type *) node)->field) - ((char *) node))))
285
286#ifdef __cplusplus201703L
287struct exec_node;
288#endif
289
290struct exec_list {
291 struct exec_node head_sentinel;
292 struct exec_node tail_sentinel;
293
294#ifdef __cplusplus201703L
295 DECLARE_RALLOC_CXX_OPERATORS(exec_list)private: static void _ralloc_destructor(void *p) { reinterpret_cast
<exec_list *>(p)->exec_list::~exec_list(); } public:
static void* operator new(size_t size, void *mem_ctx) { void
*p = ralloc_size(mem_ctx, size); (static_cast <bool> (
p != __null) ? void (0) : __assert_fail ("p != NULL", __builtin_FILE
(), __builtin_LINE (), __extension__ __PRETTY_FUNCTION__)); if
(!__has_trivial_destructor(exec_list)) ralloc_set_destructor
(p, _ralloc_destructor); return p; } static void operator delete
(void *p) { if (!__has_trivial_destructor(exec_list)) ralloc_set_destructor
(p, __null); ralloc_free(p); }
296
297 exec_list()
298 {
299 make_empty();
300 }
301
302 void make_empty();
303
304 bool is_empty() const;
305
306 const exec_node *get_head() const;
307 exec_node *get_head();
308 const exec_node *get_head_raw() const;
309 exec_node *get_head_raw();
310
311 const exec_node *get_tail() const;
312 exec_node *get_tail();
313 const exec_node *get_tail_raw() const;
314 exec_node *get_tail_raw();
315
316 unsigned length() const;
317
318 void push_head(exec_node *n);
319 void push_tail(exec_node *n);
320 void push_degenerate_list_at_head(exec_node *n);
321
322 /**
323 * Remove the first node from a list and return it
324 *
325 * \return
326 * The first node in the list or \c NULL if the list is empty.
327 *
328 * \sa exec_list::get_head
329 */
330 exec_node *pop_head();
331
332 /**
333 * Move all of the nodes from this list to the target list
334 */
335 void move_nodes_to(exec_list *target);
336
337 /**
338 * Append all nodes from the source list to the end of the target list
339 */
340 void append_list(exec_list *source);
341
342 /**
343 * Prepend all nodes from the source list to the beginning of the target
344 * list
345 */
346 void prepend_list(exec_list *source);
347#endif
348};
349
350static inline void
351exec_list_make_empty(struct exec_list *list)
352{
353 list->head_sentinel.next = &list->tail_sentinel;
354 list->head_sentinel.prev = NULL__null;
355 list->tail_sentinel.next = NULL__null;
356 list->tail_sentinel.prev = &list->head_sentinel;
357}
358
359static inline bool
360exec_list_is_empty(const struct exec_list *list)
361{
362 /* There are three ways to test whether a list is empty or not.
363 *
364 * - Check to see if the head sentinel's \c next is the tail sentinel.
365 * - Check to see if the tail sentinel's \c prev is the head sentinel.
366 * - Check to see if the head is the sentinel node by test whether its
367 * \c next pointer is \c NULL.
368 *
369 * The first two methods tend to generate better code on modern systems
370 * because they save a pointer dereference.
371 */
372 return list->head_sentinel.next == &list->tail_sentinel;
373}
374
375static inline bool
376exec_list_is_singular(const struct exec_list *list)
377{
378 return !exec_list_is_empty(list) &&
379 list->head_sentinel.next->next == &list->tail_sentinel;
380}
381
382static inline const struct exec_node *
383exec_list_get_head_const(const struct exec_list *list)
384{
385 return !exec_list_is_empty(list) ? list->head_sentinel.next : NULL__null;
386}
387
388static inline struct exec_node *
389exec_list_get_head(struct exec_list *list)
390{
391 return !exec_list_is_empty(list) ? list->head_sentinel.next : NULL__null;
392}
393
394static inline const struct exec_node *
395exec_list_get_head_raw_const(const struct exec_list *list)
396{
397 return list->head_sentinel.next;
398}
399
400static inline struct exec_node *
401exec_list_get_head_raw(struct exec_list *list)
402{
403 return list->head_sentinel.next;
404}
405
406static inline const struct exec_node *
407exec_list_get_tail_const(const struct exec_list *list)
408{
409 return !exec_list_is_empty(list) ? list->tail_sentinel.prev : NULL__null;
410}
411
412static inline struct exec_node *
413exec_list_get_tail(struct exec_list *list)
414{
415 return !exec_list_is_empty(list) ? list->tail_sentinel.prev : NULL__null;
416}
417
418static inline const struct exec_node *
419exec_list_get_tail_raw_const(const struct exec_list *list)
420{
421 return list->tail_sentinel.prev;
422}
423
424static inline struct exec_node *
425exec_list_get_tail_raw(struct exec_list *list)
426{
427 return list->tail_sentinel.prev;
428}
429
430static inline unsigned
431exec_list_length(const struct exec_list *list)
432{
433 unsigned size = 0;
434 struct exec_node *node;
435
436 for (node = list->head_sentinel.next; node->next != NULL__null; node = node->next) {
437 size++;
438 }
439
440 return size;
441}
442
443static inline void
444exec_list_push_head(struct exec_list *list, struct exec_node *n)
445{
446 n->next = list->head_sentinel.next;
447 n->prev = &list->head_sentinel;
448
449 n->next->prev = n;
450 list->head_sentinel.next = n;
451}
452
453static inline void
454exec_list_push_tail(struct exec_list *list, struct exec_node *n)
455{
456 n->next = &list->tail_sentinel;
457 n->prev = list->tail_sentinel.prev;
458
459 n->prev->next = n;
460 list->tail_sentinel.prev = n;
461}
462
463static inline void
464exec_list_push_degenerate_list_at_head(struct exec_list *list, struct exec_node *n)
465{
466 assert(n->prev->next == n)(static_cast <bool> (n->prev->next == n) ? void (
0) : __assert_fail ("n->prev->next == n", __builtin_FILE
(), __builtin_LINE (), __extension__ __PRETTY_FUNCTION__))
;
467
468 n->prev->next = list->head_sentinel.next;
469 list->head_sentinel.next->prev = n->prev;
470 n->prev = &list->head_sentinel;
471 list->head_sentinel.next = n;
472}
473
474static inline struct exec_node *
475exec_list_pop_head(struct exec_list *list)
476{
477 struct exec_node *const n = exec_list_get_head(list);
478 if (n
18.1
'n' is not equal to NULL
18.1
'n' is not equal to NULL
18.1
'n' is not equal to NULL
!= NULL__null
)
9
Assuming 'n' is not equal to NULL
10
Taking true branch
19
Taking true branch
479 exec_node_remove(n);
11
Calling 'exec_node_remove'
13
Returning from 'exec_node_remove'
20
Calling 'exec_node_remove'
480
481 return n;
482}
483
484static inline void
485exec_list_move_nodes_to(struct exec_list *list, struct exec_list *target)
486{
487 if (exec_list_is_empty(list)) {
488 exec_list_make_empty(target);
489 } else {
490 target->head_sentinel.next = list->head_sentinel.next;
491 target->head_sentinel.prev = NULL__null;
492 target->tail_sentinel.next = NULL__null;
493 target->tail_sentinel.prev = list->tail_sentinel.prev;
494
495 target->head_sentinel.next->prev = &target->head_sentinel;
496 target->tail_sentinel.prev->next = &target->tail_sentinel;
497
498 exec_list_make_empty(list);
499 }
500}
501
502static inline void
503exec_list_append(struct exec_list *list, struct exec_list *source)
504{
505 if (exec_list_is_empty(source))
506 return;
507
508 /* Link the first node of the source with the last node of the target list.
509 */
510 list->tail_sentinel.prev->next = source->head_sentinel.next;
511 source->head_sentinel.next->prev = list->tail_sentinel.prev;
512
513 /* Make the tail of the source list be the tail of the target list.
514 */
515 list->tail_sentinel.prev = source->tail_sentinel.prev;
516 list->tail_sentinel.prev->next = &list->tail_sentinel;
517
518 /* Make the source list empty for good measure.
519 */
520 exec_list_make_empty(source);
521}
522
523static inline void
524exec_node_insert_list_after(struct exec_node *n, struct exec_list *after)
525{
526 if (exec_list_is_empty(after))
527 return;
528
529 after->tail_sentinel.prev->next = n->next;
530 after->head_sentinel.next->prev = n;
531
532 n->next->prev = after->tail_sentinel.prev;
533 n->next = after->head_sentinel.next;
534
535 exec_list_make_empty(after);
536}
537
/**
 * Move every node of \p source to the front of \p list.
 *
 * Implemented by first appending \p list onto the end of \p source (which
 * places the source nodes ahead of the existing ones) and then transferring
 * the combined chain back into \p list.  \p source is left empty.
 */
static inline void
exec_list_prepend(struct exec_list *list, struct exec_list *source)
{
   exec_list_append(source, list);
   exec_list_move_nodes_to(source, list);
}
544
545static inline void
546exec_node_insert_list_before(struct exec_node *n, struct exec_list *before)
547{
548 if (exec_list_is_empty(before))
549 return;
550
551 before->tail_sentinel.prev->next = n;
552 before->head_sentinel.next->prev = n->prev;
553
554 n->prev->next = before->head_sentinel.next;
555 n->prev = before->tail_sentinel.prev;
556
557 exec_list_make_empty(before);
558}
559
560static inline void
561exec_list_validate(const struct exec_list *list)
562{
563 const struct exec_node *node;
564
565 assert(list->head_sentinel.next->prev == &list->head_sentinel)(static_cast <bool> (list->head_sentinel.next->prev
== &list->head_sentinel) ? void (0) : __assert_fail (
"list->head_sentinel.next->prev == &list->head_sentinel"
, __builtin_FILE (), __builtin_LINE (), __extension__ __PRETTY_FUNCTION__
))
;
566 assert(list->head_sentinel.prev == NULL)(static_cast <bool> (list->head_sentinel.prev == __null
) ? void (0) : __assert_fail ("list->head_sentinel.prev == NULL"
, __builtin_FILE (), __builtin_LINE (), __extension__ __PRETTY_FUNCTION__
))
;
567 assert(list->tail_sentinel.next == NULL)(static_cast <bool> (list->tail_sentinel.next == __null
) ? void (0) : __assert_fail ("list->tail_sentinel.next == NULL"
, __builtin_FILE (), __builtin_LINE (), __extension__ __PRETTY_FUNCTION__
))
;
568 assert(list->tail_sentinel.prev->next == &list->tail_sentinel)(static_cast <bool> (list->tail_sentinel.prev->next
== &list->tail_sentinel) ? void (0) : __assert_fail (
"list->tail_sentinel.prev->next == &list->tail_sentinel"
, __builtin_FILE (), __builtin_LINE (), __extension__ __PRETTY_FUNCTION__
))
;
569
570 /* We could try to use one of the interators below for this but they all
571 * either require C++ or assume the exec_node is embedded in a structure
572 * which is not the case for this function.
573 */
574 for (node = list->head_sentinel.next; node->next != NULL__null; node = node->next) {
575 assert(node->next->prev == node)(static_cast <bool> (node->next->prev == node) ? void
(0) : __assert_fail ("node->next->prev == node", __builtin_FILE
(), __builtin_LINE (), __extension__ __PRETTY_FUNCTION__))
;
576 assert(node->prev->next == node)(static_cast <bool> (node->prev->next == node) ? void
(0) : __assert_fail ("node->prev->next == node", __builtin_FILE
(), __builtin_LINE (), __extension__ __PRETTY_FUNCTION__))
;
577 }
578}
579
580#ifdef __cplusplus201703L
581inline void exec_list::make_empty()
582{
583 exec_list_make_empty(this);
584}
585
586inline bool exec_list::is_empty() const
587{
588 return exec_list_is_empty(this);
589}
590
591inline const exec_node *exec_list::get_head() const
592{
593 return exec_list_get_head_const(this);
594}
595
596inline exec_node *exec_list::get_head()
597{
598 return exec_list_get_head(this);
599}
600
601inline const exec_node *exec_list::get_head_raw() const
602{
603 return exec_list_get_head_raw_const(this);
604}
605
606inline exec_node *exec_list::get_head_raw()
607{
608 return exec_list_get_head_raw(this);
609}
610
611inline const exec_node *exec_list::get_tail() const
612{
613 return exec_list_get_tail_const(this);
614}
615
616inline exec_node *exec_list::get_tail()
617{
618 return exec_list_get_tail(this);
619}
620
621inline const exec_node *exec_list::get_tail_raw() const
622{
623 return exec_list_get_tail_raw_const(this);
624}
625
626inline exec_node *exec_list::get_tail_raw()
627{
628 return exec_list_get_tail_raw(this);
629}
630
631inline unsigned exec_list::length() const
632{
633 return exec_list_length(this);
634}
635
636inline void exec_list::push_head(exec_node *n)
637{
638 exec_list_push_head(this, n);
639}
640
641inline void exec_list::push_tail(exec_node *n)
642{
643 exec_list_push_tail(this, n);
644}
645
646inline void exec_list::push_degenerate_list_at_head(exec_node *n)
647{
648 exec_list_push_degenerate_list_at_head(this, n);
649}
650
651inline exec_node *exec_list::pop_head()
652{
653 return exec_list_pop_head(this);
8
Calling 'exec_list_pop_head'
14
Returning from 'exec_list_pop_head'
18
Calling 'exec_list_pop_head'
654}
655
656inline void exec_list::move_nodes_to(exec_list *target)
657{
658 exec_list_move_nodes_to(this, target);
659}
660
661inline void exec_list::append_list(exec_list *source)
662{
663 exec_list_append(this, source);
664}
665
666inline void exec_node::insert_after(exec_list *after)
667{
668 exec_node_insert_list_after(this, after);
669}
670
671inline void exec_list::prepend_list(exec_list *source)
672{
673 exec_list_prepend(this, source);
674}
675
676inline void exec_node::insert_before(exec_list *before)
677{
678 exec_node_insert_list_before(this, before);
679}
680#endif
681
/* Cast \p __node to \p __type, or yield NULL when the node is the tail
 * sentinel (i.e. the forward walk has ended).
 */
#define exec_node_typed_forward(__node, __type) \
   (!exec_node_is_tail_sentinel(__node) ? (__type) (__node) : NULL)

/* Cast \p __node to \p __type, or yield NULL when the node is the head
 * sentinel (i.e. the backward walk has ended).
 */
#define exec_node_typed_backward(__node, __type) \
   (!exec_node_is_head_sentinel(__node) ? (__type) (__node) : NULL)

/* Iterate forward over a list whose nodes ARE the payload type. */
#define foreach_in_list(__type, __inst, __list) \
   for (__type *__inst = exec_node_typed_forward((__list)->head_sentinel.next, __type *); \
        (__inst) != NULL; \
        (__inst) = exec_node_typed_forward((__inst)->next, __type *))

/* Iterate backward over a list whose nodes ARE the payload type. */
#define foreach_in_list_reverse(__type, __inst, __list) \
   for (__type *__inst = exec_node_typed_backward((__list)->tail_sentinel.prev, __type *); \
        (__inst) != NULL; \
        (__inst) = exec_node_typed_backward((__inst)->prev, __type *))

/**
 * This version is safe even if the current node is removed.
 */
#define foreach_in_list_safe(__type, __node, __list) \
   for (__type *__node = exec_node_typed_forward((__list)->head_sentinel.next, __type *), \
               *__next = (__node) ? exec_node_typed_forward((__list)->head_sentinel.next->next, __type *) : NULL; \
        (__node) != NULL; \
        (__node) = __next, __next = __next ? exec_node_typed_forward(__next->next, __type *) : NULL)

/* Backward iteration that is safe if the current node is removed. */
#define foreach_in_list_reverse_safe(__type, __node, __list) \
   for (__type *__node = exec_node_typed_backward((__list)->tail_sentinel.prev, __type *), \
               *__prev = (__node) ? exec_node_typed_backward((__list)->tail_sentinel.prev->prev, __type *) : NULL; \
        (__node) != NULL; \
        (__node) = __prev, __prev = __prev ? exec_node_typed_backward(__prev->prev, __type *) : NULL)

/* Like foreach_in_list, but declares the cursor outside the loop so it
 * remains accessible after the loop exits.
 */
#define foreach_in_list_use_after(__type, __inst, __list) \
   __type *__inst; \
   for ((__inst) = exec_node_typed_forward((__list)->head_sentinel.next, __type *); \
        (__inst) != NULL; \
        (__inst) = exec_node_typed_forward((__inst)->next, __type *))

/**
 * Iterate through two lists at once.  Stops at the end of the shorter list.
 *
 * This is safe against either current node being removed or replaced.
 */
#define foreach_two_lists(__node1, __list1, __node2, __list2) \
   for (struct exec_node * __node1 = (__list1)->head_sentinel.next, \
                         * __node2 = (__list2)->head_sentinel.next, \
                         * __next1 = __node1->next, \
                         * __next2 = __node2->next \
        ; __next1 != NULL && __next2 != NULL \
        ; __node1 = __next1, \
          __node2 = __next2, \
          __next1 = __next1->next, \
          __next2 = __next2->next)

/* exec_node_data(), or NULL when the node is the tail sentinel. */
#define exec_node_data_forward(type, node, field) \
   (!exec_node_is_tail_sentinel(node) ? exec_node_data(type, node, field) : NULL)

/* exec_node_data(), or NULL when the node is the head sentinel. */
#define exec_node_data_backward(type, node, field) \
   (!exec_node_is_head_sentinel(node) ? exec_node_data(type, node, field) : NULL)

/* Iterate forward over structures that EMBED an exec_node as \p __field. */
#define foreach_list_typed(__type, __node, __field, __list) \
   for (__type * __node = \
           exec_node_data_forward(__type, (__list)->head_sentinel.next, __field); \
        (__node) != NULL; \
        (__node) = exec_node_data_forward(__type, (__node)->__field.next, __field))

/* Like foreach_list_typed, but starts from the given node \p __start. */
#define foreach_list_typed_from(__type, __node, __field, __list, __start) \
   for (__type * __node = exec_node_data_forward(__type, (__start), __field); \
        (__node) != NULL; \
        (__node) = exec_node_data_forward(__type, (__node)->__field.next, __field))

/* Iterate backward over structures that embed an exec_node as \p __field. */
#define foreach_list_typed_reverse(__type, __node, __field, __list) \
   for (__type * __node = \
           exec_node_data_backward(__type, (__list)->tail_sentinel.prev, __field); \
        (__node) != NULL; \
        (__node) = exec_node_data_backward(__type, (__node)->__field.prev, __field))

/* Typed forward iteration that is safe if the current node is removed. */
#define foreach_list_typed_safe(__type, __node, __field, __list) \
   for (__type * __node = \
           exec_node_data_forward(__type, (__list)->head_sentinel.next, __field), \
              * __next = (__node) ? \
           exec_node_data_forward(__type, (__node)->__field.next, __field) : NULL; \
        (__node) != NULL; \
        (__node) = __next, __next = (__next && (__next)->__field.next) ? \
           exec_node_data_forward(__type, (__next)->__field.next, __field) : NULL)

/* Typed backward iteration that is safe if the current node is removed. */
#define foreach_list_typed_reverse_safe(__type, __node, __field, __list) \
   for (__type * __node = \
           exec_node_data_backward(__type, (__list)->tail_sentinel.prev, __field), \
              * __prev = (__node) ? \
           exec_node_data_backward(__type, (__node)->__field.prev, __field) : NULL; \
        (__node) != NULL; \
        (__node) = __prev, __prev = (__prev && (__prev)->__field.prev) ? \
           exec_node_data_backward(__type, (__prev)->__field.prev, __field) : NULL)
777#endif /* LIST_CONTAINER_H */