Bug Summary

File: /root/firefox-clang/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_copy_propagation_elements.cpp
Warning: line 704, column 8
Dereference of null pointer
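
The path the analyzer reports through add_copy() (steps 1-29 in the listing below) is: ir->rhs->as_dereference_variable() returns a non-null ir_dereference_variable whose 'var' field is assumed to be NULL (step 12), so the whole-variable branch at line 664 is not taken; the second as_dereference_variable() call at line 688 returns the same non-null dereference, the "if (!rhs)" fallback is skipped, and line 704 then reads rhs->var->data.mode without re-checking rhs->var. A minimal sketch of where a guard could go is shown below; whether rhs->var can really be NULL at this point is an invariant the analyzer cannot see, so this is only an illustration, not the upstream fix.

   /* Sketch only (hypothetical guard ahead of line 704): make the
    * assumed-NULL case explicit.  If rhs->var can never be NULL here,
    * an assert(rhs->var != NULL) would document that invariant instead.
    */
   if (rhs->var == NULL)
      return;

   if (rhs->var->data.mode == ir_var_shader_storage ||
       rhs->var->data.mode == ir_var_shader_shared)
      return;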

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name opt_copy_propagation_elements.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -ffp-contract=off -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/root/firefox-clang/third_party/rust/glslopt -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/root/firefox-clang/third_party/rust/glslopt -resource-dir /usr/lib/llvm-21/lib/clang/21 -include /root/firefox-clang/config/gcc_hidden.h -include /root/firefox-clang/obj-x86_64-pc-linux-gnu/mozilla-config.h -I glsl-optimizer/include -I glsl-optimizer/src/mesa -I glsl-optimizer/src/mapi -I glsl-optimizer/src/compiler -I glsl-optimizer/src/compiler/glsl -I glsl-optimizer/src/gallium/auxiliary -I glsl-optimizer/src/gallium/include -I glsl-optimizer/src -I glsl-optimizer/src/util -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/stl_wrappers -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/system_wrappers -U _FORTIFY_SOURCE -D _FORTIFY_SOURCE=2 -D _GLIBCXX_ASSERTIONS -D DEBUG=1 -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include/nspr -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include/nss -D MOZILLA_CLIENT -D MOZILLA_CONFIG_H -D __STDC_FORMAT_MACROS -D _GNU_SOURCE -D HAVE_ENDIAN_H -D HAVE_PTHREAD -D HAVE_TIMESPEC_GET -D MOZ_INCLUDE_MOZALLOC_H -D mozilla_throw_gcc_h -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/x86_64-linux-gnu/c++/14 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../include/c++/14/backward -internal-isystem /usr/lib/llvm-21/lib/clang/21/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-error=pessimizing-move -Wno-error=large-by-value-copy=128 -Wno-error=implicit-int-float-conversion -Wno-error=thread-safety-analysis -Wno-error=tautological-type-limit-compare -Wno-invalid-offsetof -Wno-range-loop-analysis -Wno-deprecated-anon-enum-enum-conversion -Wno-deprecated-enum-enum-conversion -Wno-deprecated-this-capture -Wno-inline-new-delete -Wno-error=deprecated-declarations -Wno-error=array-bounds -Wno-error=free-nonheap-object -Wno-error=atomic-alignment -Wno-error=deprecated-builtins -Wno-psabi -Wno-error=builtin-macro-redefined -Wno-vla-cxx-extension -Wno-unknown-warning-option -fdeprecated-macro -ferror-limit 19 -fstrict-flex-arrays=1 -stack-protector 2 -fstack-clash-protection -ftrivial-auto-var-init=pattern 
-fno-rtti -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -fno-sized-deallocation -fno-aligned-allocation -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2025-06-27-100320-3286336-1 -x c++ glsl-optimizer/src/compiler/glsl/opt_copy_propagation_elements.cpp

glsl-optimizer/src/compiler/glsl/opt_copy_propagation_elements.cpp

1/*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24/**
25 * \file opt_copy_propagation_elements.cpp
26 *
27 * Replaces usage of recently-copied components of variables with the
28 * previous copy of the variable.
29 *
30 * This should reduce the number of MOV instructions in the generated
31 * programs and help triggering other optimizations that live in GLSL
32 * level.
33 */
34
35#include "ir.h"
36#include "ir_rvalue_visitor.h"
37#include "ir_basic_block.h"
38#include "ir_optimization.h"
39#include "compiler/glsl_types.h"
40#include "util/hash_table.h"
41#include "util/set.h"
42
43static bool debug = false;
44
45namespace {
46
47class acp_entry
48{
49public:
 50   DECLARE_LINEAR_ZALLOC_CXX_OPERATORS(acp_entry)
51
52 /* If set, rhs_full indicates that this ACP entry represents a
53 * whole-variable copy. The rhs_element[] array will still be filled,
54 * to allow the swizzling from its components in case the variable
55 * was a vector (and to simplify some of the erase() and write_vector()
56 * logic).
57 */
58
59 ir_variable *rhs_full;
60 ir_variable *rhs_element[4];
61 unsigned rhs_channel[4];
62
63 /* Set of variables that use the variable associated with this acp_entry as
64 * RHS. This has the "reverse references" of rhs_full/rhs_element. It is
65 * used to speed up invalidating those references when the acp_entry
66 * changes.
67 */
68 set *dsts;
69};
70
71class copy_propagation_state {
72public:
 73   DECLARE_RZALLOC_CXX_OPERATORS(copy_propagation_state);
74
75 static
76 copy_propagation_state* create(void *mem_ctx)
77 {
 78      return new (mem_ctx) copy_propagation_state(NULL);
79 }
80
81 copy_propagation_state* clone()
82 {
83 return new (ralloc_parent(this)) copy_propagation_state(this);
84 }
85
86 void erase_all()
87 {
88 /* Individual elements were allocated from a linear allocator, so will
89 * be destroyed when the state is destroyed.
90 */
 91      _mesa_hash_table_clear(acp, NULL);
 92      fallback = NULL;
93 }
94
95 void erase(ir_variable *var, unsigned write_mask)
96 {
97 acp_entry *entry = pull_acp(var);
 98      entry->rhs_full = NULL;
99
100 for (int i = 0; i < 4; i++) {
101 if (!entry->rhs_element[i])
102 continue;
103 if ((write_mask & (1 << i)) == 0)
104 continue;
105
106 ir_variable *to_remove = entry->rhs_element[i];
 107         entry->rhs_element[i] = NULL;
108 remove_unused_var_from_dsts(entry, var, to_remove);
109 }
110
111 /* TODO: Check write mask, and possibly not clear everything. */
112
113 /* For any usage of our variable on the RHS, clear it out. */
 114      set_foreach(entry->dsts, set_entry) {
115 ir_variable *dst_var = (ir_variable *)set_entry->key;
116 acp_entry *dst_entry = pull_acp(dst_var);
117 for (int i = 0; i < 4; i++) {
118 if (dst_entry->rhs_element[i] == var)
 119               dst_entry->rhs_element[i] = NULL;
120 }
121 if (dst_entry->rhs_full == var)
 122            dst_entry->rhs_full = NULL;
123 _mesa_set_remove(entry->dsts, set_entry);
124 }
125 }
126
127 acp_entry *read(ir_variable *var)
128 {
 129      for (copy_propagation_state *s = this; s != NULL; s = s->fallback) {
130 hash_entry *ht_entry = _mesa_hash_table_search(s->acp, var);
131 if (ht_entry)
132 return (acp_entry *) ht_entry->data;
133 }
 134      return NULL;
135 }
136
137 void write_elements(ir_variable *lhs, ir_variable *rhs, unsigned write_mask, int swizzle[4])
138 {
139 acp_entry *lhs_entry = pull_acp(lhs);
 140      lhs_entry->rhs_full = NULL;
141
142 for (int i = 0; i < 4; i++) {
143 if ((write_mask & (1 << i)) == 0)
144 continue;
145 ir_variable *to_remove = lhs_entry->rhs_element[i];
146 lhs_entry->rhs_element[i] = rhs;
147 lhs_entry->rhs_channel[i] = swizzle[i];
148
149 remove_unused_var_from_dsts(lhs_entry, lhs, to_remove);
150 }
151
152 acp_entry *rhs_entry = pull_acp(rhs);
153 _mesa_set_add(rhs_entry->dsts, lhs);
154 }
155
156 void write_full(ir_variable *lhs, ir_variable *rhs)
157 {
158 acp_entry *lhs_entry = pull_acp(lhs);
159 if (lhs_entry->rhs_full == rhs)
160 return;
161
162 if (lhs_entry->rhs_full) {
163 remove_from_dsts(lhs_entry->rhs_full, lhs);
164 } else if (lhs->type->is_vector()) {
165 for (int i = 0; i < 4; i++) {
166 if (lhs_entry->rhs_element[i])
167 remove_from_dsts(lhs_entry->rhs_element[i], lhs);
168 }
169 }
170
171 lhs_entry->rhs_full = rhs;
172 acp_entry *rhs_entry = pull_acp(rhs);
173 _mesa_set_add(rhs_entry->dsts, lhs);
174
175 if (lhs->type->is_vector()) {
176 for (int i = 0; i < 4; i++) {
177 lhs_entry->rhs_element[i] = rhs;
178 lhs_entry->rhs_channel[i] = i;
179 }
180 }
181 }
182
183 void remove_unused_var_from_dsts(acp_entry *lhs_entry, ir_variable *lhs, ir_variable *var)
184 {
185 if (!var)
186 return;
187
188 /* If lhs still uses var, don't remove anything. */
189 for (int j = 0; j < 4; j++) {
190 if (lhs_entry->rhs_element[j] == var)
191 return;
192 }
193
194 acp_entry *element = pull_acp(var);
 195      assert(element);
196 _mesa_set_remove_key(element->dsts, lhs);
197 }
198
199private:
200 explicit copy_propagation_state(copy_propagation_state *fallback)
201 {
202 this->fallback = fallback;
203 /* Use 'this' as context for the table, no explicit destruction
204 * needed later.
205 */
206 acp = _mesa_pointer_hash_table_create(this);
207 lin_ctx = linear_alloc_parent(this, 0);
208 }
209
210 acp_entry *pull_acp(ir_variable *var)
211 {
212 hash_entry *ht_entry = _mesa_hash_table_search(acp, var);
213 if (ht_entry)
214 return (acp_entry *) ht_entry->data;
215
216 /* If not found, create one and copy data from fallback if available. */
217 acp_entry *entry = new(lin_ctx) acp_entry();
218 _mesa_hash_table_insert(acp, var, entry);
219
220 bool found = false;
 221      for (copy_propagation_state *s = fallback; s != NULL; s = s->fallback) {
222 hash_entry *fallback_ht_entry = _mesa_hash_table_search(s->acp, var);
223 if (fallback_ht_entry) {
224 acp_entry *fallback_entry = (acp_entry *) fallback_ht_entry->data;
225 *entry = *fallback_entry;
226 entry->dsts = _mesa_set_clone(fallback_entry->dsts, this);
227 found = true;
228 break;
229 }
230 }
231
232 if (!found) {
233 entry->dsts = _mesa_pointer_set_create(this);
234 }
235
236 return entry;
237 }
238
239 void
240 remove_from_dsts(ir_variable *var, ir_variable *to_remove)
241 {
242 acp_entry *entry = pull_acp(var);
 243      assert(entry);
244 _mesa_set_remove_key(entry->dsts, to_remove);
245 }
246
247 /** Available Copy to Propagate table, from variable to the entry
248 * containing the current sources that can be used. */
249 hash_table *acp;
250
251 /** When a state is cloned, entries are copied on demand from fallback. */
252 copy_propagation_state *fallback;
253
254 void *lin_ctx;
255};
256
257class kill_entry : public exec_node
258{
259public:
260 /* override operator new from exec_node */
 261   DECLARE_LINEAR_ZALLOC_CXX_OPERATORS(kill_entry)
262
263 kill_entry(ir_variable *var, int write_mask)
264 {
265 this->var = var;
266 this->write_mask = write_mask;
267 }
268
269 ir_variable *var;
270 unsigned int write_mask;
271};
272
273class ir_copy_propagation_elements_visitor : public ir_rvalue_visitor {
274public:
275 ir_copy_propagation_elements_visitor()
276 {
277 this->progress = false;
278 this->killed_all = false;
 279      this->mem_ctx = ralloc_context(NULL);
280 this->lin_ctx = linear_alloc_parent(this->mem_ctx, 0);
 281      this->shader_mem_ctx = NULL;
282 this->kills = new(mem_ctx) exec_list;
283 this->state = copy_propagation_state::create(mem_ctx);
284 }
285 ~ir_copy_propagation_elements_visitor()
286 {
287 ralloc_free(mem_ctx);
288 }
289
290 virtual ir_visitor_status visit(ir_dereference_variable *);
291
292 void handle_loop(ir_loop *, bool keep_acp);
293 virtual ir_visitor_status visit_enter(class ir_loop *);
294 virtual ir_visitor_status visit_enter(class ir_function_signature *);
295 virtual ir_visitor_status visit_leave(class ir_assignment *);
296 virtual ir_visitor_status visit_enter(class ir_call *);
297 virtual ir_visitor_status visit_enter(class ir_if *);
298 virtual ir_visitor_status visit_leave(class ir_swizzle *);
299
300 void handle_rvalue(ir_rvalue **rvalue);
301
302 void add_copy(ir_assignment *ir);
303 void kill(kill_entry *k);
304 void handle_if_block(exec_list *instructions, exec_list *kills, bool *killed_all);
305
306 copy_propagation_state *state;
307
308 /**
309 * List of kill_entry: The variables whose values were killed in this
310 * block.
311 */
312 exec_list *kills;
313
314 bool progress;
315
316 bool killed_all;
317
318 /* Context for our local data structures. */
319 void *mem_ctx;
320 void *lin_ctx;
321 /* Context for allocating new shader nodes. */
322 void *shader_mem_ctx;
323};
324
325} /* unnamed namespace */
326
327ir_visitor_status
328ir_copy_propagation_elements_visitor::visit(ir_dereference_variable *ir)
329{
330 if (this->in_assignee)
331 return visit_continue;
332
333 const acp_entry *entry = state->read(ir->var);
334 if (entry && entry->rhs_full) {
335 ir->var = (ir_variable *) entry->rhs_full;
336 progress = true;
337 }
338
339 return visit_continue;
340}
341
342ir_visitor_status
343ir_copy_propagation_elements_visitor::visit_enter(ir_function_signature *ir)
344{
345 /* Treat entry into a function signature as a completely separate
346 * block. Any instructions at global scope will be shuffled into
347 * main() at link time, so they're irrelevant to us.
348 */
349 exec_list *orig_kills = this->kills;
350 bool orig_killed_all = this->killed_all;
351
352 this->kills = new(mem_ctx) exec_list;
353 this->killed_all = false;
354
355 copy_propagation_state *orig_state = state;
356 this->state = copy_propagation_state::create(mem_ctx);
357
358 visit_list_elements(this, &ir->body);
359
360 delete this->state;
361 this->state = orig_state;
362
363 ralloc_free(this->kills);
364 this->kills = orig_kills;
365 this->killed_all = orig_killed_all;
366
367 return visit_continue_with_parent;
368}
369
370ir_visitor_status
371ir_copy_propagation_elements_visitor::visit_leave(ir_assignment *ir)
372{
373 ir_dereference_variable *lhs = ir->lhs->as_dereference_variable();
374 ir_variable *var = ir->lhs->variable_referenced();
375
376 kill_entry *k;
377
 378   if (lhs && var->type->is_vector())
0.1
'lhs' is null
379 k = new(this->lin_ctx) kill_entry(var, ir->write_mask);
380 else
381 k = new(this->lin_ctx) kill_entry(var, ~0);
382
383 kill(k);
384
385 add_copy(ir);
1
Calling 'ir_copy_propagation_elements_visitor::add_copy'
386
387 return visit_continue;
388}
389
390ir_visitor_status
391ir_copy_propagation_elements_visitor::visit_leave(ir_swizzle *)
392{
393 /* Don't visit the values of swizzles since they are handled while
394 * visiting the swizzle itself.
395 */
396 return visit_continue;
397}
398
399/**
400 * Replaces dereferences of ACP RHS variables with ACP LHS variables.
401 *
402 * This is where the actual copy propagation occurs. Note that the
403 * rewriting of ir_dereference means that the ir_dereference instance
404 * must not be shared by multiple IR operations!
405 */
406void
407ir_copy_propagation_elements_visitor::handle_rvalue(ir_rvalue **ir)
408{
409 int swizzle_chan[4];
410 ir_dereference_variable *deref_var;
 411   ir_variable *source[4] = {NULL, NULL, NULL, NULL};
412 int source_chan[4] = {0, 0, 0, 0};
413 int chans;
414 bool noop_swizzle = true;
415
416 if (!*ir)
417 return;
418
419 ir_swizzle *swizzle = (*ir)->as_swizzle();
420 if (swizzle) {
421 deref_var = swizzle->val->as_dereference_variable();
422 if (!deref_var)
423 return;
424
425 swizzle_chan[0] = swizzle->mask.x;
426 swizzle_chan[1] = swizzle->mask.y;
427 swizzle_chan[2] = swizzle->mask.z;
428 swizzle_chan[3] = swizzle->mask.w;
429 chans = swizzle->type->vector_elements;
430 } else {
431 deref_var = (*ir)->as_dereference_variable();
432 if (!deref_var)
433 return;
434
435 swizzle_chan[0] = 0;
436 swizzle_chan[1] = 1;
437 swizzle_chan[2] = 2;
438 swizzle_chan[3] = 3;
439 chans = deref_var->type->vector_elements;
440 }
441
442 if (this->in_assignee)
443 return;
444
445 ir_variable *var = deref_var->var;
446
447 /* Try to find ACP entries covering swizzle_chan[], hoping they're
448 * the same source variable.
449 */
450
451 const acp_entry *entry = state->read(var);
452 if (entry) {
453 for (int c = 0; c < chans; c++) {
454 unsigned index = swizzle_chan[c];
455 ir_variable *src = entry->rhs_element[index];
456 if (!src)
457 continue;
458 source[c] = src;
459 source_chan[c] = entry->rhs_channel[index];
460 if (source_chan[c] != swizzle_chan[c])
461 noop_swizzle = false;
462 }
463 }
464
465 /* Make sure all channels are copying from the same source variable. */
466 if (!source[0])
467 return;
468 for (int c = 1; c < chans; c++) {
469 if (source[c] != source[0])
470 return;
471 }
472
473 if (!shader_mem_ctx)
474 shader_mem_ctx = ralloc_parent(deref_var);
475
476 /* Don't pointlessly replace the rvalue with itself (or a noop swizzle
477 * of itself, which would just be deleted by opt_noop_swizzle).
478 */
479 if (source[0] == var && noop_swizzle)
480 return;
481
482 if (debug) {
483 printf("Copy propagation from:\n");
484 (*ir)->print();
485 }
486
487 deref_var = new(shader_mem_ctx) ir_dereference_variable(source[0]);
488 *ir = new(shader_mem_ctx) ir_swizzle(deref_var,
489 source_chan[0],
490 source_chan[1],
491 source_chan[2],
492 source_chan[3],
493 chans);
494 progress = true;
495
496 if (debug) {
497 printf("to:\n");
498 (*ir)->print();
499 printf("\n");
500 }
501}
502
503
504ir_visitor_status
505ir_copy_propagation_elements_visitor::visit_enter(ir_call *ir)
506{
507 /* Do copy propagation on call parameters, but skip any out params */
 508   foreach_two_lists(formal_node, &ir->callee->parameters,
 509                     actual_node, &ir->actual_parameters) {
510 ir_variable *sig_param = (ir_variable *) formal_node;
511 ir_rvalue *ir = (ir_rvalue *) actual_node;
512 if (sig_param->data.mode != ir_var_function_out
513 && sig_param->data.mode != ir_var_function_inout) {
514 ir->accept(this);
515 }
516 }
517
518 if (!ir->callee->is_intrinsic()) {
519 state->erase_all();
520 this->killed_all = true;
521 } else {
522 if (ir->return_deref) {
523 kill(new(this->lin_ctx) kill_entry(ir->return_deref->var, ~0));
524 }
525
 526      foreach_two_lists(formal_node, &ir->callee->parameters,
 527                        actual_node, &ir->actual_parameters) {
528 ir_variable *sig_param = (ir_variable *) formal_node;
529 if (sig_param->data.mode == ir_var_function_out ||
530 sig_param->data.mode == ir_var_function_inout) {
531 ir_rvalue *ir = (ir_rvalue *) actual_node;
532 ir_variable *var = ir->variable_referenced();
533 kill(new(this->lin_ctx) kill_entry(var, ~0));
534 }
535 }
536 }
537
538 return visit_continue_with_parent;
539}
540
541void
542ir_copy_propagation_elements_visitor::handle_if_block(exec_list *instructions, exec_list *kills, bool *killed_all)
543{
544 exec_list *orig_kills = this->kills;
545 bool orig_killed_all = this->killed_all;
546
547 this->kills = kills;
548 this->killed_all = false;
549
550 /* Populate the initial acp with a copy of the original */
551 copy_propagation_state *orig_state = state;
552 this->state = orig_state->clone();
553
554 visit_list_elements(this, instructions);
555
556 delete this->state;
557 this->state = orig_state;
558
559 *killed_all = this->killed_all;
560 this->kills = orig_kills;
561 this->killed_all = orig_killed_all;
562}
563
564ir_visitor_status
565ir_copy_propagation_elements_visitor::visit_enter(ir_if *ir)
566{
567 ir->condition->accept(this);
568
569 exec_list *new_kills = new(mem_ctx) exec_list;
570 bool then_killed_all = false;
571 bool else_killed_all = false;
572
573 handle_if_block(&ir->then_instructions, new_kills, &then_killed_all);
574 handle_if_block(&ir->else_instructions, new_kills, &else_killed_all);
575
576 if (then_killed_all || else_killed_all) {
577 state->erase_all();
578 killed_all = true;
579 } else {
 580      foreach_in_list_safe(kill_entry, k, new_kills)
581 kill(k);
582 }
583
584 ralloc_free(new_kills);
585
586 /* handle_if_block() already descended into the children. */
587 return visit_continue_with_parent;
588}
589
590void
591ir_copy_propagation_elements_visitor::handle_loop(ir_loop *ir, bool keep_acp)
592{
593 exec_list *orig_kills = this->kills;
594 bool orig_killed_all = this->killed_all;
595
596 this->kills = new(mem_ctx) exec_list;
597 this->killed_all = false;
598
599 copy_propagation_state *orig_state = state;
600
601 if (keep_acp) {
602 /* Populate the initial acp with a copy of the original */
603 this->state = orig_state->clone();
604 } else {
605 this->state = copy_propagation_state::create(mem_ctx);
606 }
607
608 visit_list_elements(this, &ir->body_instructions);
609
610 delete this->state;
611 this->state = orig_state;
612
613 if (this->killed_all)
614 this->state->erase_all();
615
616 exec_list *new_kills = this->kills;
617 this->kills = orig_kills;
618 this->killed_all = this->killed_all || orig_killed_all;
619
 620   foreach_in_list_safe(kill_entry, k, new_kills) {
621 kill(k);
622 }
623
624 ralloc_free(new_kills);
625}
626
627ir_visitor_status
628ir_copy_propagation_elements_visitor::visit_enter(ir_loop *ir)
629{
630 handle_loop(ir, false);
631 handle_loop(ir, true);
632
633 /* already descended into the children. */
634 return visit_continue_with_parent;
635}
636
637/* Remove any entries currently in the ACP for this kill. */
638void
639ir_copy_propagation_elements_visitor::kill(kill_entry *k)
640{
641 state->erase(k->var, k->write_mask);
642
643 /* If we were on a list, remove ourselves before inserting */
644 if (k->next)
645 k->remove();
646
647 this->kills->push_tail(k);
648}
649
650/**
651 * Adds directly-copied channels between vector variables to the available
652 * copy propagation list.
653 */
654void
655ir_copy_propagation_elements_visitor::add_copy(ir_assignment *ir)
656{
657 if (ir->condition)
2
Assuming field 'condition' is null
3
Taking false branch
658 return;
659
660 {
661 ir_variable *lhs_var = ir->whole_variable_written();
662 ir_dereference_variable *rhs = ir->rhs->as_dereference_variable();
4
Calling 'ir_instruction::as_dereference_variable'
10
Returning from 'ir_instruction::as_dereference_variable'
663
 664      if (lhs_var != NULL && rhs && rhs->var != NULL && lhs_var != rhs->var) {
11.1
'rhs' is non-null
11
Assuming 'lhs_var' is not equal to NULL
12
Assuming field 'var' is equal to NULL
665 if (lhs_var->data.mode == ir_var_shader_storage ||
666 lhs_var->data.mode == ir_var_shader_shared ||
667 rhs->var->data.mode == ir_var_shader_storage ||
668 rhs->var->data.mode == ir_var_shader_shared ||
669 lhs_var->data.precise != rhs->var->data.precise) {
670 return;
671 }
672 state->write_full(lhs_var, rhs->var);
673 return;
674 }
675 }
676
677 int orig_swizzle[4] = {0, 1, 2, 3};
678 int swizzle[4];
679
680 ir_dereference_variable *lhs = ir->lhs->as_dereference_variable();
13
Calling 'ir_instruction::as_dereference_variable'
19
Returning from 'ir_instruction::as_dereference_variable'
 681   if (!lhs || !(lhs->type->is_scalar() || lhs->type->is_vector()))
19.1
'lhs' is non-null
682 return;
683
684 if (lhs->var->data.mode == ir_var_shader_storage ||
20
Assuming field 'mode' is not equal to ir_var_shader_storage
22
Taking false branch
685 lhs->var->data.mode == ir_var_shader_shared)
21
Assuming field 'mode' is not equal to ir_var_shader_shared
686 return;
687
688 ir_dereference_variable *rhs = ir->rhs->as_dereference_variable();
23
Calling 'ir_instruction::as_dereference_variable'
28
Returning from 'ir_instruction::as_dereference_variable'
 689   if (!rhs) {
28.1
'rhs' is non-null
690 ir_swizzle *swiz = ir->rhs->as_swizzle();
691 if (!swiz)
692 return;
693
694 rhs = swiz->val->as_dereference_variable();
695 if (!rhs)
696 return;
697
698 orig_swizzle[0] = swiz->mask.x;
699 orig_swizzle[1] = swiz->mask.y;
700 orig_swizzle[2] = swiz->mask.z;
701 orig_swizzle[3] = swiz->mask.w;
702 }
703
704 if (rhs->var->data.mode == ir_var_shader_storage ||
29
Dereference of null pointer
705 rhs->var->data.mode == ir_var_shader_shared)
706 return;
707
708 /* Move the swizzle channels out to the positions they match in the
709 * destination. We don't want to have to rewrite the swizzle[]
710 * array every time we clear a bit of the write_mask.
711 */
712 int j = 0;
713 for (int i = 0; i < 4; i++) {
714 if (ir->write_mask & (1 << i))
715 swizzle[i] = orig_swizzle[j++];
716 }
717
718 int write_mask = ir->write_mask;
719 if (lhs->var == rhs->var) {
720 /* If this is a copy from the variable to itself, then we need
721 * to be sure not to include the updated channels from this
722 * instruction in the set of new source channels to be
723 * copy-propagated from.
724 */
725 for (int i = 0; i < 4; i++) {
726 if (ir->write_mask & (1 << orig_swizzle[i]))
727 write_mask &= ~(1 << i);
728 }
729 }
730
731 if (lhs->var->data.precise != rhs->var->data.precise)
732 return;
733
734 state->write_elements(lhs->var, rhs->var, write_mask, swizzle);
735}
736
737bool
738do_copy_propagation_elements(exec_list *instructions)
739{
740 ir_copy_propagation_elements_visitor v;
741
742 visit_list_elements(&v, instructions);
743
744 return v.progress;
745}
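
For context on the downcasts involved in the path above: the as_*() helpers generated by AS_BASE/AS_CHILD in ir.h (second listing, below) return NULL when the node's ir_type tag does not match the requested class, so callers are expected to check the returned pointer, and in this report the analyzer additionally assumes the dereference's 'var' field may be NULL. A small illustrative helper under those assumptions (the function name is hypothetical, not part of the source):

   /* Hypothetical helper: fetch the variable behind an IR node, if any.
    * as_dereference_variable() is the AS_CHILD-generated downcast and
    * returns NULL on a type mismatch; deref->var is returned as-is and
    * may itself be NULL under the analyzer's assumption.
    */
   static ir_variable *
   variable_behind_deref(ir_instruction *ir)
   {
      ir_dereference_variable *deref = ir->as_dereference_variable();
      if (deref == NULL)
         return NULL;
      return deref->var;
   }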

glsl-optimizer/src/compiler/glsl/ir.h

1/* -*- c++ -*- */
2/*
3 * Copyright © 2010 Intel Corporation
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef IR_H
26#define IR_H
27
28#include <stdio.h>
29#include <stdlib.h>
30
31#include "util/ralloc.h"
32#include "util/format/u_format.h"
33#include "util/half_float.h"
34#include "compiler/glsl_types.h"
35#include "list.h"
36#include "ir_visitor.h"
37#include "ir_hierarchical_visitor.h"
38
 39#ifdef __cplusplus
40
41/**
42 * \defgroup IR Intermediate representation nodes
43 *
44 * @{
45 */
46
47/**
48 * Class tags
49 *
50 * Each concrete class derived from \c ir_instruction has a value in this
51 * enumerant. The value for the type is stored in \c ir_instruction::ir_type
52 * by the constructor. While using type tags is not very C++, it is extremely
53 * convenient. For example, during debugging you can simply inspect
54 * \c ir_instruction::ir_type to find out the actual type of the object.
55 *
56 * In addition, it is possible to use a switch-statement based on \c
57 * \c ir_instruction::ir_type to select different behavior for different object
58 * types. For functions that have only slight differences for several object
59 * types, this allows writing very straightforward, readable code.
60 */
61enum ir_node_type {
62 ir_type_dereference_array,
63 ir_type_dereference_record,
64 ir_type_dereference_variable,
65 ir_type_constant,
66 ir_type_expression,
67 ir_type_swizzle,
68 ir_type_texture,
69 ir_type_variable,
70 ir_type_assignment,
71 ir_type_call,
72 ir_type_function,
73 ir_type_function_signature,
74 ir_type_if,
75 ir_type_loop,
76 ir_type_loop_jump,
77 ir_type_return,
78 ir_type_precision,
79 ir_type_typedecl,
80 ir_type_discard,
81 ir_type_demote,
82 ir_type_emit_vertex,
83 ir_type_end_primitive,
84 ir_type_barrier,
85 ir_type_max, /**< maximum ir_type enum number, for validation */
86 ir_type_unset = ir_type_max
87};
88
89
90/**
91 * Base class of all IR instructions
92 */
93class ir_instruction : public exec_node {
94public:
95 enum ir_node_type ir_type;
96
97 /**
98 * GCC 4.7+ and clang warn when deleting an ir_instruction unless
99 * there's a virtual destructor present. Because we almost
100 * universally use ralloc for our memory management of
101 * ir_instructions, the destructor doesn't need to do any work.
102 */
103 virtual ~ir_instruction()
104 {
105 }
106
107 /** ir_print_visitor helper for debugging. */
108 void print(void) const;
109 void fprint(FILE *f) const;
110
111 virtual void accept(ir_visitor *) = 0;
112 virtual ir_visitor_status accept(ir_hierarchical_visitor *) = 0;
113 virtual ir_instruction *clone(void *mem_ctx,
114 struct hash_table *ht) const = 0;
115
116 bool is_rvalue() const
117 {
118 return ir_type == ir_type_dereference_array ||
119 ir_type == ir_type_dereference_record ||
120 ir_type == ir_type_dereference_variable ||
121 ir_type == ir_type_constant ||
122 ir_type == ir_type_expression ||
123 ir_type == ir_type_swizzle ||
124 ir_type == ir_type_texture;
125 }
126
127 bool is_dereference() const
128 {
129 return ir_type == ir_type_dereference_array ||
130 ir_type == ir_type_dereference_record ||
131 ir_type == ir_type_dereference_variable;
132 }
133
134 bool is_jump() const
135 {
136 return ir_type == ir_type_loop_jump ||
137 ir_type == ir_type_return ||
138 ir_type == ir_type_discard;
139 }
140
141 /**
142 * \name IR instruction downcast functions
143 *
144 * These functions either cast the object to a derived class or return
145 * \c NULL if the object's type does not match the specified derived class.
146 * Additional downcast functions will be added as needed.
147 */
148 /*@{*/
149 #define AS_BASE(TYPE) \
150 class ir_##TYPE *as_##TYPE() \
151 { \
 152      assume(this != NULL); \
 153      return is_##TYPE() ? (ir_##TYPE *) this : NULL; \
154 } \
155 const class ir_##TYPE *as_##TYPE() const \
156 { \
 157      assume(this != NULL); \
 158      return is_##TYPE() ? (ir_##TYPE *) this : NULL; \
159 }
160
161 AS_BASE(rvalue)
162 AS_BASE(dereference)
163 AS_BASE(jump)
164 #undef AS_BASE
165
166 #define AS_CHILD(TYPE) \
167 class ir_##TYPE * as_##TYPE() \
168 { \
 169      assume(this != NULL); \
 170      return ir_type == ir_type_##TYPE ? (ir_##TYPE *) this : NULL; \
171 } \
172 const class ir_##TYPE * as_##TYPE() const \
173 { \
 174      assume(this != NULL); \
 175      return ir_type == ir_type_##TYPE ? (const ir_##TYPE *) this : NULL; \
176 }
177 AS_CHILD(variable)
178 AS_CHILD(function)
179 AS_CHILD(dereference_array)
180 AS_CHILD(dereference_variable)
5
'?' condition is true
6
Loop condition is false. Exiting loop
7
Assuming field 'ir_type' is equal to ir_type_dereference_variable
8
'?' condition is true
9
Returning pointer, which participates in a condition later
14
'?' condition is true
15
Loop condition is false. Exiting loop
16
Assuming field 'ir_type' is equal to ir_type_dereference_variable
17
'?' condition is true
18
Returning pointer, which participates in a condition later
24
'?' condition is true
25
Loop condition is false. Exiting loop
26
'?' condition is true
27
Returning pointer, which participates in a condition later
181 AS_CHILD(dereference_record)
182 AS_CHILD(expression)
183 AS_CHILD(loop)
184 AS_CHILD(assignment)
185 AS_CHILD(call)
186 AS_CHILD(return)
187 AS_CHILD(if)
188 AS_CHILD(swizzle)
189 AS_CHILD(texture)
190 AS_CHILD(constant)
191 AS_CHILD(discard)
192 #undef AS_CHILD
193 /*@}*/
194
195 /**
196 * IR equality method: Return true if the referenced instruction would
197 * return the same value as this one.
198 *
199 * This intended to be used for CSE and algebraic optimizations, on rvalues
200 * in particular. No support for other instruction types (assignments,
201 * jumps, calls, etc.) is planned.
202 */
203 virtual bool equals(const ir_instruction *ir,
204 enum ir_node_type ignore = ir_type_unset) const;
205
206protected:
207 ir_instruction(enum ir_node_type t)
208 : ir_type(t)
209 {
210 }
211
212private:
213 ir_instruction()
214 {
215 assert(!"Should not get here.")(static_cast <bool> (!"Should not get here.") ? void (0
) : __assert_fail ("!\"Should not get here.\"", __builtin_FILE
(), __builtin_LINE (), __extension__ __PRETTY_FUNCTION__))
;
216 }
217};
218
219
220/**
221 * The base class for all "values"/expression trees.
222 */
223class ir_rvalue : public ir_instruction {
224public:
225 const struct glsl_type *type;
226
227 virtual ir_rvalue *clone(void *mem_ctx, struct hash_table *) const;
228
229 virtual void accept(ir_visitor *v)
230 {
231 v->visit(this);
232 }
233
234 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
235
236 virtual ir_constant *constant_expression_value(void *mem_ctx,
 237                                                  struct hash_table *variable_context = NULL);
238
239 ir_rvalue *as_rvalue_to_saturate();
240
 241   virtual bool is_lvalue(const struct _mesa_glsl_parse_state * = NULL) const
242 {
243 return false;
244 }
245
246 /**
247 * Get the variable that is ultimately referenced by an r-value
248 */
249 virtual ir_variable *variable_referenced() const
250 {
 251      return NULL;
252 }
253
254
255 /**
256 * If an r-value is a reference to a whole variable, get that variable
257 *
258 * \return
259 * Pointer to a variable that is completely dereferenced by the r-value. If
260 * the r-value is not a dereference or the dereference does not access the
261 * entire variable (i.e., it's just one array element, struct field), \c NULL
262 * is returned.
263 */
264 virtual ir_variable *whole_variable_referenced()
265 {
 266      return NULL;
267 }
268
269 /**
270 * Determine if an r-value has the value zero
271 *
272 * The base implementation of this function always returns \c false. The
273 * \c ir_constant class over-rides this function to return \c true \b only
274 * for vector and scalar types that have all elements set to the value
275 * zero (or \c false for booleans).
276 *
277 * \sa ir_constant::has_value, ir_rvalue::is_one, ir_rvalue::is_negative_one
278 */
279 virtual bool is_zero() const;
280
281 /**
282 * Determine if an r-value has the value one
283 *
284 * The base implementation of this function always returns \c false. The
285 * \c ir_constant class over-rides this function to return \c true \b only
286 * for vector and scalar types that have all elements set to the value
287 * one (or \c true for booleans).
288 *
289 * \sa ir_constant::has_value, ir_rvalue::is_zero, ir_rvalue::is_negative_one
290 */
291 virtual bool is_one() const;
292
293 /**
294 * Determine if an r-value has the value negative one
295 *
296 * The base implementation of this function always returns \c false. The
297 * \c ir_constant class over-rides this function to return \c true \b only
298 * for vector and scalar types that have all elements set to the value
299 * negative one. For boolean types, the result is always \c false.
300 *
301 * \sa ir_constant::has_value, ir_rvalue::is_zero, ir_rvalue::is_one
302 */
303 virtual bool is_negative_one() const;
304
305 /**
306 * Determine if an r-value is an unsigned integer constant which can be
307 * stored in 16 bits.
308 *
309 * \sa ir_constant::is_uint16_constant.
310 */
311 virtual bool is_uint16_constant() const { return false; }
312
313 /**
314 * Return a generic value of error_type.
315 *
316 * Allocation will be performed with 'mem_ctx' as ralloc owner.
317 */
318 static ir_rvalue *error_value(void *mem_ctx);
319
320protected:
321 ir_rvalue(enum ir_node_type t);
322};
323
324
325/**
326 * Variable storage classes
327 */
328enum ir_variable_mode {
329 ir_var_auto = 0, /**< Function local variables and globals. */
330 ir_var_uniform, /**< Variable declared as a uniform. */
331 ir_var_shader_storage, /**< Variable declared as an ssbo. */
332 ir_var_shader_shared, /**< Variable declared as shared. */
333 ir_var_shader_in,
334 ir_var_shader_out,
335 ir_var_function_in,
336 ir_var_function_out,
337 ir_var_function_inout,
338 ir_var_const_in, /**< "in" param that must be a constant expression */
339 ir_var_system_value, /**< Ex: front-face, instance-id, etc. */
340 ir_var_temporary, /**< Temporary variable generated during compilation. */
341 ir_var_mode_count /**< Number of variable modes */
342};
343
344/**
345 * Enum keeping track of how a variable was declared. For error checking of
346 * the gl_PerVertex redeclaration rules.
347 */
348enum ir_var_declaration_type {
349 /**
350 * Normal declaration (for most variables, this means an explicit
351 * declaration. Exception: temporaries are always implicitly declared, but
352 * they still use ir_var_declared_normally).
353 *
354 * Note: an ir_variable that represents a named interface block uses
355 * ir_var_declared_normally.
356 */
357 ir_var_declared_normally = 0,
358
359 /**
360 * Variable was explicitly declared (or re-declared) in an unnamed
361 * interface block.
362 */
363 ir_var_declared_in_block,
364
365 /**
366 * Variable is an implicitly declared built-in that has not been explicitly
367 * re-declared by the shader.
368 */
369 ir_var_declared_implicitly,
370
371 /**
372 * Variable is implicitly generated by the compiler and should not be
373 * visible via the API.
374 */
375 ir_var_hidden,
376};
377
378/**
379 * \brief Layout qualifiers for gl_FragDepth.
380 *
381 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared
382 * with a layout qualifier.
383 */
384enum ir_depth_layout {
385 ir_depth_layout_none, /**< No depth layout is specified. */
386 ir_depth_layout_any,
387 ir_depth_layout_greater,
388 ir_depth_layout_less,
389 ir_depth_layout_unchanged
390};
391
392/**
393 * \brief Convert depth layout qualifier to string.
394 */
395const char*
396depth_layout_string(ir_depth_layout layout);
397
398/**
399 * Description of built-in state associated with a uniform
400 *
401 * \sa ir_variable::state_slots
402 */
403struct ir_state_slot {
 404   gl_state_index16 tokens[STATE_LENGTH];
405 int swizzle;
406};
407
408
409/**
410 * Get the string value for an interpolation qualifier
411 *
412 * \return The string that would be used in a shader to specify \c
413 * mode will be returned.
414 *
415 * This function is used to generate error messages of the form "shader
416 * uses %s interpolation qualifier", so in the case where there is no
417 * interpolation qualifier, it returns "no".
418 *
419 * This function should only be used on a shader input or output variable.
420 */
421const char *interpolation_string(unsigned interpolation);
422
423
424class ir_variable : public ir_instruction {
425public:
426 ir_variable(const struct glsl_type *, const char *, ir_variable_mode);
427
428 virtual ir_variable *clone(void *mem_ctx, struct hash_table *ht) const;
429
430 virtual void accept(ir_visitor *v)
431 {
432 v->visit(this);
433 }
434
435 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
436
437
438 /**
439 * Determine whether or not a variable is part of a uniform or
440 * shader storage block.
441 */
442 inline bool is_in_buffer_block() const
443 {
444 return (this->data.mode == ir_var_uniform ||
445 this->data.mode == ir_var_shader_storage) &&
 446             this->interface_type != NULL;
447 }
448
449 /**
450 * Determine whether or not a variable is part of a shader storage block.
451 */
452 inline bool is_in_shader_storage_block() const
453 {
454 return this->data.mode == ir_var_shader_storage &&
 455             this->interface_type != NULL;
456 }
457
458 /**
459 * Determine whether or not a variable is the declaration of an interface
460 * block
461 *
462 * For the first declaration below, there will be an \c ir_variable named
463 * "instance" whose type and whose instance_type will be the same
464 * \c glsl_type. For the second declaration, there will be an \c ir_variable
465 * named "f" whose type is float and whose instance_type is B2.
466 *
467 * "instance" is an interface instance variable, but "f" is not.
468 *
469 * uniform B1 {
470 * float f;
471 * } instance;
472 *
473 * uniform B2 {
474 * float f;
475 * };
476 */
477 inline bool is_interface_instance() const
478 {
479 return this->type->without_array() == this->interface_type;
480 }
481
482 /**
483 * Return whether this variable contains a bindless sampler/image.
484 */
485 inline bool contains_bindless() const
486 {
487 if (!this->type->contains_sampler() && !this->type->contains_image())
488 return false;
489
490 return this->data.bindless || this->data.mode != ir_var_uniform;
491 }
492
493 /**
494 * Set this->interface_type on a newly created variable.
495 */
496 void init_interface_type(const struct glsl_type *type)
497 {
 498      assert(this->interface_type == NULL);
499 this->interface_type = type;
500 if (this->is_interface_instance()) {
501 this->u.max_ifc_array_access =
 502            ralloc_array(this, int, type->length);
503 for (unsigned i = 0; i < type->length; i++) {
504 this->u.max_ifc_array_access[i] = -1;
505 }
506 }
507 }
508
509 /**
510 * Change this->interface_type on a variable that previously had a
511 * different, but compatible, interface_type. This is used during linking
512 * to set the size of arrays in interface blocks.
513 */
514 void change_interface_type(const struct glsl_type *type)
515 {
 516      if (this->u.max_ifc_array_access != NULL) {
517 /* max_ifc_array_access has already been allocated, so make sure the
518 * new interface has the same number of fields as the old one.
519 */
 520         assert(this->interface_type->length == type->length);
521 }
522 this->interface_type = type;
523 }
524
525 /**
526 * Change this->interface_type on a variable that previously had a
527 * different, and incompatible, interface_type. This is used during
528 * compilation to handle redeclaration of the built-in gl_PerVertex
529 * interface block.
530 */
531 void reinit_interface_type(const struct glsl_type *type)
532 {
 533      if (this->u.max_ifc_array_access != NULL) {
534#ifndef NDEBUG
535 /* Redeclaring gl_PerVertex is only allowed if none of the built-ins
536 * it defines have been accessed yet; so it's safe to throw away the
537 * old max_ifc_array_access pointer, since all of its values are
538 * zero.
539 */
540 for (unsigned i = 0; i < this->interface_type->length; i++)
 541            assert(this->u.max_ifc_array_access[i] == -1);
542#endif
543 ralloc_free(this->u.max_ifc_array_access);
 544         this->u.max_ifc_array_access = NULL;
545 }
 546      this->interface_type = NULL;
547 init_interface_type(type);
548 }
549
550 const glsl_type *get_interface_type() const
551 {
552 return this->interface_type;
553 }
554
555 enum glsl_interface_packing get_interface_type_packing() const
556 {
557 return this->interface_type->get_interface_packing();
558 }
559 /**
560 * Get the max_ifc_array_access pointer
561 *
562 * A "set" function is not needed because the array is dynmically allocated
563 * as necessary.
564 */
565 inline int *get_max_ifc_array_access()
566 {
 567      assert(this->data._num_state_slots == 0);
568 return this->u.max_ifc_array_access;
569 }
570
571 inline unsigned get_num_state_slots() const
572 {
 573      assert(!this->is_interface_instance()
 574             || this->data._num_state_slots == 0);
575 return this->data._num_state_slots;
576 }
577
578 inline void set_num_state_slots(unsigned n)
579 {
 580      assert(!this->is_interface_instance()
 581             || n == 0);
582 this->data._num_state_slots = n;
583 }
584
585 inline ir_state_slot *get_state_slots()
586 {
 587      return this->is_interface_instance() ? NULL : this->u.state_slots;
588 }
589
590 inline const ir_state_slot *get_state_slots() const
591 {
 592      return this->is_interface_instance() ? NULL : this->u.state_slots;
593 }
594
595 inline ir_state_slot *allocate_state_slots(unsigned n)
596 {
 597      assert(!this->is_interface_instance());
598
 599      this->u.state_slots = ralloc_array(this, ir_state_slot, n);
600 this->data._num_state_slots = 0;
601
 602      if (this->u.state_slots != NULL)
603 this->data._num_state_slots = n;
604
605 return this->u.state_slots;
606 }
607
608 inline bool is_interpolation_flat() const
609 {
610 return this->data.interpolation == INTERP_MODE_FLAT ||
611 this->type->contains_integer() ||
612 this->type->contains_double();
613 }
614
615 inline bool is_name_ralloced() const
616 {
617 return this->name != ir_variable::tmp_name &&
618 this->name != this->name_storage;
619 }
620
621 /**
622 * Enable emitting extension warnings for this variable
623 */
624 void enable_extension_warning(const char *extension);
625
626 /**
627 * Get the extension warning string for this variable
628 *
629 * If warnings are not enabled, \c NULL is returned.
630 */
631 const char *get_extension_warning() const;
632
633 /**
634 * Declared type of the variable
635 */
636 const struct glsl_type *type;
637
638 /**
639 * Declared name of the variable
640 */
641 const char *name;
642
643private:
644 /**
645 * If the name length fits into name_storage, it's used, otherwise
646 * the name is ralloc'd. shader-db mining showed that 70% of variables
647 * fit here. This is a win over ralloc where only ralloc_header has
648 * 20 bytes on 64-bit (28 bytes with DEBUG), and we can also skip malloc.
649 */
650 char name_storage[16];
651
652public:
653 struct ir_variable_data {
654
655 /**
656 * Is the variable read-only?
657 *
658 * This is set for variables declared as \c const, shader inputs,
659 * and uniforms.
660 */
661 unsigned read_only:1;
662 unsigned centroid:1;
663 unsigned sample:1;
664 unsigned patch:1;
665 /**
666 * Was an 'invariant' qualifier explicitly set in the shader?
667 *
668 * This is used to cross validate qualifiers.
669 */
670 unsigned explicit_invariant:1;
671 /**
672 * Is the variable invariant?
673 *
674 * It can happen either by having the 'invariant' qualifier
675 * explicitly set in the shader or by being used in calculations
676 * of other invariant variables.
677 */
678 unsigned invariant:1;
679 unsigned precise:1;
680
681 /**
682 * Has this variable been used for reading or writing?
683 *
684 * Several GLSL semantic checks require knowledge of whether or not a
685 * variable has been used. For example, it is an error to redeclare a
686 * variable as invariant after it has been used.
687 *
688 * This is maintained in the ast_to_hir.cpp path and during linking,
689 * but not in Mesa's fixed function or ARB program paths.
690 */
691 unsigned used:1;
692
693 /**
694 * Has this variable been statically assigned?
695 *
696 * This answers whether the variable was assigned in any path of
697 * the shader during ast_to_hir. This doesn't answer whether it is
698 * still written after dead code removal, nor is it maintained in
699 * non-ast_to_hir.cpp (GLSL parsing) paths.
700 */
701 unsigned assigned:1;
702
703 /**
704 * When separate shader programs are enabled, only input/outputs between
705 * the stages of a multi-stage separate program can be safely removed
706 * from the shader interface. Other input/outputs must remains active.
707 */
708 unsigned always_active_io:1;
709
710 /**
711 * Enum indicating how the variable was declared. See
712 * ir_var_declaration_type.
713 *
714 * This is used to detect certain kinds of illegal variable redeclarations.
715 */
716 unsigned how_declared:2;
717
718 /**
719 * Storage class of the variable.
720 *
721 * \sa ir_variable_mode
722 */
723 unsigned mode:4;
724
725 /**
726 * Interpolation mode for shader inputs / outputs
727 *
728 * \sa glsl_interp_mode
729 */
730 unsigned interpolation:2;
731
732 /**
733 * Was the location explicitly set in the shader?
734 *
735 * If the location is explicitly set in the shader, it \b cannot be changed
736 * by the linker or by the API (e.g., calls to \c glBindAttribLocation have
737 * no effect).
738 */
739 unsigned explicit_location:1;
740 unsigned explicit_index:1;
741
742 /**
743 * Was an initial binding explicitly set in the shader?
744 *
745 * If so, constant_value contains an integer ir_constant representing the
746 * initial binding point.
747 */
748 unsigned explicit_binding:1;
749
750 /**
751 * Was an initial component explicitly set in the shader?
752 */
753 unsigned explicit_component:1;
754
755 /**
756 * Does this variable have an initializer?
757 *
758 * This is used by the linker to cross-validate initializers of global
759 * variables.
760 */
761 unsigned has_initializer:1;
762
763 /**
764 * Is this variable a generic output or input that has not yet been matched
765 * up to a variable in another stage of the pipeline?
766 *
767 * This is used by the linker as scratch storage while assigning locations
768 * to generic inputs and outputs.
769 */
770 unsigned is_unmatched_generic_inout:1;
771
772 /**
773 * Is this varying used by transform feedback?
774 *
775 * This is used by the linker to decide if it's safe to pack the varying.
776 */
777 unsigned is_xfb:1;
778
779 /**
780 * Is this varying used only by transform feedback?
781 *
782 * This is used by the linker to decide if it's safe to pack the varying.
783 */
784 unsigned is_xfb_only:1;
785
786 /**
787 * Was a transform feedback buffer set in the shader?
788 */
789 unsigned explicit_xfb_buffer:1;
790
791 /**
792 * Was a transform feedback offset set in the shader?
793 */
794 unsigned explicit_xfb_offset:1;
795
796 /**
797 * Was a transform feedback stride set in the shader?
798 */
799 unsigned explicit_xfb_stride:1;
800
801 /**
802 * If non-zero, then this variable may be packed along with other variables
803 * into a single varying slot, so this offset should be applied when
804 * accessing components. For example, an offset of 1 means that the x
805 * component of this variable is actually stored in component y of the
806 * location specified by \c location.
807 */
808 unsigned location_frac:2;
809
810 /**
811 * Layout of the matrix. Uses glsl_matrix_layout values.
812 */
813 unsigned matrix_layout:2;
814
815 /**
816 * Non-zero if this variable was created by lowering a named interface
817 * block.
818 */
819 unsigned from_named_ifc_block:1;
820
821 /**
822 * Non-zero if the variable must be a shader input. This is useful for
823 * constraints on function parameters.
824 */
825 unsigned must_be_shader_input:1;
826
827 /**
828 * Output index for dual source blending.
829 *
830 * \note
831 * The GLSL spec only allows the values 0 or 1 for the index in \b dual
832 * source blending.
833 */
834 unsigned index:1;
835
836 /**
837 * Precision qualifier.
838 *
839 * In desktop GLSL we do not care about precision qualifiers at all; in
840 * fact, the spec says that precision qualifiers are ignored.
841 *
842 * To make things easy, we make it so that this field is always
843 * GLSL_PRECISION_NONE on desktop shaders. This way all the variables
844 * have the same precision value and the checks we add in the compiler
845 * for this field will never break a desktop shader compile.
846 */
847 unsigned precision:2;
848
849 /**
850 * \brief Layout qualifier for gl_FragDepth.
851 *
852 * This is not equal to \c ir_depth_layout_none if and only if this
853 * variable is \c gl_FragDepth and a layout qualifier is specified.
854 */
855 ir_depth_layout depth_layout:3;
856
857 /**
858 * Memory qualifiers.
859 */
860 unsigned memory_read_only:1; /**< "readonly" qualifier. */
861 unsigned memory_write_only:1; /**< "writeonly" qualifier. */
862 unsigned memory_coherent:1;
863 unsigned memory_volatile:1;
864 unsigned memory_restrict:1;
865
866 /**
867 * ARB_shader_storage_buffer_object
868 */
869 unsigned from_ssbo_unsized_array:1; /**< unsized array buffer variable. */
870
871 unsigned implicit_sized_array:1;
872
873 /**
874 * Whether this is a fragment shader output implicitly initialized with
875 * the previous contents of the specified render target at the
876 * framebuffer location corresponding to this shader invocation.
877 */
878 unsigned fb_fetch_output:1;
879
880 /**
881 * Non-zero if this variable is considered bindless as defined by
882 * ARB_bindless_texture.
883 */
884 unsigned bindless:1;
885
886 /**
887 * Non-zero if this variable is considered bound as defined by
888 * ARB_bindless_texture.
889 */
890 unsigned bound:1;
891
892 /**
893 * Emit a warning if this variable is accessed.
894 */
895 private:
896 uint8_t warn_extension_index;
897
898 public:
899 /**
900 * Image internal format if specified explicitly, otherwise
901 * PIPE_FORMAT_NONE.
902 */
903 enum pipe_format image_format;
904
905 private:
906 /**
907 * Number of state slots used
908 *
909 * \note
910 * This could be stored in as few as 7-bits, if necessary. If it is made
911 * smaller, add an assertion to \c ir_variable::allocate_state_slots to
912 * be safe.
913 */
914 uint16_t _num_state_slots;
915
916 public:
917 /**
918 * Initial binding point for a sampler, atomic, or UBO.
919 *
920 * For array types, this represents the binding point for the first element.
921 */
922 uint16_t binding;
923
924 /**
925 * Storage location of the base of this variable
926 *
927 * The precise meaning of this field depends on the nature of the variable.
928 *
929 * - Vertex shader input: one of the values from \c gl_vert_attrib.
930 * - Vertex shader output: one of the values from \c gl_varying_slot.
931 * - Geometry shader input: one of the values from \c gl_varying_slot.
932 * - Geometry shader output: one of the values from \c gl_varying_slot.
933 * - Fragment shader input: one of the values from \c gl_varying_slot.
934 * - Fragment shader output: one of the values from \c gl_frag_result.
935 * - Uniforms: Per-stage uniform slot number for default uniform block.
936 * - Uniforms: Index within the uniform block definition for UBO members.
937 * - Non-UBO Uniforms: explicit location until linking then reused to
938 * store uniform slot number.
939 * - Other: This field is not currently used.
940 *
941 * If the variable is a uniform, shader input, or shader output, and the
942 * slot has not been assigned, the value will be -1.
943 */
944 int location;
945
946 /**
947 * For glsl->tgsi/mesa IR we need to store the index into the
948 * parameters for uniforms; initially the code overloaded location,
949 * but this causes problems with indirect samplers and AoA.
950 * This is assigned in _mesa_generate_parameters_list_for_uniforms.
951 */
952 int param_index;
953
954 /**
955 * Vertex stream output identifier.
956 *
957 * For packed outputs, bit 31 is set and bits [2*i+1,2*i] indicate the
958 * stream of the i-th component.
959 */
960 unsigned stream;
961
962 /**
963 * Atomic, transform feedback or block member offset.
964 */
965 unsigned offset;
966
967 /**
968 * Highest element accessed with a constant expression array index
969 *
970 * Not used for non-array variables. A value of -1 means the variable is never accessed.
971 */
972 int max_array_access;
973
974 /**
975 * Transform feedback buffer.
976 */
977 unsigned xfb_buffer;
978
979 /**
980 * Transform feedback stride.
981 */
982 unsigned xfb_stride;
983
984 /**
985 * Allow (only) ir_variable direct access to private members.
986 */
987 friend class ir_variable;
988 } data;
989
990 /**
991 * Value assigned in the initializer of a variable declared "const"
992 */
993 ir_constant *constant_value;
994
995 /**
996 * Constant expression assigned in the initializer of the variable
997 *
998 * \warning
999 * This field and \c ::constant_value are distinct. Even if the two fields
1000 * refer to constants with the same value, they must point to separate
1001 * objects.
1002 */
1003 ir_constant *constant_initializer;
1004
1005private:
1006 static const char *const warn_extension_table[];
1007
1008 union {
1009 /**
1010 * For variables which satisfy the is_interface_instance() predicate,
1011 * this points to an array of integers such that if the ith member of
1012 * the interface block is an array, max_ifc_array_access[i] is the
1013 * maximum array element of that member that has been accessed. If the
1014 * ith member of the interface block is not an array,
1015 * max_ifc_array_access[i] is unused.
1016 *
1017 * For variables whose type is not an interface block, this pointer is
1018 * NULL.
1019 */
1020 int *max_ifc_array_access;
1021
1022 /**
1023 * Built-in state that backs this uniform
1024 *
1025 * Once set at variable creation, \c state_slots must remain invariant.
1026 *
1027 * If the variable is not a uniform, \c _num_state_slots will be zero
1028 * and \c state_slots will be \c NULL.
1029 */
1030 ir_state_slot *state_slots;
1031 } u;
1032
1033 /**
1034 * For variables that are in an interface block or are an instance of an
1035 * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
1036 *
1037 * \sa ir_variable::location
1038 */
1039 const glsl_type *interface_type;
1040
1041 /**
1042 * Name used for anonymous compiler temporaries
1043 */
1044 static const char tmp_name[];
1045
1046public:
1047 /**
1048 * Should the constructor keep names for ir_var_temporary variables?
1049 *
1050 * When this global is false, names passed to the constructor for
1051 * \c ir_var_temporary variables will be dropped. Instead, the variable will
1052 * be named "compiler_temp". This name will be in static storage.
1053 *
1054 * \warning
1055 * \b NEVER change the mode of an \c ir_var_temporary.
1056 *
1057 * \warning
1058 * This variable is \b not thread-safe. It is global, \b not
1059 * per-context. It begins life false. A context can, at some point, make
1060 * it true. From that point on, it will be true forever. This should be
1061 * okay since it will only be set true while debugging.
1062 */
1063 static bool temporaries_allocate_names;
1064};
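A minimal sketch, assuming `var` points at an existing variable, of how the documented public fields above might be consulted (the helper name is hypothetical):

   // Report uniforms / shader inputs / shader outputs whose storage slot has
   // not been assigned yet; per the documentation of data.location, -1 means
   // "not yet assigned".
   static bool has_unassigned_location(const ir_variable *var)
   {
      return var->data.location == -1;
   }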
1065
1066/**
1067 * A function that returns whether a built-in function is available in the
1068 * current shading language (based on version, ES or desktop, and extensions).
1069 */
1070typedef bool (*builtin_available_predicate)(const _mesa_glsl_parse_state *);
1071
1072#define MAKE_INTRINSIC_FOR_TYPE(op, t) \
1073 ir_intrinsic_generic_ ## op - ir_intrinsic_generic_load + ir_intrinsic_ ## t ## _ ## load
1074
1075#define MAP_INTRINSIC_TO_TYPE(i, t) \
1076 ir_intrinsic_id(int(i) - int(ir_intrinsic_generic_load) + int(ir_intrinsic_ ## t ## _ ## load))
1077
1078enum ir_intrinsic_id {
1079 ir_intrinsic_invalid = 0,
1080
1081 /**
1082 * \name Generic intrinsics
1083 *
1084 * Each of these intrinsics has a specific version for shared variables and
1085 * SSBOs.
1086 */
1087 /*@{*/
1088 ir_intrinsic_generic_load,
1089 ir_intrinsic_generic_store,
1090 ir_intrinsic_generic_atomic_add,
1091 ir_intrinsic_generic_atomic_and,
1092 ir_intrinsic_generic_atomic_or,
1093 ir_intrinsic_generic_atomic_xor,
1094 ir_intrinsic_generic_atomic_min,
1095 ir_intrinsic_generic_atomic_max,
1096 ir_intrinsic_generic_atomic_exchange,
1097 ir_intrinsic_generic_atomic_comp_swap,
1098 /*@}*/
1099
1100 ir_intrinsic_atomic_counter_read,
1101 ir_intrinsic_atomic_counter_increment,
1102 ir_intrinsic_atomic_counter_predecrement,
1103 ir_intrinsic_atomic_counter_add,
1104 ir_intrinsic_atomic_counter_and,
1105 ir_intrinsic_atomic_counter_or,
1106 ir_intrinsic_atomic_counter_xor,
1107 ir_intrinsic_atomic_counter_min,
1108 ir_intrinsic_atomic_counter_max,
1109 ir_intrinsic_atomic_counter_exchange,
1110 ir_intrinsic_atomic_counter_comp_swap,
1111
1112 ir_intrinsic_image_load,
1113 ir_intrinsic_image_store,
1114 ir_intrinsic_image_atomic_add,
1115 ir_intrinsic_image_atomic_and,
1116 ir_intrinsic_image_atomic_or,
1117 ir_intrinsic_image_atomic_xor,
1118 ir_intrinsic_image_atomic_min,
1119 ir_intrinsic_image_atomic_max,
1120 ir_intrinsic_image_atomic_exchange,
1121 ir_intrinsic_image_atomic_comp_swap,
1122 ir_intrinsic_image_size,
1123 ir_intrinsic_image_samples,
1124 ir_intrinsic_image_atomic_inc_wrap,
1125 ir_intrinsic_image_atomic_dec_wrap,
1126
1127 ir_intrinsic_ssbo_load,
1128 ir_intrinsic_ssbo_store = MAKE_INTRINSIC_FOR_TYPE(store, ssbo),
1129 ir_intrinsic_ssbo_atomic_add = MAKE_INTRINSIC_FOR_TYPE(atomic_add, ssbo),
1130 ir_intrinsic_ssbo_atomic_and = MAKE_INTRINSIC_FOR_TYPE(atomic_and, ssbo),
1131 ir_intrinsic_ssbo_atomic_or = MAKE_INTRINSIC_FOR_TYPE(atomic_or, ssbo),
1132 ir_intrinsic_ssbo_atomic_xor = MAKE_INTRINSIC_FOR_TYPE(atomic_xor, ssbo),
1133 ir_intrinsic_ssbo_atomic_min = MAKE_INTRINSIC_FOR_TYPE(atomic_min, ssbo),
1134 ir_intrinsic_ssbo_atomic_max = MAKE_INTRINSIC_FOR_TYPE(atomic_max, ssbo),
1135 ir_intrinsic_ssbo_atomic_exchange = MAKE_INTRINSIC_FOR_TYPE(atomic_exchange, ssbo),
1136 ir_intrinsic_ssbo_atomic_comp_swap = MAKE_INTRINSIC_FOR_TYPE(atomic_comp_swap, ssbo),
1137
1138 ir_intrinsic_memory_barrier,
1139 ir_intrinsic_shader_clock,
1140 ir_intrinsic_group_memory_barrier,
1141 ir_intrinsic_memory_barrier_atomic_counter,
1142 ir_intrinsic_memory_barrier_buffer,
1143 ir_intrinsic_memory_barrier_image,
1144 ir_intrinsic_memory_barrier_shared,
1145 ir_intrinsic_begin_invocation_interlock,
1146 ir_intrinsic_end_invocation_interlock,
1147
1148 ir_intrinsic_vote_all,
1149 ir_intrinsic_vote_any,
1150 ir_intrinsic_vote_eq,
1151 ir_intrinsic_ballot,
1152 ir_intrinsic_read_invocation,
1153 ir_intrinsic_read_first_invocation,
1154
1155 ir_intrinsic_helper_invocation,
1156
1157 ir_intrinsic_shared_load,
1158 ir_intrinsic_shared_store = MAKE_INTRINSIC_FOR_TYPE(store, shared),
1159 ir_intrinsic_shared_atomic_add = MAKE_INTRINSIC_FOR_TYPE(atomic_add, shared),
1160 ir_intrinsic_shared_atomic_and = MAKE_INTRINSIC_FOR_TYPE(atomic_and, shared),
1161 ir_intrinsic_shared_atomic_or = MAKE_INTRINSIC_FOR_TYPE(atomic_or, shared),
1162 ir_intrinsic_shared_atomic_xor = MAKE_INTRINSIC_FOR_TYPE(atomic_xor, shared),
1163 ir_intrinsic_shared_atomic_min = MAKE_INTRINSIC_FOR_TYPE(atomic_min, shared),
1164 ir_intrinsic_shared_atomic_max = MAKE_INTRINSIC_FOR_TYPE(atomic_max, shared),
1165 ir_intrinsic_shared_atomic_exchange = MAKE_INTRINSIC_FOR_TYPE(atomic_exchange, shared),
1166 ir_intrinsic_shared_atomic_comp_swap = MAKE_INTRINSIC_FOR_TYPE(atomic_comp_swap, shared),
1167};
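As a sketch of how the intrinsic macros above are meant to be used (relying only on the offset arithmetic shown in their definitions), a generic intrinsic id can be remapped onto its SSBO or shared-memory counterpart:

   // Maps ir_intrinsic_generic_atomic_add onto ir_intrinsic_ssbo_atomic_add;
   // passing "shared" instead would land on ir_intrinsic_shared_atomic_add.
   static inline ir_intrinsic_id ssbo_variant_of_generic_atomic_add()
   {
      return MAP_INTRINSIC_TO_TYPE(ir_intrinsic_generic_atomic_add, ssbo);
   }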
1168
1169/*@{*/
1170/**
1171 * The representation of a function instance; may be the full definition or
1172 * simply a prototype.
1173 */
1174class ir_function_signature : public ir_instruction {
1175 /* An ir_function_signature will be part of the list of signatures in
1176 * an ir_function.
1177 */
1178public:
1179 ir_function_signature(const glsl_type *return_type,
1180 builtin_available_predicate builtin_avail = NULL);
1181
1182 virtual ir_function_signature *clone(void *mem_ctx,
1183 struct hash_table *ht) const;
1184 ir_function_signature *clone_prototype(void *mem_ctx,
1185 struct hash_table *ht) const;
1186
1187 virtual void accept(ir_visitor *v)
1188 {
1189 v->visit(this);
1190 }
1191
1192 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
1193
1194 /**
1195 * Attempt to evaluate this function as a constant expression,
1196 * given a list of the actual parameters and the variable context.
1197 * Returns NULL for non-built-ins.
1198 */
1199 ir_constant *constant_expression_value(void *mem_ctx,
1200 exec_list *actual_parameters,
1201 struct hash_table *variable_context);
1202
1203 /**
1204 * Get the name of the function for which this is a signature
1205 */
1206 const char *function_name() const;
1207
1208 /**
1209 * Get a handle to the function for which this is a signature
1210 *
1211 * There is no setter function, this function returns a \c const pointer,
1212 * and \c ir_function_signature::_function is private for a reason. The
1213 * only way to make a connection between a function and function signature
1214 * is via \c ir_function::add_signature. This helps ensure that certain
1215 * invariants (i.e., a function signature is in the list of signatures for
1216 * its \c _function) are met.
1217 *
1218 * \sa ir_function::add_signature
1219 */
1220 inline const class ir_function *function() const
1221 {
1222 return this->_function;
1223 }
1224
1225 /**
1226 * Check whether the qualifiers match between this signature's parameters
1227 * and the supplied parameter list. If not, returns the name of the first
1228 * parameter with mismatched qualifiers (for use in error messages).
1229 */
1230 const char *qualifiers_match(exec_list *params);
1231
1232 /**
1233 * Replace the current parameter list with the given one. This is useful
1234 * if the current information came from a prototype, and either has invalid
1235 * or missing parameter names.
1236 */
1237 void replace_parameters(exec_list *new_params);
1238
1239 /**
1240 * Function return type.
1241 *
1242 * \note The precision qualifier is stored separately in return_precision.
1243 */
1244 const struct glsl_type *return_type;
1245
1246 /**
1247 * List of ir_variable of function parameters.
1248 *
1249 * This represents the storage. The parameters passed in a particular
1250 * call will be in ir_call::actual_parameters.
1251 */
1252 struct exec_list parameters;
1253
1254 /** Whether or not this function has a body (which may be empty). */
1255 unsigned is_defined:1;
1256
1257 /*
1258 * Precision qualifier for the return type.
1259 *
1260 * See the comment for ir_variable_data::precision for more details.
1261 */
1262 unsigned return_precision:2;
1263
1264 /** Whether or not this function signature is a built-in. */
1265 bool is_builtin() const;
1266
1267 /**
1268 * Whether or not this function is an intrinsic to be implemented
1269 * by the driver.
1270 */
1271 inline bool is_intrinsic() const
1272 {
1273 return intrinsic_id != ir_intrinsic_invalid;
1274 }
1275
1276 /** Identifier for this intrinsic. */
1277 enum ir_intrinsic_id intrinsic_id;
1278
1279 /** Whether or not a built-in is available for this shader. */
1280 bool is_builtin_available(const _mesa_glsl_parse_state *state) const;
1281
1282 /** Body of instructions in the function. */
1283 struct exec_list body;
1284
1285private:
1286 /**
1287 * A function pointer to a predicate that answers whether a built-in
1288 * function is available in the current shader. NULL if not a built-in.
1289 */
1290 builtin_available_predicate builtin_avail;
1291
1292 /** Function of which this signature is one overload. */
1293 class ir_function *_function;
1294
1295 /** Function signature of which this one is a prototype clone */
1296 const ir_function_signature *origin;
1297
1298 friend class ir_function;
1299
1300 /**
1301 * Helper function to run a list of instructions for constant
1302 * expression evaluation.
1303 *
1304 * The hash table represents the values of the visible variables.
1305 * There are no scoping issues because the table is indexed on
1306 * ir_variable pointers, not variable names.
1307 *
1308 * Returns false if the expression is not constant, true otherwise,
1309 * and the value in *result if result is non-NULL.
1310 */
1311 bool constant_expression_evaluate_expression_list(void *mem_ctx,
1312 const struct exec_list &body,
1313 struct hash_table *variable_context,
1314 ir_constant **result);
1315};
1316
1317
1318/**
1319 * Header for tracking multiple overloaded functions with the same name.
1320 * Contains a list of ir_function_signatures representing each of the
1321 * actual functions.
1322 */
1323class ir_function : public ir_instruction {
1324public:
1325 ir_function(const char *name);
1326
1327 virtual ir_function *clone(void *mem_ctx, struct hash_table *ht) const;
1328
1329 virtual void accept(ir_visitor *v)
1330 {
1331 v->visit(this);
1332 }
1333
1334 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
1335
1336 void add_signature(ir_function_signature *sig)
1337 {
1338 sig->_function = this;
1339 this->signatures.push_tail(sig);
1340 }
1341
1342 /**
1343 * Find a signature that matches a set of actual parameters, taking implicit
1344 * conversions into account. Also flags whether the match was exact.
1345 */
1346 ir_function_signature *matching_signature(_mesa_glsl_parse_state *state,
1347 const exec_list *actual_param,
1348 bool allow_builtins,
1349 bool *match_is_exact);
1350
1351 /**
1352 * Find a signature that matches a set of actual parameters, taking implicit
1353 * conversions into account.
1354 */
1355 ir_function_signature *matching_signature(_mesa_glsl_parse_state *state,
1356 const exec_list *actual_param,
1357 bool allow_builtins);
1358
1359 /**
1360 * Find a signature that exactly matches a set of actual parameters without
1361 * any implicit type conversions.
1362 */
1363 ir_function_signature *exact_matching_signature(_mesa_glsl_parse_state *state,
1364 const exec_list *actual_ps);
1365
1366 /**
1367 * Name of the function.
1368 */
1369 const char *name;
1370
1371 /** Whether or not this function has a signature that isn't a built-in. */
1372 bool has_user_signature();
1373
1374 /**
1375 * List of ir_function_signature for each overloaded function with this name.
1376 */
1377 struct exec_list signatures;
1378
1379 /**
1380 * Is this function a subroutine type declaration,
1381 * e.g. subroutine void type1(float arg1);
1382 */
1383 bool is_subroutine;
1384
1385 /**
1386 * Is this function associated with a subroutine type,
1387 * e.g. subroutine (type1, type2) function_name { function_body };
1388 * would have num_subroutine_types 2,
1389 * and pointers to the type1 and type2 types.
1390 */
1391 int num_subroutine_types;
1392 const struct glsl_type **subroutine_types;
1393
1394 int subroutine_index;
1395};
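A sketch of overload resolution through the interface above, assuming `state` and `actual_params` are supplied by the caller (the helper name is hypothetical):

   static ir_function_signature *
   resolve_overload(ir_function *func, _mesa_glsl_parse_state *state,
                    exec_list *actual_params)
   {
      // Allow built-in signatures; matching_signature() returns NULL when
      // no overload matches the actual parameter list.
      return func->matching_signature(state, actual_params, true);
   }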
1396
1397inline const char *ir_function_signature::function_name() const
1398{
1399 return this->_function->name;
1400}
1401/*@}*/
1402
1403
1404/**
1405 * IR instruction representing high-level if-statements
1406 */
1407class ir_if : public ir_instruction {
1408public:
1409 ir_if(ir_rvalue *condition)
1410 : ir_instruction(ir_type_if), condition(condition)
1411 {
1412 }
1413
1414 virtual ir_if *clone(void *mem_ctx, struct hash_table *ht) const;
1415
1416 virtual void accept(ir_visitor *v)
1417 {
1418 v->visit(this);
1419 }
1420
1421 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
1422
1423 ir_rvalue *condition;
1424 /** List of ir_instruction for the body of the then branch */
1425 exec_list then_instructions;
1426 /** List of ir_instruction for the body of the else branch */
1427 exec_list else_instructions;
1428};
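A sketch of building a conditional with the class above; `mem_ctx`, `cond`, and `then_stmt` are assumed to exist (a ralloc context, a boolean-valued ir_rvalue, and an instruction for the then-branch):

   static ir_if *make_if(void *mem_ctx, ir_rvalue *cond, ir_instruction *then_stmt)
   {
      ir_if *iff = new(mem_ctx) ir_if(cond);
      iff->then_instructions.push_tail(then_stmt);   // else branch left empty
      return iff;
   }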
1429
1430
1431/**
1432 * IR instruction representing a high-level loop structure.
1433 */
1434class ir_loop : public ir_instruction {
1435public:
1436 ir_loop();
1437
1438 virtual ir_loop *clone(void *mem_ctx, struct hash_table *ht) const;
1439
1440 virtual void accept(ir_visitor *v)
1441 {
1442 v->visit(this);
1443 }
1444
1445 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
1446
1447 /** List of ir_instruction that make up the body of the loop. */
1448 exec_list body_instructions;
1449};
1450
1451
1452class ir_assignment : public ir_instruction {
1453public:
1454 ir_assignment(ir_rvalue *lhs, ir_rvalue *rhs, ir_rvalue *condition = NULL);
1455
1456 /**
1457 * Construct an assignment with an explicit write mask
1458 *
1459 * \note
1460 * Since a write mask is supplied, the LHS must already be a bare
1461 * \c ir_dereference. There cannot be any swizzles in the LHS.
1462 */
1463 ir_assignment(ir_dereference *lhs, ir_rvalue *rhs, ir_rvalue *condition,
1464 unsigned write_mask);
1465
1466 virtual ir_assignment *clone(void *mem_ctx, struct hash_table *ht) const;
1467
1468 virtual ir_constant *constant_expression_value(void *mem_ctx,
1469 struct hash_table *variable_context = NULL);
1470
1471 virtual void accept(ir_visitor *v)
1472 {
1473 v->visit(this);
1474 }
1475
1476 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
1477
1478 /**
1479 * Get a whole variable written by an assignment
1480 *
1481 * If the LHS of the assignment writes a whole variable, the variable is
1482 * returned. Otherwise \c NULL is returned. Examples of whole-variable
1483 * assignment are:
1484 *
1485 * - Assigning to a scalar
1486 * - Assigning to all components of a vector
1487 * - Whole array (or matrix) assignment
1488 * - Whole structure assignment
1489 */
1490 ir_variable *whole_variable_written();
1491
1492 /**
1493 * Set the LHS of an assignment
1494 */
1495 void set_lhs(ir_rvalue *lhs);
1496
1497 /**
1498 * Left-hand side of the assignment.
1499 *
1500 * This should be treated as read only. If you need to set the LHS of an
1501 * assignment, use \c ir_assignment::set_lhs.
1502 */
1503 ir_dereference *lhs;
1504
1505 /**
1506 * Value being assigned
1507 */
1508 ir_rvalue *rhs;
1509
1510 /**
1511 * Optional condition for the assignment.
1512 */
1513 ir_rvalue *condition;
1514
1515
1516 /**
1517 * Component mask written
1518 *
1519 * For non-vector types in the LHS, this field will be zero. For vector
1520 * types, a bit will be set for each component that is written. Note that
1521 * for \c vec2 and \c vec3 types only the lower bits will ever be set.
1522 *
1523 * A partially-set write mask means that each enabled channel gets
1524 * the value from a consecutive channel of the rhs. For example,
1525 * to write just .xyw of gl_FrontColor with color:
1526 *
1527 * (assign (constant bool (1)) (xyw)
1528 * (var_ref gl_FrontColor)
1529 * (swiz xyw (var_ref color)))
1530 */
1531 unsigned write_mask:4;
1532};
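A sketch of the write-mask constructor documented above; `mem_ctx`, `lhs_deref`, and `rhs_val` are assumed to exist, and the bit layout (x in bit 0, y in bit 1) is assumed from the note that only the lower bits are used for narrow vectors:

   static ir_assignment *
   assign_xy(void *mem_ctx, ir_dereference *lhs_deref, ir_rvalue *rhs_val)
   {
      // Unconditional assignment (condition == NULL) writing only .x and .y.
      return new(mem_ctx) ir_assignment(lhs_deref, rhs_val, NULL, 0x3);
   }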
1533
1534#include "ir_expression_operation.h"
1535
1536extern const char *const ir_expression_operation_strings[ir_last_opcode + 1];
1537extern const char *const ir_expression_operation_enum_strings[ir_last_opcode + 1];
1538
1539class ir_expression : public ir_rvalue {
1540public:
1541 ir_expression(int op, const struct glsl_type *type,
1542 ir_rvalue *op0, ir_rvalue *op1 = NULL,
1543 ir_rvalue *op2 = NULL, ir_rvalue *op3 = NULL);
1544
1545 /**
1546 * Constructor for unary operation expressions
1547 */
1548 ir_expression(int op, ir_rvalue *);
1549
1550 /**
1551 * Constructor for binary operation expressions
1552 */
1553 ir_expression(int op, ir_rvalue *op0, ir_rvalue *op1);
1554
1555 /**
1556 * Constructor for ternary operation expressions
1557 */
1558 ir_expression(int op, ir_rvalue *op0, ir_rvalue *op1, ir_rvalue *op2);
1559
1560 virtual bool equals(const ir_instruction *ir,
1561 enum ir_node_type ignore = ir_type_unset) const;
1562
1563 virtual ir_expression *clone(void *mem_ctx, struct hash_table *ht) const;
1564
1565 /**
1566 * Attempt to constant-fold the expression
1567 *
1568 * The "variable_context" hash table links ir_variable * to ir_constant *
1569 * that represent the variables' values. \c NULL represents an empty
1570 * context.
1571 *
1572 * If the expression cannot be constant folded, this method will return
1573 * \c NULL.
1574 */
1575 virtual ir_constant *constant_expression_value(void *mem_ctx,
1576 struct hash_table *variable_context = NULL);
1577
1578 /**
1579 * This is only here for ir_reader to use for testing purposes; please use
1580 * the precomputed num_operands field if you need the number of operands.
1581 */
1582 static unsigned get_num_operands(ir_expression_operation);
1583
1584 /**
1585 * Return whether the expression operates on vectors horizontally.
1586 */
1587 bool is_horizontal() const
1588 {
1589 return operation == ir_binop_all_equal ||
1590 operation == ir_binop_any_nequal ||
1591 operation == ir_binop_dot ||
1592 operation == ir_binop_vector_extract ||
1593 operation == ir_triop_vector_insert ||
1594 operation == ir_binop_ubo_load ||
1595 operation == ir_quadop_vector;
1596 }
1597
1598 /**
1599 * Do a reverse-lookup to translate the given string into an operator.
1600 */
1601 static ir_expression_operation get_operator(const char *);
1602
1603 virtual void accept(ir_visitor *v)
1604 {
1605 v->visit(this);
1606 }
1607
1608 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
1609
1610 virtual ir_variable *variable_referenced() const;
1611
1612 /**
1613 * Determine the number of operands used by an expression
1614 */
1615 void init_num_operands()
1616 {
1617 if (operation == ir_quadop_vector) {
1618 num_operands = this->type->vector_elements;
1619 } else {
1620 num_operands = get_num_operands(operation);
1621 }
1622 }
1623
1624 ir_expression_operation operation;
1625 ir_rvalue *operands[4];
1626 uint8_t num_operands;
1627};
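A sketch of the two-operand constructor above, using an operator that also appears in is_horizontal(); `mem_ctx`, `a`, and `b` are assumed to be vector-typed ir_rvalues:

   static ir_expression *make_dot(void *mem_ctx, ir_rvalue *a, ir_rvalue *b)
   {
      // ir_binop_dot is one of the "horizontal" operations listed above, so
      // the resulting expression reports is_horizontal() == true.
      return new(mem_ctx) ir_expression(ir_binop_dot, a, b);
   }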
1628
1629
1630/**
1631 * HIR instruction representing a high-level function call, containing a list
1632 * of parameters and returning a value in the supplied temporary.
1633 */
1634class ir_call : public ir_instruction {
1635public:
1636 ir_call(ir_function_signature *callee,
1637 ir_dereference_variable *return_deref,
1638 exec_list *actual_parameters)
1639 : ir_instruction(ir_type_call), return_deref(return_deref), callee(callee), sub_var(NULL), array_idx(NULL)
1640 {
1641 assert(callee->return_type != NULL);
1642 actual_parameters->move_nodes_to(& this->actual_parameters);
1643 }
1644
1645 ir_call(ir_function_signature *callee,
1646 ir_dereference_variable *return_deref,
1647 exec_list *actual_parameters,
1648 ir_variable *var, ir_rvalue *array_idx)
1649 : ir_instruction(ir_type_call), return_deref(return_deref), callee(callee), sub_var(var), array_idx(array_idx)
1650 {
1651 assert(callee->return_type != NULL);
1652 actual_parameters->move_nodes_to(& this->actual_parameters);
1653 }
1654
1655 virtual ir_call *clone(void *mem_ctx, struct hash_table *ht) const;
1656
1657 virtual ir_constant *constant_expression_value(void *mem_ctx,
1658 struct hash_table *variable_context = NULL);
1659
1660 virtual void accept(ir_visitor *v)
1661 {
1662 v->visit(this);
1663 }
1664
1665 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
1666
1667 /**
1668 * Get the name of the function being called.
1669 */
1670 const char *callee_name() const
1671 {
1672 return callee->function_name();
1673 }
1674
1675 /**
1676 * Generates an inline version of the function before @ir,
1677 * storing the return value in return_deref.
1678 */
1679 void generate_inline(ir_instruction *ir);
1680
1681 /**
1682 * Storage for the function's return value.
1683 * This must be NULL if the return type is void.
1684 */
1685 ir_dereference_variable *return_deref;
1686
1687 /**
1688 * The specific function signature being called.
1689 */
1690 ir_function_signature *callee;
1691
1692 /* List of ir_rvalue of parameters passed in this call. */
1693 exec_list actual_parameters;
1694
1695 /*
1696 * ARB_shader_subroutine support -
1697 * the subroutine uniform variable and array index
1698 * rvalue to be used in the lowering pass later.
1699 */
1700 ir_variable *sub_var;
1701 ir_rvalue *array_idx;
1702};
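A sketch of issuing a call with the first constructor above; `mem_ctx`, `sig`, `ret_deref`, and `params` are assumed to exist, and `sig` must have a non-NULL return type per the assert in the constructor:

   static ir_call *
   emit_call(void *mem_ctx, ir_function_signature *sig,
             ir_dereference_variable *ret_deref, exec_list *params)
   {
      // The constructor moves the nodes of *params into call->actual_parameters.
      return new(mem_ctx) ir_call(sig, ret_deref, params);
   }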
1703
1704
1705/**
1706 * \name Jump-like IR instructions.
1707 *
1708 * These include \c break, \c continue, \c return, and \c discard.
1709 */
1710/*@{*/
1711class ir_jump : public ir_instruction {
1712protected:
1713 ir_jump(enum ir_node_type t)
1714 : ir_instruction(t)
1715 {
1716 }
1717};
1718
1719class ir_return : public ir_jump {
1720public:
1721 ir_return()
1722 : ir_jump(ir_type_return), value(NULL)
1723 {
1724 }
1725
1726 ir_return(ir_rvalue *value)
1727 : ir_jump(ir_type_return), value(value)
1728 {
1729 }
1730
1731 virtual ir_return *clone(void *mem_ctx, struct hash_table *) const;
1732
1733 ir_rvalue *get_value() const
1734 {
1735 return value;
1736 }
1737
1738 virtual void accept(ir_visitor *v)
1739 {
1740 v->visit(this);
1741 }
1742
1743 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
1744
1745 ir_rvalue *value;
1746};
1747
1748
1749/**
1750 * Jump instructions used inside loops
1751 *
1752 * These include \c break and \c continue. The \c break within a loop is
1753 * different from the \c break within a switch-statement.
1754 *
1755 * \sa ir_switch_jump
1756 */
1757class ir_loop_jump : public ir_jump {
1758public:
1759 enum jump_mode {
1760 jump_break,
1761 jump_continue
1762 };
1763
1764 ir_loop_jump(jump_mode mode)
1765 : ir_jump(ir_type_loop_jump)
1766 {
1767 this->mode = mode;
1768 }
1769
1770 virtual ir_loop_jump *clone(void *mem_ctx, struct hash_table *) const;
1771
1772 virtual void accept(ir_visitor *v)
1773 {
1774 v->visit(this);
1775 }
1776
1777 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
1778
1779 bool is_break() const
1780 {
1781 return mode == jump_break;
1782 }
1783
1784 bool is_continue() const
1785 {
1786 return mode == jump_continue;
1787 }
1788
1789 /** Mode selector for the jump instruction. */
1790 enum jump_mode mode;
1791};
1792
1793/**
1794 * IR instruction representing discard statements.
1795 */
1796class ir_discard : public ir_jump {
1797public:
1798 ir_discard()
1799 : ir_jump(ir_type_discard)
1800 {
1801 this->condition = NULL;
1802 }
1803
1804 ir_discard(ir_rvalue *cond)
1805 : ir_jump(ir_type_discard)
1806 {
1807 this->condition = cond;
1808 }
1809
1810 virtual ir_discard *clone(void *mem_ctx, struct hash_table *ht) const;
1811
1812 virtual void accept(ir_visitor *v)
1813 {
1814 v->visit(this);
1815 }
1816
1817 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
1818
1819 ir_rvalue *condition;
1820};
1821/*@}*/
1822
1823
1824/**
1825 * IR instruction representing demote statements from
1826 * GL_EXT_demote_to_helper_invocation.
1827 */
1828class ir_demote : public ir_instruction {
1829public:
1830 ir_demote()
1831 : ir_instruction(ir_type_demote)
1832 {
1833 }
1834
1835 virtual ir_demote *clone(void *mem_ctx, struct hash_table *ht) const;
1836
1837 virtual void accept(ir_visitor *v)
1838 {
1839 v->visit(this);
1840 }
1841
1842 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
1843};
1844
1845
1846/**
1847 * Texture sampling opcodes used in ir_texture
1848 */
1849enum ir_texture_opcode {
1850 ir_tex, /**< Regular texture look-up */
1851 ir_txb, /**< Texture look-up with LOD bias */
1852 ir_txl, /**< Texture look-up with explicit LOD */
1853 ir_txd, /**< Texture look-up with partial derivatives */
1854 ir_txf, /**< Texel fetch with explicit LOD */
1855 ir_txf_ms, /**< Multisample texture fetch */
1856 ir_txs, /**< Texture size */
1857 ir_lod, /**< Texture lod query */
1858 ir_tg4, /**< Texture gather */
1859 ir_query_levels, /**< Texture levels query */
1860 ir_texture_samples, /**< Texture samples query */
1861 ir_samples_identical, /**< Query whether all samples are definitely identical. */
1862};
1863
1864
1865/**
1866 * IR instruction to sample a texture
1867 *
1868 * The specific form of the IR instruction depends on the \c op value
1869 * selected from \c ir_texture_opcode. In the printed IR, these will
1870 * appear as:
1871 *
1872 * Texel offset (0 or an expression)
1873 * | Projection divisor
1874 * | | Shadow comparator
1875 * | | |
1876 * v v v
1877 * (tex <type> <sampler> <coordinate> 0 1 ( ))
1878 * (txb <type> <sampler> <coordinate> 0 1 ( ) <bias>)
1879 * (txl <type> <sampler> <coordinate> 0 1 ( ) <lod>)
1880 * (txd <type> <sampler> <coordinate> 0 1 ( ) (dPdx dPdy))
1881 * (txf <type> <sampler> <coordinate> 0 <lod>)
1882 * (txf_ms
1883 * <type> <sampler> <coordinate> <sample_index>)
1884 * (txs <type> <sampler> <lod>)
1885 * (lod <type> <sampler> <coordinate>)
1886 * (tg4 <type> <sampler> <coordinate> <offset> <component>)
1887 * (query_levels <type> <sampler>)
1888 * (samples_identical <sampler> <coordinate>)
1889 */
1890class ir_texture : public ir_rvalue {
1891public:
1892 ir_texture(enum ir_texture_opcode op)
1893 : ir_rvalue(ir_type_texture),
1894 op(op), sampler(NULL), coordinate(NULL), projector(NULL),
1895 shadow_comparator(NULL), offset(NULL)
1896 {
1897 memset(&lod_info, 0, sizeof(lod_info));
1898 }
1899
1900 virtual ir_texture *clone(void *mem_ctx, struct hash_table *) const;
1901
1902 virtual ir_constant *constant_expression_value(void *mem_ctx,
1903 struct hash_table *variable_context = NULL);
1904
1905 virtual void accept(ir_visitor *v)
1906 {
1907 v->visit(this);
1908 }
1909
1910 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
1911
1912 virtual bool equals(const ir_instruction *ir,
1913 enum ir_node_type ignore = ir_type_unset) const;
1914
1915 /**
1916 * Return a string representing the ir_texture_opcode.
1917 */
1918 const char *opcode_string();
1919
1920 /** Set the sampler and type. */
1921 void set_sampler(ir_dereference *sampler, const glsl_type *type);
1922
1923 static bool has_lod(const glsl_type *sampler_type);
1924
1925 /**
1926 * Do a reverse-lookup to translate a string into an ir_texture_opcode.
1927 */
1928 static ir_texture_opcode get_opcode(const char *);
1929
1930 enum ir_texture_opcode op;
1931
1932 /** Sampler to use for the texture access. */
1933 ir_dereference *sampler;
1934
1935 /** Texture coordinate to sample */
1936 ir_rvalue *coordinate;
1937
1938 /**
1939 * Value used for projective divide.
1940 *
1941 * If there is no projective divide (the common case), this will be
1942 * \c NULL. Optimization passes should check for this to point to a constant
1943 * of 1.0 and replace that with \c NULL.
1944 */
1945 ir_rvalue *projector;
1946
1947 /**
1948 * Coordinate used for comparison on shadow look-ups.
1949 *
1950 * If there is no shadow comparison, this will be \c NULL. For the
1951 * \c ir_txf opcode, this *must* be \c NULL.
1952 */
1953 ir_rvalue *shadow_comparator;
1954
1955 /** Texel offset. */
1956 ir_rvalue *offset;
1957
1958 union {
1959 ir_rvalue *lod; /**< Floating point LOD */
1960 ir_rvalue *bias; /**< Floating point LOD bias */
1961 ir_rvalue *sample_index; /**< MSAA sample index */
1962 ir_rvalue *component; /**< Gather component selector */
1963 struct {
1964 ir_rvalue *dPdx; /**< Partial derivative of coordinate wrt X */
1965 ir_rvalue *dPdy; /**< Partial derivative of coordinate wrt Y */
1966 } grad;
1967 } lod_info;
1968};
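A sketch of assembling an explicit-LOD lookup with the class above; `mem_ctx`, `sampler_deref`, `result_type`, `coord`, and `lod` are assumed to be created elsewhere:

   static ir_texture *
   make_txl(void *mem_ctx, ir_dereference *sampler_deref,
            const glsl_type *result_type, ir_rvalue *coord, ir_rvalue *lod)
   {
      ir_texture *tex = new(mem_ctx) ir_texture(ir_txl);
      tex->set_sampler(sampler_deref, result_type);
      tex->coordinate = coord;
      tex->lod_info.lod = lod;   // the union member consumed by ir_txl
      return tex;
   }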
1969
1970
1971struct ir_swizzle_mask {
1972 unsigned x:2;
1973 unsigned y:2;
1974 unsigned z:2;
1975 unsigned w:2;
1976
1977 /**
1978 * Number of components in the swizzle.
1979 */
1980 unsigned num_components:3;
1981
1982 /**
1983 * Does the swizzle contain duplicate components?
1984 *
1985 * L-value swizzles cannot contain duplicate components.
1986 */
1987 unsigned has_duplicates:1;
1988};
1989
1990
1991class ir_swizzle : public ir_rvalue {
1992public:
1993 ir_swizzle(ir_rvalue *, unsigned x, unsigned y, unsigned z, unsigned w,
1994 unsigned count);
1995
1996 ir_swizzle(ir_rvalue *val, const unsigned *components, unsigned count);
1997
1998 ir_swizzle(ir_rvalue *val, ir_swizzle_mask mask);
1999
2000 virtual ir_swizzle *clone(void *mem_ctx, struct hash_table *) const;
2001
2002 virtual ir_constant *constant_expression_value(void *mem_ctx,
2003 struct hash_table *variable_context = NULL);
2004
2005 /**
2006 * Construct an ir_swizzle from the textual representation. Can fail.
2007 */
2008 static ir_swizzle *create(ir_rvalue *, const char *, unsigned vector_length);
2009
2010 virtual void accept(ir_visitor *v)
2011 {
2012 v->visit(this);
2013 }
2014
2015 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
2016
2017 virtual bool equals(const ir_instruction *ir,
2018 enum ir_node_type ignore = ir_type_unset) const;
2019
2020 bool is_lvalue(const struct _mesa_glsl_parse_state *state) const
2021 {
2022 return val->is_lvalue(state) && !mask.has_duplicates;
2023 }
2024
2025 /**
2026 * Get the variable that is ultimately referenced by an r-value
2027 */
2028 virtual ir_variable *variable_referenced() const;
2029
2030 ir_rvalue *val;
2031 ir_swizzle_mask mask;
2032
2033private:
2034 /**
2035 * Initialize the mask component of a swizzle
2036 *
2037 * This is used by the \c ir_swizzle constructors.
2038 */
2039 void init_mask(const unsigned *components, unsigned count);
2040};
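A sketch of the textual create() helper above; `vec` is assumed to be a 4-component ir_rvalue, and create() may return NULL for an invalid swizzle string:

   static ir_swizzle *swizzle_xyz(ir_rvalue *vec)
   {
      // ".xyz" has no duplicate components, so mask.has_duplicates stays 0.
      return ir_swizzle::create(vec, "xyz", 4);
   }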
2041
2042
2043class ir_dereference : public ir_rvalue {
2044public:
2045 virtual ir_dereference *clone(void *mem_ctx, struct hash_table *) const = 0;
2046
2047 bool is_lvalue(const struct _mesa_glsl_parse_state *state) const;
2048
2049 /**
2050 * Get the variable that is ultimately referenced by an r-value
2051 */
2052 virtual ir_variable *variable_referenced() const = 0;
2053
2054 /**
2055 * Get the precision. This can either come from the eventual variable that
2056 * is dereferenced, or from a record member.
2057 */
2058 virtual int precision() const = 0;
2059
2060protected:
2061 ir_dereference(enum ir_node_type t)
2062 : ir_rvalue(t)
2063 {
2064 }
2065};
2066
2067
2068class ir_dereference_variable : public ir_dereference {
2069public:
2070 ir_dereference_variable(ir_variable *var);
2071
2072 virtual ir_dereference_variable *clone(void *mem_ctx,
2073 struct hash_table *) const;
2074
2075 virtual ir_constant *constant_expression_value(void *mem_ctx,
2076 struct hash_table *variable_context = NULL);
2077
2078 virtual bool equals(const ir_instruction *ir,
2079 enum ir_node_type ignore = ir_type_unset) const;
2080
2081 /**
2082 * Get the variable that is ultimately referenced by an r-value
2083 */
2084 virtual ir_variable *variable_referenced() const
2085 {
2086 return this->var;
2087 }
2088
2089 virtual int precision() const
2090 {
2091 return this->var->data.precision;
2092 }
2093
2094 virtual ir_variable *whole_variable_referenced()
2095 {
2096 /* ir_dereference_variable objects always dereference the entire
2097 * variable. However, if this dereference is dereferenced by anything
2098 * else, the complete dereference chain is not a whole-variable
2099 * dereference. This method should only be called on the topmost
2100 * ir_rvalue in a dereference chain.
2101 */
2102 return this->var;
2103 }
2104
2105 virtual void accept(ir_visitor *v)
2106 {
2107 v->visit(this);
2108 }
2109
2110 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
2111
2112 /**
2113 * Object being dereferenced.
2114 */
2115 ir_variable *var;
2116};
2117
2118
2119class ir_dereference_array : public ir_dereference {
2120public:
2121 ir_dereference_array(ir_rvalue *value, ir_rvalue *array_index);
2122
2123 ir_dereference_array(ir_variable *var, ir_rvalue *array_index);
2124
2125 virtual ir_dereference_array *clone(void *mem_ctx,
2126 struct hash_table *) const;
2127
2128 virtual ir_constant *constant_expression_value(void *mem_ctx,
2129 struct hash_table *variable_context = NULL);
2130
2131 virtual bool equals(const ir_instruction *ir,
2132 enum ir_node_type ignore = ir_type_unset) const;
2133
2134 /**
2135 * Get the variable that is ultimately referenced by an r-value
2136 */
2137 virtual ir_variable *variable_referenced() const
2138 {
2139 return this->array->variable_referenced();
2140 }
2141
2142 virtual int precision() const
2143 {
2144 ir_dereference *deref = this->array->as_dereference();
2145
2146 if (deref == NULL)
2147 return GLSL_PRECISION_NONE;
2148 else
2149 return deref->precision();
2150 }
2151
2152 virtual void accept(ir_visitor *v)
2153 {
2154 v->visit(this);
2155 }
2156
2157 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
2158
2159 ir_rvalue *array;
2160 ir_rvalue *array_index;
2161
2162private:
2163 void set_array(ir_rvalue *value);
2164};
2165
2166
2167class ir_dereference_record : public ir_dereference {
2168public:
2169 ir_dereference_record(ir_rvalue *value, const char *field);
2170
2171 ir_dereference_record(ir_variable *var, const char *field);
2172
2173 virtual ir_dereference_record *clone(void *mem_ctx,
2174 struct hash_table *) const;
2175
2176 virtual ir_constant *constant_expression_value(void *mem_ctx,
2177 struct hash_table *variable_context = NULL);
2178
2179 /**
2180 * Get the variable that is ultimately referenced by an r-value
2181 */
2182 virtual ir_variable *variable_referenced() const
2183 {
2184 return this->record->variable_referenced();
2185 }
2186
2187 virtual int precision() const
2188 {
2189 glsl_struct_field *field = record->type->fields.structure + field_idx;
2190
2191 return field->precision;
2192 }
2193
2194 virtual void accept(ir_visitor *v)
2195 {
2196 v->visit(this);
2197 }
2198
2199 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
2200
2201 ir_rvalue *record;
2202 int field_idx;
2203};
2204
2205
2206/**
2207 * Data stored in an ir_constant
2208 */
2209union ir_constant_data {
2210 unsigned u[16];
2211 int i[16];
2212 float f[16];
2213 bool b[16];
2214 double d[16];
2215 uint16_t f16[16];
2216 uint64_t u64[16];
2217 int64_t i64[16];
2218};
2219
2220
2221class ir_constant : public ir_rvalue {
2222public:
2223 ir_constant(const struct glsl_type *type, const ir_constant_data *data);
2224 ir_constant(bool b, unsigned vector_elements=1);
2225 ir_constant(unsigned int u, unsigned vector_elements=1);
2226 ir_constant(int i, unsigned vector_elements=1);
2227 ir_constant(float16_t f16, unsigned vector_elements=1);
2228 ir_constant(float f, unsigned vector_elements=1);
2229 ir_constant(double d, unsigned vector_elements=1);
2230 ir_constant(uint64_t u64, unsigned vector_elements=1);
2231 ir_constant(int64_t i64, unsigned vector_elements=1);
2232
2233 /**
2234 * Construct an ir_constant from a list of ir_constant values
2235 */
2236 ir_constant(const struct glsl_type *type, exec_list *values);
2237
2238 /**
2239 * Construct an ir_constant from a scalar component of another ir_constant
2240 *
2241 * The new \c ir_constant inherits the type of the component from the
2242 * source constant.
2243 *
2244 * \note
2245 * In the case of a matrix constant, the new constant is a scalar, \b not
2246 * a vector.
2247 */
2248 ir_constant(const ir_constant *c, unsigned i);
2249
2250 /**
2251 * Return a new ir_constant of the specified type containing all zeros.
2252 */
2253 static ir_constant *zero(void *mem_ctx, const glsl_type *type);
2254
2255 virtual ir_constant *clone(void *mem_ctx, struct hash_table *) const;
2256
2257 virtual ir_constant *constant_expression_value(void *mem_ctx,
2258 struct hash_table *variable_context = NULL);
2259
2260 virtual void accept(ir_visitor *v)
2261 {
2262 v->visit(this);
2263 }
2264
2265 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
2266
2267 virtual bool equals(const ir_instruction *ir,
2268 enum ir_node_type ignore = ir_type_unset) const;
2269
2270 /**
2271 * Get a particular component of a constant as a specific type
2272 *
2273 * This is useful, for example, to get a value from an integer constant
2274 * as a float or bool. This appears frequently when constructors are
2275 * called with all constant parameters.
2276 */
2277 /*@{*/
2278 bool get_bool_component(unsigned i) const;
2279 float get_float_component(unsigned i) const;
2280 uint16_t get_float16_component(unsigned i) const;
2281 double get_double_component(unsigned i) const;
2282 int get_int_component(unsigned i) const;
2283 unsigned get_uint_component(unsigned i) const;
2284 int64_t get_int64_component(unsigned i) const;
2285 uint64_t get_uint64_component(unsigned i) const;
2286 /*@}*/
2287
2288 ir_constant *get_array_element(unsigned i) const;
2289
2290 ir_constant *get_record_field(int idx);
2291
2292 /**
2293 * Copy the values from another constant at a given offset.
2294 *
2295 * The offset is ignored for array or struct copies; it's only used when
2296 * copying scalars or vectors into vectors or matrices.
2297 *
2298 * With identical types on both sides and zero offset it's clone()
2299 * without creating a new object.
2300 */
2301
2302 void copy_offset(ir_constant *src, int offset);
2303
2304 /**
2305 * Copy the values from another constant at a given offset and
2306 * following an assign-like mask.
2307 *
2308 * The mask is ignored for scalars.
2309 *
2310 * Note that this function only handles what assign can handle,
2311 * i.e. at most a vector as source and a column of a matrix as
2312 * destination.
2313 */
2314
2315 void copy_masked_offset(ir_constant *src, int offset, unsigned int mask);
2316
2317 /**
2318 * Determine whether a constant has the same value as another constant
2319 *
2320 * \sa ir_constant::is_zero, ir_constant::is_one,
2321 * ir_constant::is_negative_one
2322 */
2323 bool has_value(const ir_constant *) const;
2324
2325 /**
2326 * Return true if this ir_constant represents the given value.
2327 *
2328 * For vectors, this checks that each component is the given value.
2329 */
2330 virtual bool is_value(float f, int i) const;
2331 virtual bool is_zero() const;
2332 virtual bool is_one() const;
2333 virtual bool is_negative_one() const;
2334
2335 /**
2336 * Return true for constants that could be stored as 16-bit unsigned values.
2337 *
2338 * Note that this will return true even for signed integer ir_constants, as
2339 * long as the value is non-negative and fits in 16-bits.
2340 */
2341 virtual bool is_uint16_constant() const;
2342
2343 /**
2344 * Value of the constant.
2345 *
2346 * The field used to back the values supplied by the constant is determined
2347 * by the type associated with the \c ir_instruction. Constants may be
2348 * scalars, vectors, or matrices.
2349 */
2350 union ir_constant_data value;
2351
2352 /* Array elements and structure fields */
2353 ir_constant **const_elements;
2354
2355private:
2356 /**
2357 * Parameterless constructor only used by the clone method
2358 */
2359 ir_constant(void);
2360};
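A sketch of the constant constructors above; `mem_ctx` and `vec4_type` are assumed to be a ralloc context and the glsl_type of a vec4:

   static void constant_examples(void *mem_ctx, const glsl_type *vec4_type)
   {
      ir_constant *one   = new(mem_ctx) ir_constant(1.0f);        // scalar float 1.0
      ir_constant *zeros = ir_constant::zero(mem_ctx, vec4_type); // all-zero vec4
      assert(one->is_one() && zeros->is_zero());
   }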
2361
2362class ir_precision_statement : public ir_instruction {
2363public:
2364 ir_precision_statement(const char *statement_to_store)
2365 : ir_instruction(ir_type_precision)
2366 {
2367 ir_type = ir_type_precision;
2368 precision_statement = statement_to_store;
2369 }
2370
2371 virtual ir_precision_statement *clone(void *mem_ctx, struct hash_table *) const;
2372
2373 virtual void accept(ir_visitor *v)
2374 {
2375 v->visit(this);
2376 }
2377
2378 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
2379
2380 /**
2381 * Precision statement
2382 */
2383 const char *precision_statement;
2384};
2385
2386class ir_typedecl_statement : public ir_instruction {
2387public:
2388 ir_typedecl_statement(const glsl_type* type_decl)
2389 : ir_instruction(ir_type_typedecl)
2390 {
2391 this->ir_type = ir_type_typedecl;
2392 this->type_decl = type_decl;
2393 }
2394
2395 virtual ir_typedecl_statement *clone(void *mem_ctx, struct hash_table *) const;
2396
2397 virtual void accept(ir_visitor *v)
2398 {
2399 v->visit(this);
2400 }
2401
2402 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
2403
2404 const glsl_type* type_decl;
2405};
2406
2407/**
2408 * IR instruction to emit a vertex in a geometry shader.
2409 */
2410class ir_emit_vertex : public ir_instruction {
2411public:
2412 ir_emit_vertex(ir_rvalue *stream)
2413 : ir_instruction(ir_type_emit_vertex),
2414 stream(stream)
2415 {
2416 assert(stream);
2417 }
2418
2419 virtual void accept(ir_visitor *v)
2420 {
2421 v->visit(this);
2422 }
2423
2424 virtual ir_emit_vertex *clone(void *mem_ctx, struct hash_table *ht) const
2425 {
2426 return new(mem_ctx) ir_emit_vertex(this->stream->clone(mem_ctx, ht));
2427 }
2428
2429 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
2430
2431 int stream_id() const
2432 {
2433 return stream->as_constant()->value.i[0];
2434 }
2435
2436 ir_rvalue *stream;
2437};
2438
2439/**
2440 * IR instruction to complete the current primitive and start a new one in a
2441 * geometry shader.
2442 */
2443class ir_end_primitive : public ir_instruction {
2444public:
2445 ir_end_primitive(ir_rvalue *stream)
2446 : ir_instruction(ir_type_end_primitive),
2447 stream(stream)
2448 {
2449 assert(stream);
2450 }
2451
2452 virtual void accept(ir_visitor *v)
2453 {
2454 v->visit(this);
2455 }
2456
2457 virtual ir_end_primitive *clone(void *mem_ctx, struct hash_table *ht) const
2458 {
2459 return new(mem_ctx) ir_end_primitive(this->stream->clone(mem_ctx, ht));
2460 }
2461
2462 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
2463
2464 int stream_id() const
2465 {
2466 return stream->as_constant()->value.i[0];
2467 }
2468
2469 ir_rvalue *stream;
2470};
2471
2472/**
2473 * IR instruction for tessellation control and compute shader barrier.
2474 */
2475class ir_barrier : public ir_instruction {
2476public:
2477 ir_barrier()
2478 : ir_instruction(ir_type_barrier)
2479 {
2480 }
2481
2482 virtual void accept(ir_visitor *v)
2483 {
2484 v->visit(this);
2485 }
2486
2487 virtual ir_barrier *clone(void *mem_ctx, struct hash_table *) const
2488 {
2489 return new(mem_ctx) ir_barrier();
2490 }
2491
2492 virtual ir_visitor_status accept(ir_hierarchical_visitor *);
2493};
2494
2495/*@}*/
2496
2497/**
2498 * Apply a visitor to each IR node in a list
2499 */
2500void
2501visit_exec_list(exec_list *list, ir_visitor *visitor);
2502
2503/**
2504 * Validate invariants on each IR node in a list
2505 */
2506void validate_ir_tree(exec_list *instructions);
2507
2508struct _mesa_glsl_parse_state;
2509struct gl_shader_program;
2510
2511/**
2512 * Detect whether an unlinked shader contains static recursion
2513 *
2514 * If the list of instructions is determined to contain static recursion,
2515 * \c _mesa_glsl_error will be called to emit error messages for each function
2516 * that is in the recursion cycle.
2517 */
2518void
2519detect_recursion_unlinked(struct _mesa_glsl_parse_state *state,
2520 exec_list *instructions);
2521
2522/**
2523 * Detect whether a linked shader contains static recursion
2524 *
2525 * If the list of instructions is determined to contain static recursion,
2526 * \c link_error_printf will be called to emit error messages for each function
2527 * that is in the recursion cycle. In addition,
2528 * \c gl_shader_program::LinkStatus will be set to false.
2529 */
2530void
2531detect_recursion_linked(struct gl_shader_program *prog,
2532 exec_list *instructions);
2533
2534/**
2535 * Make a clone of each IR instruction in a list
2536 *
2537 * \param in List of IR instructions that are to be cloned
2538 * \param out List to hold the cloned instructions
2539 */
2540void
2541clone_ir_list(void *mem_ctx, exec_list *out, const exec_list *in);
2542
2543extern void
2544_mesa_glsl_initialize_variables(exec_list *instructions,
2545 struct _mesa_glsl_parse_state *state);
2546
2547extern void
2548reparent_ir(exec_list *list, void *mem_ctx);
2549
2550extern void
2551do_set_program_inouts(exec_list *instructions, struct gl_program *prog,
2552 gl_shader_stage shader_stage);
2553
2554extern char *
2555prototype_string(const glsl_type *return_type, const char *name,
2556 exec_list *parameters);
2557
2558const char *
2559mode_string(const ir_variable *var);
2560
2561/**
2562 * Built-in / reserved GL variable names start with "gl_"
2563 */
2564static inline bool
2565is_gl_identifier(const char *s)
2566{
2567 return s && s[0] == 'g' && s[1] == 'l' && s[2] == '_';
2568}
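The behaviour follows directly from the definition above; for example:

   // is_gl_identifier("gl_Position") -> true   (reserved "gl_" prefix)
   // is_gl_identifier("my_varying")  -> false
   // is_gl_identifier(NULL)          -> false  (the leading "s &&" check handles NULL)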
2569
2570extern "C" {
2571#endif /* __cplusplus */
2572
2573extern void _mesa_print_ir(FILE *f, struct exec_list *instructions,
2574 struct _mesa_glsl_parse_state *state);
2575
2576extern void
2577fprint_ir(FILE *f, const void *instruction);
2578
2579extern const struct gl_builtin_uniform_desc *
2580_mesa_glsl_get_builtin_uniform_desc(const char *name);
2581
2582#ifdef __cplusplus
2583} /* extern "C" */
2584#endif
2585
2586unsigned
2587vertices_per_prim(GLenum prim);
2588
2589#endif /* IR_H */