File: var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/x86_64-unknown-linux-gnu/debug/build/swgl-6f6517a85091acb7/out/brush_image_TEXTURE_2D.h
Warning: line 527, column 14: Value stored to 'normalized_offset' during its initialization is never read
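The diagnostic above comes from the Clang Static Analyzer's dead-store check: in brush_vs below, normalized_offset is initialized at line 527 of the generated header but never read on any later path, so the store is wasted work. The following stand-alone C++ snippet illustrates the flagged pattern and the usual remedy; it is a sketch for orientation only (scale_uv and its arguments are hypothetical), not the actual WebRender fix, which would be made in the GLSL source that SWGL translates into this header.

    // Illustration of the dead-store pattern; scale_uv is a hypothetical stand-in.
    #include <cstdio>

    static float scale_uv(float uv, float texture_size) {
        float normalized_offset = 0.f;  // value stored here is never read afterwards
        uv /= texture_size;             // normalized_offset plays no part in the result
        return uv;
        // Remedy: delete the unused variable, or declare it only at the point
        // where its value is actually consumed.
    }

    int main() { std::printf("%f\n", scale_uv(256.f, 1024.f)); return 0; }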
1 | struct brush_image_TEXTURE_2D_common { |
2 | struct Samplers { |
3 | sampler2D_impl sClipMask_impl; |
4 | int sClipMask_slot; |
5 | sampler2D_impl sColor0_impl; |
6 | int sColor0_slot; |
7 | sampler2D_impl sGpuCache_impl; |
8 | int sGpuCache_slot; |
9 | sampler2D_impl sPrimitiveHeadersF_impl; |
10 | int sPrimitiveHeadersF_slot; |
11 | isampler2D_impl sPrimitiveHeadersI_impl; |
12 | int sPrimitiveHeadersI_slot; |
13 | sampler2D_impl sRenderTasks_impl; |
14 | int sRenderTasks_slot; |
15 | sampler2D_impl sTransformPalette_impl; |
16 | int sTransformPalette_slot; |
17 | bool set_slot(int index, int value) { |
18 | switch (index) { |
19 | case 7: |
20 | sClipMask_slot = value; |
21 | return true; |
22 | case 8: |
23 | sColor0_slot = value; |
24 | return true; |
25 | case 2: |
26 | sGpuCache_slot = value; |
27 | return true; |
28 | case 4: |
29 | sPrimitiveHeadersF_slot = value; |
30 | return true; |
31 | case 5: |
32 | sPrimitiveHeadersI_slot = value; |
33 | return true; |
34 | case 1: |
35 | sRenderTasks_slot = value; |
36 | return true; |
37 | case 3: |
38 | sTransformPalette_slot = value; |
39 | return true; |
40 | } |
41 | return false; |
42 | } |
43 | } samplers; |
44 | struct AttribLocations { |
45 | int aPosition = NULL_ATTRIB; |
46 | int aData = NULL_ATTRIB; |
47 | void bind_loc(const char* name, int index) { |
48 | if (strcmp("aPosition", name) == 0) { aPosition = index; return; } |
49 | if (strcmp("aData", name) == 0) { aData = index; return; } |
50 | } |
51 | int get_loc(const char* name) const { |
52 | if (strcmp("aPosition", name) == 0) { return aPosition != NULL_ATTRIB16 ? aPosition : -1; } |
53 | if (strcmp("aData", name) == 0) { return aData != NULL_ATTRIB16 ? aData : -1; } |
54 | return -1; |
55 | } |
56 | } attrib_locations; |
57 | vec4_scalar vTransformBounds; |
58 | vec4_scalar v_uv_bounds; |
59 | vec4_scalar v_uv_sample_bounds; |
60 | vec2_scalar v_perspective; |
61 | sampler2D sClipMask; |
62 | sampler2D sColor0; |
63 | sampler2D sGpuCache; |
64 | sampler2D sPrimitiveHeadersF; |
65 | isampler2D sPrimitiveHeadersI; |
66 | sampler2D sRenderTasks; |
67 | sampler2D sTransformPalette; |
68 | mat4_scalar uTransform; |
69 | void bind_textures() { |
70 | sClipMask = lookup_sampler(&samplers.sClipMask_impl, samplers.sClipMask_slot); |
71 | sColor0 = lookup_sampler(&samplers.sColor0_impl, samplers.sColor0_slot); |
72 | sGpuCache = lookup_sampler(&samplers.sGpuCache_impl, samplers.sGpuCache_slot); |
73 | sPrimitiveHeadersF = lookup_sampler(&samplers.sPrimitiveHeadersF_impl, samplers.sPrimitiveHeadersF_slot); |
74 | sPrimitiveHeadersI = lookup_isampler(&samplers.sPrimitiveHeadersI_impl, samplers.sPrimitiveHeadersI_slot); |
75 | sRenderTasks = lookup_sampler(&samplers.sRenderTasks_impl, samplers.sRenderTasks_slot); |
76 | sTransformPalette = lookup_sampler(&samplers.sTransformPalette_impl, samplers.sTransformPalette_slot); |
77 | } |
78 | }; |
79 | struct brush_image_TEXTURE_2D_vert : VertexShaderImpl, brush_image_TEXTURE_2D_common { |
80 | private: |
81 | typedef brush_image_TEXTURE_2D_vert Self; |
82 | // mat4_scalar uTransform; |
83 | vec2 aPosition; |
84 | // sampler2D sColor0; |
85 | // sampler2D sColor1; |
86 | // sampler2D sColor2; |
87 | struct RectWithSize_scalar { |
88 | vec2_scalar p0; |
89 | vec2_scalar size; |
90 | RectWithSize_scalar() = default; |
91 | RectWithSize_scalar(vec2_scalar p0, vec2_scalar size) : p0(p0), size(size){} |
92 | }; |
93 | struct RectWithSize { |
94 | vec2 p0; |
95 | vec2 size; |
96 | RectWithSize() = default; |
97 | RectWithSize(vec2 p0, vec2 size) : p0(p0), size(size){} |
98 | RectWithSize(vec2_scalar p0, vec2_scalar size):p0(p0),size(size){ |
99 | } |
100 | IMPLICIT RectWithSize(RectWithSize_scalar s):p0(s.p0),size(s.size){ |
101 | } |
102 | friend RectWithSize if_then_else(I32 c, RectWithSize t, RectWithSize e) { return RectWithSize( |
103 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.size, e.size)); |
104 | }}; |
105 | struct RectWithEndpoint_scalar { |
106 | vec2_scalar p0; |
107 | vec2_scalar p1; |
108 | RectWithEndpoint_scalar() = default; |
109 | RectWithEndpoint_scalar(vec2_scalar p0, vec2_scalar p1) : p0(p0), p1(p1){} |
110 | }; |
111 | struct RectWithEndpoint { |
112 | vec2 p0; |
113 | vec2 p1; |
114 | RectWithEndpoint() = default; |
115 | RectWithEndpoint(vec2 p0, vec2 p1) : p0(p0), p1(p1){} |
116 | RectWithEndpoint(vec2_scalar p0, vec2_scalar p1):p0(p0),p1(p1){ |
117 | } |
118 | IMPLICIT RectWithEndpoint(RectWithEndpoint_scalar s):p0(s.p0),p1(s.p1){ |
119 | } |
120 | friend RectWithEndpoint if_then_else(I32 c, RectWithEndpoint t, RectWithEndpoint e) { return RectWithEndpoint( |
121 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.p1, e.p1)); |
122 | }}; |
123 | // sampler2D sRenderTasks; |
124 | struct RenderTaskData_scalar { |
125 | RectWithEndpoint_scalar task_rect; |
126 | vec4_scalar user_data; |
127 | RenderTaskData_scalar() = default; |
128 | RenderTaskData_scalar(RectWithEndpoint_scalar task_rect, vec4_scalar user_data) : task_rect(task_rect), user_data(user_data){} |
129 | }; |
130 | struct RenderTaskData { |
131 | RectWithEndpoint task_rect; |
132 | vec4 user_data; |
133 | RenderTaskData() = default; |
134 | RenderTaskData(RectWithEndpoint task_rect, vec4 user_data) : task_rect(task_rect), user_data(user_data){} |
135 | RenderTaskData(RectWithEndpoint_scalar task_rect, vec4_scalar user_data):task_rect(task_rect),user_data(user_data){ |
136 | } |
137 | IMPLICIT RenderTaskData(RenderTaskData_scalar s):task_rect(s.task_rect),user_data(s.user_data){ |
138 | } |
139 | friend RenderTaskData if_then_else(I32 c, RenderTaskData t, RenderTaskData e) { return RenderTaskData( |
140 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.user_data, e.user_data)); |
141 | }}; |
142 | struct PictureTask_scalar { |
143 | RectWithEndpoint_scalar task_rect; |
144 | float device_pixel_scale; |
145 | vec2_scalar content_origin; |
146 | PictureTask_scalar() = default; |
147 | PictureTask_scalar(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar content_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), content_origin(content_origin){} |
148 | }; |
149 | struct PictureTask { |
150 | RectWithEndpoint task_rect; |
151 | Float device_pixel_scale; |
152 | vec2 content_origin; |
153 | PictureTask() = default; |
154 | PictureTask(RectWithEndpoint task_rect, Float device_pixel_scale, vec2 content_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), content_origin(content_origin){} |
155 | PictureTask(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar content_origin):task_rect(task_rect),device_pixel_scale(device_pixel_scale),content_origin(content_origin){ |
156 | } |
157 | IMPLICIT PictureTask(PictureTask_scalar s):task_rect(s.task_rect),device_pixel_scale(s.device_pixel_scale),content_origin(s.content_origin){ |
158 | } |
159 | friend PictureTask if_then_else(I32 c, PictureTask t, PictureTask e) { return PictureTask( |
160 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.device_pixel_scale, e.device_pixel_scale), if_then_else(c, t.content_origin, e.content_origin)); |
161 | }}; |
162 | struct ClipArea_scalar { |
163 | RectWithEndpoint_scalar task_rect; |
164 | float device_pixel_scale; |
165 | vec2_scalar screen_origin; |
166 | ClipArea_scalar() = default; |
167 | ClipArea_scalar(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar screen_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), screen_origin(screen_origin){} |
168 | }; |
169 | struct ClipArea { |
170 | RectWithEndpoint task_rect; |
171 | Float device_pixel_scale; |
172 | vec2 screen_origin; |
173 | ClipArea() = default; |
174 | ClipArea(RectWithEndpoint task_rect, Float device_pixel_scale, vec2 screen_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), screen_origin(screen_origin){} |
175 | ClipArea(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar screen_origin):task_rect(task_rect),device_pixel_scale(device_pixel_scale),screen_origin(screen_origin){ |
176 | } |
177 | IMPLICIT ClipArea(ClipArea_scalar s):task_rect(s.task_rect),device_pixel_scale(s.device_pixel_scale),screen_origin(s.screen_origin){ |
178 | } |
179 | friend ClipArea if_then_else(I32 c, ClipArea t, ClipArea e) { return ClipArea( |
180 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.device_pixel_scale, e.device_pixel_scale), if_then_else(c, t.screen_origin, e.screen_origin)); |
181 | }}; |
182 | // sampler2D sGpuCache; |
183 | struct ImageSource_scalar { |
184 | RectWithEndpoint_scalar uv_rect; |
185 | vec4_scalar user_data; |
186 | ImageSource_scalar() = default; |
187 | ImageSource_scalar(RectWithEndpoint_scalar uv_rect, vec4_scalar user_data) : uv_rect(uv_rect), user_data(user_data){} |
188 | }; |
189 | struct ImageSource { |
190 | RectWithEndpoint uv_rect; |
191 | vec4 user_data; |
192 | ImageSource() = default; |
193 | ImageSource(RectWithEndpoint uv_rect, vec4 user_data) : uv_rect(uv_rect), user_data(user_data){} |
194 | ImageSource(RectWithEndpoint_scalar uv_rect, vec4_scalar user_data):uv_rect(uv_rect),user_data(user_data){ |
195 | } |
196 | IMPLICIT ImageSource(ImageSource_scalar s):uv_rect(s.uv_rect),user_data(s.user_data){ |
197 | } |
198 | friend ImageSource if_then_else(I32 c, ImageSource t, ImageSource e) { return ImageSource( |
199 | if_then_else(c, t.uv_rect, e.uv_rect), if_then_else(c, t.user_data, e.user_data)); |
200 | }}; |
201 | struct ImageSourceExtra_scalar { |
202 | vec4_scalar st_tl; |
203 | vec4_scalar st_tr; |
204 | vec4_scalar st_bl; |
205 | vec4_scalar st_br; |
206 | ImageSourceExtra_scalar() = default; |
207 | ImageSourceExtra_scalar(vec4_scalar st_tl, vec4_scalar st_tr, vec4_scalar st_bl, vec4_scalar st_br) : st_tl(st_tl), st_tr(st_tr), st_bl(st_bl), st_br(st_br){} |
208 | }; |
209 | struct ImageSourceExtra { |
210 | vec4 st_tl; |
211 | vec4 st_tr; |
212 | vec4 st_bl; |
213 | vec4 st_br; |
214 | ImageSourceExtra() = default; |
215 | ImageSourceExtra(vec4 st_tl, vec4 st_tr, vec4 st_bl, vec4 st_br) : st_tl(st_tl), st_tr(st_tr), st_bl(st_bl), st_br(st_br){} |
216 | ImageSourceExtra(vec4_scalar st_tl, vec4_scalar st_tr, vec4_scalar st_bl, vec4_scalar st_br):st_tl(st_tl),st_tr(st_tr),st_bl(st_bl),st_br(st_br){ |
217 | } |
218 | IMPLICIT ImageSourceExtra(ImageSourceExtra_scalar s):st_tl(s.st_tl),st_tr(s.st_tr),st_bl(s.st_bl),st_br(s.st_br){ |
219 | } |
220 | friend ImageSourceExtra if_then_else(I32 c, ImageSourceExtra t, ImageSourceExtra e) { return ImageSourceExtra( |
221 | if_then_else(c, t.st_tl, e.st_tl), if_then_else(c, t.st_tr, e.st_tr), if_then_else(c, t.st_bl, e.st_bl), if_then_else(c, t.st_br, e.st_br)); |
222 | }}; |
223 | // vec4_scalar vTransformBounds; |
224 | // sampler2D sTransformPalette; |
225 | struct Transform_scalar { |
226 | mat4_scalar m; |
227 | mat4_scalar inv_m; |
228 | bool is_axis_aligned; |
229 | Transform_scalar() = default; |
230 | Transform_scalar(mat4_scalar m, mat4_scalar inv_m, bool is_axis_aligned) : m(m), inv_m(inv_m), is_axis_aligned(is_axis_aligned){} |
231 | }; |
232 | struct Transform { |
233 | mat4 m; |
234 | mat4 inv_m; |
235 | Bool is_axis_aligned; |
236 | Transform() = default; |
237 | Transform(mat4 m, mat4 inv_m, Bool is_axis_aligned) : m(m), inv_m(inv_m), is_axis_aligned(is_axis_aligned){} |
238 | Transform(mat4_scalar m, mat4_scalar inv_m, bool is_axis_aligned):m(m),inv_m(inv_m),is_axis_aligned(is_axis_aligned){ |
239 | } |
240 | IMPLICIT Transform(Transform_scalar s):m(s.m),inv_m(s.inv_m),is_axis_aligned(s.is_axis_aligned){ |
241 | } |
242 | friend Transform if_then_else(I32 c, Transform t, Transform e) { return Transform( |
243 | if_then_else(c, t.m, e.m), if_then_else(c, t.inv_m, e.inv_m), if_then_else(c, t.is_axis_aligned, e.is_axis_aligned)); |
244 | }}; |
245 | // sampler2D sClipMask; |
246 | // sampler2D sPrimitiveHeadersF; |
247 | // isampler2D sPrimitiveHeadersI; |
248 | ivec4_scalar aData; |
249 | struct Instance_scalar { |
250 | int32_t prim_header_address; |
251 | int32_t clip_address; |
252 | int32_t segment_index; |
253 | int32_t flags; |
254 | int32_t resource_address; |
255 | int32_t brush_kind; |
256 | Instance_scalar() = default; |
257 | Instance_scalar(int32_t prim_header_address, int32_t clip_address, int32_t segment_index, int32_t flags, int32_t resource_address, int32_t brush_kind) : prim_header_address(prim_header_address), clip_address(clip_address), segment_index(segment_index), flags(flags), resource_address(resource_address), brush_kind(brush_kind){} |
258 | }; |
259 | struct Instance { |
260 | I32 prim_header_address; |
261 | I32 clip_address; |
262 | I32 segment_index; |
263 | I32 flags; |
264 | I32 resource_address; |
265 | I32 brush_kind; |
266 | Instance() = default; |
267 | Instance(I32 prim_header_address, I32 clip_address, I32 segment_index, I32 flags, I32 resource_address, I32 brush_kind) : prim_header_address(prim_header_address), clip_address(clip_address), segment_index(segment_index), flags(flags), resource_address(resource_address), brush_kind(brush_kind){} |
268 | Instance(int32_t prim_header_address, int32_t clip_address, int32_t segment_index, int32_t flags, int32_t resource_address, int32_t brush_kind):prim_header_address(prim_header_address),clip_address(clip_address),segment_index(segment_index),flags(flags),resource_address(resource_address),brush_kind(brush_kind){ |
269 | } |
270 | IMPLICIT Instance(Instance_scalar s):prim_header_address(s.prim_header_address),clip_address(s.clip_address),segment_index(s.segment_index),flags(s.flags),resource_address(s.resource_address),brush_kind(s.brush_kind){ |
271 | } |
272 | friend Instance if_then_else(I32 c, Instance t, Instance e) { return Instance( |
273 | if_then_else(c, t.prim_header_address, e.prim_header_address), if_then_else(c, t.clip_address, e.clip_address), if_then_else(c, t.segment_index, e.segment_index), if_then_else(c, t.flags, e.flags), if_then_else(c, t.resource_address, e.resource_address), if_then_else(c, t.brush_kind, e.brush_kind)); |
274 | }}; |
275 | struct PrimitiveHeader_scalar { |
276 | RectWithEndpoint_scalar local_rect; |
277 | RectWithEndpoint_scalar local_clip_rect; |
278 | float z; |
279 | int32_t specific_prim_address; |
280 | int32_t transform_id; |
281 | int32_t picture_task_address; |
282 | ivec4_scalar user_data; |
283 | PrimitiveHeader_scalar() = default; |
284 | PrimitiveHeader_scalar(RectWithEndpoint_scalar local_rect, RectWithEndpoint_scalar local_clip_rect, float z, int32_t specific_prim_address, int32_t transform_id, int32_t picture_task_address, ivec4_scalar user_data) : local_rect(local_rect), local_clip_rect(local_clip_rect), z(z), specific_prim_address(specific_prim_address), transform_id(transform_id), picture_task_address(picture_task_address), user_data(user_data){} |
285 | }; |
286 | struct PrimitiveHeader { |
287 | RectWithEndpoint local_rect; |
288 | RectWithEndpoint local_clip_rect; |
289 | Float z; |
290 | I32 specific_prim_address; |
291 | I32 transform_id; |
292 | I32 picture_task_address; |
293 | ivec4 user_data; |
294 | PrimitiveHeader() = default; |
295 | PrimitiveHeader(RectWithEndpoint local_rect, RectWithEndpoint local_clip_rect, Float z, I32 specific_prim_address, I32 transform_id, I32 picture_task_address, ivec4 user_data) : local_rect(local_rect), local_clip_rect(local_clip_rect), z(z), specific_prim_address(specific_prim_address), transform_id(transform_id), picture_task_address(picture_task_address), user_data(user_data){} |
296 | PrimitiveHeader(RectWithEndpoint_scalar local_rect, RectWithEndpoint_scalar local_clip_rect, float z, int32_t specific_prim_address, int32_t transform_id, int32_t picture_task_address, ivec4_scalar user_data):local_rect(local_rect),local_clip_rect(local_clip_rect),z(z),specific_prim_address(specific_prim_address),transform_id(transform_id),picture_task_address(picture_task_address),user_data(user_data){ |
297 | } |
298 | IMPLICIT PrimitiveHeader(PrimitiveHeader_scalar s):local_rect(s.local_rect),local_clip_rect(s.local_clip_rect),z(s.z),specific_prim_address(s.specific_prim_address),transform_id(s.transform_id),picture_task_address(s.picture_task_address),user_data(s.user_data){ |
299 | } |
300 | friend PrimitiveHeader if_then_else(I32 c, PrimitiveHeader t, PrimitiveHeader e) { return PrimitiveHeader( |
301 | if_then_else(c, t.local_rect, e.local_rect), if_then_else(c, t.local_clip_rect, e.local_clip_rect), if_then_else(c, t.z, e.z), if_then_else(c, t.specific_prim_address, e.specific_prim_address), if_then_else(c, t.transform_id, e.transform_id), if_then_else(c, t.picture_task_address, e.picture_task_address), if_then_else(c, t.user_data, e.user_data)); |
302 | }}; |
303 | struct VertexInfo_scalar { |
304 | vec2_scalar local_pos; |
305 | vec4_scalar world_pos; |
306 | VertexInfo_scalar() = default; |
307 | VertexInfo_scalar(vec2_scalar local_pos, vec4_scalar world_pos) : local_pos(local_pos), world_pos(world_pos){} |
308 | }; |
309 | struct VertexInfo { |
310 | vec2 local_pos; |
311 | vec4 world_pos; |
312 | VertexInfo() = default; |
313 | VertexInfo(vec2 local_pos, vec4 world_pos) : local_pos(local_pos), world_pos(world_pos){} |
314 | VertexInfo(vec2_scalar local_pos, vec4_scalar world_pos):local_pos(local_pos),world_pos(world_pos){ |
315 | } |
316 | IMPLICIT VertexInfo(VertexInfo_scalar s):local_pos(s.local_pos),world_pos(s.world_pos){ |
317 | } |
318 | friend VertexInfo if_then_else(I32 c, VertexInfo t, VertexInfo e) { return VertexInfo( |
319 | if_then_else(c, t.local_pos, e.local_pos), if_then_else(c, t.world_pos, e.world_pos)); |
320 | }}; |
321 | vec2 v_uv; |
322 | // vec4_scalar v_uv_bounds; |
323 | // vec4_scalar v_uv_sample_bounds; |
324 | // vec2_scalar v_perspective; |
325 | struct ImageBrushData_scalar { |
326 | vec4_scalar color; |
327 | vec4_scalar background_color; |
328 | vec2_scalar stretch_size; |
329 | ImageBrushData_scalar() = default; |
330 | ImageBrushData_scalar(vec4_scalar color, vec4_scalar background_color, vec2_scalar stretch_size) : color(color), background_color(background_color), stretch_size(stretch_size){} |
331 | }; |
332 | struct ImageBrushData { |
333 | vec4 color; |
334 | vec4 background_color; |
335 | vec2 stretch_size; |
336 | ImageBrushData() = default; |
337 | ImageBrushData(vec4 color, vec4 background_color, vec2 stretch_size) : color(color), background_color(background_color), stretch_size(stretch_size){} |
338 | ImageBrushData(vec4_scalar color, vec4_scalar background_color, vec2_scalar stretch_size):color(color),background_color(background_color),stretch_size(stretch_size){ |
339 | } |
340 | IMPLICIT ImageBrushData(ImageBrushData_scalar s):color(s.color),background_color(s.background_color),stretch_size(s.stretch_size){ |
341 | } |
342 | friend ImageBrushData if_then_else(I32 c, ImageBrushData t, ImageBrushData e) { return ImageBrushData( |
343 | if_then_else(c, t.color, e.color), if_then_else(c, t.background_color, e.background_color), if_then_else(c, t.stretch_size, e.stretch_size)); |
344 | }}; |
345 | Instance_scalar decode_instance_attributes() { |
346 | Instance_scalar instance; |
347 | (instance).prim_header_address = (aData).x; |
348 | (instance).clip_address = (aData).y; |
349 | (instance).segment_index = ((aData).z)&(65535); |
350 | (instance).flags = ((aData).z)>>(16); |
351 | (instance).resource_address = ((aData).w)&(16777215); |
352 | (instance).brush_kind = ((aData).w)>>(24); |
353 | return instance; |
354 | } |
355 | PrimitiveHeader_scalar fetch_prim_header(int32_t index) { |
356 | PrimitiveHeader_scalar ph; |
357 | ivec2_scalar uv_f = make_ivec2(make_int((2u)*((make_uint(index))%((1024u)/(2u)))), make_int((make_uint(index))/((1024u)/(2u)))); |
358 | auto sPrimitiveHeadersF_uv_f_fetch = texelFetchPtr(sPrimitiveHeadersF, uv_f, 0, 1, 0, 0); |
359 | vec4_scalar local_rect = texelFetchUnchecked(sPrimitiveHeadersF, sPrimitiveHeadersF_uv_f_fetch, 0, 0); |
360 | vec4_scalar local_clip_rect = texelFetchUnchecked(sPrimitiveHeadersF, sPrimitiveHeadersF_uv_f_fetch, 1, 0); |
361 | (ph).local_rect = RectWithEndpoint_scalar((local_rect).sel(X,Y), (local_rect).sel(Z,W)); |
362 | (ph).local_clip_rect = RectWithEndpoint_scalar((local_clip_rect).sel(X,Y), (local_clip_rect).sel(Z,W)); |
363 | ivec2_scalar uv_i = make_ivec2(make_int((2u)*((make_uint(index))%((1024u)/(2u)))), make_int((make_uint(index))/((1024u)/(2u)))); |
364 | auto sPrimitiveHeadersI_uv_i_fetch = texelFetchPtr(sPrimitiveHeadersI, uv_i, 0, 1, 0, 0); |
365 | ivec4_scalar data0 = texelFetchUnchecked(sPrimitiveHeadersI, sPrimitiveHeadersI_uv_i_fetch, 0, 0); |
366 | ivec4_scalar data1 = texelFetchUnchecked(sPrimitiveHeadersI, sPrimitiveHeadersI_uv_i_fetch, 1, 0); |
367 | (ph).z = make_float((data0).x); |
368 | (ph).specific_prim_address = (data0).y; |
369 | (ph).transform_id = (data0).z; |
370 | (ph).picture_task_address = (data0).w; |
371 | (ph).user_data = data1; |
372 | return ph; |
373 | } |
374 | Transform_scalar fetch_transform(int32_t id) { |
375 | Transform_scalar transform; |
376 | (transform).is_axis_aligned = ((id)>>(23))==(0); |
377 | int32_t index = (id)&(8388607); |
378 | ivec2_scalar uv = make_ivec2(make_int((8u)*((make_uint(index))%((1024u)/(8u)))), make_int((make_uint(index))/((1024u)/(8u)))); |
379 | ivec2_scalar uv0 = make_ivec2(((uv).x)+(0), (uv).y); |
380 | auto sTransformPalette_uv0_fetch = texelFetchPtr(sTransformPalette, uv0, 0, 7, 0, 0); |
381 | (transform).m[0] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 0, 0); |
382 | (transform).m[1] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 1, 0); |
383 | (transform).m[2] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 2, 0); |
384 | (transform).m[3] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 3, 0); |
385 | (transform).inv_m[0] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 4, 0); |
386 | (transform).inv_m[1] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 5, 0); |
387 | (transform).inv_m[2] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 6, 0); |
388 | (transform).inv_m[3] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 7, 0); |
389 | return transform; |
390 | } |
391 | RenderTaskData_scalar fetch_render_task_data(int32_t index) { |
392 | ivec2_scalar uv = make_ivec2(make_int((2u)*((make_uint(index))%((1024u)/(2u)))), make_int((make_uint(index))/((1024u)/(2u)))); |
393 | auto sRenderTasks_uv_fetch = texelFetchPtr(sRenderTasks, uv, 0, 1, 0, 0); |
394 | vec4_scalar texel0 = texelFetchUnchecked(sRenderTasks, sRenderTasks_uv_fetch, 0, 0); |
395 | vec4_scalar texel1 = texelFetchUnchecked(sRenderTasks, sRenderTasks_uv_fetch, 1, 0); |
396 | RectWithEndpoint_scalar task_rect = RectWithEndpoint_scalar((texel0).sel(X,Y), (texel0).sel(Z,W)); |
397 | RenderTaskData_scalar data = RenderTaskData_scalar(task_rect, texel1); |
398 | return data; |
399 | } |
400 | PictureTask_scalar fetch_picture_task(int32_t address) { |
401 | RenderTaskData_scalar task_data = fetch_render_task_data(address); |
402 | PictureTask_scalar task = PictureTask_scalar((task_data).task_rect, ((task_data).user_data).x, ((task_data).user_data).sel(Y,Z)); |
403 | return task; |
404 | } |
405 | ClipArea_scalar fetch_clip_area(int32_t index) { |
406 | RenderTaskData_scalar task_data; |
407 | if ((index)>=(2147483647)) { |
408 | { |
409 | task_data = RenderTaskData_scalar(RectWithEndpoint_scalar(make_vec2(0.f), make_vec2(0.f)), make_vec4(0.f)); |
410 | } |
411 | } else { |
412 | task_data = fetch_render_task_data(index); |
413 | } |
414 | return ClipArea_scalar((task_data).task_rect, ((task_data).user_data).x, ((task_data).user_data).sel(Y,Z)); |
415 | } |
416 | ivec2_scalar get_gpu_cache_uv(int32_t address) { |
417 | return make_ivec2((make_uint(address))%(1024u), (make_uint(address))/(1024u)); |
418 | } |
419 | Array<vec4_scalar,2> fetch_from_gpu_cache_2(int32_t address) { |
420 | ivec2_scalar uv = get_gpu_cache_uv(address); |
421 | auto sGpuCache_uv_fetch = texelFetchPtr(sGpuCache, uv, 0, 1, 0, 0); |
422 | return Array<vec4_scalar,2>{{texelFetchUnchecked(sGpuCache, sGpuCache_uv_fetch, 0, 0), texelFetchUnchecked(sGpuCache, sGpuCache_uv_fetch, 1, 0)}}; |
423 | } |
424 | RectWithEndpoint_scalar clip_and_init_antialiasing(RectWithEndpoint_scalar segment_rect, RectWithEndpoint_scalar prim_rect, RectWithEndpoint_scalar clip_rect, int32_t edge_flags, float z, Transform_scalar transform, PictureTask_scalar task) { |
425 | bvec4_scalar clipped = make_bvec4(greaterThan((clip_rect).p0, (segment_rect).p0), lessThan((clip_rect).p1, (segment_rect).p1)); |
426 | swgl_antiAlias((edge_flags)|(((clipped).x ? 1 : 0)|(((clipped).y ? 2 : 0)|(((clipped).z ? 4 : 0)|((clipped).w ? 8 : 0))))); |
427 | (segment_rect).p0 = clamp((segment_rect).p0, (clip_rect).p0, (clip_rect).p1); |
428 | (segment_rect).p1 = clamp((segment_rect).p1, (clip_rect).p0, (clip_rect).p1); |
429 | return segment_rect; |
430 | } |
431 | vec2 rect_clamp(RectWithEndpoint_scalar rect, vec2 pt) { |
432 | return clamp(pt, (rect).p0, (rect).p1); |
433 | } |
434 | VertexInfo write_vertex(vec2 local_pos, RectWithEndpoint_scalar local_clip_rect, float z, Transform_scalar transform, PictureTask_scalar task) { |
435 | vec2 clamped_local_pos = rect_clamp(local_clip_rect, local_pos); |
436 | vec4 world_pos = ((transform).m)*(make_vec4(clamped_local_pos, 0.f, 1.f)); |
437 | vec2 device_pos = ((world_pos).sel(X,Y))*((task).device_pixel_scale); |
438 | vec2_scalar final_offset = (-((task).content_origin))+(((task).task_rect).p0); |
439 | gl_Position = (uTransform)*(make_vec4((device_pos)+((final_offset)*((world_pos).w)), (z)*((world_pos).w), (world_pos).w)); |
440 | VertexInfo vi = VertexInfo(clamped_local_pos, world_pos); |
441 | return vi; |
442 | } |
443 | vec2_scalar rect_size(RectWithEndpoint_scalar rect) { |
444 | return ((rect).p1)-((rect).p0); |
445 | } |
446 | void write_clip(vec4 world_pos, ClipArea_scalar area, PictureTask_scalar task) { |
447 | swgl_clipMask(sClipMask, ((((task).task_rect).p0)-((task).content_origin))-((((area).task_rect).p0)-((area).screen_origin)), ((area).task_rect).p0, rect_size((area).task_rect)); |
448 | } |
449 | Array<vec4_scalar,3> fetch_from_gpu_cache_3(int32_t address) { |
450 | ivec2_scalar uv = get_gpu_cache_uv(address); |
451 | auto sGpuCache_uv_fetch = texelFetchPtr(sGpuCache, uv, 0, 2, 0, 0); |
452 | return Array<vec4_scalar,3>{{texelFetchUnchecked(sGpuCache, sGpuCache_uv_fetch, 0, 0), texelFetchUnchecked(sGpuCache, sGpuCache_uv_fetch, 1, 0), texelFetchUnchecked(sGpuCache, sGpuCache_uv_fetch, 2, 0)}}; |
453 | } |
454 | ImageBrushData_scalar fetch_image_data(int32_t address) { |
455 | Array<vec4_scalar,3> raw_data = fetch_from_gpu_cache_3(address); |
456 | ImageBrushData_scalar data = ImageBrushData_scalar(raw_data[0], raw_data[1], (raw_data[2]).sel(X,Y)); |
457 | return data; |
458 | } |
459 | ImageSource_scalar fetch_image_source(int32_t address) { |
460 | Array<vec4_scalar,2> data = fetch_from_gpu_cache_2(address); |
461 | RectWithEndpoint_scalar uv_rect = RectWithEndpoint_scalar((data[0]).sel(X,Y), (data[0]).sel(Z,W)); |
462 | return ImageSource_scalar(uv_rect, data[1]); |
463 | } |
464 | Array<vec4_scalar,4> fetch_from_gpu_cache_4(int32_t address) { |
465 | ivec2_scalar uv = get_gpu_cache_uv(address); |
466 | auto sGpuCache_uv_fetch = texelFetchPtr(sGpuCache, uv, 0, 3, 0, 0); |
467 | return Array<vec4_scalar,4>{{texelFetchUnchecked(sGpuCache, sGpuCache_uv_fetch, 0, 0), texelFetchUnchecked(sGpuCache, sGpuCache_uv_fetch, 1, 0), texelFetchUnchecked(sGpuCache, sGpuCache_uv_fetch, 2, 0), texelFetchUnchecked(sGpuCache, sGpuCache_uv_fetch, 3, 0)}}; |
468 | } |
469 | ImageSourceExtra_scalar fetch_image_source_extra(int32_t address) { |
470 | Array<vec4_scalar,4> data = fetch_from_gpu_cache_4((address)+(2)); |
471 | return ImageSourceExtra_scalar(data[0], data[1], data[2], data[3]); |
472 | } |
473 | vec2 get_image_quad_uv(int32_t address, vec2 f) { |
474 | ImageSourceExtra_scalar extra_data = fetch_image_source_extra(address); |
475 | vec4 x = mix((extra_data).st_tl, (extra_data).st_tr, (f).x); |
476 | vec4 y = mix((extra_data).st_bl, (extra_data).st_br, (f).x); |
477 | vec4 z = mix(x, y, (f).y); |
478 | return ((z).sel(X,Y))/((z).w); |
479 | } |
480 | void brush_vs(VertexInfo vi, int32_t prim_address, RectWithEndpoint_scalar prim_rect, RectWithEndpoint_scalar segment_rect, ivec4_scalar prim_user_data, int32_t specific_resource_address, mat4_scalar transform, PictureTask_scalar pic_task, int32_t brush_flags, vec4_scalar segment_data) { |
481 | ImageBrushData_scalar image_data = fetch_image_data(prim_address); |
482 | vec2_scalar texture_size = make_vec2(textureSize(sColor0, 0)); |
483 | ImageSource_scalar res = fetch_image_source(specific_resource_address); |
484 | vec2_scalar uv0 = ((res).uv_rect).p0; |
485 | vec2_scalar uv1 = ((res).uv_rect).p1; |
486 | RectWithEndpoint_scalar local_rect = prim_rect; |
487 | vec2_scalar stretch_size = (image_data).stretch_size; |
488 | if (((stretch_size).x)<(0.f)) { |
489 | { |
490 | stretch_size = rect_size(local_rect); |
491 | } |
492 | } |
493 | if (((brush_flags)&(2))!=(0)) { |
494 | { |
495 | local_rect = segment_rect; |
496 | stretch_size = rect_size(local_rect); |
497 | if (((brush_flags)&(512))!=(0)) { |
498 | { |
499 | vec2_scalar uv_size = (((res).uv_rect).p1)-(((res).uv_rect).p0); |
500 | uv0 = (((res).uv_rect).p0)+(((segment_data).sel(X,Y))*(uv_size)); |
501 | uv1 = (((res).uv_rect).p0)+(((segment_data).sel(Z,W))*(uv_size)); |
502 | } |
503 | } |
504 | } |
505 | } |
506 | float perspective_interpolate = ((brush_flags)&(1))!=(0) ? 1.f : 0.f; |
507 | (v_perspective).x = perspective_interpolate; |
508 | if (((brush_flags)&(2048))!=(0)) { |
509 | { |
510 | uv0 *= texture_size; |
511 | uv1 *= texture_size; |
512 | } |
513 | } |
514 | vec2_scalar min_uv = min(uv0, uv1); |
515 | vec2_scalar max_uv = max(uv0, uv1); |
516 | v_uv_sample_bounds = (make_vec4((min_uv)+(make_vec2(0.5f)), (max_uv)-(make_vec2(0.5f))))/((texture_size).sel(X,Y,X,Y)); |
517 | vec2 f = (((vi).local_pos)-((local_rect).p0))/(rect_size(local_rect)); |
518 | int32_t raster_space = (prim_user_data).y; |
519 | if ((raster_space)==(1)) { |
520 | { |
521 | f = get_image_quad_uv(specific_resource_address, f); |
522 | } |
523 | } |
524 | vec2_scalar repeat = (rect_size(local_rect))/(stretch_size); |
525 | v_uv = (mix(uv0, uv1, f))-(min_uv); |
526 | v_uv *= (repeat).sel(X,Y); |
527 | vec2_scalar normalized_offset = make_vec2(0.f); |
 | note: Value stored to 'normalized_offset' during its initialization is never read |
528 | v_uv /= texture_size; |
529 | if ((perspective_interpolate)==(0.f)) { |
530 | { |
531 | v_uv *= ((vi).world_pos).w; |
532 | } |
533 | } |
534 | v_uv_bounds = (make_vec4(min_uv, max_uv))/((texture_size).sel(X,Y,X,Y)); |
535 | } |
536 | void brush_shader_main_vs(Instance_scalar instance, PrimitiveHeader_scalar ph, Transform_scalar transform, PictureTask_scalar pic_task, ClipArea_scalar clip_area) { |
537 | int32_t edge_flags = (((instance).flags)>>(12))&(15); |
538 | int32_t brush_flags = ((instance).flags)&(4095); |
539 | vec4_scalar segment_data; |
540 | RectWithEndpoint_scalar segment_rect; |
541 | if (((instance).segment_index)==(65535)) { |
542 | { |
543 | segment_rect = (ph).local_rect; |
544 | segment_data = make_vec4(0.f); |
545 | } |
546 | } else { |
547 | int32_t segment_address = (((ph).specific_prim_address)+(3))+(((instance).segment_index)*(2)); |
548 | Array<vec4_scalar,2> segment_info = fetch_from_gpu_cache_2(segment_address); |
549 | segment_rect = RectWithEndpoint_scalar((segment_info[0]).sel(X,Y), (segment_info[0]).sel(Z,W)); |
550 | (segment_rect).p0 += ((ph).local_rect).p0; |
551 | (segment_rect).p1 += ((ph).local_rect).p0; |
552 | segment_data = segment_info[1]; |
553 | } |
554 | RectWithEndpoint_scalar adjusted_segment_rect = segment_rect; |
555 | bool antialiased = (!((transform).is_axis_aligned))||(((brush_flags)&(1024))!=(0)); |
556 | if (antialiased) { |
557 | { |
558 | adjusted_segment_rect = clip_and_init_antialiasing(segment_rect, (ph).local_rect, (ph).local_clip_rect, edge_flags, (ph).z, transform, pic_task); |
559 | ((ph).local_clip_rect).p0 = make_vec2(-(10000000000000000.f)); |
560 | ((ph).local_clip_rect).p1 = make_vec2(10000000000000000.f); |
561 | } |
562 | } else { |
563 | } |
564 | vec2 local_pos = mix((adjusted_segment_rect).p0, (adjusted_segment_rect).p1, (aPosition).sel(X,Y)); |
565 | VertexInfo vi = write_vertex(local_pos, (ph).local_clip_rect, (ph).z, transform, pic_task); |
566 | write_clip((vi).world_pos, clip_area, pic_task); |
567 | brush_vs(vi, (ph).specific_prim_address, (ph).local_rect, segment_rect, (ph).user_data, (instance).resource_address, (transform).m, pic_task, brush_flags, segment_data); |
568 | } |
569 | ALWAYS_INLINE void main(void) { |
570 | Instance_scalar instance = decode_instance_attributes(); |
571 | PrimitiveHeader_scalar ph = fetch_prim_header((instance).prim_header_address); |
572 | Transform_scalar transform = fetch_transform((ph).transform_id); |
573 | PictureTask_scalar task = fetch_picture_task((ph).picture_task_address); |
574 | ClipArea_scalar clip_area = fetch_clip_area((instance).clip_address); |
575 | brush_shader_main_vs(instance, ph, transform, task, clip_area); |
576 | } |
577 | static void set_uniform_1i(VertexShaderImpl* impl, int index, int value) { |
578 | Self* self = (Self*)impl; |
579 | if (self->samplers.set_slot(index, value)) return; |
580 | switch (index) { |
581 | case 7: |
582 | assert(0); // sClipMask |
583 | break; |
584 | case 8: |
585 | assert(0); // sColor0 |
586 | break; |
587 | case 2: |
588 | assert(0); // sGpuCache |
589 | break; |
590 | case 4: |
591 | assert(0); // sPrimitiveHeadersF |
592 | break; |
593 | case 5: |
594 | assert(0); // sPrimitiveHeadersI |
595 | break; |
596 | case 1: |
597 | assert(0); // sRenderTasks |
598 | break; |
599 | case 3: |
600 | assert(0); // sTransformPalette |
601 | break; |
602 | case 6: |
603 | assert(0); // uTransform |
604 | break; |
605 | } |
606 | } |
607 | static void set_uniform_4fv(VertexShaderImpl* impl, int index, const float *value) { |
608 | Self* self = (Self*)impl; |
609 | switch (index) { |
610 | case 7: |
611 | assert(0); // sClipMask |
612 | break; |
613 | case 8: |
614 | assert(0); // sColor0 |
615 | break; |
616 | case 2: |
617 | assert(0); // sGpuCache |
618 | break; |
619 | case 4: |
620 | assert(0); // sPrimitiveHeadersF |
621 | break; |
622 | case 5: |
623 | assert(0); // sPrimitiveHeadersI |
624 | break; |
625 | case 1: |
626 | assert(0); // sRenderTasks |
627 | break; |
628 | case 3: |
629 | assert(0); // sTransformPalette |
630 | break; |
631 | case 6: |
632 | assert(0); // uTransform |
633 | break; |
634 | } |
635 | } |
636 | static void set_uniform_matrix4fv(VertexShaderImpl* impl, int index, const float *value) { |
637 | Self* self = (Self*)impl; |
638 | switch (index) { |
639 | case 7: |
640 | assert(0); // sClipMask |
641 | break; |
642 | case 8: |
643 | assert(0); // sColor0 |
644 | break; |
645 | case 2: |
646 | assert(0); // sGpuCache |
647 | break; |
648 | case 4: |
649 | assert(0); // sPrimitiveHeadersF |
650 | break; |
651 | case 5: |
652 | assert(0); // sPrimitiveHeadersI |
653 | break; |
654 | case 1: |
655 | assert(0); // sRenderTasks |
656 | break; |
657 | case 3: |
658 | assert(0); // sTransformPalette |
659 | break; |
660 | case 6: |
661 | self->uTransform = mat4_scalar::load_from_ptr(value); |
662 | break; |
663 | } |
664 | } |
665 | static void load_attribs(VertexShaderImpl* impl, VertexAttrib *attribs, uint32_t start, int instance, int count) {Self* self = (Self*)impl; |
666 | load_attrib(self->aPosition, attribs[self->attrib_locations.aPosition], start, instance, count); |
667 | load_flat_attrib(self->aData, attribs[self->attrib_locations.aData], start, instance, count); |
668 | } |
669 | public: |
670 | struct InterpOutputs { |
671 | vec2_scalar v_uv; |
672 | }; |
673 | private: |
674 | ALWAYS_INLINE void store_interp_outputs(char* dest_ptr, size_t stride) { |
675 | for(int n = 0; n < 4; n++) { |
676 | auto* dest = reinterpret_cast<InterpOutputs*>(dest_ptr); |
677 | dest->v_uv = get_nth(v_uv, n); |
678 | dest_ptr += stride; |
679 | } |
680 | } |
681 | static void run(VertexShaderImpl* impl, char* interps, size_t interp_stride) { |
682 | Self* self = (Self*)impl; |
683 | self->main(); |
684 | self->store_interp_outputs(interps, interp_stride); |
685 | } |
686 | static void init_batch(VertexShaderImpl* impl) { |
687 | Self* self = (Self*)impl; self->bind_textures(); } |
688 | public: |
689 | brush_image_TEXTURE_2D_vert() { |
690 | set_uniform_1i_func = &set_uniform_1i; |
691 | set_uniform_4fv_func = &set_uniform_4fv; |
692 | set_uniform_matrix4fv_func = &set_uniform_matrix4fv; |
693 | init_batch_func = &init_batch; |
694 | load_attribs_func = &load_attribs; |
695 | run_primitive_func = &run; |
696 | } |
697 | }; |
698 | |
699 | |
700 | struct brush_image_TEXTURE_2D_frag : FragmentShaderImpl, brush_image_TEXTURE_2D_vert { |
701 | private: |
702 | typedef brush_image_TEXTURE_2D_frag Self; |
703 | #define oFragColor gl_FragColor |
704 | // vec4 oFragColor; |
705 | // sampler2D sColor0; |
706 | // sampler2D sColor1; |
707 | // sampler2D sColor2; |
708 | struct RectWithSize_scalar { |
709 | vec2_scalar p0; |
710 | vec2_scalar size; |
711 | RectWithSize_scalar() = default; |
712 | RectWithSize_scalar(vec2_scalar p0, vec2_scalar size) : p0(p0), size(size){} |
713 | }; |
714 | struct RectWithSize { |
715 | vec2 p0; |
716 | vec2 size; |
717 | RectWithSize() = default; |
718 | RectWithSize(vec2 p0, vec2 size) : p0(p0), size(size){} |
719 | RectWithSize(vec2_scalar p0, vec2_scalar size):p0(p0),size(size){ |
720 | } |
721 | IMPLICIT RectWithSize(RectWithSize_scalar s):p0(s.p0),size(s.size){ |
722 | } |
723 | friend RectWithSize if_then_else(I32 c, RectWithSize t, RectWithSize e) { return RectWithSize( |
724 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.size, e.size)); |
725 | }}; |
726 | struct RectWithEndpoint_scalar { |
727 | vec2_scalar p0; |
728 | vec2_scalar p1; |
729 | RectWithEndpoint_scalar() = default; |
730 | RectWithEndpoint_scalar(vec2_scalar p0, vec2_scalar p1) : p0(p0), p1(p1){} |
731 | }; |
732 | struct RectWithEndpoint { |
733 | vec2 p0; |
734 | vec2 p1; |
735 | RectWithEndpoint() = default; |
736 | RectWithEndpoint(vec2 p0, vec2 p1) : p0(p0), p1(p1){} |
737 | RectWithEndpoint(vec2_scalar p0, vec2_scalar p1):p0(p0),p1(p1){ |
738 | } |
739 | IMPLICIT RectWithEndpoint(RectWithEndpoint_scalar s):p0(s.p0),p1(s.p1){ |
740 | } |
741 | friend RectWithEndpoint if_then_else(I32 c, RectWithEndpoint t, RectWithEndpoint e) { return RectWithEndpoint( |
742 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.p1, e.p1)); |
743 | }}; |
744 | // sampler2D sGpuCache; |
745 | // vec4_scalar vTransformBounds; |
746 | // sampler2D sClipMask; |
747 | struct Fragment_scalar { |
748 | vec4_scalar color; |
749 | Fragment_scalar() = default; |
750 | explicit Fragment_scalar(vec4_scalar color) : color(color){} |
751 | }; |
752 | struct Fragment { |
753 | vec4 color; |
754 | Fragment() = default; |
755 | explicit Fragment(vec4 color) : color(color){} |
756 | explicit Fragment(vec4_scalar color):color(color){ |
757 | } |
758 | IMPLICIT Fragment(Fragment_scalar s):color(s.color){ |
759 | } |
760 | friend Fragment if_then_else(I32 c, Fragment t, Fragment e) { return Fragment( |
761 | if_then_else(c, t.color, e.color)); |
762 | }}; |
763 | vec2 v_uv; |
764 | // vec4_scalar v_uv_bounds; |
765 | // vec4_scalar v_uv_sample_bounds; |
766 | // vec2_scalar v_perspective; |
767 | vec2 compute_repeated_uvs(Float perspective_divisor) { |
768 | return ((v_uv)*(perspective_divisor))+((v_uv_bounds).sel(X,Y)); |
769 | } |
770 | Fragment brush_fs() { |
771 | Float perspective_divisor = mix((gl_FragCoord).w, 1.f, (v_perspective).x); |
772 | vec2 repeated_uv = compute_repeated_uvs(perspective_divisor); |
773 | vec2 uv = clamp(repeated_uv, (v_uv_sample_bounds).sel(X,Y), (v_uv_sample_bounds).sel(Z,W)); |
774 | vec4 texel = texture(sColor0, (uv).sel(X,Y)); |
775 | Fragment frag; |
776 | (frag).color = texel; |
777 | return frag; |
778 | } |
779 | void write_output(vec4 color) { |
780 | oFragColor = color; |
781 | } |
782 | ALWAYS_INLINE void main(void) { |
783 | Fragment frag = brush_fs(); |
784 | write_output((frag).color); |
785 | } |
786 | vec2 compute_repeated_uvs(float perspective_divisor) { |
787 | return ((v_uv)*(perspective_divisor))+((v_uv_bounds).sel(X,Y)); |
788 | } |
789 | void swgl_drawSpanRGBA8() { |
790 | if (!(swgl_isTextureRGBA8(sColor0))) { |
791 | { |
792 | return; |
793 | } |
794 | } |
795 | float perspective_divisor = mix(swgl_forceScalar((gl_FragCoord).w), 1.f, (v_perspective).x); |
796 | vec2 uv = compute_repeated_uvs(perspective_divisor); |
797 | swgl_commitTextureRGBA8(sColor0, uv, v_uv_sample_bounds); |
798 | } |
799 | typedef brush_image_TEXTURE_2D_vert::InterpOutputs InterpInputs; |
800 | InterpInputs interp_step; |
801 | struct InterpPerspective { |
802 | vec2 v_uv; |
803 | }; |
804 | InterpPerspective interp_perspective; |
805 | static void read_interp_inputs(FragmentShaderImpl* impl, const void* init_, const void* step_) {Self* self = (Self*)impl;const InterpInputs* init = (const InterpInputs*)init_;const InterpInputs* step = (const InterpInputs*)step_; |
806 | self->v_uv = init_interp(init->v_uv, step->v_uv); |
807 | self->interp_step.v_uv = step->v_uv * 4.0f; |
808 | } |
809 | static void read_perspective_inputs(FragmentShaderImpl* impl, const void* init_, const void* step_) {Self* self = (Self*)impl;const InterpInputs* init = (const InterpInputs*)init_;const InterpInputs* step = (const InterpInputs*)step_; |
810 | Float w = 1.0f / self->gl_FragCoord.w; |
811 | self->interp_perspective.v_uv = init_interp(init->v_uv, step->v_uv); |
812 | self->v_uv = self->interp_perspective.v_uv * w; |
813 | self->interp_step.v_uv = step->v_uv * 4.0f; |
814 | } |
815 | ALWAYS_INLINE void step_interp_inputs(int steps = 4) { |
816 | float chunks = steps * 0.25f; |
817 | v_uv += interp_step.v_uv * chunks; |
818 | } |
819 | ALWAYS_INLINE void step_perspective_inputs(int steps = 4) { |
820 | step_perspective(steps); |
821 | float chunks = steps * 0.25f; |
822 | Float w = 1.0f / gl_FragCoord.w; |
823 | interp_perspective.v_uv += interp_step.v_uv * chunks; |
824 | v_uv = w * interp_perspective.v_uv; |
825 | } |
826 | static void run(FragmentShaderImpl* impl) { |
827 | Self* self = (Self*)impl; |
828 | self->main(); |
829 | self->step_interp_inputs(); |
830 | } |
831 | static void skip(FragmentShaderImpl* impl, int steps) { |
832 | Self* self = (Self*)impl; |
833 | self->step_interp_inputs(steps); |
834 | } |
835 | static void run_perspective(FragmentShaderImpl* impl) { |
836 | Self* self = (Self*)impl; |
837 | self->main(); |
838 | self->step_perspective_inputs(); |
839 | } |
840 | static void skip_perspective(FragmentShaderImpl* impl, int steps) { |
841 | Self* self = (Self*)impl; |
842 | self->step_perspective_inputs(steps); |
843 | } |
844 | static int draw_span_RGBA8(FragmentShaderImpl* impl) { |
845 | Self* self = (Self*)impl; DISPATCH_DRAW_SPAN(self, RGBA8); } |
846 | public: |
847 | brush_image_TEXTURE_2D_frag() { |
848 | init_span_func = &read_interp_inputs; |
849 | run_func = &run; |
850 | skip_func = &skip; |
851 | draw_span_RGBA8_func = &draw_span_RGBA8; |
852 | enable_perspective(); |
853 | init_span_w_func = &read_perspective_inputs; |
854 | run_w_func = &run_perspective; |
855 | skip_w_func = &skip_perspective; |
856 | } |
857 | }; |
858 | |
859 | struct brush_image_TEXTURE_2D_program : ProgramImpl, brush_image_TEXTURE_2D_frag { |
860 | int get_uniform(const char *name) const override { |
861 | if (strcmp("sClipMask", name) == 0) { return 7; } |
862 | if (strcmp("sColor0", name) == 0) { return 8; } |
863 | if (strcmp("sGpuCache", name) == 0) { return 2; } |
864 | if (strcmp("sPrimitiveHeadersF", name) == 0) { return 4; } |
865 | if (strcmp("sPrimitiveHeadersI", name) == 0) { return 5; } |
866 | if (strcmp("sRenderTasks", name) == 0) { return 1; } |
867 | if (strcmp("sTransformPalette", name) == 0) { return 3; } |
868 | if (strcmp("uTransform", name) == 0) { return 6; } |
869 | return -1; |
870 | } |
871 | void bind_attrib(const char* name, int index) override { |
872 | attrib_locations.bind_loc(name, index); |
873 | } |
874 | int get_attrib(const char* name) const override { |
875 | return attrib_locations.get_loc(name); |
876 | } |
877 | size_t interpolants_size() const override { return sizeof(InterpOutputs); } |
878 | VertexShaderImpl* get_vertex_shader() override { |
879 | return this; |
880 | } |
881 | FragmentShaderImpl* get_fragment_shader() override { |
882 | return this; |
883 | } |
884 | const char* get_name() const override { return "brush_image_TEXTURE_2D"; } |
885 | static ProgramImpl* loader() { return new brush_image_TEXTURE_2D_program; } |
886 | }; |
887 |
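For orientation, the generated program above is consumed through the ProgramImpl and VertexShaderImpl interfaces it derives from: the GL layer resolves a uniform name to the index returned by get_uniform, then dispatches through the function pointers installed in the constructors (set_uniform_1i routes sampler indices into samplers.set_slot, while index 6 in set_uniform_matrix4fv loads uTransform). A minimal sketch of that flow, assuming the SWGL headers that define ProgramImpl and VertexShaderImpl are in scope and that the stored function pointers may be called directly:

    // Sketch only; upload_uniforms_example is hypothetical, not part of SWGL.
    static void upload_uniforms_example() {
      ProgramImpl* prog = brush_image_TEXTURE_2D_program::loader();
      VertexShaderImpl* vs = prog->get_vertex_shader();
      int transform_loc = prog->get_uniform("uTransform");          // 6, per get_uniform above
      float identity[16] = {1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1};
      vs->set_uniform_matrix4fv_func(vs, transform_loc, identity);  // loads self->uTransform
      vs->set_uniform_1i_func(vs, prog->get_uniform("sColor0"), 0); // routed to samplers.set_slot
    }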