File: var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/x86_64-unknown-linux-gnu/debug/build/swgl-6f6517a85091acb7/out/cs_svg_filter.h
Warning: line 2, column 8: Excessive padding in 'struct cs_svg_filter_common::Samplers' (32 padding bytes, where 0 is optimal). Optimal fields order: sPrimitiveHeadersI_impl, sClipMask_impl, sColor0_impl, sColor1_impl, sGpuCache_impl, sPrimitiveHeadersF_impl, sRenderTasks_impl, sTransformPalette_impl, sClipMask_slot, sColor0_slot, sColor1_slot, sGpuCache_slot, sPrimitiveHeadersF_slot, sPrimitiveHeadersI_slot, sRenderTasks_slot, sTransformPalette_slot; consider reordering the fields or adding explicit padding members.
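The diagnostic is purely about struct layout: the generated header interleaves each sampler*_impl member with a 4-byte int slot, so (assuming sampler2D_impl/isampler2D_impl are 8-byte aligned, which is what the 32-byte figure implies) every slot int is followed by 4 bytes of tail padding, and the eight gaps add up to 32 wasted bytes per instance. A minimal sketch of the layout the analyzer suggests, using the field names from the listing below — an illustration only, not a patch, since this header is emitted by glsl-to-cxx and the reordering (or explicit padding) would have to happen in the generator:

struct Samplers {
  // 8-byte-aligned sampler implementations grouped first (types come from SWGL's glsl.h)...
  isampler2D_impl sPrimitiveHeadersI_impl;
  sampler2D_impl sClipMask_impl;
  sampler2D_impl sColor0_impl;
  sampler2D_impl sColor1_impl;
  sampler2D_impl sGpuCache_impl;
  sampler2D_impl sPrimitiveHeadersF_impl;
  sampler2D_impl sRenderTasks_impl;
  sampler2D_impl sTransformPalette_impl;
  // ...then the 4-byte slot indices packed back to back, leaving no per-field padding.
  int sClipMask_slot;
  int sColor0_slot;
  int sColor1_slot;
  int sGpuCache_slot;
  int sPrimitiveHeadersF_slot;
  int sPrimitiveHeadersI_slot;
  int sRenderTasks_slot;
  int sTransformPalette_slot;
  // set_slot() and the rest of the struct would be unchanged.
};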
1 | struct cs_svg_filter_common { |
2 | struct Samplers { |
3 | sampler2D_impl sClipMask_impl; |
4 | int sClipMask_slot; |
5 | sampler2D_impl sColor0_impl; |
6 | int sColor0_slot; |
7 | sampler2D_impl sColor1_impl; |
8 | int sColor1_slot; |
9 | sampler2D_impl sGpuCache_impl; |
10 | int sGpuCache_slot; |
11 | sampler2D_impl sPrimitiveHeadersF_impl; |
12 | int sPrimitiveHeadersF_slot; |
13 | isampler2D_impl sPrimitiveHeadersI_impl; |
14 | int sPrimitiveHeadersI_slot; |
15 | sampler2D_impl sRenderTasks_impl; |
16 | int sRenderTasks_slot; |
17 | sampler2D_impl sTransformPalette_impl; |
18 | int sTransformPalette_slot; |
19 | bool set_slot(int index, int value) { |
20 | switch (index) { |
21 | case 7: |
22 | sClipMask_slot = value; |
23 | return true; |
24 | case 8: |
25 | sColor0_slot = value; |
26 | return true; |
27 | case 9: |
28 | sColor1_slot = value; |
29 | return true; |
30 | case 2: |
31 | sGpuCache_slot = value; |
32 | return true; |
33 | case 4: |
34 | sPrimitiveHeadersF_slot = value; |
35 | return true; |
36 | case 5: |
37 | sPrimitiveHeadersI_slot = value; |
38 | return true; |
39 | case 1: |
40 | sRenderTasks_slot = value; |
41 | return true; |
42 | case 3: |
43 | sTransformPalette_slot = value; |
44 | return true; |
45 | } |
46 | return false; |
47 | } |
48 | } samplers; |
49 | struct AttribLocations { |
50 | int aPosition = NULL_ATTRIB; |
51 | int aData = NULL_ATTRIB; |
52 | int aFilterRenderTaskAddress = NULL_ATTRIB; |
53 | int aFilterInput1TaskAddress = NULL_ATTRIB; |
54 | int aFilterInput2TaskAddress = NULL_ATTRIB; |
55 | int aFilterKind = NULL_ATTRIB; |
56 | int aFilterInputCount = NULL_ATTRIB; |
57 | int aFilterGenericInt = NULL_ATTRIB; |
58 | int aFilterExtraDataAddress = NULL_ATTRIB; |
59 | void bind_loc(const char* name, int index) { |
60 | if (strcmp("aPosition", name) == 0) { aPosition = index; return; } |
61 | if (strcmp("aData", name) == 0) { aData = index; return; } |
62 | if (strcmp("aFilterRenderTaskAddress", name) == 0) { aFilterRenderTaskAddress = index; return; } |
63 | if (strcmp("aFilterInput1TaskAddress", name) == 0) { aFilterInput1TaskAddress = index; return; } |
64 | if (strcmp("aFilterInput2TaskAddress", name) == 0) { aFilterInput2TaskAddress = index; return; } |
65 | if (strcmp("aFilterKind", name) == 0) { aFilterKind = index; return; } |
66 | if (strcmp("aFilterInputCount", name) == 0) { aFilterInputCount = index; return; } |
67 | if (strcmp("aFilterGenericInt", name) == 0) { aFilterGenericInt = index; return; } |
68 | if (strcmp("aFilterExtraDataAddress", name) == 0) { aFilterExtraDataAddress = index; return; } |
69 | } |
70 | int get_loc(const char* name) const { |
71 | if (strcmp("aPosition", name) == 0) { return aPosition != NULL_ATTRIB16 ? aPosition : -1; } |
72 | if (strcmp("aData", name) == 0) { return aData != NULL_ATTRIB16 ? aData : -1; } |
73 | if (strcmp("aFilterRenderTaskAddress", name) == 0) { return aFilterRenderTaskAddress != NULL_ATTRIB16 ? aFilterRenderTaskAddress : -1; } |
74 | if (strcmp("aFilterInput1TaskAddress", name) == 0) { return aFilterInput1TaskAddress != NULL_ATTRIB16 ? aFilterInput1TaskAddress : -1; } |
75 | if (strcmp("aFilterInput2TaskAddress", name) == 0) { return aFilterInput2TaskAddress != NULL_ATTRIB16 ? aFilterInput2TaskAddress : -1; } |
76 | if (strcmp("aFilterKind", name) == 0) { return aFilterKind != NULL_ATTRIB16 ? aFilterKind : -1; } |
77 | if (strcmp("aFilterInputCount", name) == 0) { return aFilterInputCount != NULL_ATTRIB16 ? aFilterInputCount : -1; } |
78 | if (strcmp("aFilterGenericInt", name) == 0) { return aFilterGenericInt != NULL_ATTRIB16 ? aFilterGenericInt : -1; } |
79 | if (strcmp("aFilterExtraDataAddress", name) == 0) { return aFilterExtraDataAddress != NULL_ATTRIB16 ? aFilterExtraDataAddress : -1; } |
80 | return -1; |
81 | } |
82 | } attrib_locations; |
83 | vec4_scalar vTransformBounds; |
84 | vec4_scalar vInput1UvRect; |
85 | vec4_scalar vInput2UvRect; |
86 | ivec4_scalar vData; |
87 | vec4_scalar vFilterData0; |
88 | vec4_scalar vFilterData1; |
89 | ivec2_scalar vFilterInputCountFilterKindVec; |
90 | vec2_scalar vFloat0; |
91 | mat4_scalar vColorMat; |
92 | ivec4_scalar vFuncs; |
93 | sampler2D sClipMask; |
94 | sampler2D sColor0; |
95 | sampler2D sColor1; |
96 | sampler2D sGpuCache; |
97 | sampler2D sPrimitiveHeadersF; |
98 | isampler2D sPrimitiveHeadersI; |
99 | sampler2D sRenderTasks; |
100 | sampler2D sTransformPalette; |
101 | mat4_scalar uTransform; |
102 | void bind_textures() { |
103 | sClipMask = lookup_sampler(&samplers.sClipMask_impl, samplers.sClipMask_slot); |
104 | sColor0 = lookup_sampler(&samplers.sColor0_impl, samplers.sColor0_slot); |
105 | sColor1 = lookup_sampler(&samplers.sColor1_impl, samplers.sColor1_slot); |
106 | sGpuCache = lookup_sampler(&samplers.sGpuCache_impl, samplers.sGpuCache_slot); |
107 | sPrimitiveHeadersF = lookup_sampler(&samplers.sPrimitiveHeadersF_impl, samplers.sPrimitiveHeadersF_slot); |
108 | sPrimitiveHeadersI = lookup_isampler(&samplers.sPrimitiveHeadersI_impl, samplers.sPrimitiveHeadersI_slot); |
109 | sRenderTasks = lookup_sampler(&samplers.sRenderTasks_impl, samplers.sRenderTasks_slot); |
110 | sTransformPalette = lookup_sampler(&samplers.sTransformPalette_impl, samplers.sTransformPalette_slot); |
111 | } |
112 | }; |
113 | struct cs_svg_filter_vert : VertexShaderImpl, cs_svg_filter_common { |
114 | private: |
115 | typedef cs_svg_filter_vert Self; |
116 | // mat4_scalar uTransform; |
117 | vec2 aPosition; |
118 | // sampler2D sColor0; |
119 | // sampler2D sColor1; |
120 | // sampler2D sColor2; |
121 | struct RectWithSize_scalar { |
122 | vec2_scalar p0; |
123 | vec2_scalar size; |
124 | RectWithSize_scalar() = default; |
125 | RectWithSize_scalar(vec2_scalar p0, vec2_scalar size) : p0(p0), size(size){} |
126 | }; |
127 | struct RectWithSize { |
128 | vec2 p0; |
129 | vec2 size; |
130 | RectWithSize() = default; |
131 | RectWithSize(vec2 p0, vec2 size) : p0(p0), size(size){} |
132 | RectWithSize(vec2_scalar p0, vec2_scalar size):p0(p0),size(size){ |
133 | } |
134 | IMPLICIT RectWithSize(RectWithSize_scalar s):p0(s.p0),size(s.size){ |
135 | } |
136 | friend RectWithSize if_then_else(I32 c, RectWithSize t, RectWithSize e) { return RectWithSize( |
137 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.size, e.size)); |
138 | }}; |
139 | struct RectWithEndpoint_scalar { |
140 | vec2_scalar p0; |
141 | vec2_scalar p1; |
142 | RectWithEndpoint_scalar() = default; |
143 | RectWithEndpoint_scalar(vec2_scalar p0, vec2_scalar p1) : p0(p0), p1(p1){} |
144 | }; |
145 | struct RectWithEndpoint { |
146 | vec2 p0; |
147 | vec2 p1; |
148 | RectWithEndpoint() = default; |
149 | RectWithEndpoint(vec2 p0, vec2 p1) : p0(p0), p1(p1){} |
150 | RectWithEndpoint(vec2_scalar p0, vec2_scalar p1):p0(p0),p1(p1){ |
151 | } |
152 | IMPLICIT RectWithEndpoint(RectWithEndpoint_scalar s):p0(s.p0),p1(s.p1){ |
153 | } |
154 | friend RectWithEndpoint if_then_else(I32 c, RectWithEndpoint t, RectWithEndpoint e) { return RectWithEndpoint( |
155 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.p1, e.p1)); |
156 | }}; |
157 | // sampler2D sRenderTasks; |
158 | struct RenderTaskData_scalar { |
159 | RectWithEndpoint_scalar task_rect; |
160 | vec4_scalar user_data; |
161 | RenderTaskData_scalar() = default; |
162 | RenderTaskData_scalar(RectWithEndpoint_scalar task_rect, vec4_scalar user_data) : task_rect(task_rect), user_data(user_data){} |
163 | }; |
164 | struct RenderTaskData { |
165 | RectWithEndpoint task_rect; |
166 | vec4 user_data; |
167 | RenderTaskData() = default; |
168 | RenderTaskData(RectWithEndpoint task_rect, vec4 user_data) : task_rect(task_rect), user_data(user_data){} |
169 | RenderTaskData(RectWithEndpoint_scalar task_rect, vec4_scalar user_data):task_rect(task_rect),user_data(user_data){ |
170 | } |
171 | IMPLICIT RenderTaskData(RenderTaskData_scalar s):task_rect(s.task_rect),user_data(s.user_data){ |
172 | } |
173 | friend RenderTaskData if_then_else(I32 c, RenderTaskData t, RenderTaskData e) { return RenderTaskData( |
174 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.user_data, e.user_data)); |
175 | }}; |
176 | struct PictureTask_scalar { |
177 | RectWithEndpoint_scalar task_rect; |
178 | float device_pixel_scale; |
179 | vec2_scalar content_origin; |
180 | PictureTask_scalar() = default; |
181 | PictureTask_scalar(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar content_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), content_origin(content_origin){} |
182 | }; |
183 | struct PictureTask { |
184 | RectWithEndpoint task_rect; |
185 | Float device_pixel_scale; |
186 | vec2 content_origin; |
187 | PictureTask() = default; |
188 | PictureTask(RectWithEndpoint task_rect, Float device_pixel_scale, vec2 content_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), content_origin(content_origin){} |
189 | PictureTask(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar content_origin):task_rect(task_rect),device_pixel_scale(device_pixel_scale),content_origin(content_origin){ |
190 | } |
191 | IMPLICIT PictureTask(PictureTask_scalar s):task_rect(s.task_rect),device_pixel_scale(s.device_pixel_scale),content_origin(s.content_origin){ |
192 | } |
193 | friend PictureTask if_then_else(I32 c, PictureTask t, PictureTask e) { return PictureTask( |
194 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.device_pixel_scale, e.device_pixel_scale), if_then_else(c, t.content_origin, e.content_origin)); |
195 | }}; |
196 | struct ClipArea_scalar { |
197 | RectWithEndpoint_scalar task_rect; |
198 | float device_pixel_scale; |
199 | vec2_scalar screen_origin; |
200 | ClipArea_scalar() = default; |
201 | ClipArea_scalar(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar screen_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), screen_origin(screen_origin){} |
202 | }; |
203 | struct ClipArea { |
204 | RectWithEndpoint task_rect; |
205 | Float device_pixel_scale; |
206 | vec2 screen_origin; |
207 | ClipArea() = default; |
208 | ClipArea(RectWithEndpoint task_rect, Float device_pixel_scale, vec2 screen_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), screen_origin(screen_origin){} |
209 | ClipArea(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar screen_origin):task_rect(task_rect),device_pixel_scale(device_pixel_scale),screen_origin(screen_origin){ |
210 | } |
211 | IMPLICIT ClipArea(ClipArea_scalar s):task_rect(s.task_rect),device_pixel_scale(s.device_pixel_scale),screen_origin(s.screen_origin){ |
212 | } |
213 | friend ClipArea if_then_else(I32 c, ClipArea t, ClipArea e) { return ClipArea( |
214 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.device_pixel_scale, e.device_pixel_scale), if_then_else(c, t.screen_origin, e.screen_origin)); |
215 | }}; |
216 | // sampler2D sGpuCache; |
217 | struct ImageSource_scalar { |
218 | RectWithEndpoint_scalar uv_rect; |
219 | vec4_scalar user_data; |
220 | ImageSource_scalar() = default; |
221 | ImageSource_scalar(RectWithEndpoint_scalar uv_rect, vec4_scalar user_data) : uv_rect(uv_rect), user_data(user_data){} |
222 | }; |
223 | struct ImageSource { |
224 | RectWithEndpoint uv_rect; |
225 | vec4 user_data; |
226 | ImageSource() = default; |
227 | ImageSource(RectWithEndpoint uv_rect, vec4 user_data) : uv_rect(uv_rect), user_data(user_data){} |
228 | ImageSource(RectWithEndpoint_scalar uv_rect, vec4_scalar user_data):uv_rect(uv_rect),user_data(user_data){ |
229 | } |
230 | IMPLICIT ImageSource(ImageSource_scalar s):uv_rect(s.uv_rect),user_data(s.user_data){ |
231 | } |
232 | friend ImageSource if_then_else(I32 c, ImageSource t, ImageSource e) { return ImageSource( |
233 | if_then_else(c, t.uv_rect, e.uv_rect), if_then_else(c, t.user_data, e.user_data)); |
234 | }}; |
235 | struct ImageSourceExtra_scalar { |
236 | vec4_scalar st_tl; |
237 | vec4_scalar st_tr; |
238 | vec4_scalar st_bl; |
239 | vec4_scalar st_br; |
240 | ImageSourceExtra_scalar() = default; |
241 | ImageSourceExtra_scalar(vec4_scalar st_tl, vec4_scalar st_tr, vec4_scalar st_bl, vec4_scalar st_br) : st_tl(st_tl), st_tr(st_tr), st_bl(st_bl), st_br(st_br){} |
242 | }; |
243 | struct ImageSourceExtra { |
244 | vec4 st_tl; |
245 | vec4 st_tr; |
246 | vec4 st_bl; |
247 | vec4 st_br; |
248 | ImageSourceExtra() = default; |
249 | ImageSourceExtra(vec4 st_tl, vec4 st_tr, vec4 st_bl, vec4 st_br) : st_tl(st_tl), st_tr(st_tr), st_bl(st_bl), st_br(st_br){} |
250 | ImageSourceExtra(vec4_scalar st_tl, vec4_scalar st_tr, vec4_scalar st_bl, vec4_scalar st_br):st_tl(st_tl),st_tr(st_tr),st_bl(st_bl),st_br(st_br){ |
251 | } |
252 | IMPLICIT ImageSourceExtra(ImageSourceExtra_scalar s):st_tl(s.st_tl),st_tr(s.st_tr),st_bl(s.st_bl),st_br(s.st_br){ |
253 | } |
254 | friend ImageSourceExtra if_then_else(I32 c, ImageSourceExtra t, ImageSourceExtra e) { return ImageSourceExtra( |
255 | if_then_else(c, t.st_tl, e.st_tl), if_then_else(c, t.st_tr, e.st_tr), if_then_else(c, t.st_bl, e.st_bl), if_then_else(c, t.st_br, e.st_br)); |
256 | }}; |
257 | // vec4_scalar vTransformBounds; |
258 | // sampler2D sTransformPalette; |
259 | struct Transform_scalar { |
260 | mat4_scalar m; |
261 | mat4_scalar inv_m; |
262 | bool is_axis_aligned; |
263 | Transform_scalar() = default; |
264 | Transform_scalar(mat4_scalar m, mat4_scalar inv_m, bool is_axis_aligned) : m(m), inv_m(inv_m), is_axis_aligned(is_axis_aligned){} |
265 | }; |
266 | struct Transform { |
267 | mat4 m; |
268 | mat4 inv_m; |
269 | Bool is_axis_aligned; |
270 | Transform() = default; |
271 | Transform(mat4 m, mat4 inv_m, Bool is_axis_aligned) : m(m), inv_m(inv_m), is_axis_aligned(is_axis_aligned){} |
272 | Transform(mat4_scalar m, mat4_scalar inv_m, bool is_axis_aligned):m(m),inv_m(inv_m),is_axis_aligned(is_axis_aligned){ |
273 | } |
274 | IMPLICIT Transform(Transform_scalar s):m(s.m),inv_m(s.inv_m),is_axis_aligned(s.is_axis_aligned){ |
275 | } |
276 | friend Transform if_then_else(I32 c, Transform t, Transform e) { return Transform( |
277 | if_then_else(c, t.m, e.m), if_then_else(c, t.inv_m, e.inv_m), if_then_else(c, t.is_axis_aligned, e.is_axis_aligned)); |
278 | }}; |
279 | // sampler2D sClipMask; |
280 | // sampler2D sPrimitiveHeadersF; |
281 | // isampler2D sPrimitiveHeadersI; |
282 | ivec4_scalar aData; |
283 | struct Instance_scalar { |
284 | int32_t prim_header_address; |
285 | int32_t clip_address; |
286 | int32_t segment_index; |
287 | int32_t flags; |
288 | int32_t resource_address; |
289 | int32_t brush_kind; |
290 | Instance_scalar() = default; |
291 | Instance_scalar(int32_t prim_header_address, int32_t clip_address, int32_t segment_index, int32_t flags, int32_t resource_address, int32_t brush_kind) : prim_header_address(prim_header_address), clip_address(clip_address), segment_index(segment_index), flags(flags), resource_address(resource_address), brush_kind(brush_kind){} |
292 | }; |
293 | struct Instance { |
294 | I32 prim_header_address; |
295 | I32 clip_address; |
296 | I32 segment_index; |
297 | I32 flags; |
298 | I32 resource_address; |
299 | I32 brush_kind; |
300 | Instance() = default; |
301 | Instance(I32 prim_header_address, I32 clip_address, I32 segment_index, I32 flags, I32 resource_address, I32 brush_kind) : prim_header_address(prim_header_address), clip_address(clip_address), segment_index(segment_index), flags(flags), resource_address(resource_address), brush_kind(brush_kind){} |
302 | Instance(int32_t prim_header_address, int32_t clip_address, int32_t segment_index, int32_t flags, int32_t resource_address, int32_t brush_kind):prim_header_address(prim_header_address),clip_address(clip_address),segment_index(segment_index),flags(flags),resource_address(resource_address),brush_kind(brush_kind){ |
303 | } |
304 | IMPLICIT Instance(Instance_scalar s):prim_header_address(s.prim_header_address),clip_address(s.clip_address),segment_index(s.segment_index),flags(s.flags),resource_address(s.resource_address),brush_kind(s.brush_kind){ |
305 | } |
306 | friend Instance if_then_else(I32 c, Instance t, Instance e) { return Instance( |
307 | if_then_else(c, t.prim_header_address, e.prim_header_address), if_then_else(c, t.clip_address, e.clip_address), if_then_else(c, t.segment_index, e.segment_index), if_then_else(c, t.flags, e.flags), if_then_else(c, t.resource_address, e.resource_address), if_then_else(c, t.brush_kind, e.brush_kind)); |
308 | }}; |
309 | struct PrimitiveHeader_scalar { |
310 | RectWithEndpoint_scalar local_rect; |
311 | RectWithEndpoint_scalar local_clip_rect; |
312 | float z; |
313 | int32_t specific_prim_address; |
314 | int32_t transform_id; |
315 | int32_t picture_task_address; |
316 | ivec4_scalar user_data; |
317 | PrimitiveHeader_scalar() = default; |
318 | PrimitiveHeader_scalar(RectWithEndpoint_scalar local_rect, RectWithEndpoint_scalar local_clip_rect, float z, int32_t specific_prim_address, int32_t transform_id, int32_t picture_task_address, ivec4_scalar user_data) : local_rect(local_rect), local_clip_rect(local_clip_rect), z(z), specific_prim_address(specific_prim_address), transform_id(transform_id), picture_task_address(picture_task_address), user_data(user_data){} |
319 | }; |
320 | struct PrimitiveHeader { |
321 | RectWithEndpoint local_rect; |
322 | RectWithEndpoint local_clip_rect; |
323 | Float z; |
324 | I32 specific_prim_address; |
325 | I32 transform_id; |
326 | I32 picture_task_address; |
327 | ivec4 user_data; |
328 | PrimitiveHeader() = default; |
329 | PrimitiveHeader(RectWithEndpoint local_rect, RectWithEndpoint local_clip_rect, Float z, I32 specific_prim_address, I32 transform_id, I32 picture_task_address, ivec4 user_data) : local_rect(local_rect), local_clip_rect(local_clip_rect), z(z), specific_prim_address(specific_prim_address), transform_id(transform_id), picture_task_address(picture_task_address), user_data(user_data){} |
330 | PrimitiveHeader(RectWithEndpoint_scalar local_rect, RectWithEndpoint_scalar local_clip_rect, float z, int32_t specific_prim_address, int32_t transform_id, int32_t picture_task_address, ivec4_scalar user_data):local_rect(local_rect),local_clip_rect(local_clip_rect),z(z),specific_prim_address(specific_prim_address),transform_id(transform_id),picture_task_address(picture_task_address),user_data(user_data){ |
331 | } |
332 | IMPLICIT PrimitiveHeader(PrimitiveHeader_scalar s):local_rect(s.local_rect),local_clip_rect(s.local_clip_rect),z(s.z),specific_prim_address(s.specific_prim_address),transform_id(s.transform_id),picture_task_address(s.picture_task_address),user_data(s.user_data){ |
333 | } |
334 | friend PrimitiveHeader if_then_else(I32 c, PrimitiveHeader t, PrimitiveHeader e) { return PrimitiveHeader( |
335 | if_then_else(c, t.local_rect, e.local_rect), if_then_else(c, t.local_clip_rect, e.local_clip_rect), if_then_else(c, t.z, e.z), if_then_else(c, t.specific_prim_address, e.specific_prim_address), if_then_else(c, t.transform_id, e.transform_id), if_then_else(c, t.picture_task_address, e.picture_task_address), if_then_else(c, t.user_data, e.user_data)); |
336 | }}; |
337 | struct VertexInfo_scalar { |
338 | vec2_scalar local_pos; |
339 | vec4_scalar world_pos; |
340 | VertexInfo_scalar() = default; |
341 | VertexInfo_scalar(vec2_scalar local_pos, vec4_scalar world_pos) : local_pos(local_pos), world_pos(world_pos){} |
342 | }; |
343 | struct VertexInfo { |
344 | vec2 local_pos; |
345 | vec4 world_pos; |
346 | VertexInfo() = default; |
347 | VertexInfo(vec2 local_pos, vec4 world_pos) : local_pos(local_pos), world_pos(world_pos){} |
348 | VertexInfo(vec2_scalar local_pos, vec4_scalar world_pos):local_pos(local_pos),world_pos(world_pos){ |
349 | } |
350 | IMPLICIT VertexInfo(VertexInfo_scalar s):local_pos(s.local_pos),world_pos(s.world_pos){ |
351 | } |
352 | friend VertexInfo if_then_else(I32 c, VertexInfo t, VertexInfo e) { return VertexInfo( |
353 | if_then_else(c, t.local_pos, e.local_pos), if_then_else(c, t.world_pos, e.world_pos)); |
354 | }}; |
355 | vec2 vInput1Uv; |
356 | vec2 vInput2Uv; |
357 | // vec4_scalar vInput1UvRect; |
358 | // vec4_scalar vInput2UvRect; |
359 | // ivec4_scalar vData; |
360 | // vec4_scalar vFilterData0; |
361 | // vec4_scalar vFilterData1; |
362 | // ivec2_scalar vFilterInputCountFilterKindVec; |
363 | // vec2_scalar vFloat0; |
364 | // mat4_scalar vColorMat; |
365 | // ivec4_scalar vFuncs; |
366 | int32_t aFilterRenderTaskAddress; |
367 | int32_t aFilterInput1TaskAddress; |
368 | int32_t aFilterInput2TaskAddress; |
369 | int32_t aFilterKind; |
370 | int32_t aFilterInputCount; |
371 | int32_t aFilterGenericInt; |
372 | ivec2_scalar aFilterExtraDataAddress; |
373 | struct FilterTask_scalar { |
374 | RectWithEndpoint_scalar task_rect; |
375 | vec3_scalar user_data; |
376 | FilterTask_scalar() = default; |
377 | FilterTask_scalar(RectWithEndpoint_scalar task_rect, vec3_scalar user_data) : task_rect(task_rect), user_data(user_data){} |
378 | }; |
379 | struct FilterTask { |
380 | RectWithEndpoint task_rect; |
381 | vec3 user_data; |
382 | FilterTask() = default; |
383 | FilterTask(RectWithEndpoint task_rect, vec3 user_data) : task_rect(task_rect), user_data(user_data){} |
384 | FilterTask(RectWithEndpoint_scalar task_rect, vec3_scalar user_data):task_rect(task_rect),user_data(user_data){ |
385 | } |
386 | IMPLICIT FilterTask(FilterTask_scalar s):task_rect(s.task_rect),user_data(s.user_data){ |
387 | } |
388 | friend FilterTask if_then_else(I32 c, FilterTask t, FilterTask e) { return FilterTask( |
389 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.user_data, e.user_data)); |
390 | }}; |
391 | RenderTaskData_scalar fetch_render_task_data(int32_t index) { |
392 | ivec2_scalar uv = make_ivec2(make_int((2u)*((make_uint(index))%((1024u)/(2u)))), make_int((make_uint(index))/((1024u)/(2u)))); |
393 | auto sRenderTasks_uv_fetch = texelFetchPtr(sRenderTasks, uv, 0, 1, 0, 0); |
394 | vec4_scalar texel0 = texelFetchUnchecked(sRenderTasks, sRenderTasks_uv_fetch, 0, 0); |
395 | vec4_scalar texel1 = texelFetchUnchecked(sRenderTasks, sRenderTasks_uv_fetch, 1, 0); |
396 | RectWithEndpoint_scalar task_rect = RectWithEndpoint_scalar((texel0).sel(X,Y), (texel0).sel(Z,W)); |
397 | RenderTaskData_scalar data = RenderTaskData_scalar(task_rect, texel1); |
398 | return data; |
399 | } |
400 | FilterTask_scalar fetch_filter_task(int32_t address) { |
401 | RenderTaskData_scalar task_data = fetch_render_task_data(address); |
402 | FilterTask_scalar task = FilterTask_scalar((task_data).task_rect, ((task_data).user_data).sel(X,Y,Z)); |
403 | return task; |
404 | } |
405 | RectWithEndpoint_scalar fetch_render_task_rect(int32_t index) { |
406 | ivec2_scalar uv = make_ivec2(make_int((2u)*((make_uint(index))%((1024u)/(2u)))), make_int((make_uint(index))/((1024u)/(2u)))); |
407 | auto sRenderTasks_uv_fetch = texelFetchPtr(sRenderTasks, uv, 0, 1, 0, 0); |
408 | vec4_scalar texel0 = texelFetchUnchecked(sRenderTasks, sRenderTasks_uv_fetch, 0, 0); |
409 | vec4_scalar texel1 = texelFetchUnchecked(sRenderTasks, sRenderTasks_uv_fetch, 1, 0); |
410 | RectWithEndpoint_scalar task_rect = RectWithEndpoint_scalar((texel0).sel(X,Y), (texel0).sel(Z,W)); |
411 | return task_rect; |
412 | } |
413 | vec4_scalar compute_uv_rect(RectWithEndpoint_scalar task_rect, vec2_scalar texture_size) { |
414 | vec4_scalar uvRect = make_vec4(((task_rect).p0)+(make_vec2(0.5f)), ((task_rect).p1)-(make_vec2(0.5f))); |
415 | uvRect /= (texture_size).sel(X,Y,X,Y); |
416 | return uvRect; |
417 | } |
418 | vec2 compute_uv(RectWithEndpoint_scalar task_rect, vec2_scalar texture_size) { |
419 | vec2_scalar uv0 = ((task_rect).p0)/(texture_size); |
420 | vec2_scalar uv1 = (floor((task_rect).p1))/(texture_size); |
421 | return mix(uv0, uv1, (aPosition).sel(X,Y)); |
422 | } |
423 | vec4_scalar fetch_from_gpu_cache_1_direct(ivec2_scalar address) { |
424 | return texelFetch(sGpuCache, address, 0); |
425 | } |
426 | Array<vec4_scalar,4> fetch_from_gpu_cache_4_direct(ivec2_scalar address) { |
427 | auto sGpuCache_address_fetch = texelFetchPtr(sGpuCache, address, 0, 3, 0, 0); |
428 | return Array<vec4_scalar,4>{{texelFetchUnchecked(sGpuCache, sGpuCache_address_fetch, 0, 0), texelFetchUnchecked(sGpuCache, sGpuCache_address_fetch, 1, 0), texelFetchUnchecked(sGpuCache, sGpuCache_address_fetch, 2, 0), texelFetchUnchecked(sGpuCache, sGpuCache_address_fetch, 3, 0)}}; |
429 | } |
430 | ALWAYS_INLINE void main(void) { |
431 | FilterTask_scalar filter_task = fetch_filter_task(aFilterRenderTaskAddress); |
432 | RectWithEndpoint_scalar target_rect = (filter_task).task_rect; |
433 | vec2 pos = mix((target_rect).p0, (target_rect).p1, (aPosition).sel(X,Y)); |
434 | RectWithEndpoint_scalar input_1_task; |
435 | if ((aFilterInputCount)>(0)) { |
436 | { |
437 | vec2_scalar texture_size = make_vec2((textureSize(sColor0, 0)).sel(X,Y)); |
438 | input_1_task = fetch_render_task_rect(aFilterInput1TaskAddress); |
439 | vInput1UvRect = compute_uv_rect(input_1_task, texture_size); |
440 | vInput1Uv = compute_uv(input_1_task, texture_size); |
441 | } |
442 | } |
443 | RectWithEndpoint_scalar input_2_task; |
444 | if ((aFilterInputCount)>(1)) { |
445 | { |
446 | vec2_scalar texture_size = make_vec2((textureSize(sColor1, 0)).sel(X,Y)); |
447 | input_2_task = fetch_render_task_rect(aFilterInput2TaskAddress); |
448 | vInput2UvRect = compute_uv_rect(input_2_task, texture_size); |
449 | vInput2Uv = compute_uv(input_2_task, texture_size); |
450 | } |
451 | } |
452 | (vFilterInputCountFilterKindVec).x = aFilterInputCount; |
453 | (vFilterInputCountFilterKindVec).y = aFilterKind; |
454 | (vFuncs).x = ((aFilterGenericInt)>>(12))&(15); |
455 | (vFuncs).y = ((aFilterGenericInt)>>(8))&(15); |
456 | (vFuncs).z = ((aFilterGenericInt)>>(4))&(15); |
457 | (vFuncs).w = (aFilterGenericInt)&(15); |
458 | switch (aFilterKind) { |
459 | case 0: |
460 | vData = make_ivec4(aFilterGenericInt, 0, 0, 0); |
461 | break; |
462 | case 1: |
463 | vFilterData0 = fetch_from_gpu_cache_1_direct(aFilterExtraDataAddress); |
464 | break; |
465 | case 4: |
466 | (vFloat0).x = ((filter_task).user_data).x; |
467 | break; |
468 | case 5: |
469 | { |
470 | Array<vec4_scalar,4> mat_data = fetch_from_gpu_cache_4_direct(aFilterExtraDataAddress); |
471 | vColorMat = make_mat4(mat_data[0], mat_data[1], mat_data[2], mat_data[3]); |
472 | vFilterData0 = fetch_from_gpu_cache_1_direct((aFilterExtraDataAddress)+(make_ivec2(4, 0))); |
473 | break; |
474 | } |
475 | case 6: |
476 | vFilterData0 = fetch_from_gpu_cache_1_direct(aFilterExtraDataAddress); |
477 | break; |
478 | case 7: |
479 | { |
480 | vec2_scalar texture_size = make_vec2((textureSize(sColor0, 0)).sel(X,Y)); |
481 | vFilterData0 = make_vec4((-(((filter_task).user_data).sel(X,Y)))/(texture_size), make_vec2(0.f)); |
482 | RectWithEndpoint_scalar task_rect = input_1_task; |
483 | vec4_scalar clipRect = make_vec4((task_rect).p0, (task_rect).p1); |
484 | clipRect /= (texture_size).sel(X,Y,X,Y); |
485 | vFilterData1 = clipRect; |
486 | break; |
487 | } |
488 | case 8: |
489 | vData = make_ivec4(aFilterExtraDataAddress, 0, 0); |
490 | break; |
491 | case 10: |
492 | vData = make_ivec4(aFilterGenericInt, 0, 0, 0); |
493 | if ((aFilterGenericInt)==(6)) { |
494 | { |
495 | vFilterData0 = fetch_from_gpu_cache_1_direct(aFilterExtraDataAddress); |
496 | } |
497 | } |
498 | break; |
499 | default: |
500 | break; |
501 | } |
502 | gl_Position = (uTransform)*(make_vec4(pos, 0.f, 1.f)); |
503 | } |
504 | static void set_uniform_1i(VertexShaderImpl* impl, int index, int value) { |
505 | Self* self = (Self*)impl; |
506 | if (self->samplers.set_slot(index, value)) return; |
507 | switch (index) { |
508 | case 7: |
509 | assert(0); // sClipMask |
510 | break; |
511 | case 8: |
512 | assert(0); // sColor0 |
513 | break; |
514 | case 9: |
515 | assert(0); // sColor1 |
516 | break; |
517 | case 2: |
518 | assert(0); // sGpuCache |
519 | break; |
520 | case 4: |
521 | assert(0); // sPrimitiveHeadersF |
522 | break; |
523 | case 5: |
524 | assert(0); // sPrimitiveHeadersI |
525 | break; |
526 | case 1: |
527 | assert(0); // sRenderTasks |
528 | break; |
529 | case 3: |
530 | assert(0); // sTransformPalette |
531 | break; |
532 | case 6: |
533 | assert(0); // uTransform |
534 | break; |
535 | } |
536 | } |
537 | static void set_uniform_4fv(VertexShaderImpl* impl, int index, const float *value) { |
538 | Self* self = (Self*)impl; |
539 | switch (index) { |
540 | case 7: |
541 | assert(0); // sClipMask |
542 | break; |
543 | case 8: |
544 | assert(0); // sColor0 |
545 | break; |
546 | case 9: |
547 | assert(0); // sColor1 |
548 | break; |
549 | case 2: |
550 | assert(0); // sGpuCache |
551 | break; |
552 | case 4: |
553 | assert(0); // sPrimitiveHeadersF |
554 | break; |
555 | case 5: |
556 | assert(0); // sPrimitiveHeadersI |
557 | break; |
558 | case 1: |
559 | assert(0); // sRenderTasks |
560 | break; |
561 | case 3: |
562 | assert(0); // sTransformPalette |
563 | break; |
564 | case 6: |
565 | assert(0); // uTransform |
566 | break; |
567 | } |
568 | } |
569 | static void set_uniform_matrix4fv(VertexShaderImpl* impl, int index, const float *value) { |
570 | Self* self = (Self*)impl; |
571 | switch (index) { |
572 | case 7: |
573 | assert(0); // sClipMask |
574 | break; |
575 | case 8: |
576 | assert(0); // sColor0 |
577 | break; |
578 | case 9: |
579 | assert(0); // sColor1 |
580 | break; |
581 | case 2: |
582 | assert(0); // sGpuCache |
583 | break; |
584 | case 4: |
585 | assert(0); // sPrimitiveHeadersF |
586 | break; |
587 | case 5: |
588 | assert(0); // sPrimitiveHeadersI |
589 | break; |
590 | case 1: |
591 | assert(0); // sRenderTasks |
592 | break; |
593 | case 3: |
594 | assert(0); // sTransformPalette |
595 | break; |
596 | case 6: |
597 | self->uTransform = mat4_scalar::load_from_ptr(value); |
598 | break; |
599 | } |
600 | } |
601 | static void load_attribs(VertexShaderImpl* impl, VertexAttrib *attribs, uint32_t start, int instance, int count) {Self* self = (Self*)impl; |
602 | load_attrib(self->aPosition, attribs[self->attrib_locations.aPosition], start, instance, count); |
603 | load_flat_attrib(self->aFilterRenderTaskAddress, attribs[self->attrib_locations.aFilterRenderTaskAddress], start, instance, count); |
604 | load_flat_attrib(self->aFilterInput1TaskAddress, attribs[self->attrib_locations.aFilterInput1TaskAddress], start, instance, count); |
605 | load_flat_attrib(self->aFilterInput2TaskAddress, attribs[self->attrib_locations.aFilterInput2TaskAddress], start, instance, count); |
606 | load_flat_attrib(self->aFilterKind, attribs[self->attrib_locations.aFilterKind], start, instance, count); |
607 | load_flat_attrib(self->aFilterInputCount, attribs[self->attrib_locations.aFilterInputCount], start, instance, count); |
608 | load_flat_attrib(self->aFilterGenericInt, attribs[self->attrib_locations.aFilterGenericInt], start, instance, count); |
609 | load_flat_attrib(self->aFilterExtraDataAddress, attribs[self->attrib_locations.aFilterExtraDataAddress], start, instance, count); |
610 | } |
611 | public: |
612 | struct InterpOutputs { |
613 | vec2_scalar vInput1Uv; |
614 | vec2_scalar vInput2Uv; |
615 | }; |
616 | private: |
617 | ALWAYS_INLINE void store_interp_outputs(char* dest_ptr, size_t stride) { |
618 | for(int n = 0; n < 4; n++) { |
619 | auto* dest = reinterpret_cast<InterpOutputs*>(dest_ptr); |
620 | dest->vInput1Uv = get_nth(vInput1Uv, n); |
621 | dest->vInput2Uv = get_nth(vInput2Uv, n); |
622 | dest_ptr += stride; |
623 | } |
624 | } |
625 | static void run(VertexShaderImpl* impl, char* interps, size_t interp_stride) { |
626 | Self* self = (Self*)impl; |
627 | self->main(); |
628 | self->store_interp_outputs(interps, interp_stride); |
629 | } |
630 | static void init_batch(VertexShaderImpl* impl) { |
631 | Self* self = (Self*)impl; self->bind_textures(); } |
632 | public: |
633 | cs_svg_filter_vert() { |
634 | set_uniform_1i_func = &set_uniform_1i; |
635 | set_uniform_4fv_func = &set_uniform_4fv; |
636 | set_uniform_matrix4fv_func = &set_uniform_matrix4fv; |
637 | init_batch_func = &init_batch; |
638 | load_attribs_func = &load_attribs; |
639 | run_primitive_func = &run; |
640 | } |
641 | }; |
642 | |
643 | |
644 | struct cs_svg_filter_frag : FragmentShaderImpl, cs_svg_filter_vert { |
645 | private: |
646 | typedef cs_svg_filter_frag Self; |
647 | #define oFragColor gl_FragColor |
648 | // vec4 oFragColor; |
649 | // sampler2D sColor0; |
650 | // sampler2D sColor1; |
651 | // sampler2D sColor2; |
652 | struct RectWithSize_scalar { |
653 | vec2_scalar p0; |
654 | vec2_scalar size; |
655 | RectWithSize_scalar() = default; |
656 | RectWithSize_scalar(vec2_scalar p0, vec2_scalar size) : p0(p0), size(size){} |
657 | }; |
658 | struct RectWithSize { |
659 | vec2 p0; |
660 | vec2 size; |
661 | RectWithSize() = default; |
662 | RectWithSize(vec2 p0, vec2 size) : p0(p0), size(size){} |
663 | RectWithSize(vec2_scalar p0, vec2_scalar size):p0(p0),size(size){ |
664 | } |
665 | IMPLICIT RectWithSize(RectWithSize_scalar s):p0(s.p0),size(s.size){ |
666 | } |
667 | friend RectWithSize if_then_else(I32 c, RectWithSize t, RectWithSize e) { return RectWithSize( |
668 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.size, e.size)); |
669 | }}; |
670 | struct RectWithEndpoint_scalar { |
671 | vec2_scalar p0; |
672 | vec2_scalar p1; |
673 | RectWithEndpoint_scalar() = default; |
674 | RectWithEndpoint_scalar(vec2_scalar p0, vec2_scalar p1) : p0(p0), p1(p1){} |
675 | }; |
676 | struct RectWithEndpoint { |
677 | vec2 p0; |
678 | vec2 p1; |
679 | RectWithEndpoint() = default; |
680 | RectWithEndpoint(vec2 p0, vec2 p1) : p0(p0), p1(p1){} |
681 | RectWithEndpoint(vec2_scalar p0, vec2_scalar p1):p0(p0),p1(p1){ |
682 | } |
683 | IMPLICIT RectWithEndpoint(RectWithEndpoint_scalar s):p0(s.p0),p1(s.p1){ |
684 | } |
685 | friend RectWithEndpoint if_then_else(I32 c, RectWithEndpoint t, RectWithEndpoint e) { return RectWithEndpoint( |
686 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.p1, e.p1)); |
687 | }}; |
688 | // sampler2D sGpuCache; |
689 | // vec4_scalar vTransformBounds; |
690 | // sampler2D sClipMask; |
691 | struct Fragment_scalar { |
692 | vec4_scalar color; |
693 | Fragment_scalar() = default; |
694 | explicit Fragment_scalar(vec4_scalar color) : color(color){} |
695 | }; |
696 | struct Fragment { |
697 | vec4 color; |
698 | Fragment() = default; |
699 | explicit Fragment(vec4 color) : color(color){} |
700 | explicit Fragment(vec4_scalar color):color(color){ |
701 | } |
702 | IMPLICIT Fragment(Fragment_scalar s):color(s.color){ |
703 | } |
704 | friend Fragment if_then_else(I32 c, Fragment t, Fragment e) { return Fragment( |
705 | if_then_else(c, t.color, e.color)); |
706 | }}; |
707 | vec2 vInput1Uv; |
708 | vec2 vInput2Uv; |
709 | // vec4_scalar vInput1UvRect; |
710 | // vec4_scalar vInput2UvRect; |
711 | // ivec4_scalar vData; |
712 | // vec4_scalar vFilterData0; |
713 | // vec4_scalar vFilterData1; |
714 | // ivec2_scalar vFilterInputCountFilterKindVec; |
715 | // vec2_scalar vFloat0; |
716 | // mat4_scalar vColorMat; |
717 | // ivec4_scalar vFuncs; |
718 | int32_t static constexpr BlendMode_Normal = 0; |
719 | int32_t static constexpr BlendMode_Multiply = 1; |
720 | int32_t static constexpr BlendMode_Screen = 2; |
721 | int32_t static constexpr BlendMode_Overlay = 3; |
722 | int32_t static constexpr BlendMode_Darken = 4; |
723 | int32_t static constexpr BlendMode_Lighten = 5; |
724 | int32_t static constexpr BlendMode_ColorDodge = 6; |
725 | int32_t static constexpr BlendMode_ColorBurn = 7; |
726 | int32_t static constexpr BlendMode_HardLight = 8; |
727 | int32_t static constexpr BlendMode_SoftLight = 9; |
728 | int32_t static constexpr BlendMode_Difference = 10; |
729 | int32_t static constexpr BlendMode_Exclusion = 11; |
730 | int32_t static constexpr BlendMode_Hue = 12; |
731 | int32_t static constexpr BlendMode_Saturation = 13; |
732 | int32_t static constexpr BlendMode_Color = 14; |
733 | int32_t static constexpr BlendMode_Luminosity = 15; |
734 | vec4 sampleInUvRect(sampler2D sampler, vec2 uv, vec4_scalar uvRect) { |
735 | vec2 clamped = clamp((uv).sel(X,Y), (uvRect).sel(X,Y), (uvRect).sel(Z,W)); |
736 | return texture(sampler, clamped); |
737 | } |
738 | vec3 Multiply(vec3 Cb, vec3 Cs) { |
739 | return (Cb)*(Cs); |
740 | } |
741 | vec3 Screen(vec3 Cb, vec3 Cs) { |
742 | return ((Cb)+(Cs))-((Cb)*(Cs)); |
743 | } |
744 | vec3 HardLight(vec3 Cb, vec3 Cs) { |
745 | vec3 m = Multiply(Cb, (2.f)*(Cs)); |
746 | vec3 s = Screen(Cb, ((2.f)*(Cs))-(1.f)); |
747 | vec3_scalar edge = make_vec3(0.5f, 0.5f, 0.5f); |
748 | return mix(m, s, step(edge, Cs)); |
749 | } |
750 | Float ColorDodge(Float Cb, Float Cs) { |
751 | I32 ret_mask = ~0; |
752 | Float ret; |
753 | auto _c8_ = (Cb)==(0.f); |
754 | ret = 0.f; |
755 | ret_mask = ~I32(_c8_); |
756 | auto _c9_ = (Cs)==(1.f); |
757 | auto _c10_ = (~(_c8_))&(_c9_); |
758 | ret = if_then_else(ret_mask & I32(_c10_), 1.f, ret); |
759 | ret_mask &= ~I32(_c10_); |
760 | _c9_ = (~(_c8_))&(~(_c9_)); |
761 | ret = if_then_else(ret_mask & I32(_c9_), min(1.f, (Cb)/((1.f)-(Cs))), ret); |
762 | ret_mask &= ~I32(_c9_); |
763 | return ret; |
764 | } |
765 | Float ColorBurn(Float Cb, Float Cs) { |
766 | I32 ret_mask = ~0; |
767 | Float ret; |
768 | auto _c14_ = (Cb)==(1.f); |
769 | ret = 1.f; |
770 | ret_mask = ~I32(_c14_); |
771 | auto _c15_ = (Cs)==(0.f); |
772 | auto _c16_ = (~(_c14_))&(_c15_); |
773 | ret = if_then_else(ret_mask & I32(_c16_), 0.f, ret); |
774 | ret_mask &= ~I32(_c16_); |
775 | _c15_ = (~(_c14_))&(~(_c15_)); |
776 | ret = if_then_else(ret_mask & I32(_c15_), (1.f)-(min(1.f, ((1.f)-(Cb))/(Cs))), ret); |
777 | ret_mask &= ~I32(_c15_); |
778 | return ret; |
779 | } |
780 | Float SoftLight(Float Cb, Float Cs) { |
781 | I32 ret_mask = ~0; |
782 | Float ret; |
783 | auto _c20_ = (Cs)<=(0.5f); |
784 | { |
785 | ret = (Cb)-((((1.f)-((2.f)*(Cs)))*(Cb))*((1.f)-(Cb))); |
786 | ret_mask = ~I32(_c20_); |
787 | } |
788 | { |
789 | Float D; |
790 | auto _c21_ = (Cb)<=(0.25f); |
791 | auto _c22_ = (~(_c20_))&(_c21_); |
792 | D = if_then_else(_c22_,(((((16.f)*(Cb))-(12.f))*(Cb))+(4.f))*(Cb),D); |
793 | _c21_ = (~(_c20_))&(~(_c21_)); |
794 | D = if_then_else(_c21_,sqrt(Cb),D); |
795 | ret = if_then_else(ret_mask & I32(~(_c20_)), (Cb)+((((2.f)*(Cs))-(1.f))*((D)-(Cb))), ret); |
796 | ret_mask &= ~I32(~(_c20_)); |
797 | } |
798 | return ret; |
799 | } |
800 | vec3 Difference(vec3 Cb, vec3 Cs) { |
801 | return abs((Cb)-(Cs)); |
802 | } |
803 | vec3 Exclusion(vec3 Cb, vec3 Cs) { |
804 | return ((Cb)+(Cs))-(((2.f)*(Cb))*(Cs)); |
805 | } |
806 | Float Lum(vec3 c) { |
807 | vec3_scalar f = make_vec3(0.3f, 0.59f, 0.11f); |
808 | return dot(c, f); |
809 | } |
810 | vec3 ClipColor(vec3 C) { |
811 | Float L = Lum(C); |
812 | Float n = min((C).x, min((C).y, (C).z)); |
813 | Float x = max((C).x, max((C).y, (C).z)); |
814 | auto _c25_ = (n)<(0.f); |
815 | C = if_then_else(_c25_,(L)+((((C)-(L))*(L))/((L)-(n))),C); |
816 | auto _c26_ = (x)>(1.f); |
817 | C = if_then_else(_c26_,(L)+((((C)-(L))*((1.f)-(L)))/((x)-(L))),C); |
818 | return C; |
819 | } |
820 | vec3 SetLum(vec3 C, Float l) { |
821 | Float d = (l)-(Lum(C)); |
822 | return ClipColor((C)+(d)); |
823 | } |
824 | void SetSatInner(Float& Cmin, Float& Cmid, Float& Cmax, Float s, I32 _cond_mask_) { |
825 | auto _c47_ = (Cmax)>(Cmin); |
826 | auto _c48_ = (_cond_mask_)&(_c47_); |
827 | { |
828 | Cmid = if_then_else(_c48_,(((Cmid)-(Cmin))*(s))/((Cmax)-(Cmin)),Cmid); |
829 | Cmax = if_then_else(_c48_,s,Cmax); |
830 | } |
831 | _c47_ = (_cond_mask_)&(~(_c47_)); |
832 | { |
833 | Cmid = if_then_else(_c47_,0.f,Cmid); |
834 | Cmax = if_then_else(_c47_,0.f,Cmax); |
835 | } |
836 | Cmin = if_then_else(_cond_mask_,0.f,Cmin); |
837 | } |
838 | vec3 SetSat(vec3 C, Float s) { |
839 | auto _c36_ = ((C).x)<=((C).y); |
840 | { |
841 | auto _c37_ = ((C).y)<=((C).z); |
842 | auto _c38_ = (_c36_)&(_c37_); |
843 | { |
844 | SetSatInner((C).x, (C).y, (C).z, s, _c38_); |
845 | } |
846 | _c37_ = (_c36_)&(~(_c37_)); |
847 | { |
848 | auto _c39_ = ((C).x)<=((C).z); |
849 | auto _c40_ = (_c37_)&(_c39_); |
850 | { |
851 | SetSatInner((C).x, (C).z, (C).y, s, _c40_); |
852 | } |
853 | _c39_ = (_c37_)&(~(_c39_)); |
854 | { |
855 | SetSatInner((C).z, (C).x, (C).y, s, _c39_); |
856 | } |
857 | } |
858 | } |
859 | { |
860 | auto _c41_ = ((C).x)<=((C).z); |
861 | auto _c42_ = (~(_c36_))&(_c41_); |
862 | { |
863 | SetSatInner((C).y, (C).x, (C).z, s, _c42_); |
864 | } |
865 | _c41_ = (~(_c36_))&(~(_c41_)); |
866 | { |
867 | auto _c43_ = ((C).y)<=((C).z); |
868 | auto _c44_ = (_c41_)&(_c43_); |
869 | { |
870 | SetSatInner((C).y, (C).z, (C).x, s, _c44_); |
871 | } |
872 | _c43_ = (_c41_)&(~(_c43_)); |
873 | { |
874 | SetSatInner((C).z, (C).y, (C).x, s, _c43_); |
875 | } |
876 | } |
877 | } |
878 | return C; |
879 | } |
880 | Float Sat(vec3 c) { |
881 | return (max((c).x, max((c).y, (c).z)))-(min((c).x, min((c).y, (c).z))); |
882 | } |
883 | vec3 Hue(vec3 Cb, vec3 Cs) { |
884 | return SetLum(SetSat(Cs, Sat(Cb)), Lum(Cb)); |
885 | } |
886 | vec3 Saturation(vec3 Cb, vec3 Cs) { |
887 | return SetLum(SetSat(Cb, Sat(Cs)), Lum(Cb)); |
888 | } |
889 | vec3 Color(vec3 Cb, vec3 Cs) { |
890 | return SetLum(Cs, Lum(Cb)); |
891 | } |
892 | vec3 Luminosity(vec3 Cb, vec3 Cs) { |
893 | return SetLum(Cb, Lum(Cs)); |
894 | } |
895 | vec4 blend(vec4 Cs, vec4 Cb, int32_t mode) { |
896 | vec4 result = make_vec4(1.f, 0.f, 0.f, 1.f); |
897 | switch (mode) { |
898 | case BlendMode_Normal: |
899 | (result).lsel(R,G,B) = (Cs).sel(R,G,B); |
900 | break; |
901 | case BlendMode_Multiply: |
902 | (result).lsel(R,G,B) = Multiply((Cb).sel(R,G,B), (Cs).sel(R,G,B)); |
903 | break; |
904 | case BlendMode_Screen: |
905 | (result).lsel(R,G,B) = Screen((Cb).sel(R,G,B), (Cs).sel(R,G,B)); |
906 | break; |
907 | case BlendMode_Overlay: |
908 | (result).lsel(R,G,B) = HardLight((Cs).sel(R,G,B), (Cb).sel(R,G,B)); |
909 | break; |
910 | case BlendMode_Darken: |
911 | (result).lsel(R,G,B) = min((Cs).sel(R,G,B), (Cb).sel(R,G,B)); |
912 | break; |
913 | case BlendMode_Lighten: |
914 | (result).lsel(R,G,B) = max((Cs).sel(R,G,B), (Cb).sel(R,G,B)); |
915 | break; |
916 | case BlendMode_ColorDodge: |
917 | (result).x = ColorDodge((Cb).x, (Cs).x); |
918 | (result).y = ColorDodge((Cb).y, (Cs).y); |
919 | (result).z = ColorDodge((Cb).z, (Cs).z); |
920 | break; |
921 | case BlendMode_ColorBurn: |
922 | (result).x = ColorBurn((Cb).x, (Cs).x); |
923 | (result).y = ColorBurn((Cb).y, (Cs).y); |
924 | (result).z = ColorBurn((Cb).z, (Cs).z); |
925 | break; |
926 | case BlendMode_HardLight: |
927 | (result).lsel(R,G,B) = HardLight((Cb).sel(R,G,B), (Cs).sel(R,G,B)); |
928 | break; |
929 | case BlendMode_SoftLight: |
930 | (result).x = SoftLight((Cb).x, (Cs).x); |
931 | (result).y = SoftLight((Cb).y, (Cs).y); |
932 | (result).z = SoftLight((Cb).z, (Cs).z); |
933 | break; |
934 | case BlendMode_Difference: |
935 | (result).lsel(R,G,B) = Difference((Cb).sel(R,G,B), (Cs).sel(R,G,B)); |
936 | break; |
937 | case BlendMode_Exclusion: |
938 | (result).lsel(R,G,B) = Exclusion((Cb).sel(R,G,B), (Cs).sel(R,G,B)); |
939 | break; |
940 | case BlendMode_Hue: |
941 | (result).lsel(R,G,B) = Hue((Cb).sel(R,G,B), (Cs).sel(R,G,B)); |
942 | break; |
943 | case BlendMode_Saturation: |
944 | (result).lsel(R,G,B) = Saturation((Cb).sel(R,G,B), (Cs).sel(R,G,B)); |
945 | break; |
946 | case BlendMode_Color: |
947 | (result).lsel(R,G,B) = Color((Cb).sel(R,G,B), (Cs).sel(R,G,B)); |
948 | break; |
949 | case BlendMode_Luminosity: |
950 | (result).lsel(R,G,B) = Luminosity((Cb).sel(R,G,B), (Cs).sel(R,G,B)); |
951 | break; |
952 | default: |
953 | break; |
954 | } |
955 | vec3 rgb = (((1.f)-((Cb).w))*((Cs).sel(R,G,B)))+(((Cb).w)*((result).sel(R,G,B))); |
956 | result = mix(make_vec4(((Cb).sel(R,G,B))*((Cb).w), (Cb).w), make_vec4(rgb, 1.f), (Cs).w); |
957 | return result; |
958 | } |
959 | vec3 LinearToSrgb(vec3 color) { |
960 | vec3 c1 = (color)*(12.92f); |
961 | vec3 c2 = ((make_vec3(1.055f))*(pow(color, make_vec3((1.f)/(2.4f)))))-(make_vec3(0.055f)); |
962 | return mix(c2, c1, lessThanEqual(color, make_vec3(0.0031308f))); |
963 | } |
964 | vec3 SrgbToLinear(vec3 color) { |
965 | vec3 c1 = (color)/(12.92f); |
966 | vec3 c2 = pow(((color)/(1.055f))+(make_vec3((0.055f)/(1.055f))), make_vec3(2.4f)); |
967 | return mix(c2, c1, lessThanEqual(color, make_vec3(0.04045f))); |
968 | } |
969 | Float point_inside_rect(vec2 p, vec2_scalar p0, vec2_scalar p1) { |
970 | vec2 s = (step(p0, p))-(step(p1, p)); |
971 | return ((s).x)*((s).y); |
972 | } |
973 | vec4 fetch_from_gpu_cache_1_direct(ivec2 address) { |
974 | return texelFetch(sGpuCache, address, 0); |
975 | } |
976 | vec4_scalar fetch_from_gpu_cache_1_direct(ivec2_scalar address) { |
977 | return texelFetch(sGpuCache, address, 0); |
978 | } |
979 | vec4 ComponentTransfer(vec4 colora) { |
980 | int32_t offset = 0; |
981 | vec4 texel; |
982 | I32 k; |
983 | Array<int32_t,4> funcs = Array<int32_t,4>{{(vFuncs).x, (vFuncs).y, (vFuncs).z, (vFuncs).w}}; |
984 | for ( int32_t i = 0; |
985 | (i)<(4); i++) { |
986 | switch (funcs[i]) { |
987 | case 0: |
988 | break; |
989 | case 1: |
990 | case 2: |
991 | k = make_int(floor(((colora[i])*(255.f))+(0.5f))); |
992 | texel = fetch_from_gpu_cache_1_direct(((vData).sel(X,Y))+(make_ivec2((offset)+((k)/(4)), 0))); |
993 | colora[i] = clamp(texel[(k)%(4)], 0.f, 1.f); |
994 | offset = (offset)+(64); |
995 | break; |
996 | case 3: |
997 | texel = fetch_from_gpu_cache_1_direct(((vData).sel(X,Y))+(make_ivec2(offset, 0))); |
998 | colora[i] = clamp(((texel[0])*(colora[i]))+(texel[1]), 0.f, 1.f); |
999 | offset = (offset)+(1); |
1000 | break; |
1001 | case 4: |
1002 | texel = fetch_from_gpu_cache_1_direct(((vData).sel(X,Y))+(make_ivec2(offset, 0))); |
1003 | colora[i] = clamp(((texel[0])*(pow(colora[i], texel[1])))+(texel[2]), 0.f, 1.f); |
1004 | offset = (offset)+(1); |
1005 | break; |
1006 | default: |
1007 | break; |
1008 | } |
1009 | } |
1010 | return colora; |
1011 | } |
1012 | vec4 composite(vec4 Cs, vec4 Cb, int32_t mode) { |
1013 | vec4 Cr = make_vec4(0.f, 1.f, 0.f, 1.f); |
1014 | switch (mode) { |
1015 | case 0: |
1016 | (Cr).lsel(R,G,B) = (((Cs).w)*((Cs).sel(R,G,B)))+((((Cb).w)*((Cb).sel(R,G,B)))*((1.f)-((Cs).w))); |
1017 | (Cr).w = ((Cs).w)+(((Cb).w)*((1.f)-((Cs).w))); |
1018 | break; |
1019 | case 1: |
1020 | (Cr).lsel(R,G,B) = (((Cs).w)*((Cs).sel(R,G,B)))*((Cb).w); |
1021 | (Cr).w = ((Cs).w)*((Cb).w); |
1022 | break; |
1023 | case 2: |
1024 | (Cr).lsel(R,G,B) = (((Cs).w)*((Cs).sel(R,G,B)))*((1.f)-((Cb).w)); |
1025 | (Cr).w = ((Cs).w)*((1.f)-((Cb).w)); |
1026 | break; |
1027 | case 3: |
1028 | (Cr).lsel(R,G,B) = ((((Cs).w)*((Cs).sel(R,G,B)))*((Cb).w))+((((Cb).w)*((Cb).sel(R,G,B)))*((1.f)-((Cs).w))); |
1029 | (Cr).w = (((Cs).w)*((Cb).w))+(((Cb).w)*((1.f)-((Cs).w))); |
1030 | break; |
1031 | case 4: |
1032 | (Cr).lsel(R,G,B) = ((((Cs).w)*((Cs).sel(R,G,B)))*((1.f)-((Cb).w)))+((((Cb).w)*((Cb).sel(R,G,B)))*((1.f)-((Cs).w))); |
1033 | (Cr).w = (((Cs).w)*((1.f)-((Cb).w)))+(((Cb).w)*((1.f)-((Cs).w))); |
1034 | break; |
1035 | case 5: |
1036 | (Cr).lsel(R,G,B) = (((Cs).w)*((Cs).sel(R,G,B)))+(((Cb).w)*((Cb).sel(R,G,B))); |
1037 | (Cr).w = ((Cs).w)+((Cb).w); |
1038 | Cr = clamp(Cr, make_vec4(0.f), make_vec4(1.f)); |
1039 | break; |
1040 | case 6: |
1041 | Cr = (((((make_vec4((vFilterData0).x))*(Cs))*(Cb))+((make_vec4((vFilterData0).y))*(Cs)))+((make_vec4((vFilterData0).z))*(Cb)))+(make_vec4((vFilterData0).w)); |
1042 | Cr = clamp(Cr, make_vec4(0.f), make_vec4(1.f)); |
1043 | break; |
1044 | default: |
1045 | break; |
1046 | } |
1047 | return Cr; |
1048 | } |
1049 | ALWAYS_INLINE void main(void) { |
1050 | vec4 Ca = make_vec4(0.f, 0.f, 0.f, 0.f); |
1051 | vec4 Cb = make_vec4(0.f, 0.f, 0.f, 0.f); |
1052 | if (((vFilterInputCountFilterKindVec).x)>(0)) { |
1053 | { |
1054 | Ca = sampleInUvRect(sColor0, vInput1Uv, vInput1UvRect); |
1055 | auto _c3_ = ((Ca).w)!=(0.f); |
1056 | { |
1057 | (Ca).lsel(R,G,B) = if_then_else(_c3_,(Ca).sel(R,G,B)/(Ca).w,(Ca).sel(R,G,B)); |
1058 | } |
1059 | } |
1060 | } |
1061 | if (((vFilterInputCountFilterKindVec).x)>(1)) { |
1062 | { |
1063 | Cb = sampleInUvRect(sColor1, vInput2Uv, vInput2UvRect); |
1064 | auto _c4_ = ((Cb).w)!=(0.f); |
1065 | { |
1066 | (Cb).lsel(R,G,B) = if_then_else(_c4_,(Cb).sel(R,G,B)/(Cb).w,(Cb).sel(R,G,B)); |
1067 | } |
1068 | } |
1069 | } |
1070 | vec4 result = make_vec4(1.f, 0.f, 0.f, 1.f); |
1071 | bool needsPremul = true; |
1072 | switch ((vFilterInputCountFilterKindVec).y) { |
1073 | case 0: |
1074 | result = blend(Ca, Cb, (vData).x); |
1075 | needsPremul = false; |
1076 | break; |
1077 | case 1: |
1078 | result = vFilterData0; |
1079 | needsPremul = false; |
1080 | break; |
1081 | case 2: |
1082 | (result).lsel(R,G,B) = LinearToSrgb((Ca).sel(R,G,B)); |
1083 | (result).w = (Ca).w; |
1084 | break; |
1085 | case 3: |
1086 | (result).lsel(R,G,B) = SrgbToLinear((Ca).sel(R,G,B)); |
1087 | (result).w = (Ca).w; |
1088 | break; |
1089 | case 4: |
1090 | (result).lsel(R,G,B) = (Ca).sel(R,G,B); |
1091 | (result).w = ((Ca).w)*((vFloat0).x); |
1092 | break; |
1093 | case 5: |
1094 | result = ((vColorMat)*(Ca))+(vFilterData0); |
1095 | result = clamp(result, make_vec4(0.f), make_vec4(1.f)); |
1096 | break; |
1097 | case 6: |
1098 | { |
1099 | vec4 shadow = make_vec4((vFilterData0).sel(R,G,B), ((Cb).w)*((vFilterData0).w)); |
1100 | result = blend(Ca, shadow, BlendMode_Normal); |
1101 | needsPremul = false; |
1102 | break; |
1103 | } |
1104 | case 7: |
1105 | { |
1106 | vec2 offsetUv = (vInput1Uv)+((vFilterData0).sel(X,Y)); |
1107 | result = sampleInUvRect(sColor0, offsetUv, vInput1UvRect); |
1108 | result *= point_inside_rect(offsetUv, (vFilterData1).sel(X,Y), (vFilterData1).sel(Z,W)); |
1109 | needsPremul = false; |
1110 | break; |
1111 | } |
1112 | case 8: |
1113 | result = ComponentTransfer(Ca); |
1114 | break; |
1115 | case 9: |
1116 | result = Ca; |
1117 | break; |
1118 | case 10: |
1119 | result = composite(Ca, Cb, (vData).x); |
1120 | needsPremul = false; |
1121 | default: |
1122 | break; |
1123 | } |
1124 | if (needsPremul) { |
1125 | { |
1126 | (result).lsel(R,G,B) *= (result).w; |
1127 | } |
1128 | } |
1129 | oFragColor = result; |
1130 | } |
1131 | typedef cs_svg_filter_vert::InterpOutputs InterpInputs; |
1132 | InterpInputs interp_step; |
1133 | struct InterpPerspective { |
1134 | vec2 vInput1Uv; |
1135 | vec2 vInput2Uv; |
1136 | }; |
1137 | InterpPerspective interp_perspective; |
1138 | static void read_interp_inputs(FragmentShaderImpl* impl, const void* init_, const void* step_) {Self* self = (Self*)impl;const InterpInputs* init = (const InterpInputs*)init_;const InterpInputs* step = (const InterpInputs*)step_; |
1139 | self->vInput1Uv = init_interp(init->vInput1Uv, step->vInput1Uv); |
1140 | self->interp_step.vInput1Uv = step->vInput1Uv * 4.0f; |
1141 | self->vInput2Uv = init_interp(init->vInput2Uv, step->vInput2Uv); |
1142 | self->interp_step.vInput2Uv = step->vInput2Uv * 4.0f; |
1143 | } |
1144 | static void read_perspective_inputs(FragmentShaderImpl* impl, const void* init_, const void* step_) {Self* self = (Self*)impl;const InterpInputs* init = (const InterpInputs*)init_;const InterpInputs* step = (const InterpInputs*)step_; |
1145 | Float w = 1.0f / self->gl_FragCoord.w; |
1146 | self->interp_perspective.vInput1Uv = init_interp(init->vInput1Uv, step->vInput1Uv); |
1147 | self->vInput1Uv = self->interp_perspective.vInput1Uv * w; |
1148 | self->interp_step.vInput1Uv = step->vInput1Uv * 4.0f; |
1149 | self->interp_perspective.vInput2Uv = init_interp(init->vInput2Uv, step->vInput2Uv); |
1150 | self->vInput2Uv = self->interp_perspective.vInput2Uv * w; |
1151 | self->interp_step.vInput2Uv = step->vInput2Uv * 4.0f; |
1152 | } |
1153 | ALWAYS_INLINE void step_interp_inputs(int steps = 4) { |
1154 | float chunks = steps * 0.25f; |
1155 | vInput1Uv += interp_step.vInput1Uv * chunks; |
1156 | vInput2Uv += interp_step.vInput2Uv * chunks; |
1157 | } |
1158 | ALWAYS_INLINE void step_perspective_inputs(int steps = 4) { |
1159 | step_perspective(steps); |
1160 | float chunks = steps * 0.25f; |
1161 | Float w = 1.0f / gl_FragCoord.w; |
1162 | interp_perspective.vInput1Uv += interp_step.vInput1Uv * chunks; |
1163 | vInput1Uv = w * interp_perspective.vInput1Uv; |
1164 | interp_perspective.vInput2Uv += interp_step.vInput2Uv * chunks; |
1165 | vInput2Uv = w * interp_perspective.vInput2Uv; |
1166 | } |
1167 | static void run(FragmentShaderImpl* impl) { |
1168 | Self* self = (Self*)impl; |
1169 | self->main(); |
1170 | self->step_interp_inputs(); |
1171 | } |
1172 | static void skip(FragmentShaderImpl* impl, int steps) { |
1173 | Self* self = (Self*)impl; |
1174 | self->step_interp_inputs(steps); |
1175 | } |
1176 | static void run_perspective(FragmentShaderImpl* impl) { |
1177 | Self* self = (Self*)impl; |
1178 | self->main(); |
1179 | self->step_perspective_inputs(); |
1180 | } |
1181 | static void skip_perspective(FragmentShaderImpl* impl, int steps) { |
1182 | Self* self = (Self*)impl; |
1183 | self->step_perspective_inputs(steps); |
1184 | } |
1185 | public: |
1186 | cs_svg_filter_frag() { |
1187 | init_span_func = &read_interp_inputs; |
1188 | run_func = &run; |
1189 | skip_func = &skip; |
1190 | enable_perspective(); |
1191 | init_span_w_func = &read_perspective_inputs; |
1192 | run_w_func = &run_perspective; |
1193 | skip_w_func = &skip_perspective; |
1194 | } |
1195 | }; |
1196 | |
1197 | struct cs_svg_filter_program : ProgramImpl, cs_svg_filter_frag { |
1198 | int get_uniform(const char *name) const override { |
1199 | if (strcmp("sClipMask", name) == 0) { return 7; } |
1200 | if (strcmp("sColor0", name) == 0) { return 8; } |
1201 | if (strcmp("sColor1", name) == 0) { return 9; } |
1202 | if (strcmp("sGpuCache", name) == 0) { return 2; } |
1203 | if (strcmp("sPrimitiveHeadersF", name) == 0) { return 4; } |
1204 | if (strcmp("sPrimitiveHeadersI", name) == 0) { return 5; } |
1205 | if (strcmp("sRenderTasks", name) == 0) { return 1; } |
1206 | if (strcmp("sTransformPalette", name) == 0) { return 3; } |
1207 | if (strcmp("uTransform", name) == 0) { return 6; } |
1208 | return -1; |
1209 | } |
1210 | void bind_attrib(const char* name, int index) override { |
1211 | attrib_locations.bind_loc(name, index); |
1212 | } |
1213 | int get_attrib(const char* name) const override { |
1214 | return attrib_locations.get_loc(name); |
1215 | } |
1216 | size_t interpolants_size() const override { return sizeof(InterpOutputs); } |
1217 | VertexShaderImpl* get_vertex_shader() override { |
1218 | return this; |
1219 | } |
1220 | FragmentShaderImpl* get_fragment_shader() override { |
1221 | return this; |
1222 | } |
1223 | const char* get_name() const override { return "cs_svg_filter"; } |
1224 | static ProgramImpl* loader() { return new cs_svg_filter_program; } |
1225 | }; |
1226 | |
1227 | int32_t constexpr cs_svg_filter_frag::BlendMode_Normal; |
1228 | int32_t constexpr cs_svg_filter_frag::BlendMode_Multiply; |
1229 | int32_t constexpr cs_svg_filter_frag::BlendMode_Screen; |
1230 | int32_t constexpr cs_svg_filter_frag::BlendMode_Overlay; |
1231 | int32_t constexpr cs_svg_filter_frag::BlendMode_Darken; |
1232 | int32_t constexpr cs_svg_filter_frag::BlendMode_Lighten; |
1233 | int32_t constexpr cs_svg_filter_frag::BlendMode_ColorDodge; |
1234 | int32_t constexpr cs_svg_filter_frag::BlendMode_ColorBurn; |
1235 | int32_t constexpr cs_svg_filter_frag::BlendMode_HardLight; |
1236 | int32_t constexpr cs_svg_filter_frag::BlendMode_SoftLight; |
1237 | int32_t constexpr cs_svg_filter_frag::BlendMode_Difference; |
1238 | int32_t constexpr cs_svg_filter_frag::BlendMode_Exclusion; |
1239 | int32_t constexpr cs_svg_filter_frag::BlendMode_Hue; |
1240 | int32_t constexpr cs_svg_filter_frag::BlendMode_Saturation; |
1241 | int32_t constexpr cs_svg_filter_frag::BlendMode_Color; |
1242 | int32_t constexpr cs_svg_filter_frag::BlendMode_Luminosity; |