| File: | root/firefox-clang/obj-x86_64-pc-linux-gnu/x86_64-unknown-linux-gnu/debug/build/swgl-19ea748e17a3c52a/out/cs_blur_ALPHA_TARGET.h | 
| Warning: | line 367, column 14: Value stored to 'texel1' during its initialization is never read | 
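The flagged dead store is in `fetch_render_task_rect` (line 367 below): the translated shader fetches two texels of render-task data, but only `texel0` feeds the returned rect, so `texel1` is initialized and never read. This header is build output generated from WebRender's GLSL, so any fix would likely land in the generator rather than in this file. The following is a minimal standalone sketch of the pattern and two common ways to quiet the analyzer; `Vec4`, `Rect`, and `fetch_texel` are hypothetical stand-ins for illustration, not the SWGL types or API.

#include <array>
#include <cstdio>

// Hypothetical stand-ins for the SWGL texel types and fetch helpers.
using Vec4 = std::array<float, 4>;
struct Rect { float x0, y0, x1, y1; };

static Vec4 fetch_texel(int u, int v) {
    // Pretend this reads one RGBA texel of render-task data at (u, v).
    return {float(u), float(v), float(u + 1), float(v + 1)};
}

// Same shape as the generated code: both texels are fetched, but only
// texel0 is consumed, so the analyzer reports a dead store on texel1.
static Rect fetch_render_task_rect_flagged(int u, int v) {
    Vec4 texel0 = fetch_texel(u, v);
    Vec4 texel1 = fetch_texel(u + 1, v); // never read -> dead-store warning
    return {texel0[0], texel0[1], texel0[2], texel0[3]};
}

// Two usual remedies: drop the unused fetch entirely, or discard its
// result explicitly if the fetch has to stay for layout or symmetry.
static Rect fetch_render_task_rect_quiet(int u, int v) {
    Vec4 texel0 = fetch_texel(u, v);
    (void)fetch_texel(u + 1, v); // explicit discard silences the warning
    return {texel0[0], texel0[1], texel0[2], texel0[3]};
}

int main() {
    Rect a = fetch_render_task_rect_flagged(0, 0);
    Rect b = fetch_render_task_rect_quiet(2, 0);
    std::printf("a=(%g,%g,%g,%g) b=(%g,%g,%g,%g)\n",
                a.x0, a.y0, a.x1, a.y1, b.x0, b.y0, b.x1, b.y1);
    return 0;
}

Whether the second fetch is worth keeping at all is a question for the glsl-to-cxx translator (it emits paired fetches for two-texel render-task entries even when the caller only needs the rect); the sketch above only illustrates the warning, not the project's chosen fix.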
| 1 | struct cs_blur_ALPHA_TARGET_common { | 
| 2 | struct Samplers { | 
| 3 | sampler2D_impl sClipMask_impl; | 
| 4 | int sClipMask_slot; | 
| 5 | sampler2D_impl sColor0_impl; | 
| 6 | int sColor0_slot; | 
| 7 | sampler2D_impl sGpuCache_impl; | 
| 8 | int sGpuCache_slot; | 
| 9 | sampler2D_impl sPrimitiveHeadersF_impl; | 
| 10 | int sPrimitiveHeadersF_slot; | 
| 11 | isampler2D_impl sPrimitiveHeadersI_impl; | 
| 12 | int sPrimitiveHeadersI_slot; | 
| 13 | sampler2D_impl sRenderTasks_impl; | 
| 14 | int sRenderTasks_slot; | 
| 15 | sampler2D_impl sTransformPalette_impl; | 
| 16 | int sTransformPalette_slot; | 
| 17 | bool set_slot(int index, int value) { | 
| 18 | switch (index) { | 
| 19 | case 7: | 
| 20 | sClipMask_slot = value; | 
| 21 | return true; | 
| 22 | case 8: | 
| 23 | sColor0_slot = value; | 
| 24 | return true; | 
| 25 | case 2: | 
| 26 | sGpuCache_slot = value; | 
| 27 | return true; | 
| 28 | case 4: | 
| 29 | sPrimitiveHeadersF_slot = value; | 
| 30 | return true; | 
| 31 | case 5: | 
| 32 | sPrimitiveHeadersI_slot = value; | 
| 33 | return true; | 
| 34 | case 1: | 
| 35 | sRenderTasks_slot = value; | 
| 36 | return true; | 
| 37 | case 3: | 
| 38 | sTransformPalette_slot = value; | 
| 39 | return true; | 
| 40 | } | 
| 41 | return false; | 
| 42 | } | 
| 43 | } samplers; | 
| 44 | struct AttribLocations { | 
| 45 | int aPosition = NULL_ATTRIB16; | 
| 46 | int aData = NULL_ATTRIB16; | 
| 47 | int aBlurRenderTaskAddress = NULL_ATTRIB16; | 
| 48 | int aBlurSourceTaskAddress = NULL_ATTRIB16; | 
| 49 | int aBlurDirection = NULL_ATTRIB16; | 
| 50 | int aBlurParams = NULL_ATTRIB16; | 
| 51 | void bind_loc(const char* name, int index) { | 
| 52 | if (strcmp("aPosition", name) == 0) { aPosition = index; return; } | 
| 53 | if (strcmp("aData", name) == 0) { aData = index; return; } | 
| 54 | if (strcmp("aBlurRenderTaskAddress", name) == 0) { aBlurRenderTaskAddress = index; return; } | 
| 55 | if (strcmp("aBlurSourceTaskAddress", name) == 0) { aBlurSourceTaskAddress = index; return; } | 
| 56 | if (strcmp("aBlurDirection", name) == 0) { aBlurDirection = index; return; } | 
| 57 | if (strcmp("aBlurParams", name) == 0) { aBlurParams = index; return; } | 
| 58 | } | 
| 59 | int get_loc(const char* name) const { | 
| 60 | if (strcmp("aPosition", name) == 0) { return aPosition != NULL_ATTRIB16 ? aPosition : -1; } | 
| 61 | if (strcmp("aData", name) == 0) { return aData != NULL_ATTRIB16 ? aData : -1; } | 
| 62 | if (strcmp("aBlurRenderTaskAddress", name) == 0) { return aBlurRenderTaskAddress != NULL_ATTRIB16 ? aBlurRenderTaskAddress : -1; } | 
| 63 | if (strcmp("aBlurSourceTaskAddress", name) == 0) { return aBlurSourceTaskAddress != NULL_ATTRIB16 ? aBlurSourceTaskAddress : -1; } | 
| 64 | if (strcmp("aBlurDirection", name) == 0) { return aBlurDirection != NULL_ATTRIB16 ? aBlurDirection : -1; } | 
| 65 | if (strcmp("aBlurParams", name) == 0) { return aBlurParams != NULL_ATTRIB16 ? aBlurParams : -1; } | 
| 66 | return -1; | 
| 67 | } | 
| 68 | } attrib_locations; | 
| 69 | vec4_scalar vTransformBounds; | 
| 70 | vec4_scalar vUvRect; | 
| 71 | vec2_scalar vOffsetScale; | 
| 72 | ivec2_scalar vSupport; | 
| 73 | vec2_scalar vGaussCoefficients; | 
| 74 | sampler2D sClipMask; | 
| 75 | sampler2D sColor0; | 
| 76 | sampler2D sGpuCache; | 
| 77 | sampler2D sPrimitiveHeadersF; | 
| 78 | isampler2D sPrimitiveHeadersI; | 
| 79 | sampler2D sRenderTasks; | 
| 80 | sampler2D sTransformPalette; | 
| 81 | mat4_scalar uTransform; | 
| 82 | void bind_textures() { | 
| 83 | sClipMask = lookup_sampler(&samplers.sClipMask_impl, samplers.sClipMask_slot); | 
| 84 | sColor0 = lookup_sampler(&samplers.sColor0_impl, samplers.sColor0_slot); | 
| 85 | sGpuCache = lookup_sampler(&samplers.sGpuCache_impl, samplers.sGpuCache_slot); | 
| 86 | sPrimitiveHeadersF = lookup_sampler(&samplers.sPrimitiveHeadersF_impl, samplers.sPrimitiveHeadersF_slot); | 
| 87 | sPrimitiveHeadersI = lookup_isampler(&samplers.sPrimitiveHeadersI_impl, samplers.sPrimitiveHeadersI_slot); | 
| 88 | sRenderTasks = lookup_sampler(&samplers.sRenderTasks_impl, samplers.sRenderTasks_slot); | 
| 89 | sTransformPalette = lookup_sampler(&samplers.sTransformPalette_impl, samplers.sTransformPalette_slot); | 
| 90 | } | 
| 91 | }; | 
| 92 | struct cs_blur_ALPHA_TARGET_vert : VertexShaderImpl, cs_blur_ALPHA_TARGET_common { | 
| 93 | private: | 
| 94 | typedef cs_blur_ALPHA_TARGET_vert Self; | 
| 95 | // mat4_scalar uTransform; | 
| 96 | vec2 aPosition; | 
| 97 | // sampler2D sColor0; | 
| 98 | // sampler2D sColor1; | 
| 99 | // sampler2D sColor2; | 
| 100 | struct RectWithSize_scalar { | 
| 101 | vec2_scalar p0; | 
| 102 | vec2_scalar size; | 
| 103 | RectWithSize_scalar() = default; | 
| 104 | RectWithSize_scalar(vec2_scalar p0, vec2_scalar size) : p0(p0), size(size){} | 
| 105 | }; | 
| 106 | struct RectWithSize { | 
| 107 | vec2 p0; | 
| 108 | vec2 size; | 
| 109 | RectWithSize() = default; | 
| 110 | RectWithSize(vec2 p0, vec2 size) : p0(p0), size(size){} | 
| 111 | RectWithSize(vec2_scalar p0, vec2_scalar size):p0(p0),size(size){ | 
| 112 | } | 
| 113 | IMPLICIT RectWithSize(RectWithSize_scalar s):p0(s.p0),size(s.size){ | 
| 114 | } | 
| 115 | friend RectWithSize if_then_else(I32 c, RectWithSize t, RectWithSize e) { return RectWithSize( | 
| 116 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.size, e.size)); | 
| 117 | }}; | 
| 118 | struct RectWithEndpoint_scalar { | 
| 119 | vec2_scalar p0; | 
| 120 | vec2_scalar p1; | 
| 121 | RectWithEndpoint_scalar() = default; | 
| 122 | RectWithEndpoint_scalar(vec2_scalar p0, vec2_scalar p1) : p0(p0), p1(p1){} | 
| 123 | }; | 
| 124 | struct RectWithEndpoint { | 
| 125 | vec2 p0; | 
| 126 | vec2 p1; | 
| 127 | RectWithEndpoint() = default; | 
| 128 | RectWithEndpoint(vec2 p0, vec2 p1) : p0(p0), p1(p1){} | 
| 129 | RectWithEndpoint(vec2_scalar p0, vec2_scalar p1):p0(p0),p1(p1){ | 
| 130 | } | 
| 131 | IMPLICIT RectWithEndpoint(RectWithEndpoint_scalar s):p0(s.p0),p1(s.p1){ | 
| 132 | } | 
| 133 | friend RectWithEndpoint if_then_else(I32 c, RectWithEndpoint t, RectWithEndpoint e) { return RectWithEndpoint( | 
| 134 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.p1, e.p1)); | 
| 135 | }}; | 
| 136 | // sampler2D sRenderTasks; | 
| 137 | struct RenderTaskData_scalar { | 
| 138 | RectWithEndpoint_scalar task_rect; | 
| 139 | vec4_scalar user_data; | 
| 140 | RenderTaskData_scalar() = default; | 
| 141 | RenderTaskData_scalar(RectWithEndpoint_scalar task_rect, vec4_scalar user_data) : task_rect(task_rect), user_data(user_data){} | 
| 142 | }; | 
| 143 | struct RenderTaskData { | 
| 144 | RectWithEndpoint task_rect; | 
| 145 | vec4 user_data; | 
| 146 | RenderTaskData() = default; | 
| 147 | RenderTaskData(RectWithEndpoint task_rect, vec4 user_data) : task_rect(task_rect), user_data(user_data){} | 
| 148 | RenderTaskData(RectWithEndpoint_scalar task_rect, vec4_scalar user_data):task_rect(task_rect),user_data(user_data){ | 
| 149 | } | 
| 150 | IMPLICIT RenderTaskData(RenderTaskData_scalar s):task_rect(s.task_rect),user_data(s.user_data){ | 
| 151 | } | 
| 152 | friend RenderTaskData if_then_else(I32 c, RenderTaskData t, RenderTaskData e) { return RenderTaskData( | 
| 153 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.user_data, e.user_data)); | 
| 154 | }}; | 
| 155 | struct PictureTask_scalar { | 
| 156 | RectWithEndpoint_scalar task_rect; | 
| 157 | float device_pixel_scale; | 
| 158 | vec2_scalar content_origin; | 
| 159 | PictureTask_scalar() = default; | 
| 160 | PictureTask_scalar(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar content_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), content_origin(content_origin){} | 
| 161 | }; | 
| 162 | struct PictureTask { | 
| 163 | RectWithEndpoint task_rect; | 
| 164 | Float device_pixel_scale; | 
| 165 | vec2 content_origin; | 
| 166 | PictureTask() = default; | 
| 167 | PictureTask(RectWithEndpoint task_rect, Float device_pixel_scale, vec2 content_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), content_origin(content_origin){} | 
| 168 | PictureTask(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar content_origin):task_rect(task_rect),device_pixel_scale(device_pixel_scale),content_origin(content_origin){ | 
| 169 | } | 
| 170 | IMPLICIT PictureTask(PictureTask_scalar s):task_rect(s.task_rect),device_pixel_scale(s.device_pixel_scale),content_origin(s.content_origin){ | 
| 171 | } | 
| 172 | friend PictureTask if_then_else(I32 c, PictureTask t, PictureTask e) { return PictureTask( | 
| 173 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.device_pixel_scale, e.device_pixel_scale), if_then_else(c, t.content_origin, e.content_origin)); | 
| 174 | }}; | 
| 175 | struct ClipArea_scalar { | 
| 176 | RectWithEndpoint_scalar task_rect; | 
| 177 | float device_pixel_scale; | 
| 178 | vec2_scalar screen_origin; | 
| 179 | ClipArea_scalar() = default; | 
| 180 | ClipArea_scalar(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar screen_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), screen_origin(screen_origin){} | 
| 181 | }; | 
| 182 | struct ClipArea { | 
| 183 | RectWithEndpoint task_rect; | 
| 184 | Float device_pixel_scale; | 
| 185 | vec2 screen_origin; | 
| 186 | ClipArea() = default; | 
| 187 | ClipArea(RectWithEndpoint task_rect, Float device_pixel_scale, vec2 screen_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), screen_origin(screen_origin){} | 
| 188 | ClipArea(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar screen_origin):task_rect(task_rect),device_pixel_scale(device_pixel_scale),screen_origin(screen_origin){ | 
| 189 | } | 
| 190 | IMPLICIT ClipArea(ClipArea_scalar s):task_rect(s.task_rect),device_pixel_scale(s.device_pixel_scale),screen_origin(s.screen_origin){ | 
| 191 | } | 
| 192 | friend ClipArea if_then_else(I32 c, ClipArea t, ClipArea e) { return ClipArea( | 
| 193 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.device_pixel_scale, e.device_pixel_scale), if_then_else(c, t.screen_origin, e.screen_origin)); | 
| 194 | }}; | 
| 195 | // sampler2D sGpuCache; | 
| 196 | struct ImageSource_scalar { | 
| 197 | RectWithEndpoint_scalar uv_rect; | 
| 198 | vec4_scalar user_data; | 
| 199 | ImageSource_scalar() = default; | 
| 200 | ImageSource_scalar(RectWithEndpoint_scalar uv_rect, vec4_scalar user_data) : uv_rect(uv_rect), user_data(user_data){} | 
| 201 | }; | 
| 202 | struct ImageSource { | 
| 203 | RectWithEndpoint uv_rect; | 
| 204 | vec4 user_data; | 
| 205 | ImageSource() = default; | 
| 206 | ImageSource(RectWithEndpoint uv_rect, vec4 user_data) : uv_rect(uv_rect), user_data(user_data){} | 
| 207 | ImageSource(RectWithEndpoint_scalar uv_rect, vec4_scalar user_data):uv_rect(uv_rect),user_data(user_data){ | 
| 208 | } | 
| 209 | IMPLICIT ImageSource(ImageSource_scalar s):uv_rect(s.uv_rect),user_data(s.user_data){ | 
| 210 | } | 
| 211 | friend ImageSource if_then_else(I32 c, ImageSource t, ImageSource e) { return ImageSource( | 
| 212 | if_then_else(c, t.uv_rect, e.uv_rect), if_then_else(c, t.user_data, e.user_data)); | 
| 213 | }}; | 
| 214 | struct ImageSourceExtra_scalar { | 
| 215 | vec4_scalar st_tl; | 
| 216 | vec4_scalar st_tr; | 
| 217 | vec4_scalar st_bl; | 
| 218 | vec4_scalar st_br; | 
| 219 | ImageSourceExtra_scalar() = default; | 
| 220 | ImageSourceExtra_scalar(vec4_scalar st_tl, vec4_scalar st_tr, vec4_scalar st_bl, vec4_scalar st_br) : st_tl(st_tl), st_tr(st_tr), st_bl(st_bl), st_br(st_br){} | 
| 221 | }; | 
| 222 | struct ImageSourceExtra { | 
| 223 | vec4 st_tl; | 
| 224 | vec4 st_tr; | 
| 225 | vec4 st_bl; | 
| 226 | vec4 st_br; | 
| 227 | ImageSourceExtra() = default; | 
| 228 | ImageSourceExtra(vec4 st_tl, vec4 st_tr, vec4 st_bl, vec4 st_br) : st_tl(st_tl), st_tr(st_tr), st_bl(st_bl), st_br(st_br){} | 
| 229 | ImageSourceExtra(vec4_scalar st_tl, vec4_scalar st_tr, vec4_scalar st_bl, vec4_scalar st_br):st_tl(st_tl),st_tr(st_tr),st_bl(st_bl),st_br(st_br){ | 
| 230 | } | 
| 231 | IMPLICIT ImageSourceExtra(ImageSourceExtra_scalar s):st_tl(s.st_tl),st_tr(s.st_tr),st_bl(s.st_bl),st_br(s.st_br){ | 
| 232 | } | 
| 233 | friend ImageSourceExtra if_then_else(I32 c, ImageSourceExtra t, ImageSourceExtra e) { return ImageSourceExtra( | 
| 234 | if_then_else(c, t.st_tl, e.st_tl), if_then_else(c, t.st_tr, e.st_tr), if_then_else(c, t.st_bl, e.st_bl), if_then_else(c, t.st_br, e.st_br)); | 
| 235 | }}; | 
| 236 | // vec4_scalar vTransformBounds; | 
| 237 | // sampler2D sTransformPalette; | 
| 238 | struct Transform_scalar { | 
| 239 | mat4_scalar m; | 
| 240 | mat4_scalar inv_m; | 
| 241 | bool is_axis_aligned; | 
| 242 | Transform_scalar() = default; | 
| 243 | Transform_scalar(mat4_scalar m, mat4_scalar inv_m, bool is_axis_aligned) : m(m), inv_m(inv_m), is_axis_aligned(is_axis_aligned){} | 
| 244 | }; | 
| 245 | struct Transform { | 
| 246 | mat4 m; | 
| 247 | mat4 inv_m; | 
| 248 | Bool is_axis_aligned; | 
| 249 | Transform() = default; | 
| 250 | Transform(mat4 m, mat4 inv_m, Bool is_axis_aligned) : m(m), inv_m(inv_m), is_axis_aligned(is_axis_aligned){} | 
| 251 | Transform(mat4_scalar m, mat4_scalar inv_m, bool is_axis_aligned):m(m),inv_m(inv_m),is_axis_aligned(is_axis_aligned){ | 
| 252 | } | 
| 253 | IMPLICIT Transform(Transform_scalar s):m(s.m),inv_m(s.inv_m),is_axis_aligned(s.is_axis_aligned){ | 
| 254 | } | 
| 255 | friend Transform if_then_else(I32 c, Transform t, Transform e) { return Transform( | 
| 256 | if_then_else(c, t.m, e.m), if_then_else(c, t.inv_m, e.inv_m), if_then_else(c, t.is_axis_aligned, e.is_axis_aligned)); | 
| 257 | }}; | 
| 258 | // sampler2D sClipMask; | 
| 259 | // sampler2D sPrimitiveHeadersF; | 
| 260 | // isampler2D sPrimitiveHeadersI; | 
| 261 | ivec4_scalar aData; | 
| 262 | struct Instance_scalar { | 
| 263 | int32_t prim_header_address; | 
| 264 | int32_t clip_address; | 
| 265 | int32_t segment_index; | 
| 266 | int32_t flags; | 
| 267 | int32_t resource_address; | 
| 268 | int32_t brush_kind; | 
| 269 | Instance_scalar() = default; | 
| 270 | Instance_scalar(int32_t prim_header_address, int32_t clip_address, int32_t segment_index, int32_t flags, int32_t resource_address, int32_t brush_kind) : prim_header_address(prim_header_address), clip_address(clip_address), segment_index(segment_index), flags(flags), resource_address(resource_address), brush_kind(brush_kind){} | 
| 271 | }; | 
| 272 | struct Instance { | 
| 273 | I32 prim_header_address; | 
| 274 | I32 clip_address; | 
| 275 | I32 segment_index; | 
| 276 | I32 flags; | 
| 277 | I32 resource_address; | 
| 278 | I32 brush_kind; | 
| 279 | Instance() = default; | 
| 280 | Instance(I32 prim_header_address, I32 clip_address, I32 segment_index, I32 flags, I32 resource_address, I32 brush_kind) : prim_header_address(prim_header_address), clip_address(clip_address), segment_index(segment_index), flags(flags), resource_address(resource_address), brush_kind(brush_kind){} | 
| 281 | Instance(int32_t prim_header_address, int32_t clip_address, int32_t segment_index, int32_t flags, int32_t resource_address, int32_t brush_kind):prim_header_address(prim_header_address),clip_address(clip_address),segment_index(segment_index),flags(flags),resource_address(resource_address),brush_kind(brush_kind){ | 
| 282 | } | 
| 283 | IMPLICIT Instance(Instance_scalar s):prim_header_address(s.prim_header_address),clip_address(s.clip_address),segment_index(s.segment_index),flags(s.flags),resource_address(s.resource_address),brush_kind(s.brush_kind){ | 
| 284 | } | 
| 285 | friend Instance if_then_else(I32 c, Instance t, Instance e) { return Instance( | 
| 286 | if_then_else(c, t.prim_header_address, e.prim_header_address), if_then_else(c, t.clip_address, e.clip_address), if_then_else(c, t.segment_index, e.segment_index), if_then_else(c, t.flags, e.flags), if_then_else(c, t.resource_address, e.resource_address), if_then_else(c, t.brush_kind, e.brush_kind)); | 
| 287 | }}; | 
| 288 | struct PrimitiveHeader_scalar { | 
| 289 | RectWithEndpoint_scalar local_rect; | 
| 290 | RectWithEndpoint_scalar local_clip_rect; | 
| 291 | float z; | 
| 292 | int32_t specific_prim_address; | 
| 293 | int32_t transform_id; | 
| 294 | int32_t picture_task_address; | 
| 295 | ivec4_scalar user_data; | 
| 296 | PrimitiveHeader_scalar() = default; | 
| 297 | PrimitiveHeader_scalar(RectWithEndpoint_scalar local_rect, RectWithEndpoint_scalar local_clip_rect, float z, int32_t specific_prim_address, int32_t transform_id, int32_t picture_task_address, ivec4_scalar user_data) : local_rect(local_rect), local_clip_rect(local_clip_rect), z(z), specific_prim_address(specific_prim_address), transform_id(transform_id), picture_task_address(picture_task_address), user_data(user_data){} | 
| 298 | }; | 
| 299 | struct PrimitiveHeader { | 
| 300 | RectWithEndpoint local_rect; | 
| 301 | RectWithEndpoint local_clip_rect; | 
| 302 | Float z; | 
| 303 | I32 specific_prim_address; | 
| 304 | I32 transform_id; | 
| 305 | I32 picture_task_address; | 
| 306 | ivec4 user_data; | 
| 307 | PrimitiveHeader() = default; | 
| 308 | PrimitiveHeader(RectWithEndpoint local_rect, RectWithEndpoint local_clip_rect, Float z, I32 specific_prim_address, I32 transform_id, I32 picture_task_address, ivec4 user_data) : local_rect(local_rect), local_clip_rect(local_clip_rect), z(z), specific_prim_address(specific_prim_address), transform_id(transform_id), picture_task_address(picture_task_address), user_data(user_data){} | 
| 309 | PrimitiveHeader(RectWithEndpoint_scalar local_rect, RectWithEndpoint_scalar local_clip_rect, float z, int32_t specific_prim_address, int32_t transform_id, int32_t picture_task_address, ivec4_scalar user_data):local_rect(local_rect),local_clip_rect(local_clip_rect),z(z),specific_prim_address(specific_prim_address),transform_id(transform_id),picture_task_address(picture_task_address),user_data(user_data){ | 
| 310 | } | 
| 311 | IMPLICIT PrimitiveHeader(PrimitiveHeader_scalar s):local_rect(s.local_rect),local_clip_rect(s.local_clip_rect),z(s.z),specific_prim_address(s.specific_prim_address),transform_id(s.transform_id),picture_task_address(s.picture_task_address),user_data(s.user_data){ | 
| 312 | } | 
| 313 | friend PrimitiveHeader if_then_else(I32 c, PrimitiveHeader t, PrimitiveHeader e) { return PrimitiveHeader( | 
| 314 | if_then_else(c, t.local_rect, e.local_rect), if_then_else(c, t.local_clip_rect, e.local_clip_rect), if_then_else(c, t.z, e.z), if_then_else(c, t.specific_prim_address, e.specific_prim_address), if_then_else(c, t.transform_id, e.transform_id), if_then_else(c, t.picture_task_address, e.picture_task_address), if_then_else(c, t.user_data, e.user_data)); | 
| 315 | }}; | 
| 316 | struct VertexInfo_scalar { | 
| 317 | vec2_scalar local_pos; | 
| 318 | vec4_scalar world_pos; | 
| 319 | VertexInfo_scalar() = default; | 
| 320 | VertexInfo_scalar(vec2_scalar local_pos, vec4_scalar world_pos) : local_pos(local_pos), world_pos(world_pos){} | 
| 321 | }; | 
| 322 | struct VertexInfo { | 
| 323 | vec2 local_pos; | 
| 324 | vec4 world_pos; | 
| 325 | VertexInfo() = default; | 
| 326 | VertexInfo(vec2 local_pos, vec4 world_pos) : local_pos(local_pos), world_pos(world_pos){} | 
| 327 | VertexInfo(vec2_scalar local_pos, vec4_scalar world_pos):local_pos(local_pos),world_pos(world_pos){ | 
| 328 | } | 
| 329 | IMPLICIT VertexInfo(VertexInfo_scalar s):local_pos(s.local_pos),world_pos(s.world_pos){ | 
| 330 | } | 
| 331 | friend VertexInfo if_then_else(I32 c, VertexInfo t, VertexInfo e) { return VertexInfo( | 
| 332 | if_then_else(c, t.local_pos, e.local_pos), if_then_else(c, t.world_pos, e.world_pos)); | 
| 333 | }}; | 
| 334 | vec2 vUv; | 
| 335 | // vec4_scalar vUvRect; | 
| 336 | // vec2_scalar vOffsetScale; | 
| 337 | // ivec2_scalar vSupport; | 
| 338 | // vec2_scalar vGaussCoefficients; | 
| 339 | int32_t aBlurRenderTaskAddress; | 
| 340 | int32_t aBlurSourceTaskAddress; | 
| 341 | int32_t aBlurDirection; | 
| 342 | vec3_scalar aBlurParams; | 
| 343 | struct BlurTask_scalar { | 
| 344 | RectWithEndpoint_scalar task_rect; | 
| 345 | float blur_radius; | 
| 346 | vec2_scalar blur_region; | 
| 347 | BlurTask_scalar() = default; | 
| 348 | BlurTask_scalar(RectWithEndpoint_scalar task_rect, float blur_radius, vec2_scalar blur_region) : task_rect(task_rect), blur_radius(blur_radius), blur_region(blur_region){} | 
| 349 | }; | 
| 350 | struct BlurTask { | 
| 351 | RectWithEndpoint task_rect; | 
| 352 | Float blur_radius; | 
| 353 | vec2 blur_region; | 
| 354 | BlurTask() = default; | 
| 355 | BlurTask(RectWithEndpoint task_rect, Float blur_radius, vec2 blur_region) : task_rect(task_rect), blur_radius(blur_radius), blur_region(blur_region){} | 
| 356 | BlurTask(RectWithEndpoint_scalar task_rect, float blur_radius, vec2_scalar blur_region):task_rect(task_rect),blur_radius(blur_radius),blur_region(blur_region){ | 
| 357 | } | 
| 358 | IMPLICIT BlurTask(BlurTask_scalar s):task_rect(s.task_rect),blur_radius(s.blur_radius),blur_region(s.blur_region){ | 
| 359 | } | 
| 360 | friend BlurTask if_then_else(I32 c, BlurTask t, BlurTask e) { return BlurTask( | 
| 361 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.blur_radius, e.blur_radius), if_then_else(c, t.blur_region, e.blur_region)); | 
| 362 | }}; | 
| 363 | RectWithEndpoint_scalar fetch_render_task_rect(int32_t index) { | 
| 364 | ivec2_scalar uv = make_ivec2(make_int((2u)*((make_uint(index))%((1024u)/(2u)))), make_int((make_uint(index))/((1024u)/(2u)))); | 
| 365 | auto sRenderTasks_uv_fetch = texelFetchPtr(sRenderTasks, uv, 0, 1, 0, 0); | 
| 366 | vec4_scalar texel0 = texelFetchUnchecked(sRenderTasks, sRenderTasks_uv_fetch, 0, 0); | 
| 367 | vec4_scalar texel1 = texelFetchUnchecked(sRenderTasks, sRenderTasks_uv_fetch, 1, 0); | 
Value stored to 'texel1' during its initialization is never read
| 368 | RectWithEndpoint_scalar task_rect = RectWithEndpoint_scalar((texel0).sel(X,Y), (texel0).sel(Z,W)); | 
| 369 | return task_rect; | 
| 370 | } | 
| 371 | BlurTask_scalar fetch_blur_task(int32_t address) { | 
| 372 | RectWithEndpoint_scalar task_rect = fetch_render_task_rect(address); | 
| 373 | BlurTask_scalar task = BlurTask_scalar(task_rect, (aBlurParams).x, (aBlurParams).sel(Y,Z)); | 
| 374 | return task; | 
| 375 | } | 
| 376 | void calculate_gauss_coefficients(float sigma) { | 
| 377 | vGaussCoefficients = make_vec2((1.f)/((sqrt__glsl_sqrt((2.f)*(3.1415927f)))*(sigma)), exp__glsl_exp((-(0.5f))/((sigma)*(sigma)))); | 
| 378 | vec3_scalar gauss_coefficient = make_vec3(vGaussCoefficients, ((vGaussCoefficients).y)*((vGaussCoefficients).y)); | 
| 379 | float gauss_coefficient_total = (gauss_coefficient).x; | 
| 380 | for ( int32_t i = 1; | 
| 381 | (i)<=((vSupport).x); i += 2) { | 
| 382 | (gauss_coefficient).lsel(X,Y) *= (gauss_coefficient).sel(Y,Z); | 
| 383 | float gauss_coefficient_subtotal = (gauss_coefficient).x; | 
| 384 | (gauss_coefficient).lsel(X,Y) *= (gauss_coefficient).sel(Y,Z); | 
| 385 | gauss_coefficient_subtotal += (gauss_coefficient).x; | 
| 386 | gauss_coefficient_total += (2.f)*(gauss_coefficient_subtotal); | 
| 387 | } | 
| 388 | (vGaussCoefficients).x /= gauss_coefficient_total; | 
| 389 | } | 
| 390 | ALWAYS_INLINE void main(void) { | 
| 391 | BlurTask_scalar blur_task = fetch_blur_task(aBlurRenderTaskAddress); | 
| 392 | RectWithEndpoint_scalar src_rect = fetch_render_task_rect(aBlurSourceTaskAddress); | 
| 393 | RectWithEndpoint_scalar target_rect = (blur_task).task_rect; | 
| 394 | vec2_scalar texture_size = make_vec2((textureSize(sColor0, 0)).sel(X,Y)); | 
| 395 | (vSupport).x = (make_int(ceil__glsl_ceil((1.5f)*((blur_task).blur_radius))))*(2); | 
| 396 | if (((vSupport).x)>(0)) { | 
| 397 | { | 
| 398 | calculate_gauss_coefficients((blur_task).blur_radius); | 
| 399 | } | 
| 400 | } else { | 
| 401 | vGaussCoefficients = make_vec2(1.f, 1.f); | 
| 402 | } | 
| 403 | switch (aBlurDirection) { | 
| 404 | case 0: | 
| 405 | vOffsetScale = make_vec2((1.f)/((texture_size).x), 0.f); | 
| 406 | break; | 
| 407 | case 1: | 
| 408 | vOffsetScale = make_vec2(0.f, (1.f)/((texture_size).y)); | 
| 409 | break; | 
| 410 | default: | 
| 411 | vOffsetScale = make_vec2(0.f); | 
| 412 | } | 
| 413 | vUvRect = make_vec4(((src_rect).p0)+(make_vec2(0.5f)), (((src_rect).p0)+((blur_task).blur_region))-(make_vec2(0.5f))); | 
| 414 | vUvRect /= (texture_size).sel(X,Y,X,Y); | 
| 415 | vec2 pos = mix((target_rect).p0, (target_rect).p1, (aPosition).sel(X,Y)); | 
| 416 | vec2_scalar uv0 = ((src_rect).p0)/(texture_size); | 
| 417 | vec2_scalar uv1 = ((src_rect).p1)/(texture_size); | 
| 418 | vUv = mix(uv0, uv1, (aPosition).sel(X,Y)); | 
| 419 | gl_Position = (uTransform)*(make_vec4(pos, 0.f, 1.f)); | 
| 420 | } | 
| 421 | static void set_uniform_1i(VertexShaderImpl* impl, int index, int value) { | 
| 422 | Self* self = (Self*)impl; | 
| 423 | if (self->samplers.set_slot(index, value)) return; | 
| 424 | switch (index) { | 
| 425 | case 7: | 
| 426 | assert(0); // sClipMask | 
| 427 | break; | 
| 428 | case 8: | 
| 429 | assert(0); // sColor0 | 
| 430 | break; | 
| 431 | case 2: | 
| 432 | assert(0); // sGpuCache | 
| 433 | break; | 
| 434 | case 4: | 
| 435 | assert(0); // sPrimitiveHeadersF | 
| 436 | break; | 
| 437 | case 5: | 
| 438 | assert(0); // sPrimitiveHeadersI | 
| 439 | break; | 
| 440 | case 1: | 
| 441 | assert(0); // sRenderTasks | 
| 442 | break; | 
| 443 | case 3: | 
| 444 | assert(0); // sTransformPalette | 
| 445 | break; | 
| 446 | case 6: | 
| 447 | assert(0); // uTransform | 
| 448 | break; | 
| 449 | } | 
| 450 | } | 
| 451 | static void set_uniform_4fv(VertexShaderImpl* impl, int index, const float *value) { | 
| 452 | Self* self = (Self*)impl; | 
| 453 | switch (index) { | 
| 454 | case 7: | 
| 455 | assert(0); // sClipMask | 
| 456 | break; | 
| 457 | case 8: | 
| 458 | assert(0); // sColor0 | 
| 459 | break; | 
| 460 | case 2: | 
| 461 | assert(0); // sGpuCache | 
| 462 | break; | 
| 463 | case 4: | 
| 464 | assert(0); // sPrimitiveHeadersF | 
| 465 | break; | 
| 466 | case 5: | 
| 467 | assert(0); // sPrimitiveHeadersI | 
| 468 | break; | 
| 469 | case 1: | 
| 470 | assert(0); // sRenderTasks | 
| 471 | break; | 
| 472 | case 3: | 
| 473 | assert(0); // sTransformPalette | 
| 474 | break; | 
| 475 | case 6: | 
| 476 | assert(0); // uTransform | 
| 477 | break; | 
| 478 | } | 
| 479 | } | 
| 480 | static void set_uniform_matrix4fv(VertexShaderImpl* impl, int index, const float *value) { | 
| 481 | Self* self = (Self*)impl; | 
| 482 | switch (index) { | 
| 483 | case 7: | 
| 484 | assert(0); // sClipMask | 
| 485 | break; | 
| 486 | case 8: | 
| 487 | assert(0); // sColor0 | 
| 488 | break; | 
| 489 | case 2: | 
| 490 | assert(0); // sGpuCache | 
| 491 | break; | 
| 492 | case 4: | 
| 493 | assert(0); // sPrimitiveHeadersF | 
| 494 | break; | 
| 495 | case 5: | 
| 496 | assert(0); // sPrimitiveHeadersI | 
| 497 | break; | 
| 498 | case 1: | 
| 499 | assert(0); // sRenderTasks | 
| 500 | break; | 
| 501 | case 3: | 
| 502 | assert(0); // sTransformPalette | 
| 503 | break; | 
| 504 | case 6: | 
| 505 | self->uTransform = mat4_scalar::load_from_ptr(value); | 
| 506 | break; | 
| 507 | } | 
| 508 | } | 
| 509 | static void load_attribs(VertexShaderImpl* impl, VertexAttrib *attribs, uint32_t start, int instance, int count) {Self* self = (Self*)impl; | 
| 510 | load_attrib(self->aPosition, attribs[self->attrib_locations.aPosition], start, instance, count); | 
| 511 | load_flat_attrib(self->aBlurRenderTaskAddress, attribs[self->attrib_locations.aBlurRenderTaskAddress], start, instance, count); | 
| 512 | load_flat_attrib(self->aBlurSourceTaskAddress, attribs[self->attrib_locations.aBlurSourceTaskAddress], start, instance, count); | 
| 513 | load_flat_attrib(self->aBlurDirection, attribs[self->attrib_locations.aBlurDirection], start, instance, count); | 
| 514 | load_flat_attrib(self->aBlurParams, attribs[self->attrib_locations.aBlurParams], start, instance, count); | 
| 515 | } | 
| 516 | public: | 
| 517 | struct InterpOutputs { | 
| 518 | vec2_scalar vUv; | 
| 519 | }; | 
| 520 | private: | 
| 521 | ALWAYS_INLINE void store_interp_outputs(char* dest_ptr, size_t stride) { | 
| 522 | for(int n = 0; n < 4; n++) { | 
| 523 | auto* dest = reinterpret_cast<InterpOutputs*>(dest_ptr); | 
| 524 | dest->vUv = get_nth(vUv, n); | 
| 525 | dest_ptr += stride; | 
| 526 | } | 
| 527 | } | 
| 528 | static void run(VertexShaderImpl* impl, char* interps, size_t interp_stride) { | 
| 529 | Self* self = (Self*)impl; | 
| 530 | self->main(); | 
| 531 | self->store_interp_outputs(interps, interp_stride); | 
| 532 | } | 
| 533 | static void init_batch(VertexShaderImpl* impl) { | 
| 534 | Self* self = (Self*)impl; self->bind_textures(); } | 
| 535 | public: | 
| 536 | cs_blur_ALPHA_TARGET_vert() { | 
| 537 | set_uniform_1i_func = &set_uniform_1i; | 
| 538 | set_uniform_4fv_func = &set_uniform_4fv; | 
| 539 | set_uniform_matrix4fv_func = &set_uniform_matrix4fv; | 
| 540 | init_batch_func = &init_batch; | 
| 541 | load_attribs_func = &load_attribs; | 
| 542 | run_primitive_func = &run; | 
| 543 | } | 
| 544 | }; | 
| 545 | |
| 546 | |
| 547 | struct cs_blur_ALPHA_TARGET_frag : FragmentShaderImpl, cs_blur_ALPHA_TARGET_vert { | 
| 548 | private: | 
| 549 | typedef cs_blur_ALPHA_TARGET_frag Self; | 
| 550 | #define oFragColor gl_FragColor | 
| 551 | // vec4 oFragColor; | 
| 552 | // sampler2D sColor0; | 
| 553 | // sampler2D sColor1; | 
| 554 | // sampler2D sColor2; | 
| 555 | struct RectWithSize_scalar { | 
| 556 | vec2_scalar p0; | 
| 557 | vec2_scalar size; | 
| 558 | RectWithSize_scalar() = default; | 
| 559 | RectWithSize_scalar(vec2_scalar p0, vec2_scalar size) : p0(p0), size(size){} | 
| 560 | }; | 
| 561 | struct RectWithSize { | 
| 562 | vec2 p0; | 
| 563 | vec2 size; | 
| 564 | RectWithSize() = default; | 
| 565 | RectWithSize(vec2 p0, vec2 size) : p0(p0), size(size){} | 
| 566 | RectWithSize(vec2_scalar p0, vec2_scalar size):p0(p0),size(size){ | 
| 567 | } | 
| 568 | IMPLICIT RectWithSize(RectWithSize_scalar s):p0(s.p0),size(s.size){ | 
| 569 | } | 
| 570 | friend RectWithSize if_then_else(I32 c, RectWithSize t, RectWithSize e) { return RectWithSize( | 
| 571 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.size, e.size)); | 
| 572 | }}; | 
| 573 | struct RectWithEndpoint_scalar { | 
| 574 | vec2_scalar p0; | 
| 575 | vec2_scalar p1; | 
| 576 | RectWithEndpoint_scalar() = default; | 
| 577 | RectWithEndpoint_scalar(vec2_scalar p0, vec2_scalar p1) : p0(p0), p1(p1){} | 
| 578 | }; | 
| 579 | struct RectWithEndpoint { | 
| 580 | vec2 p0; | 
| 581 | vec2 p1; | 
| 582 | RectWithEndpoint() = default; | 
| 583 | RectWithEndpoint(vec2 p0, vec2 p1) : p0(p0), p1(p1){} | 
| 584 | RectWithEndpoint(vec2_scalar p0, vec2_scalar p1):p0(p0),p1(p1){ | 
| 585 | } | 
| 586 | IMPLICIT RectWithEndpoint(RectWithEndpoint_scalar s):p0(s.p0),p1(s.p1){ | 
| 587 | } | 
| 588 | friend RectWithEndpoint if_then_else(I32 c, RectWithEndpoint t, RectWithEndpoint e) { return RectWithEndpoint( | 
| 589 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.p1, e.p1)); | 
| 590 | }}; | 
| 591 | // sampler2D sGpuCache; | 
| 592 | // vec4_scalar vTransformBounds; | 
| 593 | // sampler2D sClipMask; | 
| 594 | struct Fragment_scalar { | 
| 595 | vec4_scalar color; | 
| 596 | Fragment_scalar() = default; | 
| 597 | explicit Fragment_scalar(vec4_scalar color) : color(color){} | 
| 598 | }; | 
| 599 | struct Fragment { | 
| 600 | vec4 color; | 
| 601 | Fragment() = default; | 
| 602 | explicit Fragment(vec4 color) : color(color){} | 
| 603 | explicit Fragment(vec4_scalar color):color(color){ | 
| 604 | } | 
| 605 | IMPLICIT Fragment(Fragment_scalar s):color(s.color){ | 
| 606 | } | 
| 607 | friend Fragment if_then_else(I32 c, Fragment t, Fragment e) { return Fragment( | 
| 608 | if_then_else(c, t.color, e.color)); | 
| 609 | }}; | 
| 610 | vec2 vUv; | 
| 611 | // vec4_scalar vUvRect; | 
| 612 | // vec2_scalar vOffsetScale; | 
| 613 | // ivec2_scalar vSupport; | 
| 614 | // vec2_scalar vGaussCoefficients; | 
| 615 | ALWAYS_INLINE void main(void) { | 
| 616 | Float original_color = (texture(sColor0, vUv)).x; | 
| 617 | vec3_scalar gauss_coefficient = make_vec3(vGaussCoefficients, ((vGaussCoefficients).y)*((vGaussCoefficients).y)); | 
| 618 | Float avg_color = (original_color)*((gauss_coefficient).x); | 
| 619 | int32_t support = min((vSupport).x, 300); | 
| 620 | for ( int32_t i = 1; | 
| 621 | (i)<=(support); i += 2) { | 
| 622 | (gauss_coefficient).lsel(X,Y) *= (gauss_coefficient).sel(Y,Z); | 
| 623 | float gauss_coefficient_subtotal = (gauss_coefficient).x; | 
| 624 | (gauss_coefficient).lsel(X,Y) *= (gauss_coefficient).sel(Y,Z); | 
| 625 | gauss_coefficient_subtotal += (gauss_coefficient).x; | 
| 626 | float gauss_ratio = ((gauss_coefficient).x)/(gauss_coefficient_subtotal); | 
| 627 | vec2_scalar offset = (vOffsetScale)*((make_float(i))+(gauss_ratio)); | 
| 628 | vec2 st0 = max((vUv)-(offset), (vUvRect).sel(X,Y)); | 
| 629 | vec2 st1 = min((vUv)+(offset), (vUvRect).sel(Z,W)); | 
| 630 | avg_color += (((texture(sColor0, st0)).x)+((texture(sColor0, st1)).x))*(gauss_coefficient_subtotal); | 
| 631 | } | 
| 632 | oFragColor = make_vec4(avg_color); | 
| 633 | } | 
| 634 | void swgl_drawSpanR8() { | 
| 635 | swgl_commitGaussianBlurR8(sColor0, vUv, vUvRect, ((vOffsetScale).x)!=(0.f), (vSupport).x, vGaussCoefficients); | 
| 636 | } | 
| 637 | typedef cs_blur_ALPHA_TARGET_vert::InterpOutputs InterpInputs; | 
| 638 | InterpInputs interp_step; | 
| 639 | struct InterpPerspective { | 
| 640 | vec2 vUv; | 
| 641 | }; | 
| 642 | InterpPerspective interp_perspective; | 
| 643 | static void read_interp_inputs(FragmentShaderImpl* impl, const void* init_, const void* step_) {Self* self = (Self*)impl;const InterpInputs* init = (const InterpInputs*)init_;const InterpInputs* step = (const InterpInputs*)step_; | 
| 644 | self->vUv = init_interp(init->vUv, step->vUv); | 
| 645 | self->interp_step.vUv = step->vUv * 4.0f; | 
| 646 | } | 
| 647 | static void read_perspective_inputs(FragmentShaderImpl* impl, const void* init_, const void* step_) {Self* self = (Self*)impl;const InterpInputs* init = (const InterpInputs*)init_;const InterpInputs* step = (const InterpInputs*)step_; | 
| 648 | Float w = 1.0f / self->gl_FragCoord.w; | 
| 649 | self->interp_perspective.vUv = init_interp(init->vUv, step->vUv); | 
| 650 | self->vUv = self->interp_perspective.vUv * w; | 
| 651 | self->interp_step.vUv = step->vUv * 4.0f; | 
| 652 | } | 
| 653 | ALWAYS_INLINE void step_interp_inputs(int steps = 4) { | 
| 654 | float chunks = steps * 0.25f; | 
| 655 | vUv += interp_step.vUv * chunks; | 
| 656 | } | 
| 657 | ALWAYS_INLINE void step_perspective_inputs(int steps = 4) { | 
| 658 | step_perspective(steps); | 
| 659 | float chunks = steps * 0.25f; | 
| 660 | Float w = 1.0f / gl_FragCoord.w; | 
| 661 | interp_perspective.vUv += interp_step.vUv * chunks; | 
| 662 | vUv = w * interp_perspective.vUv; | 
| 663 | } | 
| 664 | static void run(FragmentShaderImpl* impl) { | 
| 665 | Self* self = (Self*)impl; | 
| 666 | self->main(); | 
| 667 | self->step_interp_inputs(); | 
| 668 | } | 
| 669 | static void skip(FragmentShaderImpl* impl, int steps) { | 
| 670 | Self* self = (Self*)impl; | 
| 671 | self->step_interp_inputs(steps); | 
| 672 | } | 
| 673 | static void run_perspective(FragmentShaderImpl* impl) { | 
| 674 | Self* self = (Self*)impl; | 
| 675 | self->main(); | 
| 676 | self->step_perspective_inputs(); | 
| 677 | } | 
| 678 | static void skip_perspective(FragmentShaderImpl* impl, int steps) { | 
| 679 | Self* self = (Self*)impl; | 
| 680 | self->step_perspective_inputs(steps); | 
| 681 | } | 
| 682 | static int draw_span_R8(FragmentShaderImpl* impl) { | 
| 683 | Self* self = (Self*)impl; DISPATCH_DRAW_SPAN(self, R8); } | 
| 684 | public: | 
| 685 | cs_blur_ALPHA_TARGET_frag() { | 
| 686 | init_span_func = &read_interp_inputs; | 
| 687 | run_func = &run; | 
| 688 | skip_func = &skip; | 
| 689 | draw_span_R8_func = &draw_span_R8; | 
| 690 | enable_perspective(); | 
| 691 | init_span_w_func = &read_perspective_inputs; | 
| 692 | run_w_func = &run_perspective; | 
| 693 | skip_w_func = &skip_perspective; | 
| 694 | } | 
| 695 | }; | 
| 696 | |
| 697 | struct cs_blur_ALPHA_TARGET_program : ProgramImpl, cs_blur_ALPHA_TARGET_frag { | 
| 698 | int get_uniform(const char *name) const override { | 
| 699 | if (strcmp("sClipMask", name) == 0) { return 7; } | 
| 700 | if (strcmp("sColor0", name) == 0) { return 8; } | 
| 701 | if (strcmp("sGpuCache", name) == 0) { return 2; } | 
| 702 | if (strcmp("sPrimitiveHeadersF", name) == 0) { return 4; } | 
| 703 | if (strcmp("sPrimitiveHeadersI", name) == 0) { return 5; } | 
| 704 | if (strcmp("sRenderTasks", name) == 0) { return 1; } | 
| 705 | if (strcmp("sTransformPalette", name) == 0) { return 3; } | 
| 706 | if (strcmp("uTransform", name) == 0) { return 6; } | 
| 707 | return -1; | 
| 708 | } | 
| 709 | void bind_attrib(const char* name, int index) override { | 
| 710 | attrib_locations.bind_loc(name, index); | 
| 711 | } | 
| 712 | int get_attrib(const char* name) const override { | 
| 713 | return attrib_locations.get_loc(name); | 
| 714 | } | 
| 715 | size_t interpolants_size() const override { return sizeof(InterpOutputs); } | 
| 716 | VertexShaderImpl* get_vertex_shader() override { | 
| 717 | return this; | 
| 718 | } | 
| 719 | FragmentShaderImpl* get_fragment_shader() override { | 
| 720 | return this; | 
| 721 | } | 
| 722 | const char* get_name() const override { return "cs_blur_ALPHA_TARGET"; } | 
| 723 | static ProgramImpl* loader() { return new cs_blur_ALPHA_TARGET_program; } | 
| 724 | }; | 
| 725 |