| File: | root/firefox-clang/obj-x86_64-pc-linux-gnu/x86_64-unknown-linux-gnu/debug/build/swgl-19ea748e17a3c52a/out/brush_solid_DEBUG_OVERDRAW.h |
| Warning: | line 671, column 9: Value stored to 'chunks' during its initialization is never read |
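The flagged statement (file line 671 in the listing below) initializes a local 'chunks' that nothing else in the function reads, which is exactly the dead-store pattern this checker reports. The sketch that follows is not taken from the generated shader; the name dead_store_sketch is invented purely to illustrate the pattern and, under the assumption that the value really is unneeded, the usual way such a report is resolved.

    // Hypothetical sketch of the dead-store pattern behind the warning above.
    static void dead_store_sketch(int steps = 4) {
        float chunks = steps * 0.25f;  // value stored here...
        // ...but never read again before the function returns, so the
        // analyzer flags the initialization as a dead store.
    }
    // In this DEBUG_OVERDRAW variant the fragment shader declares no
    // interpolated outputs to advance, so dropping the unused local (or
    // folding the computation into the point where it would be consumed)
    // would make the warning go away.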
| 1 | struct brush_solid_DEBUG_OVERDRAW_common { |
| 2 | struct Samplers { |
| 3 | sampler2D_impl sClipMask_impl; |
| 4 | int sClipMask_slot; |
| 5 | sampler2D_impl sGpuCache_impl; |
| 6 | int sGpuCache_slot; |
| 7 | sampler2D_impl sPrimitiveHeadersF_impl; |
| 8 | int sPrimitiveHeadersF_slot; |
| 9 | isampler2D_impl sPrimitiveHeadersI_impl; |
| 10 | int sPrimitiveHeadersI_slot; |
| 11 | sampler2D_impl sRenderTasks_impl; |
| 12 | int sRenderTasks_slot; |
| 13 | sampler2D_impl sTransformPalette_impl; |
| 14 | int sTransformPalette_slot; |
| 15 | bool set_slot(int index, int value) { |
| 16 | switch (index) { |
| 17 | case 7: |
| 18 | sClipMask_slot = value; |
| 19 | return true; |
| 20 | case 2: |
| 21 | sGpuCache_slot = value; |
| 22 | return true; |
| 23 | case 4: |
| 24 | sPrimitiveHeadersF_slot = value; |
| 25 | return true; |
| 26 | case 5: |
| 27 | sPrimitiveHeadersI_slot = value; |
| 28 | return true; |
| 29 | case 1: |
| 30 | sRenderTasks_slot = value; |
| 31 | return true; |
| 32 | case 3: |
| 33 | sTransformPalette_slot = value; |
| 34 | return true; |
| 35 | } |
| 36 | return false; |
| 37 | } |
| 38 | } samplers; |
| 39 | struct AttribLocations { |
| 40 | int aPosition = NULL_ATTRIB; |
| 41 | int aData = NULL_ATTRIB; |
| 42 | void bind_loc(const char* name, int index) { |
| 43 | if (strcmp("aPosition", name) == 0) { aPosition = index; return; } |
| 44 | if (strcmp("aData", name) == 0) { aData = index; return; } |
| 45 | } |
| 46 | int get_loc(const char* name) const { |
| 47 | if (strcmp("aPosition", name) == 0) { return aPosition != NULL_ATTRIB ? aPosition : -1; } |
| 48 | if (strcmp("aData", name) == 0) { return aData != NULL_ATTRIB ? aData : -1; } |
| 49 | return -1; |
| 50 | } |
| 51 | } attrib_locations; |
| 52 | vec4_scalar vTransformBounds; |
| 53 | vec4_scalar v_color; |
| 54 | sampler2D sClipMask; |
| 55 | sampler2D sGpuCache; |
| 56 | sampler2D sPrimitiveHeadersF; |
| 57 | isampler2D sPrimitiveHeadersI; |
| 58 | sampler2D sRenderTasks; |
| 59 | sampler2D sTransformPalette; |
| 60 | mat4_scalar uTransform; |
| 61 | void bind_textures() { |
| 62 | sClipMask = lookup_sampler(&samplers.sClipMask_impl, samplers.sClipMask_slot); |
| 63 | sGpuCache = lookup_sampler(&samplers.sGpuCache_impl, samplers.sGpuCache_slot); |
| 64 | sPrimitiveHeadersF = lookup_sampler(&samplers.sPrimitiveHeadersF_impl, samplers.sPrimitiveHeadersF_slot); |
| 65 | sPrimitiveHeadersI = lookup_isampler(&samplers.sPrimitiveHeadersI_impl, samplers.sPrimitiveHeadersI_slot); |
| 66 | sRenderTasks = lookup_sampler(&samplers.sRenderTasks_impl, samplers.sRenderTasks_slot); |
| 67 | sTransformPalette = lookup_sampler(&samplers.sTransformPalette_impl, samplers.sTransformPalette_slot); |
| 68 | } |
| 69 | }; |
| 70 | struct brush_solid_DEBUG_OVERDRAW_vert : VertexShaderImpl, brush_solid_DEBUG_OVERDRAW_common { |
| 71 | private: |
| 72 | typedef brush_solid_DEBUG_OVERDRAW_vert Self; |
| 73 | // mat4_scalar uTransform; |
| 74 | vec2 aPosition; |
| 75 | struct RectWithSize_scalar { |
| 76 | vec2_scalar p0; |
| 77 | vec2_scalar size; |
| 78 | RectWithSize_scalar() = default; |
| 79 | RectWithSize_scalar(vec2_scalar p0, vec2_scalar size) : p0(p0), size(size){} |
| 80 | }; |
| 81 | struct RectWithSize { |
| 82 | vec2 p0; |
| 83 | vec2 size; |
| 84 | RectWithSize() = default; |
| 85 | RectWithSize(vec2 p0, vec2 size) : p0(p0), size(size){} |
| 86 | RectWithSize(vec2_scalar p0, vec2_scalar size):p0(p0),size(size){ |
| 87 | } |
| 88 | IMPLICIT RectWithSize(RectWithSize_scalar s):p0(s.p0),size(s.size){ |
| 89 | } |
| 90 | friend RectWithSize if_then_else(I32 c, RectWithSize t, RectWithSize e) { return RectWithSize( |
| 91 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.size, e.size)); |
| 92 | }}; |
| 93 | struct RectWithEndpoint_scalar { |
| 94 | vec2_scalar p0; |
| 95 | vec2_scalar p1; |
| 96 | RectWithEndpoint_scalar() = default; |
| 97 | RectWithEndpoint_scalar(vec2_scalar p0, vec2_scalar p1) : p0(p0), p1(p1){} |
| 98 | }; |
| 99 | struct RectWithEndpoint { |
| 100 | vec2 p0; |
| 101 | vec2 p1; |
| 102 | RectWithEndpoint() = default; |
| 103 | RectWithEndpoint(vec2 p0, vec2 p1) : p0(p0), p1(p1){} |
| 104 | RectWithEndpoint(vec2_scalar p0, vec2_scalar p1):p0(p0),p1(p1){ |
| 105 | } |
| 106 | IMPLICIT RectWithEndpoint(RectWithEndpoint_scalar s):p0(s.p0),p1(s.p1){ |
| 107 | } |
| 108 | friend RectWithEndpoint if_then_else(I32 c, RectWithEndpoint t, RectWithEndpoint e) { return RectWithEndpoint( |
| 109 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.p1, e.p1)); |
| 110 | }}; |
| 111 | // sampler2D sRenderTasks; |
| 112 | struct RenderTaskData_scalar { |
| 113 | RectWithEndpoint_scalar task_rect; |
| 114 | vec4_scalar user_data; |
| 115 | RenderTaskData_scalar() = default; |
| 116 | RenderTaskData_scalar(RectWithEndpoint_scalar task_rect, vec4_scalar user_data) : task_rect(task_rect), user_data(user_data){} |
| 117 | }; |
| 118 | struct RenderTaskData { |
| 119 | RectWithEndpoint task_rect; |
| 120 | vec4 user_data; |
| 121 | RenderTaskData() = default; |
| 122 | RenderTaskData(RectWithEndpoint task_rect, vec4 user_data) : task_rect(task_rect), user_data(user_data){} |
| 123 | RenderTaskData(RectWithEndpoint_scalar task_rect, vec4_scalar user_data):task_rect(task_rect),user_data(user_data){ |
| 124 | } |
| 125 | IMPLICIT RenderTaskData(RenderTaskData_scalar s):task_rect(s.task_rect),user_data(s.user_data){ |
| 126 | } |
| 127 | friend RenderTaskData if_then_else(I32 c, RenderTaskData t, RenderTaskData e) { return RenderTaskData( |
| 128 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.user_data, e.user_data)); |
| 129 | }}; |
| 130 | struct PictureTask_scalar { |
| 131 | RectWithEndpoint_scalar task_rect; |
| 132 | float device_pixel_scale; |
| 133 | vec2_scalar content_origin; |
| 134 | PictureTask_scalar() = default; |
| 135 | PictureTask_scalar(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar content_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), content_origin(content_origin){} |
| 136 | }; |
| 137 | struct PictureTask { |
| 138 | RectWithEndpoint task_rect; |
| 139 | Float device_pixel_scale; |
| 140 | vec2 content_origin; |
| 141 | PictureTask() = default; |
| 142 | PictureTask(RectWithEndpoint task_rect, Float device_pixel_scale, vec2 content_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), content_origin(content_origin){} |
| 143 | PictureTask(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar content_origin):task_rect(task_rect),device_pixel_scale(device_pixel_scale),content_origin(content_origin){ |
| 144 | } |
| 145 | IMPLICIT PictureTask(PictureTask_scalar s):task_rect(s.task_rect),device_pixel_scale(s.device_pixel_scale),content_origin(s.content_origin){ |
| 146 | } |
| 147 | friend PictureTask if_then_else(I32 c, PictureTask t, PictureTask e) { return PictureTask( |
| 148 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.device_pixel_scale, e.device_pixel_scale), if_then_else(c, t.content_origin, e.content_origin)); |
| 149 | }}; |
| 150 | struct ClipArea_scalar { |
| 151 | RectWithEndpoint_scalar task_rect; |
| 152 | float device_pixel_scale; |
| 153 | vec2_scalar screen_origin; |
| 154 | ClipArea_scalar() = default; |
| 155 | ClipArea_scalar(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar screen_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), screen_origin(screen_origin){} |
| 156 | }; |
| 157 | struct ClipArea { |
| 158 | RectWithEndpoint task_rect; |
| 159 | Float device_pixel_scale; |
| 160 | vec2 screen_origin; |
| 161 | ClipArea() = default; |
| 162 | ClipArea(RectWithEndpoint task_rect, Float device_pixel_scale, vec2 screen_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), screen_origin(screen_origin){} |
| 163 | ClipArea(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar screen_origin):task_rect(task_rect),device_pixel_scale(device_pixel_scale),screen_origin(screen_origin){ |
| 164 | } |
| 165 | IMPLICIT ClipArea(ClipArea_scalar s):task_rect(s.task_rect),device_pixel_scale(s.device_pixel_scale),screen_origin(s.screen_origin){ |
| 166 | } |
| 167 | friend ClipArea if_then_else(I32 c, ClipArea t, ClipArea e) { return ClipArea( |
| 168 | if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.device_pixel_scale, e.device_pixel_scale), if_then_else(c, t.screen_origin, e.screen_origin)); |
| 169 | }}; |
| 170 | // sampler2D sGpuCache; |
| 171 | struct ImageSource_scalar { |
| 172 | RectWithEndpoint_scalar uv_rect; |
| 173 | vec4_scalar user_data; |
| 174 | ImageSource_scalar() = default; |
| 175 | ImageSource_scalar(RectWithEndpoint_scalar uv_rect, vec4_scalar user_data) : uv_rect(uv_rect), user_data(user_data){} |
| 176 | }; |
| 177 | struct ImageSource { |
| 178 | RectWithEndpoint uv_rect; |
| 179 | vec4 user_data; |
| 180 | ImageSource() = default; |
| 181 | ImageSource(RectWithEndpoint uv_rect, vec4 user_data) : uv_rect(uv_rect), user_data(user_data){} |
| 182 | ImageSource(RectWithEndpoint_scalar uv_rect, vec4_scalar user_data):uv_rect(uv_rect),user_data(user_data){ |
| 183 | } |
| 184 | IMPLICIT ImageSource(ImageSource_scalar s):uv_rect(s.uv_rect),user_data(s.user_data){ |
| 185 | } |
| 186 | friend ImageSource if_then_else(I32 c, ImageSource t, ImageSource e) { return ImageSource( |
| 187 | if_then_else(c, t.uv_rect, e.uv_rect), if_then_else(c, t.user_data, e.user_data)); |
| 188 | }}; |
| 189 | struct ImageSourceExtra_scalar { |
| 190 | vec4_scalar st_tl; |
| 191 | vec4_scalar st_tr; |
| 192 | vec4_scalar st_bl; |
| 193 | vec4_scalar st_br; |
| 194 | ImageSourceExtra_scalar() = default; |
| 195 | ImageSourceExtra_scalar(vec4_scalar st_tl, vec4_scalar st_tr, vec4_scalar st_bl, vec4_scalar st_br) : st_tl(st_tl), st_tr(st_tr), st_bl(st_bl), st_br(st_br){} |
| 196 | }; |
| 197 | struct ImageSourceExtra { |
| 198 | vec4 st_tl; |
| 199 | vec4 st_tr; |
| 200 | vec4 st_bl; |
| 201 | vec4 st_br; |
| 202 | ImageSourceExtra() = default; |
| 203 | ImageSourceExtra(vec4 st_tl, vec4 st_tr, vec4 st_bl, vec4 st_br) : st_tl(st_tl), st_tr(st_tr), st_bl(st_bl), st_br(st_br){} |
| 204 | ImageSourceExtra(vec4_scalar st_tl, vec4_scalar st_tr, vec4_scalar st_bl, vec4_scalar st_br):st_tl(st_tl),st_tr(st_tr),st_bl(st_bl),st_br(st_br){ |
| 205 | } |
| 206 | IMPLICIT ImageSourceExtra(ImageSourceExtra_scalar s):st_tl(s.st_tl),st_tr(s.st_tr),st_bl(s.st_bl),st_br(s.st_br){ |
| 207 | } |
| 208 | friend ImageSourceExtra if_then_else(I32 c, ImageSourceExtra t, ImageSourceExtra e) { return ImageSourceExtra( |
| 209 | if_then_else(c, t.st_tl, e.st_tl), if_then_else(c, t.st_tr, e.st_tr), if_then_else(c, t.st_bl, e.st_bl), if_then_else(c, t.st_br, e.st_br)); |
| 210 | }}; |
| 211 | // vec4_scalar vTransformBounds; |
| 212 | // sampler2D sTransformPalette; |
| 213 | struct Transform_scalar { |
| 214 | mat4_scalar m; |
| 215 | mat4_scalar inv_m; |
| 216 | bool is_axis_aligned; |
| 217 | Transform_scalar() = default; |
| 218 | Transform_scalar(mat4_scalar m, mat4_scalar inv_m, bool is_axis_aligned) : m(m), inv_m(inv_m), is_axis_aligned(is_axis_aligned){} |
| 219 | }; |
| 220 | struct Transform { |
| 221 | mat4 m; |
| 222 | mat4 inv_m; |
| 223 | Bool is_axis_aligned; |
| 224 | Transform() = default; |
| 225 | Transform(mat4 m, mat4 inv_m, Bool is_axis_aligned) : m(m), inv_m(inv_m), is_axis_aligned(is_axis_aligned){} |
| 226 | Transform(mat4_scalar m, mat4_scalar inv_m, bool is_axis_aligned):m(m),inv_m(inv_m),is_axis_aligned(is_axis_aligned){ |
| 227 | } |
| 228 | IMPLICIT Transform(Transform_scalar s):m(s.m),inv_m(s.inv_m),is_axis_aligned(s.is_axis_aligned){ |
| 229 | } |
| 230 | friend Transform if_then_else(I32 c, Transform t, Transform e) { return Transform( |
| 231 | if_then_else(c, t.m, e.m), if_then_else(c, t.inv_m, e.inv_m), if_then_else(c, t.is_axis_aligned, e.is_axis_aligned)); |
| 232 | }}; |
| 233 | // sampler2D sClipMask; |
| 234 | // sampler2D sPrimitiveHeadersF; |
| 235 | // isampler2D sPrimitiveHeadersI; |
| 236 | ivec4_scalar aData; |
| 237 | struct Instance_scalar { |
| 238 | int32_t prim_header_address; |
| 239 | int32_t clip_address; |
| 240 | int32_t segment_index; |
| 241 | int32_t flags; |
| 242 | int32_t resource_address; |
| 243 | int32_t brush_kind; |
| 244 | Instance_scalar() = default; |
| 245 | Instance_scalar(int32_t prim_header_address, int32_t clip_address, int32_t segment_index, int32_t flags, int32_t resource_address, int32_t brush_kind) : prim_header_address(prim_header_address), clip_address(clip_address), segment_index(segment_index), flags(flags), resource_address(resource_address), brush_kind(brush_kind){} |
| 246 | }; |
| 247 | struct Instance { |
| 248 | I32 prim_header_address; |
| 249 | I32 clip_address; |
| 250 | I32 segment_index; |
| 251 | I32 flags; |
| 252 | I32 resource_address; |
| 253 | I32 brush_kind; |
| 254 | Instance() = default; |
| 255 | Instance(I32 prim_header_address, I32 clip_address, I32 segment_index, I32 flags, I32 resource_address, I32 brush_kind) : prim_header_address(prim_header_address), clip_address(clip_address), segment_index(segment_index), flags(flags), resource_address(resource_address), brush_kind(brush_kind){} |
| 256 | Instance(int32_t prim_header_address, int32_t clip_address, int32_t segment_index, int32_t flags, int32_t resource_address, int32_t brush_kind):prim_header_address(prim_header_address),clip_address(clip_address),segment_index(segment_index),flags(flags),resource_address(resource_address),brush_kind(brush_kind){ |
| 257 | } |
| 258 | IMPLICIT Instance(Instance_scalar s):prim_header_address(s.prim_header_address),clip_address(s.clip_address),segment_index(s.segment_index),flags(s.flags),resource_address(s.resource_address),brush_kind(s.brush_kind){ |
| 259 | } |
| 260 | friend Instance if_then_else(I32 c, Instance t, Instance e) { return Instance( |
| 261 | if_then_else(c, t.prim_header_address, e.prim_header_address), if_then_else(c, t.clip_address, e.clip_address), if_then_else(c, t.segment_index, e.segment_index), if_then_else(c, t.flags, e.flags), if_then_else(c, t.resource_address, e.resource_address), if_then_else(c, t.brush_kind, e.brush_kind)); |
| 262 | }}; |
| 263 | struct PrimitiveHeader_scalar { |
| 264 | RectWithEndpoint_scalar local_rect; |
| 265 | RectWithEndpoint_scalar local_clip_rect; |
| 266 | float z; |
| 267 | int32_t specific_prim_address; |
| 268 | int32_t transform_id; |
| 269 | int32_t picture_task_address; |
| 270 | ivec4_scalar user_data; |
| 271 | PrimitiveHeader_scalar() = default; |
| 272 | PrimitiveHeader_scalar(RectWithEndpoint_scalar local_rect, RectWithEndpoint_scalar local_clip_rect, float z, int32_t specific_prim_address, int32_t transform_id, int32_t picture_task_address, ivec4_scalar user_data) : local_rect(local_rect), local_clip_rect(local_clip_rect), z(z), specific_prim_address(specific_prim_address), transform_id(transform_id), picture_task_address(picture_task_address), user_data(user_data){} |
| 273 | }; |
| 274 | struct PrimitiveHeader { |
| 275 | RectWithEndpoint local_rect; |
| 276 | RectWithEndpoint local_clip_rect; |
| 277 | Float z; |
| 278 | I32 specific_prim_address; |
| 279 | I32 transform_id; |
| 280 | I32 picture_task_address; |
| 281 | ivec4 user_data; |
| 282 | PrimitiveHeader() = default; |
| 283 | PrimitiveHeader(RectWithEndpoint local_rect, RectWithEndpoint local_clip_rect, Float z, I32 specific_prim_address, I32 transform_id, I32 picture_task_address, ivec4 user_data) : local_rect(local_rect), local_clip_rect(local_clip_rect), z(z), specific_prim_address(specific_prim_address), transform_id(transform_id), picture_task_address(picture_task_address), user_data(user_data){} |
| 284 | PrimitiveHeader(RectWithEndpoint_scalar local_rect, RectWithEndpoint_scalar local_clip_rect, float z, int32_t specific_prim_address, int32_t transform_id, int32_t picture_task_address, ivec4_scalar user_data):local_rect(local_rect),local_clip_rect(local_clip_rect),z(z),specific_prim_address(specific_prim_address),transform_id(transform_id),picture_task_address(picture_task_address),user_data(user_data){ |
| 285 | } |
| 286 | IMPLICIT PrimitiveHeader(PrimitiveHeader_scalar s):local_rect(s.local_rect),local_clip_rect(s.local_clip_rect),z(s.z),specific_prim_address(s.specific_prim_address),transform_id(s.transform_id),picture_task_address(s.picture_task_address),user_data(s.user_data){ |
| 287 | } |
| 288 | friend PrimitiveHeader if_then_else(I32 c, PrimitiveHeader t, PrimitiveHeader e) { return PrimitiveHeader( |
| 289 | if_then_else(c, t.local_rect, e.local_rect), if_then_else(c, t.local_clip_rect, e.local_clip_rect), if_then_else(c, t.z, e.z), if_then_else(c, t.specific_prim_address, e.specific_prim_address), if_then_else(c, t.transform_id, e.transform_id), if_then_else(c, t.picture_task_address, e.picture_task_address), if_then_else(c, t.user_data, e.user_data)); |
| 290 | }}; |
| 291 | struct VertexInfo_scalar { |
| 292 | vec2_scalar local_pos; |
| 293 | vec4_scalar world_pos; |
| 294 | VertexInfo_scalar() = default; |
| 295 | VertexInfo_scalar(vec2_scalar local_pos, vec4_scalar world_pos) : local_pos(local_pos), world_pos(world_pos){} |
| 296 | }; |
| 297 | struct VertexInfo { |
| 298 | vec2 local_pos; |
| 299 | vec4 world_pos; |
| 300 | VertexInfo() = default; |
| 301 | VertexInfo(vec2 local_pos, vec4 world_pos) : local_pos(local_pos), world_pos(world_pos){} |
| 302 | VertexInfo(vec2_scalar local_pos, vec4_scalar world_pos):local_pos(local_pos),world_pos(world_pos){ |
| 303 | } |
| 304 | IMPLICIT VertexInfo(VertexInfo_scalar s):local_pos(s.local_pos),world_pos(s.world_pos){ |
| 305 | } |
| 306 | friend VertexInfo if_then_else(I32 c, VertexInfo t, VertexInfo e) { return VertexInfo( |
| 307 | if_then_else(c, t.local_pos, e.local_pos), if_then_else(c, t.world_pos, e.world_pos)); |
| 308 | }}; |
| 309 | // vec4_scalar v_color; |
| 310 | struct SolidBrush_scalar { |
| 311 | vec4_scalar color; |
| 312 | SolidBrush_scalar() = default; |
| 313 | explicit SolidBrush_scalar(vec4_scalar color) : color(color){} |
| 314 | }; |
| 315 | struct SolidBrush { |
| 316 | vec4 color; |
| 317 | SolidBrush() = default; |
| 318 | explicit SolidBrush(vec4 color) : color(color){} |
| 319 | explicit SolidBrush(vec4_scalar color):color(color){ |
| 320 | } |
| 321 | IMPLICIT SolidBrush(SolidBrush_scalar s):color(s.color){ |
| 322 | } |
| 323 | friend SolidBrush if_then_else(I32 c, SolidBrush t, SolidBrush e) { return SolidBrush( |
| 324 | if_then_else(c, t.color, e.color)); |
| 325 | }}; |
| 326 | Instance_scalar decode_instance_attributes() { |
| 327 | Instance_scalar instance; |
| 328 | (instance).prim_header_address = (aData).x; |
| 329 | (instance).clip_address = (aData).y; |
| 330 | (instance).segment_index = ((aData).z)&(65535); |
| 331 | (instance).flags = ((aData).z)>>(16); |
| 332 | (instance).resource_address = ((aData).w)&(16777215); |
| 333 | (instance).brush_kind = ((aData).w)>>(24); |
| 334 | return instance; |
| 335 | } |
| 336 | PrimitiveHeader_scalar fetch_prim_header(int32_t index) { |
| 337 | PrimitiveHeader_scalar ph; |
| 338 | ivec2_scalar uv_f = make_ivec2(make_int((2u)*((make_uint(index))%((1024u)/(2u)))), make_int((make_uint(index))/((1024u)/(2u)))); |
| 339 | auto sPrimitiveHeadersF_uv_f_fetch = texelFetchPtr(sPrimitiveHeadersF, uv_f, 0, 1, 0, 0); |
| 340 | vec4_scalar local_rect = texelFetchUnchecked(sPrimitiveHeadersF, sPrimitiveHeadersF_uv_f_fetch, 0, 0); |
| 341 | vec4_scalar local_clip_rect = texelFetchUnchecked(sPrimitiveHeadersF, sPrimitiveHeadersF_uv_f_fetch, 1, 0); |
| 342 | (ph).local_rect = RectWithEndpoint_scalar((local_rect).sel(X,Y), (local_rect).sel(Z,W)); |
| 343 | (ph).local_clip_rect = RectWithEndpoint_scalar((local_clip_rect).sel(X,Y), (local_clip_rect).sel(Z,W)); |
| 344 | ivec2_scalar uv_i = make_ivec2(make_int((2u)*((make_uint(index))%((1024u)/(2u)))), make_int((make_uint(index))/((1024u)/(2u)))); |
| 345 | auto sPrimitiveHeadersI_uv_i_fetch = texelFetchPtr(sPrimitiveHeadersI, uv_i, 0, 1, 0, 0); |
| 346 | ivec4_scalar data0 = texelFetchUnchecked(sPrimitiveHeadersI, sPrimitiveHeadersI_uv_i_fetch, 0, 0); |
| 347 | ivec4_scalar data1 = texelFetchUnchecked(sPrimitiveHeadersI, sPrimitiveHeadersI_uv_i_fetch, 1, 0); |
| 348 | (ph).z = make_float((data0).x); |
| 349 | (ph).specific_prim_address = (data0).y; |
| 350 | (ph).transform_id = (data0).z; |
| 351 | (ph).picture_task_address = (data0).w; |
| 352 | (ph).user_data = data1; |
| 353 | return ph; |
| 354 | } |
| 355 | Transform_scalar fetch_transform(int32_t id) { |
| 356 | Transform_scalar transform; |
| 357 | (transform).is_axis_aligned = ((id)>>(23))==(0); |
| 358 | int32_t index = (id)&(8388607); |
| 359 | ivec2_scalar uv = make_ivec2(make_int((8u)*((make_uint(index))%((1024u)/(8u)))), make_int((make_uint(index))/((1024u)/(8u)))); |
| 360 | ivec2_scalar uv0 = make_ivec2(((uv).x)+(0), (uv).y); |
| 361 | auto sTransformPalette_uv0_fetch = texelFetchPtr(sTransformPalette, uv0, 0, 7, 0, 0); |
| 362 | (transform).m[0] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 0, 0); |
| 363 | (transform).m[1] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 1, 0); |
| 364 | (transform).m[2] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 2, 0); |
| 365 | (transform).m[3] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 3, 0); |
| 366 | (transform).inv_m[0] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 4, 0); |
| 367 | (transform).inv_m[1] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 5, 0); |
| 368 | (transform).inv_m[2] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 6, 0); |
| 369 | (transform).inv_m[3] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 7, 0); |
| 370 | return transform; |
| 371 | } |
| 372 | RenderTaskData_scalar fetch_render_task_data(int32_t index) { |
| 373 | ivec2_scalar uv = make_ivec2(make_int((2u)*((make_uint(index))%((1024u)/(2u)))), make_int((make_uint(index))/((1024u)/(2u)))); |
| 374 | auto sRenderTasks_uv_fetch = texelFetchPtr(sRenderTasks, uv, 0, 1, 0, 0); |
| 375 | vec4_scalar texel0 = texelFetchUnchecked(sRenderTasks, sRenderTasks_uv_fetch, 0, 0); |
| 376 | vec4_scalar texel1 = texelFetchUnchecked(sRenderTasks, sRenderTasks_uv_fetch, 1, 0); |
| 377 | RectWithEndpoint_scalar task_rect = RectWithEndpoint_scalar((texel0).sel(X,Y), (texel0).sel(Z,W)); |
| 378 | RenderTaskData_scalar data = RenderTaskData_scalar(task_rect, texel1); |
| 379 | return data; |
| 380 | } |
| 381 | PictureTask_scalar fetch_picture_task(int32_t address) { |
| 382 | RenderTaskData_scalar task_data = fetch_render_task_data(address); |
| 383 | PictureTask_scalar task = PictureTask_scalar((task_data).task_rect, ((task_data).user_data).x, ((task_data).user_data).sel(Y,Z)); |
| 384 | return task; |
| 385 | } |
| 386 | ClipArea_scalar fetch_clip_area(int32_t index) { |
| 387 | RenderTaskData_scalar task_data; |
| 388 | if ((index)>=(2147483647)) { |
| 389 | { |
| 390 | task_data = RenderTaskData_scalar(RectWithEndpoint_scalar(make_vec2(0.f), make_vec2(0.f)), make_vec4(0.f)); |
| 391 | } |
| 392 | } else { |
| 393 | task_data = fetch_render_task_data(index); |
| 394 | } |
| 395 | return ClipArea_scalar((task_data).task_rect, ((task_data).user_data).x, ((task_data).user_data).sel(Y,Z)); |
| 396 | } |
| 397 | ivec2_scalar get_gpu_cache_uv(int32_t address) { |
| 398 | return make_ivec2((make_uint(address))%(1024u), (make_uint(address))/(1024u)); |
| 399 | } |
| 400 | Array<vec4_scalar,2> fetch_from_gpu_cache_2(int32_t address) { |
| 401 | ivec2_scalar uv = get_gpu_cache_uv(address); |
| 402 | auto sGpuCache_uv_fetch = texelFetchPtr(sGpuCache, uv, 0, 1, 0, 0); |
| 403 | return Array<vec4_scalar,2>{{texelFetchUnchecked(sGpuCache, sGpuCache_uv_fetch, 0, 0), texelFetchUnchecked(sGpuCache, sGpuCache_uv_fetch, 1, 0)}}; |
| 404 | } |
| 405 | RectWithEndpoint_scalar clip_and_init_antialiasing(RectWithEndpoint_scalar segment_rect, RectWithEndpoint_scalar prim_rect, RectWithEndpoint_scalar clip_rect, int32_t edge_flags, float z, Transform_scalar transform, PictureTask_scalar task) { |
| 406 | bvec4_scalar clipped = make_bvec4(greaterThan((clip_rect).p0, (segment_rect).p0), lessThan((clip_rect).p1, (segment_rect).p1)); |
| 407 | swgl_antiAlias((edge_flags)|(((clipped).x ? 1 : 0)|(((clipped).y ? 2 : 0)|(((clipped).z ? 4 : 0)|((clipped).w ? 8 : 0))))); |
| 408 | (segment_rect).p0 = clamp((segment_rect).p0, (clip_rect).p0, (clip_rect).p1); |
| 409 | (segment_rect).p1 = clamp((segment_rect).p1, (clip_rect).p0, (clip_rect).p1); |
| 410 | return segment_rect; |
| 411 | } |
| 412 | vec2 rect_clamp(RectWithEndpoint_scalar rect, vec2 pt) { |
| 413 | return clamp(pt, (rect).p0, (rect).p1); |
| 414 | } |
| 415 | VertexInfo write_vertex(vec2 local_pos, RectWithEndpoint_scalar local_clip_rect, float z, Transform_scalar transform, PictureTask_scalar task) { |
| 416 | vec2 clamped_local_pos = rect_clamp(local_clip_rect, local_pos); |
| 417 | vec4 world_pos = ((transform).m)*(make_vec4(clamped_local_pos, 0.f, 1.f)); |
| 418 | vec2 device_pos = ((world_pos).sel(X,Y))*((task).device_pixel_scale); |
| 419 | vec2_scalar final_offset = (-((task).content_origin))+(((task).task_rect).p0); |
| 420 | gl_Position = (uTransform)*(make_vec4((device_pos)+((final_offset)*((world_pos).w)), (z)*((world_pos).w), (world_pos).w)); |
| 421 | VertexInfo vi = VertexInfo(clamped_local_pos, world_pos); |
| 422 | return vi; |
| 423 | } |
| 424 | vec2_scalar rect_size(RectWithEndpoint_scalar rect) { |
| 425 | return ((rect).p1)-((rect).p0); |
| 426 | } |
| 427 | void write_clip(vec4 world_pos, ClipArea_scalar area, PictureTask_scalar task) { |
| 428 | swgl_clipMask(sClipMask, ((((task).task_rect).p0)-((task).content_origin))-((((area).task_rect).p0)-((area).screen_origin)), ((area).task_rect).p0, rect_size((area).task_rect)); |
| 429 | } |
| 430 | vec4_scalar fetch_from_gpu_cache_1(int32_t address) { |
| 431 | ivec2_scalar uv = get_gpu_cache_uv(address); |
| 432 | return texelFetch(sGpuCache, uv, 0); |
| 433 | } |
| 434 | SolidBrush_scalar fetch_solid_primitive(int32_t address) { |
| 435 | vec4_scalar data = fetch_from_gpu_cache_1(address); |
| 436 | return SolidBrush_scalar(data); |
| 437 | } |
| 438 | void brush_vs(VertexInfo vi, int32_t prim_address, RectWithEndpoint_scalar local_rect, RectWithEndpoint_scalar segment_rect, ivec4_scalar prim_user_data, int32_t specific_resource_address, mat4_scalar transform, PictureTask_scalar pic_task, int32_t brush_flags, vec4_scalar unused) { |
| 439 | SolidBrush_scalar prim = fetch_solid_primitive(prim_address); |
| 440 | float opacity = (make_float((prim_user_data).x))/(65535.f); |
| 441 | v_color = ((prim).color)*(opacity); |
| 442 | } |
| 443 | void brush_shader_main_vs(Instance_scalar instance, PrimitiveHeader_scalar ph, Transform_scalar transform, PictureTask_scalar pic_task, ClipArea_scalar clip_area) { |
| 444 | int32_t edge_flags = (((instance).flags)>>(12))&(15); |
| 445 | int32_t brush_flags = ((instance).flags)&(4095); |
| 446 | vec4_scalar segment_data; |
| 447 | RectWithEndpoint_scalar segment_rect; |
| 448 | if (((instance).segment_index)==(65535)) { |
| 449 | { |
| 450 | segment_rect = (ph).local_rect; |
| 451 | segment_data = make_vec4(0.f); |
| 452 | } |
| 453 | } else { |
| 454 | int32_t segment_address = (((ph).specific_prim_address)+(1))+(((instance).segment_index)*(2)); |
| 455 | Array<vec4_scalar,2> segment_info = fetch_from_gpu_cache_2(segment_address); |
| 456 | segment_rect = RectWithEndpoint_scalar((segment_info[0]).sel(X,Y), (segment_info[0]).sel(Z,W)); |
| 457 | (segment_rect).p0 += ((ph).local_rect).p0; |
| 458 | (segment_rect).p1 += ((ph).local_rect).p0; |
| 459 | segment_data = segment_info[1]; |
| 460 | } |
| 461 | RectWithEndpoint_scalar adjusted_segment_rect = segment_rect; |
| 462 | bool antialiased = (!((transform).is_axis_aligned))||(((brush_flags)&(1024))!=(0)); |
| 463 | if (antialiased) { |
| 464 | { |
| 465 | adjusted_segment_rect = clip_and_init_antialiasing(segment_rect, (ph).local_rect, (ph).local_clip_rect, edge_flags, (ph).z, transform, pic_task); |
| 466 | ((ph).local_clip_rect).p0 = make_vec2(-(10000000000000000.f)); |
| 467 | ((ph).local_clip_rect).p1 = make_vec2(10000000000000000.f); |
| 468 | } |
| 469 | } else { |
| 470 | } |
| 471 | vec2 local_pos = mix((adjusted_segment_rect).p0, (adjusted_segment_rect).p1, (aPosition).sel(X,Y)); |
| 472 | VertexInfo vi = write_vertex(local_pos, (ph).local_clip_rect, (ph).z, transform, pic_task); |
| 473 | write_clip((vi).world_pos, clip_area, pic_task); |
| 474 | brush_vs(vi, (ph).specific_prim_address, (ph).local_rect, segment_rect, (ph).user_data, (instance).resource_address, (transform).m, pic_task, brush_flags, segment_data); |
| 475 | } |
| 476 | ALWAYS_INLINE void main(void) { |
| 477 | Instance_scalar instance = decode_instance_attributes(); |
| 478 | PrimitiveHeader_scalar ph = fetch_prim_header((instance).prim_header_address); |
| 479 | Transform_scalar transform = fetch_transform((ph).transform_id); |
| 480 | PictureTask_scalar task = fetch_picture_task((ph).picture_task_address); |
| 481 | ClipArea_scalar clip_area = fetch_clip_area((instance).clip_address); |
| 482 | brush_shader_main_vs(instance, ph, transform, task, clip_area); |
| 483 | } |
| 484 | static void set_uniform_1i(VertexShaderImpl* impl, int index, int value) { |
| 485 | Self* self = (Self*)impl; |
| 486 | if (self->samplers.set_slot(index, value)) return; |
| 487 | switch (index) { |
| 488 | case 7: |
| 489 | assert(0); // sClipMask |
| 490 | break; |
| 491 | case 2: |
| 492 | assert(0); // sGpuCache |
| 493 | break; |
| 494 | case 4: |
| 495 | assert(0); // sPrimitiveHeadersF |
| 496 | break; |
| 497 | case 5: |
| 498 | assert(0); // sPrimitiveHeadersI |
| 499 | break; |
| 500 | case 1: |
| 501 | assert(0); // sRenderTasks |
| 502 | break; |
| 503 | case 3: |
| 504 | assert(0); // sTransformPalette |
| 505 | break; |
| 506 | case 6: |
| 507 | assert(0); // uTransform |
| 508 | break; |
| 509 | } |
| 510 | } |
| 511 | static void set_uniform_4fv(VertexShaderImpl* impl, int index, const float *value) { |
| 512 | Self* self = (Self*)impl; |
| 513 | switch (index) { |
| 514 | case 7: |
| 515 | assert(0); // sClipMask |
| 516 | break; |
| 517 | case 2: |
| 518 | assert(0); // sGpuCache |
| 519 | break; |
| 520 | case 4: |
| 521 | assert(0); // sPrimitiveHeadersF |
| 522 | break; |
| 523 | case 5: |
| 524 | assert(0); // sPrimitiveHeadersI |
| 525 | break; |
| 526 | case 1: |
| 527 | assert(0); // sRenderTasks |
| 528 | break; |
| 529 | case 3: |
| 530 | assert(0); // sTransformPalette |
| 531 | break; |
| 532 | case 6: |
| 533 | assert(0); // uTransform |
| 534 | break; |
| 535 | } |
| 536 | } |
| 537 | static void set_uniform_matrix4fv(VertexShaderImpl* impl, int index, const float *value) { |
| 538 | Self* self = (Self*)impl; |
| 539 | switch (index) { |
| 540 | case 7: |
| 541 | assert(0); // sClipMask |
| 542 | break; |
| 543 | case 2: |
| 544 | assert(0); // sGpuCache |
| 545 | break; |
| 546 | case 4: |
| 547 | assert(0); // sPrimitiveHeadersF |
| 548 | break; |
| 549 | case 5: |
| 550 | assert(0); // sPrimitiveHeadersI |
| 551 | break; |
| 552 | case 1: |
| 553 | assert(0); // sRenderTasks |
| 554 | break; |
| 555 | case 3: |
| 556 | assert(0); // sTransformPalette |
| 557 | break; |
| 558 | case 6: |
| 559 | self->uTransform = mat4_scalar::load_from_ptr(value); |
| 560 | break; |
| 561 | } |
| 562 | } |
| 563 | static void load_attribs(VertexShaderImpl* impl, VertexAttrib *attribs, uint32_t start, int instance, int count) {Self* self = (Self*)impl; |
| 564 | load_attrib(self->aPosition, attribs[self->attrib_locations.aPosition], start, instance, count); |
| 565 | load_flat_attrib(self->aData, attribs[self->attrib_locations.aData], start, instance, count); |
| 566 | } |
| 567 | public: |
| 568 | struct InterpOutputs { |
| 569 | }; |
| 570 | private: |
| 571 | ALWAYS_INLINE void store_interp_outputs(char* dest_ptr, size_t stride) { |
| 572 | for(int n = 0; n < 4; n++) { |
| 573 | auto* dest = reinterpret_cast<InterpOutputs*>(dest_ptr); |
| 574 | dest_ptr += stride; |
| 575 | } |
| 576 | } |
| 577 | static void run(VertexShaderImpl* impl, char* interps, size_t interp_stride) { |
| 578 | Self* self = (Self*)impl; |
| 579 | self->main(); |
| 580 | self->store_interp_outputs(interps, interp_stride); |
| 581 | } |
| 582 | static void init_batch(VertexShaderImpl* impl) { |
| 583 | Self* self = (Self*)impl; self->bind_textures(); } |
| 584 | public: |
| 585 | brush_solid_DEBUG_OVERDRAW_vert() { |
| 586 | set_uniform_1i_func = &set_uniform_1i; |
| 587 | set_uniform_4fv_func = &set_uniform_4fv; |
| 588 | set_uniform_matrix4fv_func = &set_uniform_matrix4fv; |
| 589 | init_batch_func = &init_batch; |
| 590 | load_attribs_func = &load_attribs; |
| 591 | run_primitive_func = &run; |
| 592 | } |
| 593 | }; |
| 594 | |
| 595 | |
| 596 | struct brush_solid_DEBUG_OVERDRAW_frag : FragmentShaderImpl, brush_solid_DEBUG_OVERDRAW_vert { |
| 597 | private: |
| 598 | typedef brush_solid_DEBUG_OVERDRAW_frag Self; |
| 599 | #define oFragColor gl_FragColor |
| 600 | // vec4 oFragColor; |
| 601 | struct RectWithSize_scalar { |
| 602 | vec2_scalar p0; |
| 603 | vec2_scalar size; |
| 604 | RectWithSize_scalar() = default; |
| 605 | RectWithSize_scalar(vec2_scalar p0, vec2_scalar size) : p0(p0), size(size){} |
| 606 | }; |
| 607 | struct RectWithSize { |
| 608 | vec2 p0; |
| 609 | vec2 size; |
| 610 | RectWithSize() = default; |
| 611 | RectWithSize(vec2 p0, vec2 size) : p0(p0), size(size){} |
| 612 | RectWithSize(vec2_scalar p0, vec2_scalar size):p0(p0),size(size){ |
| 613 | } |
| 614 | IMPLICIT RectWithSize(RectWithSize_scalar s):p0(s.p0),size(s.size){ |
| 615 | } |
| 616 | friend RectWithSize if_then_else(I32 c, RectWithSize t, RectWithSize e) { return RectWithSize( |
| 617 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.size, e.size)); |
| 618 | }}; |
| 619 | struct RectWithEndpoint_scalar { |
| 620 | vec2_scalar p0; |
| 621 | vec2_scalar p1; |
| 622 | RectWithEndpoint_scalar() = default; |
| 623 | RectWithEndpoint_scalar(vec2_scalar p0, vec2_scalar p1) : p0(p0), p1(p1){} |
| 624 | }; |
| 625 | struct RectWithEndpoint { |
| 626 | vec2 p0; |
| 627 | vec2 p1; |
| 628 | RectWithEndpoint() = default; |
| 629 | RectWithEndpoint(vec2 p0, vec2 p1) : p0(p0), p1(p1){} |
| 630 | RectWithEndpoint(vec2_scalar p0, vec2_scalar p1):p0(p0),p1(p1){ |
| 631 | } |
| 632 | IMPLICIT RectWithEndpoint(RectWithEndpoint_scalar s):p0(s.p0),p1(s.p1){ |
| 633 | } |
| 634 | friend RectWithEndpoint if_then_else(I32 c, RectWithEndpoint t, RectWithEndpoint e) { return RectWithEndpoint( |
| 635 | if_then_else(c, t.p0, e.p0), if_then_else(c, t.p1, e.p1)); |
| 636 | }}; |
| 637 | // sampler2D sGpuCache; |
| 638 | // vec4_scalar vTransformBounds; |
| 639 | // sampler2D sClipMask; |
| 640 | struct Fragment_scalar { |
| 641 | vec4_scalar color; |
| 642 | Fragment_scalar() = default; |
| 643 | explicit Fragment_scalar(vec4_scalar color) : color(color){} |
| 644 | }; |
| 645 | struct Fragment { |
| 646 | vec4 color; |
| 647 | Fragment() = default; |
| 648 | explicit Fragment(vec4 color) : color(color){} |
| 649 | explicit Fragment(vec4_scalar color):color(color){ |
| 650 | } |
| 651 | IMPLICIT Fragment(Fragment_scalar s):color(s.color){ |
| 652 | } |
| 653 | friend Fragment if_then_else(I32 c, Fragment t, Fragment e) { return Fragment( |
| 654 | if_then_else(c, t.color, e.color)); |
| 655 | }}; |
| 656 | // vec4_scalar v_color; |
| 657 | ALWAYS_INLINE void main(void) { |
| 658 | oFragColor = make_vec4(0.11f, 0.077f, 0.027f, 0.125f); |
| 659 | } |
| 660 | void swgl_drawSpanRGBA8() { |
| 661 | swgl_commitSolidRGBA8(v_color); |
| 662 | } |
| 663 | void swgl_drawSpanR8() { |
| 664 | swgl_commitSolidR8((v_color).x); |
| 665 | } |
| 666 | typedef brush_solid_DEBUG_OVERDRAW_vert::InterpOutputs InterpInputs; |
| 667 | InterpInputs interp_step; |
| 668 | static void read_interp_inputs(FragmentShaderImpl* impl, const void* init_, const void* step_) {Self* self = (Self*)impl;const InterpInputs* init = (const InterpInputs*)init_;const InterpInputs* step = (const InterpInputs*)step_; |
| 669 | } |
| 670 | ALWAYS_INLINE void step_interp_inputs(int steps = 4) { |
| 671 | float chunks = steps * 0.25f; |
Value stored to 'chunks' during its initialization is never read | |
| 672 | } |
| 673 | static void run(FragmentShaderImpl* impl) { |
| 674 | Self* self = (Self*)impl; |
| 675 | self->main(); |
| 676 | self->step_interp_inputs(); |
| 677 | } |
| 678 | static void skip(FragmentShaderImpl* impl, int steps) { |
| 679 | Self* self = (Self*)impl; |
| 680 | self->step_interp_inputs(steps); |
| 681 | } |
| 682 | static int draw_span_RGBA8(FragmentShaderImpl* impl) { |
| 683 | Self* self = (Self*)impl; DISPATCH_DRAW_SPAN(self, RGBA8); } |
| 684 | static int draw_span_R8(FragmentShaderImpl* impl) { |
| 685 | Self* self = (Self*)impl; DISPATCH_DRAW_SPAN(self, R8); } |
| 686 | public: |
| 687 | brush_solid_DEBUG_OVERDRAW_frag() { |
| 688 | init_span_func = &read_interp_inputs; |
| 689 | run_func = &run; |
| 690 | skip_func = &skip; |
| 691 | draw_span_RGBA8_func = &draw_span_RGBA8; |
| 692 | draw_span_R8_func = &draw_span_R8; |
| 693 | init_span_w_func = &read_interp_inputs; |
| 694 | run_w_func = &run; |
| 695 | skip_w_func = &skip; |
| 696 | } |
| 697 | }; |
| 698 | |
| 699 | struct brush_solid_DEBUG_OVERDRAW_program : ProgramImpl, brush_solid_DEBUG_OVERDRAW_frag { |
| 700 | int get_uniform(const char *name) const override { |
| 701 | if (strcmp("sClipMask", name) == 0) { return 7; } |
| 702 | if (strcmp("sGpuCache", name) == 0) { return 2; } |
| 703 | if (strcmp("sPrimitiveHeadersF", name) == 0) { return 4; } |
| 704 | if (strcmp("sPrimitiveHeadersI", name) == 0) { return 5; } |
| 705 | if (strcmp("sRenderTasks", name) == 0) { return 1; } |
| 706 | if (strcmp("sTransformPalette", name) == 0) { return 3; } |
| 707 | if (strcmp("uTransform", name) == 0) { return 6; } |
| 708 | return -1; |
| 709 | } |
| 710 | void bind_attrib(const char* name, int index) override { |
| 711 | attrib_locations.bind_loc(name, index); |
| 712 | } |
| 713 | int get_attrib(const char* name) const override { |
| 714 | return attrib_locations.get_loc(name); |
| 715 | } |
| 716 | size_t interpolants_size() const override { return sizeof(InterpOutputs); } |
| 717 | VertexShaderImpl* get_vertex_shader() override { |
| 718 | return this; |
| 719 | } |
| 720 | FragmentShaderImpl* get_fragment_shader() override { |
| 721 | return this; |
| 722 | } |
| 723 | const char* get_name() const override { return "brush_solid_DEBUG_OVERDRAW"; } |
| 724 | static ProgramImpl* loader() { return new brush_solid_DEBUG_OVERDRAW_program; } |
| 725 | }; |
| 726 |