forked from eden-emu/eden

glsl: Add Shader_GLSL logging

author ameerj
parent c44b3f664d
commit d344489b5b

3 changed files with 32 additions and 28 deletions
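The change is mechanical: every commented-out LOG_WARNING in the GLSL backend's fallback and stub paths becomes a live call, tagged with the Shader_GLSL log class so the messages can be filtered per subsystem instead of being lost entirely. As a minimal sketch of the pattern, assuming a yuzu-style logging layer (the enum, function names, and output format below are illustrative, not the project's actual API):

    #include <format>
    #include <iostream>
    #include <string_view>
    #include <utility>

    // Log classes mirroring the tag used in this commit; illustrative only.
    enum class LogClass { Shader_GLSL, Render_OpenGL };

    constexpr std::string_view ClassName(LogClass cls) {
        switch (cls) {
        case LogClass::Shader_GLSL:
            return "Shader.GLSL";
        case LogClass::Render_OpenGL:
            return "Render.OpenGL";
        }
        return "Unknown";
    }

    // A real implementation would also consult a per-class level filter here;
    // the class tag is what makes that filtering possible.
    template <typename... Args>
    void LogMessage(LogClass cls, std::string_view level,
                    std::format_string<Args...> fmt, Args&&... args) {
        std::cout << std::format("[{}] <{}> ", ClassName(cls), level)
                  << std::format(fmt, std::forward<Args>(args)...) << '\n';
    }

    // Matches the call shape in the diff: LOG_WARNING(Shader_GLSL, "...", args...);
    #define LOG_WARNING(log_class, ...) \
        LogMessage(LogClass::log_class, "Warning", __VA_ARGS__)

    int main() {
        LOG_WARNING(Shader_GLSL, "GLSL does not allow access to gl_TexCoord[{}]", 8);
        LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
    }

The point of the first argument is the tag: a per-class filter can silence Shader_GLSL warnings without touching other subsystems, which a commented-out call cannot offer.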
@@ -98,7 +98,7 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_vi
 
 void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
                                 std::string_view value) {
-    // LOG_WARNING("Int64 Atomics not supported, fallback to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2(smem[{}>>2],smem[({}+4)>>2]));", inst, pointer_offset,
                pointer_offset);
     ctx.Add("smem[{}>>2]=unpackUint2x32({}).x;smem[({}+4)>>2]=unpackUint2x32({}).y;",
@@ -171,7 +171,7 @@ void EmitStorageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Val
 
 void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -182,7 +182,7 @@ void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -195,7 +195,7 @@ void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -207,7 +207,7 @@ void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -220,8 +220,7 @@ void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicUMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
-
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));

@@ -42,7 +42,7 @@ void GetCbuf(EmitContext& ctx, std::string_view ret, const IR::Value& binding,
         const s32 signed_offset{static_cast<s32>(offset.U32())};
         static constexpr u32 cbuf_size{4096 * 16};
         if (signed_offset < 0 || offset.U32() > cbuf_size) {
-            // LOG_WARNING(..., "Immediate constant buffer offset is out of bounds");
+            LOG_WARNING(Shader_GLSL, "Immediate constant buffer offset is out of bounds");
             ctx.Add("{}=0u;", ret);
             return;
         }
@@ -144,7 +144,7 @@ void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding
         const u32 u32_offset{offset.U32()};
         const s32 signed_offset{static_cast<s32>(offset.U32())};
         if (signed_offset < 0 || u32_offset > cbuf_size) {
-            // LOG_WARNING(..., "Immediate constant buffer offset is out of bounds");
+            LOG_WARNING(Shader_GLSL, "Immediate constant buffer offset is out of bounds");
             ctx.AddU32x2("{}=uvec2(0u);", inst);
             return;
         }
@@ -184,7 +184,8 @@ void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr,
     }
     // GLSL only exposes 8 legacy texcoords
     if (attr >= IR::Attribute::FixedFncTexture8S && attr <= IR::Attribute::FixedFncTexture9Q) {
-        // LOG_WARNING(..., "GLSL does not allow access to gl_TexCoord[{}]", TexCoordIndex(attr));
+        LOG_WARNING(Shader_GLSL, "GLSL does not allow access to gl_TexCoord[{}]",
+                    TexCoordIndex(attr));
         ctx.AddF32("{}=0.f;", inst);
         return;
     }
@@ -257,7 +258,8 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view val
     const char swizzle{"xyzw"[element]};
     // GLSL only exposes 8 legacy texcoords
     if (attr >= IR::Attribute::FixedFncTexture8S && attr <= IR::Attribute::FixedFncTexture9Q) {
-        // LOG_WARNING(..., "GLSL does not allow access to gl_TexCoord[{}]", TexCoordIndex(attr));
+        LOG_WARNING(Shader_GLSL, "GLSL does not allow access to gl_TexCoord[{}]",
+                    TexCoordIndex(attr));
         return;
     }
     if (attr >= IR::Attribute::FixedFncTexture0S && attr <= IR::Attribute::FixedFncTexture7Q) {
@@ -269,8 +271,8 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view val
     case IR::Attribute::Layer:
         if (ctx.stage != Stage::Geometry &&
             !ctx.profile.support_viewport_index_layer_non_geometry) {
-            // LOG_WARNING(..., "Shader stores viewport layer but device does not support viewport
-            // layer extension");
+            LOG_WARNING(Shader_GLSL, "Shader stores viewport layer but device does not support "
+                                     "viewport layer extension");
             break;
         }
         ctx.Add("gl_Layer=ftoi({});", value);
@@ -278,16 +280,17 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view val
     case IR::Attribute::ViewportIndex:
         if (ctx.stage != Stage::Geometry &&
             !ctx.profile.support_viewport_index_layer_non_geometry) {
-            // LOG_WARNING(..., "Shader stores viewport index but device does not support viewport
-            // layer extension");
+            LOG_WARNING(Shader_GLSL, "Shader stores viewport index but device does not support "
+                                     "viewport layer extension");
             break;
         }
         ctx.Add("gl_ViewportIndex=ftoi({});", value);
         break;
     case IR::Attribute::ViewportMask:
         if (ctx.stage != Stage::Geometry && !ctx.profile.support_viewport_mask) {
-            // LOG_WARNING(..., "Shader stores viewport mask but device does not support viewport
-            // mask extension");
+            LOG_WARNING(
+                Shader_GLSL,
+                "Shader stores viewport mask but device does not support viewport mask extension");
             break;
         }
         ctx.Add("gl_ViewportMask[0]=ftoi({});", value);

@@ -96,7 +96,7 @@ std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) {
     }
     const bool has_var_aoffi{ctx.profile.support_gl_variable_aoffi};
     if (!has_var_aoffi) {
-        // LOG_WARNING("Device does not support variable texture offsets, STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support variable texture offsets, STUBBING");
     }
     const auto offset_str{has_var_aoffi ? ctx.var_alloc.Consume(offset) : "0"};
     switch (offset.Type()) {
@@ -116,7 +116,7 @@ std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) {
 std::string PtpOffsets(const IR::Value& offset, const IR::Value& offset2) {
     const std::array values{offset.InstRecursive(), offset2.InstRecursive()};
     if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) {
-        // LOG_WARNING("Not all arguments in PTP are immediate, STUBBING");
+        LOG_WARNING(Shader_GLSL, "Not all arguments in PTP are immediate, STUBBING");
         return "ivec2[](ivec2(0), ivec2(1), ivec2(2), ivec2(3))";
     }
     const IR::Opcode opcode{values[0]->GetOpcode()};
@@ -152,7 +152,7 @@ void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -196,7 +196,7 @@ void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -239,9 +239,10 @@ void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::
     const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod &&
                         ctx.stage != Stage::Fragment && needs_shadow_ext};
     if (use_grad) {
-        // LOG_WARNING(..., "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
+        LOG_WARNING(Shader_GLSL,
+                    "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
         if (info.type == TextureType::ColorArrayCube) {
-            // LOG_WARNING(..., "textureGrad does not support ColorArrayCube. Stubbing");
+            LOG_WARNING(Shader_GLSL, "textureGrad does not support ColorArrayCube. Stubbing");
             ctx.AddF32("{}=0.0f;", inst);
             return;
         }
@@ -291,9 +292,10 @@ void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::
     const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod && needs_shadow_ext};
     const auto cast{needs_shadow_ext ? "vec4" : "vec3"};
     if (use_grad) {
-        // LOG_WARNING(..., "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
+        LOG_WARNING(Shader_GLSL,
+                    "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
         if (info.type == TextureType::ColorArrayCube) {
-            // LOG_WARNING(..., "textureGrad does not support ColorArrayCube. Stubbing");
+            LOG_WARNING(Shader_GLSL, "textureGrad does not support ColorArrayCube. Stubbing");
             ctx.AddF32("{}=0.0f;", inst);
             return;
         }
@@ -329,7 +331,7 @@ void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -376,7 +378,7 @@ void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& inde
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -426,7 +428,7 @@ void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
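
For context on the "fallback to non-atomic" warnings in the first file: the emitted GLSL reads the two 32-bit halves of the old value, packs them into the 64-bit result, and then writes the new value in separate statements, so another invocation can interleave between the read and the write. The snippet below expands the EmitSharedAtomicExchange64 format strings through fmt (the same formatting ctx.Add applies) to show the generated GLSL; the variable names, and the argument order of the second format string (cut off in the hunk above), are illustrative assumptions.

    #include <fmt/format.h>

    int main() {
        const char* inst = "x64_1";        // hypothetical 64-bit result variable
        const char* pointer_offset = "x1"; // hypothetical byte offset into shared memory
        const char* value = "x2";          // hypothetical 64-bit value to store

        // ctx.AddU64("{}=packUint2x32(uvec2(smem[{}>>2],smem[({}+4)>>2]));", ...)
        fmt::print("{}=packUint2x32(uvec2(smem[{}>>2],smem[({}+4)>>2]));\n",
                   inst, pointer_offset, pointer_offset);
        // ctx.Add("smem[{}>>2]=unpackUint2x32({}).x;smem[({}+4)>>2]=unpackUint2x32({}).y;", ...)
        fmt::print("smem[{}>>2]=unpackUint2x32({}).x;smem[({}+4)>>2]=unpackUint2x32({}).y;\n",
                   pointer_offset, value, pointer_offset, value);
    }

The read-pack and the two stores are plain assignments rather than atomicExchange calls, which is exactly what the newly enabled warnings surface on hardware without 64-bit atomics.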