forked from eden-emu/eden

video_core: Resolve more variable shadowing scenarios

Lioncash

Resolves variable shadowing scenarios up to the end of the OpenGL code to make it nicer to review. The rest will be resolved in a follow-up commit.

parent fad38ec6e8
commit 677a8b208d

42 changed files with 219 additions and 206 deletions
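For reviewers unfamiliar with the pattern: the commit renames constructor parameters so they no longer shadow the members they initialize. A minimal sketch of the before/after shape (illustrative only, not code from this diff):

```cpp
// Illustrative sketch: a constructor parameter named after the member it
// initializes shadows that member inside the constructor body.
class Before {
public:
    explicit Before(int value) : value(value) {
        // Here `value` names the parameter, not the member; any mention
        // of the member must go through this->value.
    }

private:
    int value;
};

// The convention applied throughout this commit: give the parameter a
// trailing underscore so every name refers to exactly one entity.
class After {
public:
    explicit After(int value_) : value{value_} {}

private:
    int value;
};
```

With the rename, `-Wshadow` on GCC/Clang stays quiet and `this->` qualification becomes unnecessary.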
@@ -29,8 +29,8 @@
 #include "video_core/memory_manager.h"
 
 namespace Tegra {
-CDmaPusher::CDmaPusher(GPU& gpu)
-    : gpu(gpu), nvdec_processor(std::make_shared<Nvdec>(gpu)),
+CDmaPusher::CDmaPusher(GPU& gpu_)
+    : gpu{gpu_}, nvdec_processor(std::make_shared<Nvdec>(gpu)),
       vic_processor(std::make_unique<Vic>(gpu, nvdec_processor)),
       host1x_processor(std::make_unique<Host1x>(gpu)),
       nvdec_sync(std::make_unique<SyncptIncrManager>(gpu)),
@@ -100,11 +100,11 @@ void CDmaPusher::Step() {
     }
 }
 
-void CDmaPusher::ExecuteCommand(u32 offset, u32 data) {
+void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
     switch (current_class) {
     case ChClassId::NvDec:
-        ThiStateWrite(nvdec_thi_state, offset, {data});
-        switch (static_cast<ThiMethod>(offset)) {
+        ThiStateWrite(nvdec_thi_state, state_offset, {data});
+        switch (static_cast<ThiMethod>(state_offset)) {
         case ThiMethod::IncSyncpt: {
             LOG_DEBUG(Service_NVDRV, "NVDEC Class IncSyncpt Method");
             const auto syncpoint_id = static_cast<u32>(data & 0xFF);
@@ -120,16 +120,16 @@ void CDmaPusher::ExecuteCommand(u32 offset, u32 data) {
         case ThiMethod::SetMethod1:
             LOG_DEBUG(Service_NVDRV, "NVDEC method 0x{:X}",
                       static_cast<u32>(nvdec_thi_state.method_0));
-            nvdec_processor->ProcessMethod(
-                static_cast<Tegra::Nvdec::Method>(nvdec_thi_state.method_0), {data});
+            nvdec_processor->ProcessMethod(static_cast<Nvdec::Method>(nvdec_thi_state.method_0),
+                                           {data});
             break;
         default:
             break;
         }
         break;
     case ChClassId::GraphicsVic:
-        ThiStateWrite(vic_thi_state, static_cast<u32>(offset), {data});
-        switch (static_cast<ThiMethod>(offset)) {
+        ThiStateWrite(vic_thi_state, static_cast<u32>(state_offset), {data});
+        switch (static_cast<ThiMethod>(state_offset)) {
         case ThiMethod::IncSyncpt: {
             LOG_DEBUG(Service_NVDRV, "VIC Class IncSyncpt Method");
             const auto syncpoint_id = static_cast<u32>(data & 0xFF);
@@ -145,8 +145,7 @@ void CDmaPusher::ExecuteCommand(u32 offset, u32 data) {
         case ThiMethod::SetMethod1:
             LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})",
                       static_cast<u32>(vic_thi_state.method_0), data);
-            vic_processor->ProcessMethod(static_cast<Tegra::Vic::Method>(vic_thi_state.method_0),
-                                         {data});
+            vic_processor->ProcessMethod(static_cast<Vic::Method>(vic_thi_state.method_0), {data});
            break;
         default:
             break;
@@ -155,7 +154,7 @@ void CDmaPusher::ExecuteCommand(u32 offset, u32 data) {
     case ChClassId::Host1x:
         // This device is mainly for syncpoint synchronization
         LOG_DEBUG(Service_NVDRV, "Host1X Class Method");
-        host1x_processor->ProcessMethod(static_cast<Tegra::Host1x::Method>(offset), {data});
+        host1x_processor->ProcessMethod(static_cast<Host1x::Method>(state_offset), {data});
         break;
     default:
         UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class));
@@ -163,9 +162,10 @@ void CDmaPusher::ExecuteCommand(u32 offset, u32 data) {
     }
 }
 
-void CDmaPusher::ThiStateWrite(ThiRegisters& state, u32 offset, const std::vector<u32>& arguments) {
-    u8* const state_offset = reinterpret_cast<u8*>(&state) + sizeof(u32) * offset;
-    std::memcpy(state_offset, arguments.data(), sizeof(u32) * arguments.size());
+void CDmaPusher::ThiStateWrite(ThiRegisters& state, u32 state_offset,
+                               const std::vector<u32>& arguments) {
+    u8* const state_offset_ptr = reinterpret_cast<u8*>(&state) + sizeof(u32) * state_offset;
+    std::memcpy(state_offset_ptr, arguments.data(), sizeof(u32) * arguments.size());
 }
 
 } // namespace Tegra
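The `ThiStateWrite` hunk is the interesting one here: the old code computed a local `u8* const state_offset` from a parameter named `offset`, so once the parameter itself becomes `state_offset`, the local needs a distinct name. A reduced sketch of the hazard, with hypothetical names rather than the real registers struct:

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

struct Registers {
    std::uint32_t regs[16];
};

// Hypothetical reduction of ThiStateWrite: if the local pointer were also
// called state_offset, it would collide with the parameter. Renaming it
// state_offset_ptr keeps both names visible and unambiguous.
void StateWrite(Registers& state, std::uint32_t state_offset,
                const std::vector<std::uint32_t>& arguments) {
    auto* const state_offset_ptr =
        reinterpret_cast<std::uint8_t*>(&state) + sizeof(std::uint32_t) * state_offset;
    std::memcpy(state_offset_ptr, arguments.data(),
                sizeof(std::uint32_t) * arguments.size());
}
```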
@@ -68,8 +68,8 @@ struct ChCommand {
     std::vector<u32> arguments;
 };
 
-using ChCommandHeaderList = std::vector<Tegra::ChCommandHeader>;
-using ChCommandList = std::vector<Tegra::ChCommand>;
+using ChCommandHeaderList = std::vector<ChCommandHeader>;
+using ChCommandList = std::vector<ChCommand>;
 
 struct ThiRegisters {
     u32_le increment_syncpt{};
@@ -96,7 +96,7 @@ enum class ThiMethod : u32 {
 
 class CDmaPusher {
 public:
-    explicit CDmaPusher(GPU& gpu);
+    explicit CDmaPusher(GPU& gpu_);
     ~CDmaPusher();
 
     /// Push NVDEC command buffer entries into queue
@@ -109,17 +109,17 @@ public:
     void Step();
 
     /// Invoke command class devices to execute the command based on the current state
-    void ExecuteCommand(u32 offset, u32 data);
+    void ExecuteCommand(u32 state_offset, u32 data);
 
 private:
     /// Write arguments value to the ThiRegisters member at the specified offset
-    void ThiStateWrite(ThiRegisters& state, u32 offset, const std::vector<u32>& arguments);
+    void ThiStateWrite(ThiRegisters& state, u32 state_offset, const std::vector<u32>& arguments);
 
     GPU& gpu;
 
-    std::shared_ptr<Tegra::Nvdec> nvdec_processor;
-    std::unique_ptr<Tegra::Vic> vic_processor;
-    std::unique_ptr<Tegra::Host1x> host1x_processor;
+    std::shared_ptr<Nvdec> nvdec_processor;
+    std::unique_ptr<Vic> vic_processor;
+    std::unique_ptr<Host1x> host1x_processor;
     std::unique_ptr<SyncptIncrManager> nvdec_sync;
     std::unique_ptr<SyncptIncrManager> vic_sync;
     ChClassId current_class{};

@@ -233,7 +233,7 @@ constexpr std::array<s32, 254> map_lut{
 }
 } // Anonymous namespace
 
-VP9::VP9(GPU& gpu) : gpu(gpu) {}
+VP9::VP9(GPU& gpu_) : gpu{gpu_} {}
 
 VP9::~VP9() = default;
 

@@ -108,7 +108,7 @@ private:
 
 class VP9 {
 public:
-    explicit VP9(GPU& gpu);
+    explicit VP9(GPU& gpu_);
     ~VP9();
 
     VP9(const VP9&) = delete;

@@ -13,7 +13,7 @@
 
 namespace Tegra {
 
-DmaPusher::DmaPusher(Core::System& system, GPU& gpu) : gpu{gpu}, system{system} {}
+DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_) : gpu{gpu_}, system{system_} {}
 
 DmaPusher::~DmaPusher() = default;
 
@@ -152,7 +152,12 @@ void DmaPusher::SetState(const CommandHeader& command_header) {
 
 void DmaPusher::CallMethod(u32 argument) const {
     if (dma_state.method < non_puller_methods) {
-        gpu.CallMethod({dma_state.method, argument, dma_state.subchannel, dma_state.method_count});
+        gpu.CallMethod(GPU::MethodCall{
+            dma_state.method,
+            argument,
+            dma_state.subchannel,
+            dma_state.method_count,
+        });
     } else {
         subchannels[dma_state.subchannel]->CallMethod(dma_state.method, argument,
                                                       dma_state.is_last_call);
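Beyond the renames, `CallMethod` now spells out the aggregate being constructed. This is not only cosmetic: `MethodCall`'s constructor is made `explicit` later in this diff, so the unnamed braced form would no longer convert implicitly. A small sketch of the readability angle, using hypothetical types:

```cpp
#include <cstdint>

struct MethodCall {
    std::uint32_t method;
    std::uint32_t argument;
    std::uint32_t subchannel;
    std::uint32_t method_count;
};

void Dispatch(const MethodCall& call) {
    (void)call; // stand-in for gpu.CallMethod in this sketch
}

int main() {
    // Terse but opaque: the reader must consult Dispatch to learn what
    // the four values mean.
    Dispatch({1, 2, 0, 4});

    // Naming the aggregate, as the diff does with GPU::MethodCall, keeps
    // the call site self-documenting, one field per line.
    Dispatch(MethodCall{
        1, // method
        2, // argument
        0, // subchannel
        4, // method_count
    });
}
```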
@@ -87,11 +87,11 @@ inline CommandHeader BuildCommandHeader(BufferMethods method, u32 arg_count, Sub
 struct CommandList final {
     CommandList() = default;
     explicit CommandList(std::size_t size) : command_lists(size) {}
-    explicit CommandList(std::vector<Tegra::CommandHeader>&& prefetch_command_list)
-        : prefetch_command_list{std::move(prefetch_command_list)} {}
+    explicit CommandList(std::vector<CommandHeader>&& prefetch_command_list_)
+        : prefetch_command_list{std::move(prefetch_command_list_)} {}
 
-    std::vector<Tegra::CommandListHeader> command_lists;
-    std::vector<Tegra::CommandHeader> prefetch_command_list;
+    std::vector<CommandListHeader> command_lists;
+    std::vector<CommandHeader> prefetch_command_list;
 };
 
 /**
@@ -103,7 +103,7 @@ struct CommandList final {
  */
 class DmaPusher final {
 public:
-    explicit DmaPusher(Core::System& system, GPU& gpu);
+    explicit DmaPusher(Core::System& system_, GPU& gpu_);
     ~DmaPusher();
 
     void Push(CommandList&& entries) {
@@ -112,7 +112,7 @@ public:
 
     void DispatchCalls();
 
-    void BindSubchannel(Tegra::Engines::EngineInterface* engine, u32 subchannel_id) {
+    void BindSubchannel(Engines::EngineInterface* engine, u32 subchannel_id) {
         subchannels[subchannel_id] = engine;
     }
 
@@ -145,7 +145,7 @@ private:
 
     bool ib_enable{true}; ///< IB mode enabled
 
-    std::array<Tegra::Engines::EngineInterface*, max_subchannels> subchannels{};
+    std::array<Engines::EngineInterface*, max_subchannels> subchannels{};
 
     GPU& gpu;
     Core::System& system;

@@ -11,16 +11,16 @@
 
 namespace Tegra::Engines::Upload {
 
-State::State(MemoryManager& memory_manager, Registers& regs)
-    : regs{regs}, memory_manager{memory_manager} {}
+State::State(MemoryManager& memory_manager_, Registers& regs_)
+    : regs{regs_}, memory_manager{memory_manager_} {}
 
 State::~State() = default;
 
-void State::ProcessExec(const bool is_linear) {
+void State::ProcessExec(const bool is_linear_) {
     write_offset = 0;
     copy_size = regs.line_length_in * regs.line_count;
     inner_buffer.resize(copy_size);
-    this->is_linear = is_linear;
+    is_linear = is_linear_;
 }
 
 void State::ProcessData(const u32 data, const bool is_last_call) {

@@ -54,10 +54,10 @@ struct Registers {
 
 class State {
 public:
-    State(MemoryManager& memory_manager, Registers& regs);
+    explicit State(MemoryManager& memory_manager_, Registers& regs_);
     ~State();
 
-    void ProcessExec(bool is_linear);
+    void ProcessExec(bool is_linear_);
     void ProcessData(u32 data, bool is_last_call);
 
 private:
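The `ProcessExec` change shows the other way shadowing is usually worked around: the old code disambiguated with `this->`. That compiles, but it only takes one forgotten `this->` for an assignment to silently target the parameter instead of the member. An illustrative sketch, assuming a simple member:

```cpp
class UploadState {
public:
    // Old shape: the parameter shadows the member, so the body must
    // qualify every member access. Dropping the this-> here would
    // self-assign the parameter and leave the member untouched.
    void ProcessExecOld(bool is_linear) {
        this->is_linear = is_linear;
    }

    // New shape after the rename: no shadowing, no qualification needed,
    // and -Wshadow can be enabled for the whole file.
    void ProcessExecNew(bool is_linear_) {
        is_linear = is_linear_;
    }

private:
    bool is_linear = false;
};
```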
@@ -14,8 +14,8 @@
 
 namespace Tegra::Engines {
 
-KeplerMemory::KeplerMemory(Core::System& system, MemoryManager& memory_manager)
-    : system{system}, upload_state{memory_manager, regs.upload} {}
+KeplerMemory::KeplerMemory(Core::System& system_, MemoryManager& memory_manager)
+    : system{system_}, upload_state{memory_manager, regs.upload} {}
 
 KeplerMemory::~KeplerMemory() = default;
 

@@ -35,7 +35,7 @@ namespace Tegra::Engines {
 
 class KeplerMemory final : public EngineInterface {
 public:
-    KeplerMemory(Core::System& system, MemoryManager& memory_manager);
+    explicit KeplerMemory(Core::System& system_, MemoryManager& memory_manager);
     ~KeplerMemory();
 
     /// Write the value to the register identified by method.

@@ -16,8 +16,10 @@ namespace Tegra::Engines {
 
 using namespace Texture;
 
-MaxwellDMA::MaxwellDMA(Core::System& system, MemoryManager& memory_manager)
-    : system{system}, memory_manager{memory_manager} {}
+MaxwellDMA::MaxwellDMA(Core::System& system_, MemoryManager& memory_manager_)
+    : system{system_}, memory_manager{memory_manager_} {}
+
+MaxwellDMA::~MaxwellDMA() = default;
 
 void MaxwellDMA::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
     ASSERT_MSG(method < NUM_REGS, "Invalid MaxwellDMA register");

@@ -185,8 +185,8 @@ public:
     };
     static_assert(sizeof(RemapConst) == 12);
 
-    explicit MaxwellDMA(Core::System& system, MemoryManager& memory_manager);
-    ~MaxwellDMA() = default;
+    explicit MaxwellDMA(Core::System& system_, MemoryManager& memory_manager_);
+    ~MaxwellDMA();
 
     /// Write the value to the register identified by method.
     void CallMethod(u32 method, u32 method_argument, bool is_last_call) override;

@@ -17,11 +17,11 @@ namespace VideoCommon {
 
 class FenceBase {
 public:
-    FenceBase(u32 payload, bool is_stubbed)
-        : address{}, payload{payload}, is_semaphore{false}, is_stubbed{is_stubbed} {}
+    explicit FenceBase(u32 payload_, bool is_stubbed_)
+        : address{}, payload{payload_}, is_semaphore{false}, is_stubbed{is_stubbed_} {}
 
-    FenceBase(GPUVAddr address, u32 payload, bool is_stubbed)
-        : address{address}, payload{payload}, is_semaphore{true}, is_stubbed{is_stubbed} {}
+    explicit FenceBase(GPUVAddr address_, u32 payload_, bool is_stubbed_)
+        : address{address_}, payload{payload_}, is_semaphore{true}, is_stubbed{is_stubbed_} {}
 
     GPUVAddr GetAddress() const {
         return address;

@@ -232,8 +232,12 @@ void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32
         CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
     } else {
         for (std::size_t i = 0; i < amount; i++) {
-            CallPullerMethod(
-                {method, base_start[i], subchannel, methods_pending - static_cast<u32>(i)});
+            CallPullerMethod(MethodCall{
+                method,
+                base_start[i],
+                subchannel,
+                methods_pending - static_cast<u32>(i),
+            });
         }
     }
 }

@@ -149,16 +149,16 @@ public:
         u32 subchannel{};
         u32 method_count{};
 
-        MethodCall(u32 method, u32 argument, u32 subchannel = 0, u32 method_count = 0)
-            : method(method), argument(argument), subchannel(subchannel),
-              method_count(method_count) {}
+        explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
+            : method(method_), argument(argument_), subchannel(subchannel_),
+              method_count(method_count_) {}
 
         [[nodiscard]] bool IsLastCall() const {
             return method_count <= 1;
         }
     };
 
-    explicit GPU(Core::System& system, bool is_async, bool use_nvdec);
+    explicit GPU(Core::System& system_, bool is_async_, bool use_nvdec_);
     virtual ~GPU();
 
     /// Binds a renderer to the GPU.
@@ -414,8 +414,8 @@ private:
     std::condition_variable sync_cv;
 
     struct FlushRequest {
-        FlushRequest(u64 fence, VAddr addr, std::size_t size)
-            : fence{fence}, addr{addr}, size{size} {}
+        explicit FlushRequest(u64 fence_, VAddr addr_, std::size_t size_)
+            : fence{fence_}, addr{addr_}, size{size_} {}
         u64 fence;
         VAddr addr;
         std::size_t size;

@@ -10,8 +10,8 @@
 
 namespace VideoCommon {
 
-GPUAsynch::GPUAsynch(Core::System& system, bool use_nvdec)
-    : GPU{system, true, use_nvdec}, gpu_thread{system} {}
+GPUAsynch::GPUAsynch(Core::System& system_, bool use_nvdec_)
+    : GPU{system_, true, use_nvdec_}, gpu_thread{system_} {}
 
 GPUAsynch::~GPUAsynch() = default;
 

@@ -20,7 +20,7 @@ namespace VideoCommon {
 /// Implementation of GPU interface that runs the GPU asynchronously
 class GPUAsynch final : public Tegra::GPU {
 public:
-    explicit GPUAsynch(Core::System& system, bool use_nvdec);
+    explicit GPUAsynch(Core::System& system_, bool use_nvdec_);
     ~GPUAsynch() override;
 
     void Start() override;

@@ -7,7 +7,7 @@
 
 namespace VideoCommon {
 
-GPUSynch::GPUSynch(Core::System& system, bool use_nvdec) : GPU{system, false, use_nvdec} {}
+GPUSynch::GPUSynch(Core::System& system_, bool use_nvdec_) : GPU{system_, false, use_nvdec_} {}
 
 GPUSynch::~GPUSynch() = default;
 

@@ -19,7 +19,7 @@ namespace VideoCommon {
 /// Implementation of GPU interface that runs the GPU synchronously
 class GPUSynch final : public Tegra::GPU {
 public:
-    explicit GPUSynch(Core::System& system, bool use_nvdec);
+    explicit GPUSynch(Core::System& system_, bool use_nvdec_);
     ~GPUSynch() override;
 
     void Start() override;

@@ -39,23 +39,23 @@ static void RunThread(Core::System& system, VideoCore::RendererBase& renderer,
     CommandDataContainer next;
     while (state.is_running) {
         next = state.queue.PopWait();
-        if (const auto submit_list = std::get_if<SubmitListCommand>(&next.data)) {
+        if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) {
             dma_pusher.Push(std::move(submit_list->entries));
             dma_pusher.DispatchCalls();
-        } else if (const auto command_list = std::get_if<SubmitChCommandEntries>(&next.data)) {
+        } else if (auto* command_list = std::get_if<SubmitChCommandEntries>(&next.data)) {
             // NVDEC
             cdma_pusher.Push(std::move(command_list->entries));
             cdma_pusher.DispatchCalls();
-        } else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) {
+        } else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) {
             renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
         } else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) {
             renderer.Rasterizer().ReleaseFences();
         } else if (std::holds_alternative<GPUTickCommand>(next.data)) {
             system.GPU().TickWork();
-        } else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) {
-            renderer.Rasterizer().FlushRegion(data->addr, data->size);
-        } else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) {
-            renderer.Rasterizer().OnCPUWrite(data->addr, data->size);
+        } else if (const auto* flush = std::get_if<FlushRegionCommand>(&next.data)) {
+            renderer.Rasterizer().FlushRegion(flush->addr, flush->size);
+        } else if (const auto* invalidate = std::get_if<InvalidateRegionCommand>(&next.data)) {
+            renderer.Rasterizer().OnCPUWrite(invalidate->addr, invalidate->size);
         } else if (std::holds_alternative<EndProcessingCommand>(next.data)) {
             return;
         } else {
@@ -65,7 +65,7 @@ static void RunThread(Core::System& system, VideoCore::RendererBase& renderer,
     }
 }
 
-ThreadManager::ThreadManager(Core::System& system) : system{system} {}
+ThreadManager::ThreadManager(Core::System& system_) : system{system_} {}
 
 ThreadManager::~ThreadManager() {
     if (!thread.joinable()) {
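The `RunThread` hunk is subtle: an `else if` chain desugars to nested `if` statements, so a variable declared in an earlier condition is still in scope in every later branch, and redeclaring `data` there is genuine shadowing. Giving each branch its own name (`flush`, `invalidate`) fixes it. A compact sketch of the same pattern with hypothetical command types:

```cpp
#include <iostream>
#include <variant>

struct FlushCmd { int addr; };
struct InvalidateCmd { int addr; };

using Command = std::variant<FlushCmd, InvalidateCmd>;

void Handle(const Command& cmd) {
    // Desugared, the second condition lives inside the first one's else
    // block, so reusing a single name (e.g. `data`) for both pointers
    // would shadow the outer declaration. Distinct names per branch
    // avoid that entirely.
    if (const auto* flush = std::get_if<FlushCmd>(&cmd)) {
        std::cout << "flush @ " << flush->addr << '\n';
    } else if (const auto* invalidate = std::get_if<InvalidateCmd>(&cmd)) {
        std::cout << "invalidate @ " << invalidate->addr << '\n';
    }
}

int main() {
    Handle(FlushCmd{0x1000});
    Handle(InvalidateCmd{0x2000});
}
```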
@@ -32,30 +32,30 @@ struct EndProcessingCommand final {};
 
 /// Command to signal to the GPU thread that a command list is ready for processing
 struct SubmitListCommand final {
-    explicit SubmitListCommand(Tegra::CommandList&& entries) : entries{std::move(entries)} {}
+    explicit SubmitListCommand(Tegra::CommandList&& entries_) : entries{std::move(entries_)} {}
 
     Tegra::CommandList entries;
 };
 
 /// Command to signal to the GPU thread that a cdma command list is ready for processing
 struct SubmitChCommandEntries final {
-    explicit SubmitChCommandEntries(Tegra::ChCommandHeaderList&& entries)
-        : entries{std::move(entries)} {}
+    explicit SubmitChCommandEntries(Tegra::ChCommandHeaderList&& entries_)
+        : entries{std::move(entries_)} {}
 
     Tegra::ChCommandHeaderList entries;
 };
 
 /// Command to signal to the GPU thread that a swap buffers is pending
 struct SwapBuffersCommand final {
-    explicit SwapBuffersCommand(std::optional<const Tegra::FramebufferConfig> framebuffer)
-        : framebuffer{std::move(framebuffer)} {}
+    explicit SwapBuffersCommand(std::optional<const Tegra::FramebufferConfig> framebuffer_)
+        : framebuffer{std::move(framebuffer_)} {}
 
     std::optional<Tegra::FramebufferConfig> framebuffer;
 };
 
 /// Command to signal to the GPU thread to flush a region
 struct FlushRegionCommand final {
-    explicit constexpr FlushRegionCommand(VAddr addr, u64 size) : addr{addr}, size{size} {}
+    explicit constexpr FlushRegionCommand(VAddr addr_, u64 size_) : addr{addr_}, size{size_} {}
 
     VAddr addr;
     u64 size;
@@ -63,7 +63,7 @@ struct FlushRegionCommand final {
 
 /// Command to signal to the GPU thread to invalidate a region
 struct InvalidateRegionCommand final {
-    explicit constexpr InvalidateRegionCommand(VAddr addr, u64 size) : addr{addr}, size{size} {}
+    explicit constexpr InvalidateRegionCommand(VAddr addr_, u64 size_) : addr{addr_}, size{size_} {}
 
     VAddr addr;
     u64 size;
@@ -71,8 +71,8 @@ struct InvalidateRegionCommand final {
 
 /// Command to signal to the GPU thread to flush and invalidate a region
 struct FlushAndInvalidateRegionCommand final {
-    explicit constexpr FlushAndInvalidateRegionCommand(VAddr addr, u64 size)
-        : addr{addr}, size{size} {}
+    explicit constexpr FlushAndInvalidateRegionCommand(VAddr addr_, u64 size_)
+        : addr{addr_}, size{size_} {}
 
     VAddr addr;
     u64 size;
@@ -92,8 +92,8 @@ using CommandData =
 struct CommandDataContainer {
     CommandDataContainer() = default;
 
-    CommandDataContainer(CommandData&& data, u64 next_fence)
-        : data{std::move(data)}, fence{next_fence} {}
+    explicit CommandDataContainer(CommandData&& data_, u64 next_fence_)
+        : data{std::move(data_)}, fence{next_fence_} {}
 
     CommandData data;
     u64 fence{};
@@ -112,7 +112,7 @@ struct SynchState final {
 /// Class used to manage the GPU thread
 class ThreadManager final {
 public:
-    explicit ThreadManager(Core::System& system);
+    explicit ThreadManager(Core::System& system_);
     ~ThreadManager();
 
     /// Creates and starts the GPU thread.
@@ -146,7 +146,6 @@ private:
     /// Pushes a command to be executed by the GPU thread
     u64 PushCommand(CommandData&& command_data);
 
-private:
     SynchState state;
     Core::System& system;
     std::thread thread;

@@ -19,8 +19,8 @@ namespace VideoCore {
 class GuestDriverProfile {
 public:
     explicit GuestDriverProfile() = default;
-    explicit GuestDriverProfile(std::optional<u32> texture_handler_size)
-        : texture_handler_size{texture_handler_size} {}
+    explicit GuestDriverProfile(std::optional<u32> texture_handler_size_)
+        : texture_handler_size{texture_handler_size_} {}
 
     void DeduceTextureHandlerSize(std::vector<u32> bound_offsets);
 

@@ -85,7 +85,7 @@ constexpr std::array<std::pair<u64, HLEFunction>, 3> hle_funcs{{
     {0x0217920100488FF7, &HLE_0217920100488FF7},
 }};
 
-HLEMacro::HLEMacro(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
+HLEMacro::HLEMacro(Engines::Maxwell3D& maxwell3d_) : maxwell3d{maxwell3d_} {}
 HLEMacro::~HLEMacro() = default;
 
 std::optional<std::unique_ptr<CachedMacro>> HLEMacro::GetHLEProgram(u64 hash) const {
@@ -99,8 +99,8 @@ std::optional<std::unique_ptr<CachedMacro>> HLEMacro::GetHLEProgram(u64 hash) co
 
 HLEMacroImpl::~HLEMacroImpl() = default;
 
-HLEMacroImpl::HLEMacroImpl(Engines::Maxwell3D& maxwell3d, HLEFunction func)
-    : maxwell3d(maxwell3d), func(func) {}
+HLEMacroImpl::HLEMacroImpl(Engines::Maxwell3D& maxwell3d_, HLEFunction func_)
+    : maxwell3d{maxwell3d_}, func{func_} {}
 
 void HLEMacroImpl::Execute(const std::vector<u32>& parameters, u32 method) {
     func(maxwell3d, parameters);

@@ -20,7 +20,7 @@ using HLEFunction = void (*)(Engines::Maxwell3D& maxwell3d, const std::vector<u3
 
 class HLEMacro {
 public:
-    explicit HLEMacro(Engines::Maxwell3D& maxwell3d);
+    explicit HLEMacro(Engines::Maxwell3D& maxwell3d_);
     ~HLEMacro();
 
     std::optional<std::unique_ptr<CachedMacro>> GetHLEProgram(u64 hash) const;

@@ -11,29 +11,29 @@
 MICROPROFILE_DEFINE(MacroInterp, "GPU", "Execute macro interpreter", MP_RGB(128, 128, 192));
 
 namespace Tegra {
-MacroInterpreter::MacroInterpreter(Engines::Maxwell3D& maxwell3d)
-    : MacroEngine::MacroEngine(maxwell3d), maxwell3d(maxwell3d) {}
+MacroInterpreter::MacroInterpreter(Engines::Maxwell3D& maxwell3d_)
+    : MacroEngine{maxwell3d_}, maxwell3d{maxwell3d_} {}
 
 std::unique_ptr<CachedMacro> MacroInterpreter::Compile(const std::vector<u32>& code) {
     return std::make_unique<MacroInterpreterImpl>(maxwell3d, code);
 }
 
-MacroInterpreterImpl::MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d,
-                                           const std::vector<u32>& code)
-    : maxwell3d(maxwell3d), code(code) {}
+MacroInterpreterImpl::MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d_,
+                                           const std::vector<u32>& code_)
+    : maxwell3d{maxwell3d_}, code{code_} {}
 
-void MacroInterpreterImpl::Execute(const std::vector<u32>& parameters, u32 method) {
+void MacroInterpreterImpl::Execute(const std::vector<u32>& params, u32 method) {
     MICROPROFILE_SCOPE(MacroInterp);
     Reset();
 
-    registers[1] = parameters[0];
-    num_parameters = parameters.size();
+    registers[1] = params[0];
+    num_parameters = params.size();
 
     if (num_parameters > parameters_capacity) {
         parameters_capacity = num_parameters;
-        this->parameters = std::make_unique<u32[]>(num_parameters);
+        parameters = std::make_unique<u32[]>(num_parameters);
     }
-    std::memcpy(this->parameters.get(), parameters.data(), num_parameters * sizeof(u32));
+    std::memcpy(parameters.get(), params.data(), num_parameters * sizeof(u32));
 
     // Execute the code until we hit an exit condition.
     bool keep_executing = true;
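In `Execute`, the argument used to share its name with the `parameters` member buffer, forcing `this->parameters` for the member and bare `parameters` for the argument on adjacent lines. Renaming the argument to `params` makes both plain. A stripped-down sketch of the grow-and-copy logic, using a hypothetical class of the same shape:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

class Interpreter {
public:
    void Execute(const std::vector<std::uint32_t>& params) {
        num_parameters = params.size();
        // Grow the member buffer on demand, then copy the call arguments
        // into it. With distinct names, no this-> is needed to tell the
        // member `parameters` apart from the argument `params`.
        if (num_parameters > parameters_capacity) {
            parameters_capacity = num_parameters;
            parameters = std::make_unique<std::uint32_t[]>(num_parameters);
        }
        std::memcpy(parameters.get(), params.data(),
                    num_parameters * sizeof(std::uint32_t));
    }

private:
    std::unique_ptr<std::uint32_t[]> parameters;
    std::size_t parameters_capacity = 0;
    std::size_t num_parameters = 0;
};
```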
@@ -17,7 +17,7 @@ class Maxwell3D;
 
 class MacroInterpreter final : public MacroEngine {
 public:
-    explicit MacroInterpreter(Engines::Maxwell3D& maxwell3d);
+    explicit MacroInterpreter(Engines::Maxwell3D& maxwell3d_);
 
 protected:
     std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override;
@@ -28,8 +28,8 @@ private:
 
 class MacroInterpreterImpl : public CachedMacro {
 public:
-    MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code);
-    void Execute(const std::vector<u32>& parameters, u32 method) override;
+    explicit MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d_, const std::vector<u32>& code_);
+    void Execute(const std::vector<u32>& params, u32 method) override;
 
 private:
     /// Resets the execution engine state, zeroing registers, etc.
@@ -38,9 +38,9 @@ private:
     /**
      * Executes a single macro instruction located at the current program counter. Returns whether
      * the interpreter should keep running.
-     * @param offset Offset to start execution at.
+     *
      * @param is_delay_slot Whether the current step is being executed due to a delay slot in a
-     * previous instruction.
+     *                      previous instruction.
      */
     bool Step(bool is_delay_slot);
 

@@ -28,15 +28,15 @@ static const std::bitset<32> PERSISTENT_REGISTERS = Common::X64::BuildRegSet({
     BRANCH_HOLDER,
 });
 
-MacroJITx64::MacroJITx64(Engines::Maxwell3D& maxwell3d)
-    : MacroEngine::MacroEngine(maxwell3d), maxwell3d(maxwell3d) {}
+MacroJITx64::MacroJITx64(Engines::Maxwell3D& maxwell3d_)
+    : MacroEngine{maxwell3d_}, maxwell3d{maxwell3d_} {}
 
 std::unique_ptr<CachedMacro> MacroJITx64::Compile(const std::vector<u32>& code) {
     return std::make_unique<MacroJITx64Impl>(maxwell3d, code);
 }
 
-MacroJITx64Impl::MacroJITx64Impl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code)
-    : Xbyak::CodeGenerator(MAX_CODE_SIZE), code(code), maxwell3d(maxwell3d) {
+MacroJITx64Impl::MacroJITx64Impl(Engines::Maxwell3D& maxwell3d_, const std::vector<u32>& code_)
+    : CodeGenerator{MAX_CODE_SIZE}, code{code_}, maxwell3d{maxwell3d_} {
     Compile();
 }
 
@@ -553,15 +553,15 @@ Xbyak::Reg32 MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg32 dst) {
 }
 
 void MacroJITx64Impl::Compile_ProcessResult(Macro::ResultOperation operation, u32 reg) {
-    const auto SetRegister = [this](u32 reg, const Xbyak::Reg32& result) {
+    const auto SetRegister = [this](u32 reg_index, const Xbyak::Reg32& result) {
         // Register 0 is supposed to always return 0. NOP is implemented as a store to the zero
         // register.
-        if (reg == 0) {
+        if (reg_index == 0) {
             return;
         }
-        mov(dword[STATE + offsetof(JITState, registers) + reg * sizeof(u32)], result);
+        mov(dword[STATE + offsetof(JITState, registers) + reg_index * sizeof(u32)], result);
     };
-    const auto SetMethodAddress = [this](const Xbyak::Reg32& reg) { mov(METHOD_ADDRESS, reg); };
+    const auto SetMethodAddress = [this](const Xbyak::Reg32& reg32) { mov(METHOD_ADDRESS, reg32); };
 
     switch (operation) {
     case Macro::ResultOperation::IgnoreAndFetch:
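The JIT hunk fixes a lambda-specific case: `Compile_ProcessResult(..., u32 reg)` declared a lambda whose own parameter was also named `reg`, shadowing the enclosing function's parameter for the whole lambda body. An illustrative sketch:

```cpp
#include <iostream>

void ProcessResult(int reg) {
    // Bad: the lambda parameter `reg` shadows the function parameter
    // `reg`; inside this lambda the outer value is unreachable.
    auto set_register_shadowing = [](int reg) {
        std::cout << "writing register " << reg << '\n';
    };

    // Good: a distinct name keeps both in scope and -Wshadow quiet.
    auto set_register = [&](int reg_index) {
        std::cout << "writing register " << reg_index
                  << " (result slot " << reg << ")\n";
    };

    set_register_shadowing(reg);
    set_register(reg + 1);
}

int main() {
    ProcessResult(3);
}
```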
@@ -23,7 +23,7 @@ constexpr size_t MAX_CODE_SIZE = 0x10000;
 
 class MacroJITx64 final : public MacroEngine {
 public:
-    explicit MacroJITx64(Engines::Maxwell3D& maxwell3d);
+    explicit MacroJITx64(Engines::Maxwell3D& maxwell3d_);
 
 protected:
     std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override;
@@ -34,7 +34,7 @@ private:
 
 class MacroJITx64Impl : public Xbyak::CodeGenerator, public CachedMacro {
 public:
-    MacroJITx64Impl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code);
+    explicit MacroJITx64Impl(Engines::Maxwell3D& maxwell3d_, const std::vector<u32>& code_);
     ~MacroJITx64Impl();
 
     void Execute(const std::vector<u32>& parameters, u32 method) override;

@@ -28,7 +28,7 @@ public:
     };
 
     constexpr PageEntry() = default;
-    constexpr PageEntry(State state) : state{state} {}
+    constexpr PageEntry(State state_) : state{state_} {}
     constexpr PageEntry(VAddr addr) : state{static_cast<State>(addr >> ShiftBits)} {}
 
     [[nodiscard]] constexpr bool IsUnmapped() const {
@@ -68,7 +68,7 @@ static_assert(sizeof(PageEntry) == 4, "PageEntry is too large");
 
 class MemoryManager final {
 public:
-    explicit MemoryManager(Core::System& system);
+    explicit MemoryManager(Core::System& system_);
     ~MemoryManager();
 
     /// Binds a renderer to the memory manager.

@@ -187,8 +187,8 @@ std::string TextureType(const MetaTexture& meta) {
 
 class ARBDecompiler final {
 public:
-    explicit ARBDecompiler(const Device& device, const ShaderIR& ir, const Registry& registry,
-                           ShaderType stage, std::string_view identifier);
+    explicit ARBDecompiler(const Device& device_, const ShaderIR& ir_, const Registry& registry_,
+                           ShaderType stage_, std::string_view identifier);
 
     std::string Code() const {
         return shader_source;
@@ -802,9 +802,9 @@ private:
     };
 };
 
-ARBDecompiler::ARBDecompiler(const Device& device, const ShaderIR& ir, const Registry& registry,
-                             ShaderType stage, std::string_view identifier)
-    : device{device}, ir{ir}, registry{registry}, stage{stage} {
+ARBDecompiler::ARBDecompiler(const Device& device_, const ShaderIR& ir_, const Registry& registry_,
+                             ShaderType stage_, std::string_view identifier)
+    : device{device_}, ir{ir_}, registry{registry_}, stage{stage_} {
     DefineGlobalMemory();
 
     AddLine("TEMP RC;");
@@ -1134,44 +1134,44 @@ void ARBDecompiler::VisitAST(const ASTNode& node) {
         for (ASTNode current = ast->nodes.GetFirst(); current; current = current->GetNext()) {
             VisitAST(current);
         }
-    } else if (const auto ast = std::get_if<ASTIfThen>(&*node->GetInnerData())) {
-        const std::string condition = VisitExpression(ast->condition);
+    } else if (const auto if_then = std::get_if<ASTIfThen>(&*node->GetInnerData())) {
+        const std::string condition = VisitExpression(if_then->condition);
         ResetTemporaries();
 
         AddLine("MOVC.U RC.x, {};", condition);
         AddLine("IF NE.x;");
-        for (ASTNode current = ast->nodes.GetFirst(); current; current = current->GetNext()) {
+        for (ASTNode current = if_then->nodes.GetFirst(); current; current = current->GetNext()) {
             VisitAST(current);
         }
         AddLine("ENDIF;");
-    } else if (const auto ast = std::get_if<ASTIfElse>(&*node->GetInnerData())) {
+    } else if (const auto if_else = std::get_if<ASTIfElse>(&*node->GetInnerData())) {
         AddLine("ELSE;");
-        for (ASTNode current = ast->nodes.GetFirst(); current; current = current->GetNext()) {
+        for (ASTNode current = if_else->nodes.GetFirst(); current; current = current->GetNext()) {
             VisitAST(current);
         }
-    } else if (const auto ast = std::get_if<ASTBlockDecoded>(&*node->GetInnerData())) {
-        VisitBlock(ast->nodes);
-    } else if (const auto ast = std::get_if<ASTVarSet>(&*node->GetInnerData())) {
-        AddLine("MOV.U F{}, {};", ast->index, VisitExpression(ast->condition));
+    } else if (const auto decoded = std::get_if<ASTBlockDecoded>(&*node->GetInnerData())) {
+        VisitBlock(decoded->nodes);
+    } else if (const auto var_set = std::get_if<ASTVarSet>(&*node->GetInnerData())) {
+        AddLine("MOV.U F{}, {};", var_set->index, VisitExpression(var_set->condition));
         ResetTemporaries();
-    } else if (const auto ast = std::get_if<ASTDoWhile>(&*node->GetInnerData())) {
-        const std::string condition = VisitExpression(ast->condition);
+    } else if (const auto do_while = std::get_if<ASTDoWhile>(&*node->GetInnerData())) {
+        const std::string condition = VisitExpression(do_while->condition);
         ResetTemporaries();
         AddLine("REP;");
-        for (ASTNode current = ast->nodes.GetFirst(); current; current = current->GetNext()) {
+        for (ASTNode current = do_while->nodes.GetFirst(); current; current = current->GetNext()) {
             VisitAST(current);
         }
         AddLine("MOVC.U RC.x, {};", condition);
         AddLine("BRK (NE.x);");
         AddLine("ENDREP;");
-    } else if (const auto ast = std::get_if<ASTReturn>(&*node->GetInnerData())) {
-        const bool is_true = ExprIsTrue(ast->condition);
+    } else if (const auto ast_return = std::get_if<ASTReturn>(&*node->GetInnerData())) {
+        const bool is_true = ExprIsTrue(ast_return->condition);
         if (!is_true) {
-            AddLine("MOVC.U RC.x, {};", VisitExpression(ast->condition));
+            AddLine("MOVC.U RC.x, {};", VisitExpression(ast_return->condition));
             AddLine("IF NE.x;");
             ResetTemporaries();
         }
-        if (ast->kills) {
+        if (ast_return->kills) {
             AddLine("KIL TR;");
         } else {
             Exit();
@@ -1179,11 +1179,11 @@ void ARBDecompiler::VisitAST(const ASTNode& node) {
         if (!is_true) {
             AddLine("ENDIF;");
         }
-    } else if (const auto ast = std::get_if<ASTBreak>(&*node->GetInnerData())) {
-        if (ExprIsTrue(ast->condition)) {
+    } else if (const auto ast_break = std::get_if<ASTBreak>(&*node->GetInnerData())) {
+        if (ExprIsTrue(ast_break->condition)) {
             AddLine("BRK;");
         } else {
-            AddLine("MOVC.U RC.x, {};", VisitExpression(ast->condition));
+            AddLine("MOVC.U RC.x, {};", VisitExpression(ast_break->condition));
             AddLine("BRK (NE.x);");
             ResetTemporaries();
         }

@@ -11,10 +11,10 @@
 
 namespace OpenGL {
 
-GLInnerFence::GLInnerFence(u32 payload, bool is_stubbed) : FenceBase(payload, is_stubbed) {}
+GLInnerFence::GLInnerFence(u32 payload_, bool is_stubbed_) : FenceBase{payload_, is_stubbed_} {}
 
-GLInnerFence::GLInnerFence(GPUVAddr address, u32 payload, bool is_stubbed)
-    : FenceBase(address, payload, is_stubbed) {}
+GLInnerFence::GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_)
+    : FenceBase{address_, payload_, is_stubbed_} {}
 
 GLInnerFence::~GLInnerFence() = default;
 
@@ -45,10 +45,10 @@ void GLInnerFence::Wait() {
     glClientWaitSync(sync_object.handle, 0, GL_TIMEOUT_IGNORED);
 }
 
-FenceManagerOpenGL::FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterizer, Tegra::GPU& gpu,
-                                       TextureCacheOpenGL& texture_cache,
-                                       OGLBufferCache& buffer_cache, QueryCache& query_cache)
-    : GenericFenceManager{rasterizer, gpu, texture_cache, buffer_cache, query_cache} {}
+FenceManagerOpenGL::FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterizer_,
+                                       Tegra::GPU& gpu_, TextureCacheOpenGL& texture_cache_,
+                                       OGLBufferCache& buffer_cache_, QueryCache& query_cache_)
+    : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_} {}
 
 Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) {
     return std::make_shared<GLInnerFence>(value, is_stubbed);

@@ -17,8 +17,8 @@ namespace OpenGL {
 
 class GLInnerFence : public VideoCommon::FenceBase {
 public:
-    GLInnerFence(u32 payload, bool is_stubbed);
-    GLInnerFence(GPUVAddr address, u32 payload, bool is_stubbed);
+    explicit GLInnerFence(u32 payload_, bool is_stubbed_);
+    explicit GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_);
     ~GLInnerFence();
 
     void Queue();
@@ -37,9 +37,9 @@ using GenericFenceManager =
 
 class FenceManagerOpenGL final : public GenericFenceManager {
 public:
-    explicit FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterizer, Tegra::GPU& gpu,
-                                TextureCacheOpenGL& texture_cache, OGLBufferCache& buffer_cache,
-                                QueryCache& query_cache);
+    explicit FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
+                                TextureCacheOpenGL& texture_cache_, OGLBufferCache& buffer_cache_,
+                                QueryCache& query_cache_);
 
 protected:
     Fence CreateFence(u32 value, bool is_stubbed) override;

@@ -59,10 +59,10 @@ bool QueryCache::AnyCommandQueued() const noexcept {
     return gl_rasterizer.AnyCommandQueued();
 }
 
-HostCounter::HostCounter(QueryCache& cache, std::shared_ptr<HostCounter> dependency,
-                         VideoCore::QueryType type)
-    : VideoCommon::HostCounterBase<QueryCache, HostCounter>{std::move(dependency)}, cache{cache},
-      type{type}, query{cache.AllocateQuery(type)} {
+HostCounter::HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> dependency,
+                         VideoCore::QueryType type_)
+    : HostCounterBase<QueryCache, HostCounter>{std::move(dependency)}, cache{cache_}, type{type_},
+      query{cache.AllocateQuery(type)} {
     glBeginQuery(GetTarget(type), query.handle);
 }
 
@@ -86,13 +86,14 @@ u64 HostCounter::BlockingQuery() const {
     return static_cast<u64>(value);
 }
 
-CachedQuery::CachedQuery(QueryCache& cache, VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr)
-    : VideoCommon::CachedQueryBase<HostCounter>{cpu_addr, host_ptr}, cache{&cache}, type{type} {}
+CachedQuery::CachedQuery(QueryCache& cache_, VideoCore::QueryType type_, VAddr cpu_addr,
+                         u8* host_ptr)
+    : CachedQueryBase<HostCounter>{cpu_addr, host_ptr}, cache{&cache_}, type{type_} {}
 
 CachedQuery::~CachedQuery() = default;
 
 CachedQuery::CachedQuery(CachedQuery&& rhs) noexcept
-    : VideoCommon::CachedQueryBase<HostCounter>(std::move(rhs)), cache{rhs.cache}, type{rhs.type} {}
+    : CachedQueryBase<HostCounter>(std::move(rhs)), cache{rhs.cache}, type{rhs.type} {}
 
 CachedQuery& CachedQuery::operator=(CachedQuery&& rhs) noexcept {
     cache = rhs.cache;
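One detail worth flagging in the `HostCounter` change: the initializer `query{cache.AllocateQuery(type)}` now reads the `cache` and `type` members rather than the constructor parameters. That is safe only because members initialize in declaration order and `query` is declared after them. A sketch of the ordering rule, with a hypothetical class:

```cpp
#include <cstdint>

struct Resource {
    std::uint32_t handle;
};

class Cache {
public:
    Resource Allocate(int type) {
        return Resource{static_cast<std::uint32_t>(type)};
    }
};

class Counter {
public:
    // `cache` and `type` are declared before `query`, so both are fully
    // initialized by the time query{cache.Allocate(type)} runs. If the
    // declaration order below were reversed, this initializer would read
    // uninitialized members even though the list *looks* fine.
    explicit Counter(Cache& cache_, int type_)
        : cache{cache_}, type{type_}, query{cache.Allocate(type)} {}

private:
    Cache& cache;
    int type;
    Resource query;
};

int main() {
    Cache cache;
    Counter counter{cache, 2};
}
```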
@@ -46,8 +46,8 @@ private:
 
 class HostCounter final : public VideoCommon::HostCounterBase<QueryCache, HostCounter> {
 public:
-    explicit HostCounter(QueryCache& cache, std::shared_ptr<HostCounter> dependency,
-                         VideoCore::QueryType type);
+    explicit HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> dependency,
+                         VideoCore::QueryType type_);
     ~HostCounter();
 
     void EndQuery();
@@ -62,7 +62,7 @@ private:
 
 class CachedQuery final : public VideoCommon::CachedQueryBase<HostCounter> {
 public:
-    explicit CachedQuery(QueryCache& cache, VideoCore::QueryType type, VAddr cpu_addr,
+    explicit CachedQuery(QueryCache& cache_, VideoCore::QueryType type_, VAddr cpu_addr,
                          u8* host_ptr);
     ~CachedQuery() override;
 

@@ -198,10 +198,10 @@ ProgramSharedPtr BuildShader(const Device& device, ShaderType shader_type, u64 u
     return program;
 }
 
-Shader::Shader(std::shared_ptr<VideoCommon::Shader::Registry> registry_, ShaderEntries entries_,
-               ProgramSharedPtr program_, bool is_built)
+Shader::Shader(std::shared_ptr<Registry> registry_, ShaderEntries entries_,
+               ProgramSharedPtr program_, bool is_built_)
     : registry{std::move(registry_)}, entries{std::move(entries_)}, program{std::move(program_)},
-      is_built(is_built) {
+      is_built{is_built_} {
     handle = program->assembly_program.handle;
     if (handle == 0) {
         handle = program->source_program.handle;

@@ -108,7 +108,7 @@ public:
 
 private:
     explicit Shader(std::shared_ptr<VideoCommon::Shader::Registry> registry, ShaderEntries entries,
-                    ProgramSharedPtr program, bool is_built = true);
+                    ProgramSharedPtr program, bool is_built_ = true);
 
     std::shared_ptr<VideoCommon::Shader::Registry> registry;
     ShaderEntries entries;

@@ -131,7 +131,7 @@ private:
 
 class Expression final {
 public:
-    Expression(std::string code, Type type) : code{std::move(code)}, type{type} {
+    Expression(std::string code_, Type type_) : code{std::move(code_)}, type{type_} {
         ASSERT(type != Type::Void);
     }
     Expression() : type{Type::Void} {}
@@ -148,8 +148,8 @@ public:
         ASSERT(type == Type::Void);
     }
 
-    std::string As(Type type) const {
-        switch (type) {
+    std::string As(Type type_) const {
+        switch (type_) {
         case Type::Bool:
             return AsBool();
         case Type::Bool2:
@@ -418,11 +418,12 @@ struct GenericVaryingDescription {
 
 class GLSLDecompiler final {
 public:
-    explicit GLSLDecompiler(const Device& device, const ShaderIR& ir, const Registry& registry,
-                            ShaderType stage, std::string_view identifier, std::string_view suffix)
-        : device{device}, ir{ir}, registry{registry}, stage{stage}, identifier{identifier},
-          suffix{suffix}, header{ir.GetHeader()}, use_unified_uniforms{
-                                                      UseUnifiedUniforms(device, ir, stage)} {
+    explicit GLSLDecompiler(const Device& device_, const ShaderIR& ir_, const Registry& registry_,
+                            ShaderType stage_, std::string_view identifier_,
+                            std::string_view suffix_)
+        : device{device_}, ir{ir_}, registry{registry_}, stage{stage_}, identifier{identifier_},
+          suffix{suffix_}, header{ir.GetHeader()}, use_unified_uniforms{
+                                                       UseUnifiedUniforms(device_, ir_, stage_)} {
         if (stage != ShaderType::Compute) {
             transform_feedback = BuildTransformFeedback(registry.GetGraphicsInfo());
         }
@@ -777,16 +778,16 @@ private:
             name = "gs_" + name + "[]";
         }
 
-        std::string suffix;
+        std::string suffix_;
         if (stage == ShaderType::Fragment) {
             const auto input_mode{header.ps.GetPixelImap(location)};
             if (input_mode == PixelImap::Unused) {
                 return;
             }
-            suffix = GetInputFlags(input_mode);
+            suffix_ = GetInputFlags(input_mode);
         }
 
-        code.AddLine("layout (location = {}) {} in vec4 {};", location, suffix, name);
+        code.AddLine("layout (location = {}) {} in vec4 {};", location, suffix_, name);
     }
 
     void DeclareOutputAttributes() {
@@ -2100,13 +2101,13 @@ private:
         const auto type = meta.sampler.is_shadow ? Type::Float : Type::Int;
         const bool separate_dc = meta.sampler.is_shadow;
 
-        std::vector<TextureIR> ir;
+        std::vector<TextureIR> ir_;
         if (meta.sampler.is_shadow) {
-            ir = {TextureOffset{}};
+            ir_ = {TextureOffset{}};
         } else {
-            ir = {TextureOffset{}, TextureArgument{type, meta.component}};
+            ir_ = {TextureOffset{}, TextureArgument{type, meta.component}};
         }
-        return {GenerateTexture(operation, "Gather", ir, separate_dc) + GetSwizzle(meta.element),
+        return {GenerateTexture(operation, "Gather", ir_, separate_dc) + GetSwizzle(meta.element),
                 Type::Float};
     }
 
@@ -2801,7 +2802,7 @@ std::string GetFlowVariable(u32 index) {
 
 class ExprDecompiler {
 public:
-    explicit ExprDecompiler(GLSLDecompiler& decomp) : decomp{decomp} {}
+    explicit ExprDecompiler(GLSLDecompiler& decomp_) : decomp{decomp_} {}
 
     void operator()(const ExprAnd& expr) {
         inner += '(';
@@ -2856,7 +2857,7 @@ private:
 
 class ASTDecompiler {
 public:
-    explicit ASTDecompiler(GLSLDecompiler& decomp) : decomp{decomp} {}
+    explicit ASTDecompiler(GLSLDecompiler& decomp_) : decomp{decomp_} {}
 
     void operator()(const ASTProgram& ast) {
         ASTNode current = ast.nodes.GetFirst();
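Note the convention flips inside `GLSLDecompiler`: when a local shadows a member that keeps its clean name (`suffix`, `ir`), it is the local that picks up the trailing underscore (`suffix_`, `ir_`), presumably because renaming a member used throughout a file this large would have rippled everywhere. An illustrative sketch:

```cpp
#include <string>

class Decompiler {
public:
    std::string DeclareInput(bool is_fragment) {
        // The local used to be called `suffix`, shadowing the member of
        // the same name; here the *local* takes the underscore so the
        // widely used member keeps its name.
        std::string suffix_;
        if (is_fragment) {
            suffix_ = "flat ";
        }
        return "layout (location = 0) " + suffix_ + "in vec4 attr;";
    }

private:
    std::string suffix; // member consulted elsewhere in the class
};
```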
@@ -25,8 +25,8 @@ using ImageEntry = VideoCommon::Shader::Image;
 
 class ConstBufferEntry : public VideoCommon::Shader::ConstBuffer {
 public:
-    explicit ConstBufferEntry(u32 max_offset, bool is_indirect, u32 index)
-        : VideoCommon::Shader::ConstBuffer{max_offset, is_indirect}, index{index} {}
+    explicit ConstBufferEntry(u32 max_offset, bool is_indirect, u32 index_)
+        : ConstBuffer{max_offset, is_indirect}, index{index_} {}
 
     u32 GetIndex() const {
         return index;
@@ -37,10 +37,10 @@ private:
 };
 
 struct GlobalMemoryEntry {
-    constexpr explicit GlobalMemoryEntry(u32 cbuf_index, u32 cbuf_offset, bool is_read,
-                                         bool is_written)
-        : cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset}, is_read{is_read}, is_written{
-                                                                                  is_written} {}
+    constexpr explicit GlobalMemoryEntry(u32 cbuf_index_, u32 cbuf_offset_, bool is_read_,
+                                         bool is_written_)
+        : cbuf_index{cbuf_index_}, cbuf_offset{cbuf_offset_}, is_read{is_read_}, is_written{
+                                                                                     is_written_} {}
 
     u32 cbuf_index = 0;
     u32 cbuf_offset = 0;

@@ -258,9 +258,9 @@ constexpr u32 EncodeSwizzle(SwizzleSource x_source, SwizzleSource y_source, Swiz
 
 } // Anonymous namespace
 
-CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params,
-                             bool is_astc_supported)
-    : VideoCommon::SurfaceBase<View>(gpu_addr, params, is_astc_supported) {
+CachedSurface::CachedSurface(const GPUVAddr gpu_addr_, const SurfaceParams& params_,
+                             bool is_astc_supported_)
+    : SurfaceBase<View>{gpu_addr_, params_, is_astc_supported_} {
     if (is_converted) {
         internal_format = params.srgb_conversion ? GL_SRGB8_ALPHA8 : GL_RGBA8;
         format = GL_RGBA;
@@ -419,11 +419,11 @@ View CachedSurface::CreateViewInner(const ViewParams& view_key, const bool is_pr
     return view;
 }
 
-CachedSurfaceView::CachedSurfaceView(CachedSurface& surface, const ViewParams& params,
-                                     bool is_proxy)
-    : VideoCommon::ViewBase(params), surface{surface}, format{surface.internal_format},
-      target{GetTextureTarget(params.target)}, is_proxy{is_proxy} {
-    if (!is_proxy) {
+CachedSurfaceView::CachedSurfaceView(CachedSurface& surface_, const ViewParams& params_,
+                                     bool is_proxy_)
+    : ViewBase{params_}, surface{surface_}, format{surface_.internal_format},
+      target{GetTextureTarget(params_.target)}, is_proxy{is_proxy_} {
+    if (!is_proxy_) {
         main_view = CreateTextureView();
     }
 }
@@ -493,13 +493,13 @@ GLuint CachedSurfaceView::GetTexture(SwizzleSource x_source, SwizzleSource y_sou
 
     std::array swizzle{x_source, y_source, z_source, w_source};
 
-    switch (const PixelFormat format = GetSurfaceParams().pixel_format) {
+    switch (const PixelFormat pixel_format = GetSurfaceParams().pixel_format) {
     case PixelFormat::D24_UNORM_S8_UINT:
     case PixelFormat::D32_FLOAT_S8_UINT:
     case PixelFormat::S8_UINT_D24_UNORM:
         UNIMPLEMENTED_IF(x_source != SwizzleSource::R && x_source != SwizzleSource::G);
         glTextureParameteri(view.handle, GL_DEPTH_STENCIL_TEXTURE_MODE,
-                            GetComponent(format, x_source == SwizzleSource::R));
+                            GetComponent(pixel_format, x_source == SwizzleSource::R));
 
         // Make sure we sample the first component
         std::transform(swizzle.begin(), swizzle.end(), swizzle.begin(), [](SwizzleSource value) {
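The last texture-cache hunk covers a C++17-specific spot: a variable declared in a `switch` init-statement. `CachedSurfaceView` already has a `format` member, so `switch (const PixelFormat format = ...)` shadowed it for the whole statement; renaming the local to `pixel_format` resolves it. Sketch:

```cpp
class View {
public:
    int Describe(int source) {
        // A variable declared in the switch condition is visible in every
        // case label. Calling it `format` would shadow the member below
        // for the entire switch body.
        switch (const int pixel_format = source * 2) {
        case 0:
            return pixel_format;
        default:
            return pixel_format + format;
        }
    }

private:
    int format = 7; // member that the old local name collided with
};
```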
@@ -37,7 +37,8 @@ class CachedSurface final : public VideoCommon::SurfaceBase<View> {
     friend CachedSurfaceView;
 
 public:
-    explicit CachedSurface(GPUVAddr gpu_addr, const SurfaceParams& params, bool is_astc_supported);
+    explicit CachedSurface(GPUVAddr gpu_addr_, const SurfaceParams& params_,
+                           bool is_astc_supported_);
     ~CachedSurface();
 
     void UploadTexture(const std::vector<u8>& staging_buffer) override;
@@ -77,7 +78,7 @@ private:
 
 class CachedSurfaceView final : public VideoCommon::ViewBase {
 public:
-    explicit CachedSurfaceView(CachedSurface& surface, const ViewParams& params, bool is_proxy);
+    explicit CachedSurfaceView(CachedSurface& surface_, const ViewParams& params_, bool is_proxy_);
     ~CachedSurfaceView();
 
     /// @brief Attaches this texture view to the currently bound fb_target framebuffer

@@ -17,8 +17,8 @@ struct CommandPool::Pool {
     vk::CommandBuffers cmdbufs;
 };
 
-CommandPool::CommandPool(MasterSemaphore& master_semaphore, const VKDevice& device)
-    : ResourcePool(master_semaphore, COMMAND_BUFFER_POOL_SIZE), device{device} {}
+CommandPool::CommandPool(MasterSemaphore& master_semaphore, const VKDevice& device_)
+    : ResourcePool(master_semaphore, COMMAND_BUFFER_POOL_SIZE), device{device_} {}
 
 CommandPool::~CommandPool() = default;
 

@@ -17,7 +17,7 @@ class VKDevice;
 
 class CommandPool final : public ResourcePool {
 public:
-    explicit CommandPool(MasterSemaphore& master_semaphore, const VKDevice& device);
+    explicit CommandPool(MasterSemaphore& master_semaphore, const VKDevice& device_);
     ~CommandPool() override;
 
     void Allocate(size_t begin, size_t end) override;