[dynarmic] remove use of mcl reverse iterator

Signed-off-by: lizzie <lizzie@eden-emu.dev>
This commit is contained in:
lizzie 2025-09-09 03:28:29 +00:00 committed by crueter
parent 213a7fedd6
commit 15137d33f5
3 changed files with 33 additions and 42 deletions

View file

@ -764,7 +764,7 @@ void A64EmitX64::EmitPatchMovRcx(CodePtr target_code_ptr) {
target_code_ptr = code.GetReturnFromRunCodeAddress(); target_code_ptr = code.GetReturnFromRunCodeAddress();
} }
const CodePtr patch_location = code.getCurr(); const CodePtr patch_location = code.getCurr();
code.mov(code.rcx, reinterpret_cast<u64>(target_code_ptr)); code.mov(code.rcx, u64(target_code_ptr));
code.EnsurePatchLocationSize(patch_location, 10); code.EnsurePatchLocationSize(patch_location, 10);
} }

View file

@ -10,7 +10,6 @@
#include <algorithm> #include <algorithm>
#include <mcl/iterator/reverse.hpp>
#include "dynarmic/common/common_types.h" #include "dynarmic/common/common_types.h"
#include <xbyak/xbyak.h> #include <xbyak/xbyak.h>
@ -76,7 +75,8 @@ void ABI_PopRegistersAndAdjustStack(BlockOfCode& code, const size_t frame_size,
const FrameInfo frame_info = CalculateFrameInfo(num_gprs, num_xmms, frame_size); const FrameInfo frame_info = CalculateFrameInfo(num_gprs, num_xmms, frame_size);
size_t xmm_offset = frame_info.xmm_offset + (num_xmms * XMM_SIZE); size_t xmm_offset = frame_info.xmm_offset + (num_xmms * XMM_SIZE);
for (auto const xmm : mcl::iterator::reverse(regs)) { for (auto it = regs.rbegin(); it != regs.rend(); ++it) {
auto const xmm = *it;
if (HostLocIsXMM(xmm)) { if (HostLocIsXMM(xmm)) {
xmm_offset -= XMM_SIZE; xmm_offset -= XMM_SIZE;
if (code.HasHostFeature(HostFeature::AVX)) { if (code.HasHostFeature(HostFeature::AVX)) {
@ -88,9 +88,11 @@ void ABI_PopRegistersAndAdjustStack(BlockOfCode& code, const size_t frame_size,
} }
if (frame_info.stack_subtraction != 0) if (frame_info.stack_subtraction != 0)
code.add(rsp, u32(frame_info.stack_subtraction)); code.add(rsp, u32(frame_info.stack_subtraction));
for (auto const gpr : mcl::iterator::reverse(regs)) for (auto it = regs.rbegin(); it != regs.rend(); ++it) {
auto const gpr = *it;
if (HostLocIsGPR(gpr)) if (HostLocIsGPR(gpr))
code.pop(HostLocToReg64(gpr)); code.pop(HostLocToReg64(gpr));
}
} }
void ABI_PushCalleeSaveRegistersAndAdjustStack(BlockOfCode& code, const std::size_t frame_size) { void ABI_PushCalleeSaveRegistersAndAdjustStack(BlockOfCode& code, const std::size_t frame_size) {

View file

@ -28,7 +28,6 @@
#include "dynarmic/ir/type.h" #include "dynarmic/ir/type.h"
#include "mcl/bit/swap.hpp" #include "mcl/bit/swap.hpp"
#include "mcl/bit/rotate.hpp" #include "mcl/bit/rotate.hpp"
#include "mcl/iterator/reverse.hpp"
namespace Dynarmic::Optimization { namespace Dynarmic::Optimization {
@ -36,50 +35,42 @@ static void A32ConstantMemoryReads(IR::Block& block, A32::UserCallbacks* cb) {
for (auto& inst : block) { for (auto& inst : block) {
switch (inst.GetOpcode()) { switch (inst.GetOpcode()) {
case IR::Opcode::A32ReadMemory8: { case IR::Opcode::A32ReadMemory8: {
if (!inst.AreAllArgsImmediates()) { if (inst.AreAllArgsImmediates()) {
break; const u32 vaddr = inst.GetArg(1).GetU32();
} if (cb->IsReadOnlyMemory(vaddr)) {
const u8 value_from_memory = cb->MemoryRead8(vaddr);
const u32 vaddr = inst.GetArg(1).GetU32(); inst.ReplaceUsesWith(IR::Value{value_from_memory});
if (cb->IsReadOnlyMemory(vaddr)) { }
const u8 value_from_memory = cb->MemoryRead8(vaddr);
inst.ReplaceUsesWith(IR::Value{value_from_memory});
} }
break; break;
} }
case IR::Opcode::A32ReadMemory16: { case IR::Opcode::A32ReadMemory16: {
if (!inst.AreAllArgsImmediates()) { if (inst.AreAllArgsImmediates()) {
break; const u32 vaddr = inst.GetArg(1).GetU32();
} if (cb->IsReadOnlyMemory(vaddr)) {
const u16 value_from_memory = cb->MemoryRead16(vaddr);
const u32 vaddr = inst.GetArg(1).GetU32(); inst.ReplaceUsesWith(IR::Value{value_from_memory});
if (cb->IsReadOnlyMemory(vaddr)) { }
const u16 value_from_memory = cb->MemoryRead16(vaddr);
inst.ReplaceUsesWith(IR::Value{value_from_memory});
} }
break; break;
} }
case IR::Opcode::A32ReadMemory32: { case IR::Opcode::A32ReadMemory32: {
if (!inst.AreAllArgsImmediates()) { if (inst.AreAllArgsImmediates()) {
break; const u32 vaddr = inst.GetArg(1).GetU32();
} if (cb->IsReadOnlyMemory(vaddr)) {
const u32 value_from_memory = cb->MemoryRead32(vaddr);
const u32 vaddr = inst.GetArg(1).GetU32(); inst.ReplaceUsesWith(IR::Value{value_from_memory});
if (cb->IsReadOnlyMemory(vaddr)) { }
const u32 value_from_memory = cb->MemoryRead32(vaddr);
inst.ReplaceUsesWith(IR::Value{value_from_memory});
} }
break; break;
} }
case IR::Opcode::A32ReadMemory64: { case IR::Opcode::A32ReadMemory64: {
if (!inst.AreAllArgsImmediates()) { if (inst.AreAllArgsImmediates()) {
break; const u32 vaddr = inst.GetArg(1).GetU32();
} if (cb->IsReadOnlyMemory(vaddr)) {
const u64 value_from_memory = cb->MemoryRead64(vaddr);
const u32 vaddr = inst.GetArg(1).GetU32(); inst.ReplaceUsesWith(IR::Value{value_from_memory});
if (cb->IsReadOnlyMemory(vaddr)) { }
const u64 value_from_memory = cb->MemoryRead64(vaddr);
inst.ReplaceUsesWith(IR::Value{value_from_memory});
} }
break; break;
} }
@ -1205,11 +1196,9 @@ static void ConstantPropagation(IR::Block& block) {
// Dead code elimination: drops the results of instructions that are never
// consumed.
//
// We iterate over the instructions in reverse order. This is because
// invalidating an instruction reduces the number of uses of the earlier
// instructions that produced its arguments, which can in turn make those
// instructions dead as well.
static void DeadCodeElimination(IR::Block& block) {
    for (auto it = block.rbegin(); it != block.rend(); ++it) {
        // Only instructions with no remaining uses and no observable side
        // effects may be removed.
        if (!it->HasUses() && !MayHaveSideEffects(it->GetOpcode())) {
            it->Invalidate();
        }
    }
}
static void IdentityRemovalPass(IR::Block& block) { static void IdentityRemovalPass(IR::Block& block) {