[dynarmic] enforce higher constraints
All checks were successful
eden-license / license-header (pull_request) Successful in 18s

Signed-off-by: lizzie <lizzie@eden-emu.dev>
This commit is contained in:
lizzie 2025-09-23 03:18:07 +00:00
parent a31ba9555d
commit 6c448cb1c8
Signed by: Lizzie
GPG key ID: 00287378CADCAB13

View file

@@ -273,34 +273,31 @@ void AxxEmitX64::EmitExclusiveWriteMemory(AxxEmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst); auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const bool ordered = IsOrdered(args[3].GetImmediateAccType()); const bool ordered = IsOrdered(args[3].GetImmediateAccType());
if constexpr (bitsize != 128) { if constexpr (bitsize == 128) {
ctx.reg_alloc.HostCall(inst, {}, args[1], args[2]);
} else {
ctx.reg_alloc.Use(args[1], ABI_PARAM2); ctx.reg_alloc.Use(args[1], ABI_PARAM2);
ctx.reg_alloc.Use(args[2], HostLoc::XMM1); ctx.reg_alloc.Use(args[2], HostLoc::XMM1);
ctx.reg_alloc.EndOfAllocScope(); ctx.reg_alloc.EndOfAllocScope();
ctx.reg_alloc.HostCall(inst); ctx.reg_alloc.HostCall(inst);
} else {
ctx.reg_alloc.HostCall(inst, {}, args[1], args[2]);
} }
const Xbyak::Reg64 tmp = ctx.reg_alloc.ScratchGpr();
Xbyak::Label end; Xbyak::Label end;
code.mov(code.ABI_RETURN, u32(1)); code.mov(code.ABI_RETURN, u32(1));
code.cmp(code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)], u8(0)); code.movzx(tmp.cvt32(), code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)]);
code.test(tmp.cvt8(), tmp.cvt8());
code.je(end); code.je(end);
code.mov(code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)], u8(0)); code.xor_(tmp.cvt32(), tmp.cvt32());
code.mov(code.ABI_PARAM1, reinterpret_cast<u64>(&conf)); code.xchg(tmp.cvt8(), code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)]);
code.mov(code.ABI_PARAM1, u64(&conf));
if constexpr (bitsize != 128) { if constexpr (bitsize != 128) {
using T = mcl::unsigned_integer_of_size<bitsize>; using T = mcl::unsigned_integer_of_size<bitsize>;
code.CallLambda([](AxxUserConfig& conf, Axx::VAddr vaddr, T value) -> u32 {
code.CallLambda( return conf.global_monitor->DoExclusiveOperation<T>(conf.processor_id, vaddr, [&](T expected) -> bool {
[](AxxUserConfig& conf, Axx::VAddr vaddr, T value) -> u32 { return (conf.callbacks->*callback)(vaddr, value, expected);
return conf.global_monitor->DoExclusiveOperation<T>(conf.processor_id, vaddr, }) ? 0 : 1;
[&](T expected) -> bool { });
return (conf.callbacks->*callback)(vaddr, value, expected);
})
? 0
: 1;
});
if (ordered) { if (ordered) {
code.mfence(); code.mfence();
} }
@@ -308,15 +305,11 @@ void AxxEmitX64::EmitExclusiveWriteMemory(AxxEmitContext& ctx, IR::Inst* inst) {
ctx.reg_alloc.AllocStackSpace(16 + ABI_SHADOW_SPACE); ctx.reg_alloc.AllocStackSpace(16 + ABI_SHADOW_SPACE);
code.lea(code.ABI_PARAM3, ptr[rsp + ABI_SHADOW_SPACE]); code.lea(code.ABI_PARAM3, ptr[rsp + ABI_SHADOW_SPACE]);
code.movaps(xword[code.ABI_PARAM3], xmm1); code.movaps(xword[code.ABI_PARAM3], xmm1);
code.CallLambda( code.CallLambda([](AxxUserConfig& conf, Axx::VAddr vaddr, Vector& value) -> u32 {
[](AxxUserConfig& conf, Axx::VAddr vaddr, Vector& value) -> u32 { return conf.global_monitor->DoExclusiveOperation<Vector>(conf.processor_id, vaddr, [&](Vector expected) -> bool {
return conf.global_monitor->DoExclusiveOperation<Vector>(conf.processor_id, vaddr, return (conf.callbacks->*callback)(vaddr, value, expected);
[&](Vector expected) -> bool { }) ? 0 : 1;
return (conf.callbacks->*callback)(vaddr, value, expected); });
})
? 0
: 1;
});
if (ordered) { if (ordered) {
code.mfence(); code.mfence();
} }
@@ -437,10 +430,11 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
SharedLabel end = GenSharedLabel(); SharedLabel end = GenSharedLabel();
code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
code.mov(status, u32(1)); code.mov(status, u32(1));
code.cmp(code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)], u8(0)); code.movzx(tmp.cvt32(), code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)]);
code.test(tmp.cvt8(), tmp.cvt8());
code.je(*end, code.T_NEAR); code.je(*end, code.T_NEAR);
code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
code.cmp(qword[tmp], vaddr); code.cmp(qword[tmp], vaddr);
code.jne(*end, code.T_NEAR); code.jne(*end, code.T_NEAR);
@ -474,30 +468,29 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
const auto location = code.getCurr(); const auto location = code.getCurr();
if constexpr (bitsize == 128) { switch (bitsize) {
case 8:
code.lock();
code.cmpxchg(code.byte[dest_ptr], value.cvt8());
break;
case 16:
code.lock();
code.cmpxchg(word[dest_ptr], value.cvt16());
break;
case 32:
code.lock();
code.cmpxchg(dword[dest_ptr], value.cvt32());
break;
case 64:
code.lock();
code.cmpxchg(qword[dest_ptr], value.cvt64());
break;
case 128:
code.lock(); code.lock();
code.cmpxchg16b(ptr[dest_ptr]); code.cmpxchg16b(ptr[dest_ptr]);
} else { break;
switch (bitsize) { default:
case 8: UNREACHABLE();
code.lock();
code.cmpxchg(code.byte[dest_ptr], value.cvt8());
break;
case 16:
code.lock();
code.cmpxchg(word[dest_ptr], value.cvt16());
break;
case 32:
code.lock();
code.cmpxchg(dword[dest_ptr], value.cvt32());
break;
case 64:
code.lock();
code.cmpxchg(qword[dest_ptr], value.cvt64());
break;
default:
UNREACHABLE();
}
} }
code.setnz(status.cvt8()); code.setnz(status.cvt8());