[dynarmic] tag unicorn tests separately

Signed-off-by: lizzie <lizzie@eden-emu.dev>
This commit is contained in:
lizzie 2025-08-08 00:32:35 +01:00 committed by crueter
parent 9bfc62a7cb
commit 5dbecc45c7
7 changed files with 48 additions and 56 deletions

View file

@ -13,9 +13,9 @@
namespace Dynarmic::Backend::X64 { namespace Dynarmic::Backend::X64 {
// Our static vector will contain 32 elements, stt. an uint16_t will fill up 64 bytes // Our static vector will contain 32 elements, stt. an uint8_t will fill up 64 bytes
// (an entire cache line). Thanks. // (an entire cache line). Thanks.
enum class HostLoc : uint16_t { enum class HostLoc : std::uint8_t {
// Ordering of the registers is intentional. See also: HostLocToX64. // Ordering of the registers is intentional. See also: HostLocToX64.
RAX, RAX,
RCX, RCX,
@ -60,48 +60,48 @@ enum class HostLoc : uint16_t {
constexpr size_t NonSpillHostLocCount = static_cast<size_t>(HostLoc::FirstSpill); constexpr size_t NonSpillHostLocCount = static_cast<size_t>(HostLoc::FirstSpill);
inline bool HostLocIsGPR(HostLoc reg) { constexpr bool HostLocIsGPR(HostLoc reg) {
return reg >= HostLoc::RAX && reg <= HostLoc::R15; return reg >= HostLoc::RAX && reg <= HostLoc::R15;
} }
inline bool HostLocIsXMM(HostLoc reg) { constexpr bool HostLocIsXMM(HostLoc reg) {
return reg >= HostLoc::XMM0 && reg <= HostLoc::XMM15; return reg >= HostLoc::XMM0 && reg <= HostLoc::XMM15;
} }
inline bool HostLocIsRegister(HostLoc reg) { constexpr bool HostLocIsRegister(HostLoc reg) {
return HostLocIsGPR(reg) || HostLocIsXMM(reg); return HostLocIsGPR(reg) || HostLocIsXMM(reg);
} }
inline bool HostLocIsFlag(HostLoc reg) { constexpr bool HostLocIsFlag(HostLoc reg) {
return reg >= HostLoc::CF && reg <= HostLoc::OF; return reg >= HostLoc::CF && reg <= HostLoc::OF;
} }
inline HostLoc HostLocRegIdx(int idx) { constexpr HostLoc HostLocRegIdx(int idx) {
ASSERT(idx >= 0 && idx <= 15); ASSERT(idx >= 0 && idx <= 15);
return HostLoc(idx); return HostLoc(idx);
} }
inline HostLoc HostLocXmmIdx(int idx) { constexpr HostLoc HostLocXmmIdx(int idx) {
ASSERT(idx >= 0 && idx <= 15); ASSERT(idx >= 0 && idx <= 15);
return HostLoc(size_t(HostLoc::XMM0) + idx); return HostLoc(size_t(HostLoc::XMM0) + idx);
} }
inline HostLoc HostLocSpill(size_t i) { constexpr HostLoc HostLocSpill(size_t i) {
return HostLoc(size_t(HostLoc::FirstSpill) + i); return HostLoc(size_t(HostLoc::FirstSpill) + i);
} }
inline bool HostLocIsSpill(HostLoc reg) { constexpr bool HostLocIsSpill(HostLoc reg) {
return reg >= HostLoc::FirstSpill; return reg >= HostLoc::FirstSpill;
} }
inline size_t HostLocBitWidth(HostLoc loc) { constexpr size_t HostLocBitWidth(HostLoc loc) {
if (HostLocIsGPR(loc)) if (HostLocIsGPR(loc))
return 64; return 64;
if (HostLocIsXMM(loc)) else if (HostLocIsXMM(loc))
return 128; return 128;
if (HostLocIsSpill(loc)) else if (HostLocIsSpill(loc))
return 128; return 128;
if (HostLocIsFlag(loc)) else if (HostLocIsFlag(loc))
return 1; return 1;
UNREACHABLE(); UNREACHABLE();
} }

View file

@ -357,9 +357,8 @@ void RegAlloc::HostCall(IR::Inst* result_def,
static const boost::container::static_vector<HostLoc, 28> other_caller_save = [args_hostloc]() noexcept { static const boost::container::static_vector<HostLoc, 28> other_caller_save = [args_hostloc]() noexcept {
boost::container::static_vector<HostLoc, 28> ret(ABI_ALL_CALLER_SAVE.begin(), ABI_ALL_CALLER_SAVE.end()); boost::container::static_vector<HostLoc, 28> ret(ABI_ALL_CALLER_SAVE.begin(), ABI_ALL_CALLER_SAVE.end());
ret.erase(std::find(ret.begin(), ret.end(), ABI_RETURN)); ret.erase(std::find(ret.begin(), ret.end(), ABI_RETURN));
for (auto const hostloc : args_hostloc) { for (auto const hostloc : args_hostloc)
ret.erase(std::find(ret.begin(), ret.end(), hostloc)); ret.erase(std::find(ret.begin(), ret.end(), hostloc));
}
return ret; return ret;
}(); }();
@ -368,7 +367,7 @@ void RegAlloc::HostCall(IR::Inst* result_def,
DefineValueImpl(result_def, ABI_RETURN); DefineValueImpl(result_def, ABI_RETURN);
} }
for (size_t i = 0; i < args_count; i++) { for (size_t i = 0; i < args.size(); i++) {
if (args[i] && !args[i]->get().IsVoid()) { if (args[i] && !args[i]->get().IsVoid()) {
UseScratch(*args[i], args_hostloc[i]); UseScratch(*args[i], args_hostloc[i]);
// LLVM puts the burden of zero-extension of 8 and 16 bit values on the caller instead of the callee // LLVM puts the burden of zero-extension of 8 and 16 bit values on the caller instead of the callee
@ -383,36 +382,35 @@ void RegAlloc::HostCall(IR::Inst* result_def,
case IR::Type::U32: case IR::Type::U32:
code->mov(reg.cvt32(), reg.cvt32()); code->mov(reg.cvt32(), reg.cvt32());
break; break;
case IR::Type::U64:
break; //no op
default: default:
break; // Nothing needs to be done UNREACHABLE();
} }
} }
} }
for (size_t i = 0; i < args_count; i++) { for (size_t i = 0; i < args.size(); i++)
if (!args[i]) { if (!args[i]) {
// TODO: Force spill // TODO: Force spill
ScratchGpr(args_hostloc[i]); ScratchGpr(args_hostloc[i]);
} }
} for (auto const caller_saved : other_caller_save)
for (HostLoc caller_saved : other_caller_save) {
ScratchImpl({caller_saved}); ScratchImpl({caller_saved});
}
} }
void RegAlloc::AllocStackSpace(const size_t stack_space) noexcept { void RegAlloc::AllocStackSpace(const size_t stack_space) noexcept {
ASSERT(stack_space < static_cast<size_t>(std::numeric_limits<s32>::max())); ASSERT(stack_space < size_t(std::numeric_limits<s32>::max()));
ASSERT(reserved_stack_space == 0); ASSERT(reserved_stack_space == 0);
reserved_stack_space = stack_space; reserved_stack_space = stack_space;
code->sub(code->rsp, static_cast<u32>(stack_space)); code->sub(code->rsp, u32(stack_space));
} }
void RegAlloc::ReleaseStackSpace(const size_t stack_space) noexcept { void RegAlloc::ReleaseStackSpace(const size_t stack_space) noexcept {
ASSERT(stack_space < static_cast<size_t>(std::numeric_limits<s32>::max())); ASSERT(stack_space < size_t(std::numeric_limits<s32>::max()));
ASSERT(reserved_stack_space == stack_space); ASSERT(reserved_stack_space == stack_space);
reserved_stack_space = 0; reserved_stack_space = 0;
code->add(code->rsp, static_cast<u32>(stack_space)); code->add(code->rsp, u32(stack_space));
} }
HostLoc RegAlloc::SelectARegister(const boost::container::static_vector<HostLoc, 28>& desired_locations) const noexcept { HostLoc RegAlloc::SelectARegister(const boost::container::static_vector<HostLoc, 28>& desired_locations) const noexcept {
@ -494,7 +492,6 @@ void RegAlloc::DefineValueImpl(IR::Inst* def_inst, const IR::Value& use_inst) no
HostLoc RegAlloc::LoadImmediate(IR::Value imm, HostLoc host_loc) noexcept { HostLoc RegAlloc::LoadImmediate(IR::Value imm, HostLoc host_loc) noexcept {
ASSERT_MSG(imm.IsImmediate(), "imm is not an immediate"); ASSERT_MSG(imm.IsImmediate(), "imm is not an immediate");
if (HostLocIsGPR(host_loc)) { if (HostLocIsGPR(host_loc)) {
const Xbyak::Reg64 reg = HostLocToReg64(host_loc); const Xbyak::Reg64 reg = HostLocToReg64(host_loc);
const u64 imm_value = imm.GetImmediateAsU64(); const u64 imm_value = imm.GetImmediateAsU64();
@ -503,10 +500,7 @@ HostLoc RegAlloc::LoadImmediate(IR::Value imm, HostLoc host_loc) noexcept {
} else { } else {
code->mov(reg, imm_value); code->mov(reg, imm_value);
} }
return host_loc; } else if (HostLocIsXMM(host_loc)) {
}
if (HostLocIsXMM(host_loc)) {
const Xbyak::Xmm reg = HostLocToXmm(host_loc); const Xbyak::Xmm reg = HostLocToXmm(host_loc);
const u64 imm_value = imm.GetImmediateAsU64(); const u64 imm_value = imm.GetImmediateAsU64();
if (imm_value == 0) { if (imm_value == 0) {
@ -514,23 +508,19 @@ HostLoc RegAlloc::LoadImmediate(IR::Value imm, HostLoc host_loc) noexcept {
} else { } else {
MAYBE_AVX(movaps, reg, code->Const(code->xword, imm_value)); MAYBE_AVX(movaps, reg, code->Const(code->xword, imm_value));
} }
return host_loc; } else {
}
UNREACHABLE(); UNREACHABLE();
}
return host_loc;
} }
void RegAlloc::Move(HostLoc to, HostLoc from) noexcept { void RegAlloc::Move(HostLoc to, HostLoc from) noexcept {
const size_t bit_width = LocInfo(from).GetMaxBitWidth(); const size_t bit_width = LocInfo(from).GetMaxBitWidth();
ASSERT(LocInfo(to).IsEmpty() && !LocInfo(from).IsLocked()); ASSERT(LocInfo(to).IsEmpty() && !LocInfo(from).IsLocked());
ASSERT(bit_width <= HostLocBitWidth(to)); ASSERT(bit_width <= HostLocBitWidth(to));
ASSERT_MSG(!LocInfo(from).IsEmpty(), "Mov eliminated"); ASSERT_MSG(!LocInfo(from).IsEmpty(), "Mov eliminated");
if (!LocInfo(from).IsEmpty()) {
EmitMove(bit_width, to, from); EmitMove(bit_width, to, from);
LocInfo(to) = std::exchange(LocInfo(from), {}); LocInfo(to) = std::exchange(LocInfo(from), {});
}
} }
void RegAlloc::CopyToScratch(size_t bit_width, HostLoc to, HostLoc from) noexcept { void RegAlloc::CopyToScratch(size_t bit_width, HostLoc to, HostLoc from) noexcept {

View file

@ -264,7 +264,7 @@ private:
BlockOfCode* code = nullptr; BlockOfCode* code = nullptr;
size_t reserved_stack_space = 0; size_t reserved_stack_space = 0;
}; };
// Ensure a cache line is used, this is primordial // Ensure a cache line (or less) is used, this is primordial
static_assert(sizeof(boost::container::static_vector<HostLoc, 28>) == 64); static_assert(sizeof(boost::container::static_vector<HostLoc, 28>) == 40);
} // namespace Dynarmic::Backend::X64 } // namespace Dynarmic::Backend::X64

View file

@ -16,7 +16,7 @@
namespace Dynarmic::Backend::X64 { namespace Dynarmic::Backend::X64 {
enum class HostLoc : uint16_t; enum class HostLoc : std::uint8_t;
using Vector = std::array<u64, 2>; using Vector = std::array<u64, 2>;
#ifdef _MSC_VER #ifdef _MSC_VER

View file

@ -91,6 +91,9 @@ static u32 GenRandomInst(u64 pc, bool is_last_inst) {
"MSR_reg", "MSR_reg",
"MSR_imm", "MSR_imm",
"MRS", "MRS",
// Does not need test
"SVC",
"BRK"
}; };
for (const auto& [fn, bitstring] : list) { for (const auto& [fn, bitstring] : list) {
@ -200,7 +203,7 @@ static void RunTestInstance(Dynarmic::A64::Jit& jit, A64Unicorn& uni, A64TestEnv
jit_env.ticks_left = instructions.size(); jit_env.ticks_left = instructions.size();
CheckedRun([&]() { jit.Run(); }); CheckedRun([&]() { jit.Run(); });
uni_env.ticks_left = instructions.size(); uni_env.ticks_left = instructions.size() * 4;
uni.Run(); uni.Run();
SCOPE_FAIL { SCOPE_FAIL {
@ -296,7 +299,7 @@ static void RunTestInstance(Dynarmic::A64::Jit& jit, A64Unicorn& uni, A64TestEnv
return; return;
} }
REQUIRE(uni.GetPC() == jit.GetPC()); REQUIRE(uni.GetPC() + 4 == jit.GetPC());
REQUIRE(uni.GetRegisters() == jit.GetRegisters()); REQUIRE(uni.GetRegisters() == jit.GetRegisters());
REQUIRE(uni.GetVectors() == jit.GetVectors()); REQUIRE(uni.GetVectors() == jit.GetVectors());
REQUIRE(uni.GetSP() == jit.GetSP()); REQUIRE(uni.GetSP() == jit.GetSP());
@ -306,7 +309,7 @@ static void RunTestInstance(Dynarmic::A64::Jit& jit, A64Unicorn& uni, A64TestEnv
REQUIRE(FP::FPSR{uni.GetFpsr()}.QC() == FP::FPSR{jit.GetFpsr()}.QC()); REQUIRE(FP::FPSR{uni.GetFpsr()}.QC() == FP::FPSR{jit.GetFpsr()}.QC());
} }
TEST_CASE("A64: Single random instruction", "[a64]") { TEST_CASE("A64: Single random instruction", "[a64][unicorn]") {
A64TestEnv jit_env{}; A64TestEnv jit_env{};
A64TestEnv uni_env{}; A64TestEnv uni_env{};
@ -333,7 +336,7 @@ TEST_CASE("A64: Single random instruction", "[a64]") {
} }
} }
TEST_CASE("A64: Floating point instructions", "[a64]") { TEST_CASE("A64: Floating point instructions", "[a64][unicorn]") {
A64TestEnv jit_env{}; A64TestEnv jit_env{};
A64TestEnv uni_env{}; A64TestEnv uni_env{};
@ -458,7 +461,7 @@ TEST_CASE("A64: Floating point instructions", "[a64]") {
} }
} }
TEST_CASE("A64: Small random block", "[a64]") { TEST_CASE("A64: Small random block", "[a64][unicorn]") {
A64TestEnv jit_env{}; A64TestEnv jit_env{};
A64TestEnv uni_env{}; A64TestEnv uni_env{};
@ -493,7 +496,7 @@ TEST_CASE("A64: Small random block", "[a64]") {
} }
} }
TEST_CASE("A64: Large random block", "[a64]") { TEST_CASE("A64: Large random block", "[a64][unicorn]") {
A64TestEnv jit_env{}; A64TestEnv jit_env{};
A64TestEnv uni_env{}; A64TestEnv uni_env{};

View file

@ -13,7 +13,7 @@
using namespace Dynarmic; using namespace Dynarmic;
TEST_CASE("Unicorn: Sanity test", "[a64]") { TEST_CASE("Unicorn: Sanity test", "[a64][unicorn]") {
A64TestEnv env; A64TestEnv env;
env.code_mem.emplace_back(0x8b020020); // ADD X0, X1, X2 env.code_mem.emplace_back(0x8b020020); // ADD X0, X1, X2
@ -39,7 +39,7 @@ TEST_CASE("Unicorn: Sanity test", "[a64]") {
REQUIRE(unicorn.GetPC() == 4); REQUIRE(unicorn.GetPC() == 4);
} }
TEST_CASE("Unicorn: Ensure 0xFFFF'FFFF'FFFF'FFFF is readable", "[a64]") { TEST_CASE("Unicorn: Ensure 0xFFFF'FFFF'FFFF'FFFF is readable", "[a64][unicorn]") {
A64TestEnv env; A64TestEnv env;
env.code_mem.emplace_back(0x385fed99); // LDRB W25, [X12, #0xfffffffffffffffe]! env.code_mem.emplace_back(0x385fed99); // LDRB W25, [X12, #0xfffffffffffffffe]!
@ -59,7 +59,7 @@ TEST_CASE("Unicorn: Ensure 0xFFFF'FFFF'FFFF'FFFF is readable", "[a64]") {
REQUIRE(unicorn.GetPC() == 4); REQUIRE(unicorn.GetPC() == 4);
} }
TEST_CASE("Unicorn: Ensure is able to read across page boundaries", "[a64]") { TEST_CASE("Unicorn: Ensure is able to read across page boundaries", "[a64][unicorn]") {
A64TestEnv env; A64TestEnv env;
env.code_mem.emplace_back(0xb85f93d9); // LDUR W25, [X30, #0xfffffffffffffff9] env.code_mem.emplace_back(0xb85f93d9); // LDUR W25, [X30, #0xfffffffffffffff9]

View file

@ -28,11 +28,10 @@ public:
for (u64 page = page_start; page < page_end; ++page) { for (u64 page = page_start; page < page_end; ++page) {
int& value = page_table[page]; int& value = page_table[page];
value += delta; value += delta;
if (value < 0) {
throw std::logic_error{"negative page"};
}
if (value == 0) { if (value == 0) {
page_table.erase(page); page_table.erase(page);
} else if (value < 0) {
throw std::logic_error{"negative page"};
} }
} }
} }