[core/memory] Remove deferred heap allocation on Linux. #2587
8 changed files with 29 additions and 150 deletions
src/common/heap_tracker.cpp

@@ -1,13 +1,13 @@
 // SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later
 
 // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include <fstream>
+#include <vector>
 
 #include "common/heap_tracker.h"
 #include "common/logging/log.h"
-#include "common/assert.h"
 
 namespace Common {
@@ -36,6 +36,8 @@ HeapTracker::~HeapTracker() = default;
 
 void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
                       MemoryPermission perm, bool is_separate_heap) {
+    bool rebuild_required = false;
+
     // When mapping other memory, map pages immediately.
     if (!is_separate_heap) {
         m_buffer.Map(virtual_offset, host_offset, length, perm, false);
@@ -57,11 +59,29 @@ void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
 
         // Insert into mappings.
         m_map_count++;
-        m_mappings.insert(*map);
+        const auto it = m_mappings.insert(*map);
+
+        // Update tick before possible rebuild.
+        it->tick = m_tick++;
+
+        // Check if we need to rebuild.
+        if (m_resident_map_count >= m_max_resident_map_count) {
+            rebuild_required = true;
+        }
+
+        // Map the area.
+        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
+
+        // This map is now resident.
+        it->is_resident = true;
+        m_resident_map_count++;
+        m_resident_mappings.insert(*it);
     }
 
-    // Finally, map.
-    this->DeferredMapSeparateHeap(virtual_offset);
+    if (rebuild_required) {
+        // A rebuild was required, so perform it now.
+        this->RebuildSeparateHeapAddressSpace();
+    }
 }
 
 void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
@@ -147,7 +167,8 @@ void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission p
     }
 
     // Clamp to end.
-    next = (std::min)(next, end);
+    next = std::min(next, end);
+
     // Reprotect, if we need to.
     if (should_protect) {
         m_buffer.Protect(cur, next - cur, perm);
@@ -158,51 +179,6 @@ void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission p
     }
 }
 
-bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
-    if (m_buffer.IsInVirtualRange(fault_address)) {
-        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
-    }
-
-    return false;
-}
-
-bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
-    bool rebuild_required = false;
-
-    {
-        std::scoped_lock lk{m_lock};
-
-        // Check to ensure this was a non-resident separate heap mapping.
-        const auto it = this->GetNearestHeapMapLocked(virtual_offset);
-        if (it == m_mappings.end() || it->is_resident) {
-            return false;
-        }
-
-        // Update tick before possible rebuild.
-        it->tick = m_tick++;
-
-        // Check if we need to rebuild.
-        if (m_resident_map_count > m_max_resident_map_count) {
-            rebuild_required = true;
-        }
-
-        // Map the area.
-        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
-
-        // This map is now resident.
-        it->is_resident = true;
-        m_resident_map_count++;
-        m_resident_mappings.insert(*it);
-    }
-
-    if (rebuild_required) {
-        // A rebuild was required, so perform it now.
-        this->RebuildSeparateHeapAddressSpace();
-    }
-
-    return true;
-}
-
 void HeapTracker::RebuildSeparateHeapAddressSpace() {
     std::scoped_lock lk{m_rebuild_lock, m_lock};
 
@@ -213,8 +189,8 @@ void HeapTracker::RebuildSeparateHeapAddressSpace() {
     // Despite being worse in theory, this has proven to be better in practice than more
     // regularly dumping a smaller amount, because it significantly reduces average case
     // lock contention.
-    std::size_t const desired_count = (std::min)(m_resident_map_count, m_max_resident_map_count) / 2;
-    std::size_t const evict_count = m_resident_map_count - desired_count;
+    const size_t desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2;
+    const size_t evict_count = m_resident_map_count - desired_count;
     auto it = m_resident_mappings.begin();
 
     for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
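Net effect in HeapTracker::Map: separate-heap mappings are now made resident eagerly, under the same lock that inserts them, instead of waiting for a SIGSEGV to fault them in; the tick and eviction bookkeeping moves with them. A minimal standalone sketch of that policy follows, under simplified assumptions: the real tracker uses intrusive trees and actually maps host memory, while `EagerTracker`, `Mapping`, and the `std::map` keyed by tick here are illustrative only.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>

// Simplified stand-in for a tracked host mapping. A std::map keyed by the
// eviction tick is enough to show the policy: the smallest key is the
// least-recently-mapped entry.
struct Mapping {
    std::uintptr_t vaddr;
    std::size_t size;
};

class EagerTracker {
public:
    explicit EagerTracker(std::size_t max_resident) : m_max_resident{max_resident} {}

    void Map(std::uintptr_t vaddr, std::size_t size) {
        // Eagerly record the region, stamping it with a monotonically
        // increasing tick so the oldest mappings are evicted first.
        const bool rebuild_required = m_resident.size() >= m_max_resident;
        m_resident.emplace(m_tick++, Mapping{vaddr, size});

        // Past the residency cap, dump the oldest half in one batch.
        if (rebuild_required) {
            Rebuild();
        }
    }

    std::size_t ResidentCount() const { return m_resident.size(); }

private:
    void Rebuild() {
        const std::size_t desired = std::min(m_resident.size(), m_max_resident) / 2;
        while (m_resident.size() > desired) {
            // std::map iterates in key order, so begin() is the oldest tick.
            m_resident.erase(m_resident.begin());
        }
    }

    std::map<std::uint64_t, Mapping> m_resident;
    std::uint64_t m_tick{};
    std::size_t m_max_resident;
};

int main() {
    EagerTracker tracker{8};
    for (std::uintptr_t i = 0; i < 20; ++i) {
        tracker.Map(i * 0x1000, 0x1000);
    }
    std::cout << "resident mappings: " << tracker.ResidentCount() << '\n';
}
```

Evicting down to half the cap in one pass matches the comment retained in RebuildSeparateHeapAddressSpace: fewer, larger eviction batches keep average-case lock contention low.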
src/core/CMakeLists.txt

@@ -1229,7 +1229,6 @@ endif()
 
 if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
     target_sources(core PRIVATE
-        arm/dynarmic/arm_dynarmic.cpp
         arm/dynarmic/arm_dynarmic.h
         arm/dynarmic/arm_dynarmic_64.cpp
         arm/dynarmic/arm_dynarmic_64.h
src/core/arm/dynarmic/arm_dynarmic.cpp (deleted)

@@ -1,49 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#ifdef __linux__
-
-#include "common/signal_chain.h"
-
-#include "core/arm/dynarmic/arm_dynarmic.h"
-#include "core/hle/kernel/k_process.h"
-#include "core/memory.h"
-
-namespace Core {
-
-namespace {
-
-thread_local Core::Memory::Memory* g_current_memory{};
-std::once_flag g_registered{};
-struct sigaction g_old_segv {};
-
-void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
-    if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) {
-        return;
-    }
-
-    return g_old_segv.sa_sigaction(sig, info, ctx);
-}
-
-} // namespace
-
-ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) {
-    g_current_memory = std::addressof(process->GetMemory());
-}
-
-ScopedJitExecution::~ScopedJitExecution() {
-    g_current_memory = nullptr;
-}
-
-void ScopedJitExecution::RegisterHandler() {
-    std::call_once(g_registered, [] {
-        struct sigaction sa {};
-        sa.sa_sigaction = &HandleSigSegv;
-        sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
-        Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv));
-    });
-}
-
-} // namespace Core
-
-#endif
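The deleted translation unit was the Linux-only SIGSEGV hook: a thread_local pointer published the current process memory, and faults it could not service were chained to the previously installed handler. For reference, the chaining pattern looks roughly like this. This is a standalone POSIX sketch, not Eden code; `TryHandleFault` stands in for the deleted call to `Memory::InvalidateSeparateHeap`, and the forwarding logic is slightly more defensive than the original.

```cpp
#include <csignal>
#include <cstdio>
#include <cstdlib>

namespace {

struct sigaction g_old_segv{};

bool TryHandleFault(void* fault_address) {
    // Placeholder: the deleted code asked the emulated memory subsystem
    // whether this address was a deferred separate-heap page.
    (void)fault_address;
    return false;
}

void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
    if (TryHandleFault(info->si_addr)) {
        return; // Fault serviced; resume the faulting instruction.
    }

    // Not ours: forward to whoever was registered before us.
    if (g_old_segv.sa_flags & SA_SIGINFO) {
        g_old_segv.sa_sigaction(sig, info, ctx);
    } else if (g_old_segv.sa_handler == SIG_DFL) {
        // Restore the default action and re-raise so the process dies
        // with the expected signal disposition.
        std::signal(sig, SIG_DFL);
        std::raise(sig);
    } else if (g_old_segv.sa_handler != SIG_IGN) {
        g_old_segv.sa_handler(sig);
    }
}

} // namespace

int main() {
    struct sigaction sa{};
    sa.sa_sigaction = &HandleSigSegv;
    sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
    sigaction(SIGSEGV, &sa, &g_old_segv);
    std::puts("handler installed");
}
```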
src/core/arm/dynarmic/arm_dynarmic.h

@@ -26,24 +26,4 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) {
     return static_cast<HaltReason>(hr);
 }
 
-#ifdef __linux__
-
-class ScopedJitExecution {
-public:
-    explicit ScopedJitExecution(Kernel::KProcess* process);
-    ~ScopedJitExecution();
-    static void RegisterHandler();
-};
-
-#else
-
-class ScopedJitExecution {
-public:
-    explicit ScopedJitExecution(Kernel::KProcess* process) {}
-    ~ScopedJitExecution() {}
-    static void RegisterHandler() {}
-};
-
-#endif
-
 } // namespace Core
src/core/arm/dynarmic/arm_dynarmic_32.cpp

@@ -343,15 +343,11 @@ bool ArmDynarmic32::IsInThumbMode() const {
 }
 
 HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) {
-    ScopedJitExecution sj(thread->GetOwnerProcess());
-
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Run());
 }
 
 HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) {
-    ScopedJitExecution sj(thread->GetOwnerProcess());
-
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Step());
 }
@@ -393,7 +389,6 @@ ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProc
       m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
     auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
     m_jit = MakeJit(&page_table_impl);
-    ScopedJitExecution::RegisterHandler();
 }
 
 ArmDynarmic32::~ArmDynarmic32() = default;
src/core/arm/dynarmic/arm_dynarmic_64.cpp

@@ -374,15 +374,11 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
 }
 
 HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
-    ScopedJitExecution sj(thread->GetOwnerProcess());
-
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Run());
 }
 
 HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) {
-    ScopedJitExecution sj(thread->GetOwnerProcess());
-
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Step());
 }
@@ -422,7 +418,6 @@ ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProc
     auto& page_table = process->GetPageTable().GetBasePageTable();
     auto& page_table_impl = page_table.GetImpl();
     m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
-    ScopedJitExecution::RegisterHandler();
 }
 
 ArmDynarmic64::~ArmDynarmic64() = default;
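Both JIT backends lose the per-slice guard here. ScopedJitExecution existed only to publish the owning process's memory to the signal handler for the duration of a Run or Step; with no handler left, there is nothing to publish. The underlying idiom, scoped publication through a thread_local, is just this (a sketch with illustrative names, not the deleted class):

```cpp
#include <cassert>

// Stand-in for the state the handler needed; in the deleted code this was
// a Core::Memory::Memory* naming the process whose heap might fault.
struct MemoryContext {
    int id;
};

namespace {
thread_local MemoryContext* g_current_context = nullptr;
} // namespace

// RAII guard: publish on entry, clear on exit, mirroring the deleted
// ScopedJitExecution.
class ScopedPublication {
public:
    explicit ScopedPublication(MemoryContext* ctx) { g_current_context = ctx; }
    ~ScopedPublication() { g_current_context = nullptr; }
};

int main() {
    MemoryContext ctx{42};
    {
        ScopedPublication scope{&ctx};
        assert(g_current_context == &ctx); // visible while the scope lives
    }
    assert(g_current_context == nullptr); // cleared on scope exit
}
```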
src/core/memory.cpp

@@ -1230,22 +1230,7 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
     if (rasterizer) {
         impl->InvalidateGPUMemory(ptr, size);
     }
 
-#ifdef __linux__
-    if (!rasterizer && mapped) {
-        impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
-    }
-#endif
-
     return mapped && ptr != nullptr;
 }
-
-bool Memory::InvalidateSeparateHeap(void* fault_address) {
-#ifdef __linux__
-    return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
-#else
-    return false;
-#endif
-}
-
 } // namespace Core::Memory
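Memory::InvalidateSeparateHeap was the bridge the signal handler called into, so it goes away with the handler. The general technique it served, reserving address space and committing pages on first touch from a fault handler, can be shown in isolation. This is a hedged Linux-only sketch, unrelated to Eden's actual buffer code; note that mprotect and sysconf are not formally async-signal-safe, which is part of why the eager scheme above is easier to reason about.

```cpp
#include <csignal>
#include <cstddef>
#include <cstdio>
#include <sys/mman.h>
#include <unistd.h>

namespace {

char* g_region = nullptr;
std::size_t g_region_size = 0;

void HandleSigSegv(int, siginfo_t* info, void*) {
    char* addr = static_cast<char*>(info->si_addr);
    if (addr < g_region || addr >= g_region + g_region_size) {
        _exit(1); // Not our reservation; bail out.
    }

    // Commit the faulting page; returning retries the faulting instruction.
    const long page = sysconf(_SC_PAGESIZE);
    char* page_base = g_region + ((addr - g_region) / page) * page;
    mprotect(page_base, page, PROT_READ | PROT_WRITE);
}

} // namespace

int main() {
    g_region_size = 1 << 20;
    // Reserve address space with no access rights; any touch faults.
    g_region = static_cast<char*>(
        mmap(nullptr, g_region_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));

    struct sigaction sa{};
    sa.sa_sigaction = &HandleSigSegv;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, nullptr);

    g_region[0x1234] = 7; // Faults once, page is committed, store retries.
    std::printf("value: %d\n", g_region[0x1234]);
    return 0;
}
```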
src/core/memory.h

@@ -492,8 +492,6 @@ public:
 
     bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
 
-    bool InvalidateSeparateHeap(void* fault_address);
-
 private:
     Core::System& system;
 