forked from eden-emu/eden

[heap_tracker] revert use of ::map, use old rb tree

Signed-off-by: lizzie <lizzie@eden-emu.dev>

parent 0f5680ece9
commit adbb1789b1

3 changed files with 168 additions and 92 deletions
```diff
@@ -1,94 +1,112 @@
 // SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later
 // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include <fstream>
 #include <vector>
 
 #include "common/heap_tracker.h"
 #include "common/logging/log.h"
 #include "common/assert.h"
 
 namespace Common {
 
 namespace {
 
-s64 GetMaxPermissibleResidentMapCount() {
-    // Default value.
-    s64 value = 65530;
-
-    // Try to read how many mappings we can make.
-    std::ifstream s("/proc/sys/vm/max_map_count");
-    s >> value;
-
-    // Print, for debug.
-    LOG_INFO(HW_Memory, "Current maximum map count: {}", value);
-
-    // Allow 20000 maps for other code and to account for split inaccuracy.
-    return std::max<s64>(value - 20000, 0);
+inline s64 GetMaxPermissibleResidentMapCount() {
+    long int value = 65530;
+    std::unique_ptr<FILE, decltype(&fclose)> fp(fopen("/proc/sys/vm/max_map_count", "rt"), &fclose);
+    if (fp.get())
+        fscanf(fp.get(), "%li", &value);
+    LOG_INFO(HW_Memory, "Max map count: {}", value);
+    // Allow 32767 maps for other code and to account for split inaccuracy.
+    return std::max<s64>(s64(value) - 32767, 0);
 }
```
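An aside for context (not part of the diff): the budget exists because every permission change in the middle of a region splits one kernel VMA into two, and mmap/mprotect start failing with ENOMEM once the process reaches vm.max_map_count. A standalone Linux sketch of that failure mode:

```cpp
// Illustrative sketch only: drives a process toward vm.max_map_count by
// alternating page protections inside one large anonymous mapping. Every
// mprotect() that changes permissions mid-region splits a kernel VMA, so
// the call eventually fails with ENOMEM.
#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <sys/mman.h>

int main() {
    const std::size_t page = 4096;
    const std::size_t pages = std::size_t{1} << 17; // ~131k pages, past the default limit
    char* const base = static_cast<char*>(mmap(nullptr, pages * page, PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
    if (base == MAP_FAILED)
        return 1;
    for (std::size_t i = 0; i < pages; i += 2) {
        // Each successful call carves another VMA out of the big mapping.
        if (mprotect(base + i * page, page, PROT_READ) != 0) {
            std::printf("VMA limit hit after %zu splits: %s\n", i / 2, std::strerror(errno));
            break;
        }
    }
    return 0;
}
```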
```diff
 
 } // namespace
 
 HeapTracker::HeapTracker(Common::HostMemory& buffer)
     : m_buffer(buffer), m_max_resident_map_count(GetMaxPermissibleResidentMapCount()) {}
+HeapTracker::~HeapTracker() = default;
 
 void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
                       MemoryPermission perm, bool is_separate_heap) {
     bool rebuild_required = false;
 
     // When mapping other memory, map pages immediately.
     if (!is_separate_heap) {
         m_buffer.Map(virtual_offset, host_offset, length, perm, false);
         return;
     }
 
     {
-        // We are mapping part of a separate heap and insert into mappings.
+        // We are mapping part of a separate heap.
         std::scoped_lock lk{m_lock};
 
-        m_map_count++;
-        const auto it = m_mappings.insert_or_assign(virtual_offset, SeparateHeapMap{
+        auto* const map = new SeparateHeapMap{
+            .vaddr = virtual_offset,
             .paddr = host_offset,
             .size = length,
             .tick = m_tick++,
             .perm = perm,
             .is_resident = false,
-        });
+        };
+
+        // Insert into mappings.
+        m_map_count++;
+        const auto it = m_mappings.insert(*map);
 
         // Update tick before possible rebuild.
-        it.first->second.tick = m_tick++;
+        it->tick = m_tick++;
 
         // Check if we need to rebuild.
-        if (m_resident_map_count >= m_max_resident_map_count)
+        if (m_resident_map_count >= m_max_resident_map_count) {
             rebuild_required = true;
+        }
 
         // Map the area.
-        m_buffer.Map(it.first->first, it.first->second.paddr, it.first->second.size, it.first->second.perm, false);
+        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
 
         // This map is now resident.
-        it.first->second.is_resident = true;
+        it->is_resident = true;
         m_resident_map_count++;
-        m_resident_mappings.insert(*it.first);
+        m_resident_mappings.insert(*it);
     }
 
-    // A rebuild was required, so perform it now.
-    if (rebuild_required)
+    if (rebuild_required) {
+        // A rebuild was required, so perform it now.
         this->RebuildSeparateHeapAddressSpace();
+    }
 }
```
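The rewritten Map() stamps a fresh tick, maps eagerly, and only flags a rebuild once the resident count reaches the budget. A compact stand-in for that bookkeeping, with std::map in place of the intrusive tree (names here are illustrative, not the commit's API):

```cpp
// Minimal sketch of Map()'s bookkeeping, assuming the caller performs the
// eviction pass (the "rebuild") whenever this returns true.
#include <cstdint>
#include <map>

struct Entry {
    std::uint64_t tick{};
    bool is_resident{};
};

class ResidentBudget {
public:
    explicit ResidentBudget(std::int64_t max_resident) : m_max(max_resident) {}

    // Returns true when the caller should evict afterwards.
    bool MapAndCheckRebuild(std::uint64_t vaddr) {
        auto it = m_entries.try_emplace(vaddr).first;
        it->second.tick = m_tick++;    // update tick before possible rebuild
        const bool rebuild = m_resident >= m_max;
        it->second.is_resident = true; // the map is now resident
        ++m_resident;
        return rebuild;
    }

private:
    std::map<std::uint64_t, Entry> m_entries;
    std::uint64_t m_tick = 0;
    std::int64_t m_resident = 0;
    std::int64_t m_max;
};
```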
```diff
 
 void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
     // If this is a separate heap...
     if (is_separate_heap) {
         std::scoped_lock lk{m_lock};
 
+        const SeparateHeapMap key{
+            .vaddr = virtual_offset,
+        };
+
         // Split at the boundaries of the region we are removing.
         this->SplitHeapMapLocked(virtual_offset);
         this->SplitHeapMapLocked(virtual_offset + size);
 
         // Erase all mappings in range.
-        auto it = m_mappings.find(virtual_offset);
-        while (it != m_mappings.end() && it->first < virtual_offset + size) {
+        auto it = m_mappings.find(key);
+        while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
+            // Get underlying item.
+            auto* const item = std::addressof(*it);
+
             // If resident, erase from resident map.
-            if (it->second.is_resident) {
+            if (item->is_resident) {
                 ASSERT(--m_resident_map_count >= 0);
-                m_resident_mappings.erase(m_resident_mappings.find(it->first));
+                m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
             }
 
             // Erase from map.
             ASSERT(--m_map_count >= 0);
             it = m_mappings.erase(it);
+
+            // Free the item.
+            delete item;
         }
     }
 
     // Unmap pages.
     m_buffer.Unmap(virtual_offset, size, false);
 }
```
```diff
@@ -110,32 +128,41 @@ void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm)
 
     {
         std::scoped_lock lk2{m_lock};
 
+        const SeparateHeapMap key{
+            .vaddr = next,
+        };
+
         // Try to get the next mapping corresponding to this address.
-        const auto it = m_mappings.find(next);
+        const auto it = m_mappings.nfind(key);
 
         if (it == m_mappings.end()) {
             // There are no separate heap mappings remaining.
             next = end;
             should_protect = true;
-        } else if (it->first == cur) {
+        } else if (it->vaddr == cur) {
             // We are in range.
             // Update permission bits.
-            it->second.perm = perm;
+            it->perm = perm;
 
             // Determine next address and whether we should protect.
-            next = cur + it->second.size;
-            should_protect = it->second.is_resident;
+            next = cur + it->size;
+            should_protect = it->is_resident;
         } else /* if (it->vaddr > cur) */ {
             // We weren't in range, but there is a block coming up that will be.
-            next = it->first;
+            next = it->vaddr;
             should_protect = true;
         }
     }
 
     // Clamp to end.
     next = std::min(next, end);
 
     // Reprotect, if we need to.
-    if (should_protect)
+    if (should_protect) {
         m_buffer.Protect(cur, next - cur, perm);
+    }
 
     // Advance.
     cur = next;
 }
```
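Protect() advances a cursor across [virtual_offset, virtual_offset + size), reprotecting gaps eagerly and tracked maps only while they are resident; the tree's nfind appears to be a not-less ("nearest") lookup, analogous to std::map::lower_bound. A simplified stand-in for the walk (illustrative only, std::map instead of the intrusive tree):

```cpp
// Sketch of the interval walk Protect() performs.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <map>

struct Seg {
    std::size_t size;
    bool is_resident;
};

template <typename ProtectFn>
void WalkAndProtect(std::map<std::uintptr_t, Seg>& maps, std::uintptr_t cur,
                    std::uintptr_t end, ProtectFn&& protect) {
    while (cur < end) {
        std::uintptr_t next = end;
        bool should_protect = true;
        const auto it = maps.lower_bound(cur); // next mapping at or after cur
        if (it != maps.end()) {
            if (it->first == cur) {
                // Inside a tracked map: only reprotect if it is resident.
                next = cur + it->second.size;
                should_protect = it->second.is_resident;
            } else {
                // In a gap before the next tracked map: always protect it.
                next = it->first;
            }
        }
        next = std::min(next, end); // clamp to end
        if (should_protect) {
            protect(cur, next - cur);
        }
        cur = next; // advance
    }
}
```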
```diff
@@ -143,18 +170,23 @@ void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm)
 
 void HeapTracker::RebuildSeparateHeapAddressSpace() {
     std::scoped_lock lk{m_rebuild_lock, m_lock};
 
     ASSERT(!m_resident_mappings.empty());
 
     // Dump half of the mappings.
     //
     // Despite being worse in theory, this has proven to be better in practice than more
     // regularly dumping a smaller amount, because it significantly reduces average case
     // lock contention.
-    std::size_t const desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2;
-    std::size_t const evict_count = m_resident_map_count - desired_count;
+    const size_t desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2;
+    const size_t evict_count = m_resident_map_count - desired_count;
     auto it = m_resident_mappings.begin();
 
-    for (std::size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
+    for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
         // Unmark and unmap.
-        it->second.is_resident = false;
-        m_buffer.Unmap(it->first, it->second.size, false);
+        it->is_resident = false;
+        m_buffer.Unmap(it->vaddr, it->size, false);
 
         // Advance.
         ASSERT(--m_resident_map_count >= 0);
         it = m_resident_mappings.erase(it);
```
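The eviction policy above is deliberately coarse: drop down to half of the allowed budget in one pass, ordered by tick (oldest first), trading worst-case pause length for far less frequent lock acquisition. Restated with a std::multimap keyed by tick as a stand-in for the intrusive tick tree (illustrative only):

```cpp
// Sketch of "dump half of the mappings": walk the tick-ordered resident set
// from the oldest entry and unmap until only desired_count remain.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <map>

template <typename UnmapFn>
void EvictHalf(std::multimap<std::uint64_t, std::uintptr_t>& resident_by_tick,
               std::size_t max_resident, UnmapFn&& unmap) {
    const std::size_t desired_count = std::min(resident_by_tick.size(), max_resident) / 2;
    std::size_t evict_count = resident_by_tick.size() - desired_count;
    auto it = resident_by_tick.begin(); // smallest tick = least recently touched
    while (evict_count-- > 0 && it != resident_by_tick.end()) {
        unmap(it->second); // unmark and unmap
        it = resident_by_tick.erase(it);
    }
}
```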
```diff
@@ -163,30 +195,46 @@ void HeapTracker::RebuildSeparateHeapAddressSpace() {
 
 void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
     std::scoped_lock lk{m_lock};
 
     this->SplitHeapMapLocked(offset);
     this->SplitHeapMapLocked(offset + size);
 }
 
 void HeapTracker::SplitHeapMapLocked(VAddr offset) {
-    if (auto it = this->GetNearestHeapMapLocked(offset); it != m_mappings.end() && it->first != offset) {
-        // Adjust left iterator
-        auto const orig_size = it->second.size;
-        auto const left_size = offset - it->first;
-        it->second.size = left_size;
-        // Insert the new right map.
-        auto const right = SeparateHeapMap{
-            .paddr = it->second.paddr + left_size,
-            .size = orig_size - left_size,
-            .tick = it->second.tick,
-            .perm = it->second.perm,
-            .is_resident = it->second.is_resident,
-        };
-        m_map_count++;
-        auto rit = m_mappings.insert_or_assign(it->first + left_size, right);
-        if (rit.first->second.is_resident) {
-            m_resident_map_count++;
-            m_resident_mappings.insert(*rit.first);
+    const auto it = m_mappings.find(SeparateHeapMap{
+        .vaddr = offset,
+    });
+    if (it == m_mappings.end() || it->vaddr == offset) {
+        // Not contained or no split required.
+        return;
+    }
+
+    // Cache the original values.
+    auto* const left = std::addressof(*it);
+    const size_t orig_size = left->size;
+
+    // Adjust the left map.
+    const size_t left_size = offset - left->vaddr;
+    left->size = left_size;
+
+    // Create the new right map.
+    auto* const right = new SeparateHeapMap{
+        .vaddr = left->vaddr + left_size,
+        .paddr = left->paddr + left_size,
+        .size = orig_size - left_size,
+        .tick = left->tick,
+        .perm = left->perm,
+        .is_resident = left->is_resident,
+    };
+
+    // Insert the new right map.
+    m_map_count++;
+    m_mappings.insert(*right);
+
+    // If resident, also insert into resident map.
+    if (right->is_resident) {
+        m_resident_map_count++;
+        m_resident_mappings.insert(*right);
     }
 }
```
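The split keeps the virtual-to-physical offset relationship intact: the left piece keeps its start and shrinks to left_size, while the right piece begins at the cut with paddr advanced by the same amount. A worked example with hypothetical addresses:

```cpp
// Worked example of SplitHeapMapLocked()'s arithmetic (values hypothetical).
#include <cstddef>
#include <cstdint>

struct Piece {
    std::uintptr_t vaddr;
    std::uintptr_t paddr;
    std::size_t size;
};

constexpr Piece SplitRight(const Piece& m, std::uintptr_t offset) {
    const std::size_t left_size = offset - m.vaddr; // e.g. 0x3000 - 0x1000 = 0x2000
    return Piece{
        .vaddr = m.vaddr + left_size, // right piece starts at the cut
        .paddr = m.paddr + left_size, // physical offset advances in lockstep
        .size = m.size - left_size,   // remainder of the original span
    };
}

// Splitting [0x1000, 0x6000) at 0x3000 yields a right piece [0x3000, 0x6000).
static_assert(SplitRight({0x1000, 0x9000, 0x5000}, 0x3000).vaddr == 0x3000);
static_assert(SplitRight({0x1000, 0x9000, 0x5000}, 0x3000).paddr == 0xB000);
static_assert(SplitRight({0x1000, 0x9000, 0x5000}, 0x3000).size == 0x3000);
```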
```diff
@@ -1,54 +1,82 @@
 // SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later
 // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #pragma once
 
 #include <atomic>
 #include <mutex>
 #include <set>
 #include <shared_mutex>
-#include <boost/container/map.hpp>
 
 #include "common/host_memory.h"
+#include "common/intrusive_red_black_tree.h"
 
 namespace Common {
 
 struct SeparateHeapMap {
-    PAddr paddr{}; //8
-    std::size_t size{}; //8 (16)
-    std::size_t tick{}; //8 (24)
-    // 4 bits needed, sync with host_memory.h if needed
-    MemoryPermission perm : 4 = MemoryPermission::Read;
-    bool is_resident : 1 = false;
+    Common::IntrusiveRedBlackTreeNode addr_node{};
+    Common::IntrusiveRedBlackTreeNode tick_node{};
+    VAddr vaddr{};
+    PAddr paddr{};
+    size_t size{};
+    size_t tick{};
+    MemoryPermission perm{};
+    bool is_resident{};
 };
-static_assert(sizeof(SeparateHeapMap) == 32); //half a cache line! good for coherency
+
+struct SeparateHeapMapAddrComparator {
+    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+        if (lhs.vaddr < rhs.vaddr) {
+            return -1;
+        } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
+            return 0;
+        } else {
+            return 1;
+        }
+    }
+};
+
+struct SeparateHeapMapTickComparator {
+    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+        if (lhs.tick < rhs.tick) {
+            return -1;
+        } else if (lhs.tick > rhs.tick) {
+            return 1;
+        } else {
+            return SeparateHeapMapAddrComparator::Compare(lhs, rhs);
+        }
+    }
+};
```
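Note what the address comparator's middle case buys: "equal" means containment, so probing the tree with a key that carries only a vaddr (as SplitHeapMapLocked and Unmap do) finds the mapping whose range [vaddr, vaddr + size) covers it. The three cases, checked at compile time (standalone restatement, illustrative):

```cpp
// Restatement of SeparateHeapMapAddrComparator::Compare: a probe address
// compares "equal" to a mapping whenever it falls inside the range.
#include <cstddef>
#include <cstdint>

constexpr int CompareAddr(std::uintptr_t lhs_vaddr, std::uintptr_t rhs_vaddr,
                          std::size_t rhs_size) {
    if (lhs_vaddr < rhs_vaddr) {
        return -1; // probe lies before the mapping
    } else if (lhs_vaddr <= rhs_vaddr + rhs_size - 1) {
        return 0;  // probe lies inside [rhs_vaddr, rhs_vaddr + rhs_size)
    } else {
        return 1;  // probe lies past the end
    }
}

static_assert(CompareAddr(0x0FFF, 0x1000, 0x2000) == -1);
static_assert(CompareAddr(0x1000, 0x1000, 0x2000) == 0); // first byte
static_assert(CompareAddr(0x2FFF, 0x1000, 0x2000) == 0); // last byte
static_assert(CompareAddr(0x3000, 0x1000, 0x2000) == 1); // one past the end
```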
```diff
 
 class HeapTracker {
 public:
     explicit HeapTracker(Common::HostMemory& buffer);
-    ~HeapTracker() = default;
-    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm, bool is_separate_heap);
+    ~HeapTracker();
+
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm,
+             bool is_separate_heap);
     void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);
     void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);
     inline u8* VirtualBasePointer() noexcept {
         return m_buffer.VirtualBasePointer();
     }
 
 private:
     // TODO: You may want to "fake-map" the first 2GB of 64-bit address space
     // and dedicate it entirely to a recursive PTE mapping :)
     // However Ankerl would be way better than using an RB tree, in all senses - but
     // there is a strict requirement for ordering to be imposed across the map itself
     // which is not achievable with the unordered property.
-    using AddrTree = boost::container::map<VAddr, SeparateHeapMap>;
-    AddrTree m_mappings;
-    using TicksTree = boost::container::map<VAddr, SeparateHeapMap>;
-    TicksTree m_resident_mappings;
+    using AddrTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>;
+    using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>;
+
+    using TickTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>;
+    using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>;
+
+    AddrTree m_mappings{};
+    TickTree m_resident_mappings{};
 
 private:
     void SplitHeapMap(VAddr offset, size_t size);
     void SplitHeapMapLocked(VAddr offset);
     void RebuildSeparateHeapAddressSpace();
-    inline HeapTracker::AddrTree::iterator GetNearestHeapMapLocked(VAddr offset) noexcept {
-        return m_mappings.find(offset);
-    }
 
 private:
     Common::HostMemory& m_buffer;
     const s64 m_max_resident_map_count;
```
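On the TODO above: the ordering requirement is load-bearing. Unmap() erases every mapping in [base, base + size) and Protect() walks forward from a cursor; both need in-order iteration from a lower bound, which an unordered (Ankerl-style) hash map cannot provide. A minimal sketch of the pattern, with std::map as the ordered stand-in:

```cpp
// Ordered erase-in-range: the operation that rules out an unordered map.
#include <cstddef>
#include <cstdint>
#include <map>

void EraseRange(std::map<std::uintptr_t, std::size_t>& maps, std::uintptr_t base,
                std::size_t size) {
    // lower_bound gives the first mapping at or after base; iteration then
    // visits candidates in address order until we leave the range.
    for (auto it = maps.lower_bound(base); it != maps.end() && it->first < base + size;) {
        it = maps.erase(it);
    }
}
```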
```diff
@@ -60,7 +60,7 @@ struct Memory::Impl {
         current_page_table->fastmem_arena = nullptr;
     }
 
-#ifdef __linux__
+#ifdef __unix__
     buffer.emplace(system.DeviceMemory().buffer);
 #else
     buffer = std::addressof(system.DeviceMemory().buffer);
@@ -1023,7 +1023,7 @@ struct Memory::Impl {
     std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
     std::mutex sys_core_guard;
 
-#ifdef __linux__
+#ifdef __unix__
     std::optional<Common::HeapTracker> buffer;
 #else
     Common::HostMemory* buffer{};
```