// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <mutex>
#include <shared_mutex>
#include <boost/container/map.hpp>
#include "common/host_memory.h"
namespace Common {
// Bookkeeping record for one tracked mapping. Used as the mapped type of both
// HeapTracker trees below (the key lives in the tree, not in the record).
// Deliberately packed into 32 bytes -- enforced by the static_assert below.
struct SeparateHeapMap {
    // Host/physical offset this virtual region is backed by.
    PAddr paddr{}; //8
    // Length of the mapping in bytes.
    std::size_t size{}; //8 (16)
    // Monotonic stamp (see HeapTracker::m_tick); presumably orders resident
    // mappings for eviction -- TODO confirm against heap_tracker.cpp.
    std::size_t tick{}; //8 (24)
    // 4 bits needed, sync with host_memory.h if needed
    MemoryPermission perm : 4 = MemoryPermission::Read;
    // Presumably: whether this mapping is currently host-resident (it also
    // appears in m_resident_mappings) -- verify against heap_tracker.cpp.
    bool is_resident : 1 = false;
};
static_assert(sizeof(SeparateHeapMap) == 32); //half a cache line! good for coherency
// Tracks guest virtual mappings layered on top of a Common::HostMemory buffer,
// bounding how many mappings are host-resident at once (m_max_resident_map_count).
// All non-trivial operations are defined out of line in the matching .cpp.
class HeapTracker {
public:
    explicit HeapTracker(Common::HostMemory& buffer);
    ~HeapTracker();

    // Maps [virtual_offset, virtual_offset + length) onto host_offset with the
    // given permissions. is_separate_heap semantics are defined in the .cpp --
    // presumably it selects the separate-heap bookkeeping path; confirm there.
    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm, bool is_separate_heap);

    // Removes [virtual_offset, virtual_offset + size) from the tracker.
    void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);

    // Changes permissions on [virtual_offset, virtual_offset + length).
    void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);

    // Base pointer of the virtual address window, forwarded from the
    // underlying HostMemory buffer.
    inline u8* VirtualBasePointer() noexcept {
        return m_buffer.VirtualBasePointer();
    }

private:
    // TODO: You may want to "fake-map" the first 2GB of 64-bit address space
    // and dedicate it entirely to a recursive PTE mapping :)
    // However Ankerl would be way better than using an RB tree, in all senses - but
    // there is a strict requirement for ordering to be imposed accross the map itself
    // which is not achievable with the unordered property.
    using AddrTree = boost::container::map<VAddr, SeparateHeapMap>;
    // All current mappings, keyed by virtual address.
    AddrTree m_mappings;
    // NOTE(review): despite the name, this tree is also keyed by VAddr, not by
    // tick -- confirm in the .cpp whether a tick-ordered key was intended here.
    using TicksTree = boost::container::map<VAddr, SeparateHeapMap>;
    // Subset of mappings that are currently resident.
    TicksTree m_resident_mappings;

private:
    // Splits the mapping(s) covering [offset, offset + size) at both edges.
    void SplitHeapMap(VAddr offset, size_t size);
    // Splits the mapping containing offset; caller must hold the lock.
    void SplitHeapMapLocked(VAddr offset);
    void RebuildSeparateHeapAddressSpace();

    // NOTE(review): find() is an exact-key lookup, but "Nearest" in the name
    // suggests a lower_bound-style search was intended -- confirm all callers
    // pass exact mapped base addresses. Caller must hold the lock.
    inline HeapTracker::AddrTree::iterator GetNearestHeapMapLocked(VAddr offset) noexcept {
        return m_mappings.find(offset);
    }

private:
    // Underlying host buffer; HeapTracker does not own it.
    Common::HostMemory& m_buffer;
    // Upper bound on simultaneously resident mappings (set in the ctor).
    const s64 m_max_resident_map_count;

    // Guards address-space rebuilds; m_lock guards the trees/counters.
    std::shared_mutex m_rebuild_lock{};
    std::mutex m_lock{};
    s64 m_map_count{};
    s64 m_resident_map_count{};
    // Monotonic counter stamped into SeparateHeapMap::tick.
    size_t m_tick{};
};
} // namespace Common