forked from eden-emu/eden

Revert "k_page_group: synchronize"

gidoly
parent 22f12c976e
commit 118d57a8f0

11 changed files with 191 additions and 332 deletions

src/core/CMakeLists.txt
@@ -226,7 +226,6 @@ add_library(core STATIC
    hle/kernel/k_page_buffer.h
    hle/kernel/k_page_heap.cpp
    hle/kernel/k_page_heap.h
-    hle/kernel/k_page_group.cpp
    hle/kernel/k_page_group.h
    hle/kernel/k_page_table.cpp
    hle/kernel/k_page_table.h

src/core/hle/kernel/k_code_memory.cpp
@@ -27,13 +27,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
    auto& page_table = m_owner->PageTable();

    // Construct the page group.
-    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
+    m_page_group = {};

    // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
+    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))

    // Clear the memory.
-    for (const auto& block : *m_page_group) {
+    for (const auto& block : m_page_group.Nodes()) {
        std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
    }

@@ -51,13 +51,12 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
void KCodeMemory::Finalize() {
    // Unlock.
    if (!m_is_mapped && !m_is_owner_mapped) {
-        const size_t size = m_page_group->GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group);
+        const size_t size = m_page_group.GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
    }

    // Close the page group.
-    m_page_group->Close();
-    m_page_group->Finalize();
+    m_page_group = {};

    // Close our reference to our owner.
    m_owner->Close();
@@ -65,7 +64,7 @@ void KCodeMemory::Finalize() {

Result KCodeMemory::Map(VAddr address, size_t size) {
    // Validate the size.
-    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

    // Lock ourselves.
    KScopedLightLock lk(m_lock);
@@ -75,7 +74,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {

    // Map the memory.
    R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
-        address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));

    // Mark ourselves as mapped.
    m_is_mapped = true;
@@ -85,13 +84,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {

Result KCodeMemory::Unmap(VAddr address, size_t size) {
    // Validate the size.
-    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

    // Lock ourselves.
    KScopedLightLock lk(m_lock);

    // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
                                                          KMemoryState::CodeOut));

    // Mark ourselves as unmapped.
@@ -102,7 +101,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {

Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
    // Validate the size.
-    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

    // Lock ourselves.
    KScopedLightLock lk(m_lock);
@@ -126,7 +125,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission

    // Map the memory.
    R_TRY(
-        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
+        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));

    // Mark ourselves as mapped.
    m_is_owner_mapped = true;
@@ -136,13 +135,13 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission

Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
    // Validate the size.
-    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

    // Lock ourselves.
    KScopedLightLock lk(m_lock);

    // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));

    // Mark ourselves as unmapped.
    m_is_owner_mapped = false;

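
Note on the member-lifetime change above: the reverted KPageGroup required constructor arguments (a KernelCore reference and a KBlockInfoManager), so KCodeMemory had to hold it in a std::optional and emplace() it during Initialize(); the restored KPageGroup is default-constructible, so plain assignment (`m_page_group = {};`) both initializes and resets it. A standalone C++ sketch of the two patterns — every type below is a stand-in, not eden/yuzu code:

    #include <list>
    #include <optional>

    struct Allocator {}; // stand-in for KBlockInfoManager

    class RefCountedGroup { // shape of the reverted KPageGroup
    public:
        explicit RefCountedGroup(Allocator* alloc) : m_alloc{alloc} {}
        ~RefCountedGroup() {} // would free its nodes through m_alloc
    private:
        Allocator* m_alloc;
    };

    class ListGroup { // shape of the restored KPageGroup
    public:
        ListGroup() = default; // no constructor dependencies
    private:
        std::list<int> nodes;
    };

    class CodeMemoryA { // reverted pattern: optional member + emplace
        std::optional<RefCountedGroup> m_group;
    public:
        void Initialize(Allocator* alloc) { m_group.emplace(alloc); }
        void Finalize() { m_group.reset(); }
    };

    class CodeMemoryB { // restored pattern: value member, reset by assignment
        ListGroup m_group;
    public:
        void Initialize() { m_group = {}; }
        void Finalize() { m_group = {}; }
    };

    int main() {
        Allocator alloc;
        CodeMemoryA a; a.Initialize(&alloc); a.Finalize();
        CodeMemoryB b; b.Initialize(); b.Finalize();
    }
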
src/core/hle/kernel/k_code_memory.h
@@ -3,8 +3,6 @@

#pragma once

-#include <optional>
-
#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
@@ -51,11 +49,11 @@ public:
        return m_address;
    }
    size_t GetSize() const {
-        return m_is_initialized ? m_page_group->GetNumPages() * PageSize : 0;
+        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
    }

private:
-    std::optional<KPageGroup> m_page_group{};
+    KPageGroup m_page_group{};
    KProcess* m_owner{};
    VAddr m_address{};
    KLightLock m_lock;

src/core/hle/kernel/k_memory_manager.cpp
@@ -223,7 +223,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,

    // Ensure that we don't leave anything un-freed.
    ON_RESULT_FAILURE {
-        for (const auto& it : *out) {
+        for (const auto& it : out->Nodes()) {
            auto& manager = this->GetManager(it.GetAddress());
            const size_t node_num_pages = std::min<u64>(
                it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
@@ -285,7 +285,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
                                      m_has_optimized_process[static_cast<size_t>(pool)], true));

    // Open the first reference to the pages.
-    for (const auto& block : *out) {
+    for (const auto& block : out->Nodes()) {
        PAddr cur_address = block.GetAddress();
        size_t remaining_pages = block.GetNumPages();
        while (remaining_pages > 0) {
@@ -335,7 +335,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
    // Perform optimized memory tracking, if we should.
    if (optimized) {
        // Iterate over the allocated blocks.
-        for (const auto& block : *out) {
+        for (const auto& block : out->Nodes()) {
            // Get the block extents.
            const PAddr block_address = block.GetAddress();
            const size_t block_pages = block.GetNumPages();
@@ -391,7 +391,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
        }
    } else {
        // Set all the allocated memory.
-        for (const auto& block : *out) {
+        for (const auto& block : out->Nodes()) {
            std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                        block.GetSize());
        }

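
The failure path in AllocatePageGroupImpl above frees each block back through the manager (memory region) that owns it, clamping with std::min any block that runs past the end of the current manager. A self-contained sketch of that clamping loop — Region, FreeBlock, and the sample addresses are hypothetical, and the kernel looks managers up by address rather than scanning:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr std::uint64_t PageSize = 0x1000;

    struct Region { // stand-in for one memory manager's coverage
        std::uint64_t begin;
        std::uint64_t end;
    };

    // Free [addr, addr + num_pages * PageSize), region by region.
    void FreeBlock(const Region* regions, std::size_t n, std::uint64_t addr,
                   std::uint64_t num_pages) {
        while (num_pages > 0) {
            const Region* r = nullptr;
            for (std::size_t i = 0; i < n; ++i) {
                if (regions[i].begin <= addr && addr < regions[i].end) {
                    r = &regions[i];
                    break;
                }
            }
            if (r == nullptr) {
                return; // address not tracked by any region
            }
            // Clamp to this region's end, mirroring the diff's std::min<u64>.
            const std::uint64_t here = std::min(num_pages, (r->end - addr) / PageSize);
            std::printf("free %llu pages at %#llx\n",
                        static_cast<unsigned long long>(here),
                        static_cast<unsigned long long>(addr));
            addr += here * PageSize;
            num_pages -= here;
        }
    }

    int main() {
        const Region regions[] = {{0x00000, 0x10000}, {0x10000, 0x20000}};
        FreeBlock(regions, 2, 0xE000, 4); // 4 pages straddling both regions: 2 + 2
    }
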
src/core/hle/kernel/k_page_group.cpp (deleted)
@@ -1,121 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "core/hle/kernel/k_dynamic_resource_manager.h"
-#include "core/hle/kernel/k_memory_manager.h"
-#include "core/hle/kernel/k_page_group.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/svc_results.h"
-
-namespace Kernel {
-
-void KPageGroup::Finalize() {
-    KBlockInfo* cur = m_first_block;
-    while (cur != nullptr) {
-        KBlockInfo* next = cur->GetNext();
-        m_manager->Free(cur);
-        cur = next;
-    }
-
-    m_first_block = nullptr;
-    m_last_block = nullptr;
-}
-
-void KPageGroup::CloseAndReset() {
-    auto& mm = m_kernel.MemoryManager();
-
-    KBlockInfo* cur = m_first_block;
-    while (cur != nullptr) {
-        KBlockInfo* next = cur->GetNext();
-        mm.Close(cur->GetAddress(), cur->GetNumPages());
-        m_manager->Free(cur);
-        cur = next;
-    }
-
-    m_first_block = nullptr;
-    m_last_block = nullptr;
-}
-
-size_t KPageGroup::GetNumPages() const {
-    size_t num_pages = 0;
-
-    for (const auto& it : *this) {
-        num_pages += it.GetNumPages();
-    }
-
-    return num_pages;
-}
-
-Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
-    // Succeed immediately if we're adding no pages.
-    R_SUCCEED_IF(num_pages == 0);
-
-    // Check for overflow.
-    ASSERT(addr < addr + num_pages * PageSize);
-
-    // Try to just append to the last block.
-    if (m_last_block != nullptr) {
-        R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
-    }
-
-    // Allocate a new block.
-    KBlockInfo* new_block = m_manager->Allocate();
-    R_UNLESS(new_block != nullptr, ResultOutOfResource);
-
-    // Initialize the block.
-    new_block->Initialize(addr, num_pages);
-
-    // Add the block to our list.
-    if (m_last_block != nullptr) {
-        m_last_block->SetNext(new_block);
-    } else {
-        m_first_block = new_block;
-    }
-    m_last_block = new_block;
-
-    R_SUCCEED();
-}
-
-void KPageGroup::Open() const {
-    auto& mm = m_kernel.MemoryManager();
-
-    for (const auto& it : *this) {
-        mm.Open(it.GetAddress(), it.GetNumPages());
-    }
-}
-
-void KPageGroup::OpenFirst() const {
-    auto& mm = m_kernel.MemoryManager();
-
-    for (const auto& it : *this) {
-        mm.OpenFirst(it.GetAddress(), it.GetNumPages());
-    }
-}
-
-void KPageGroup::Close() const {
-    auto& mm = m_kernel.MemoryManager();
-
-    for (const auto& it : *this) {
-        mm.Close(it.GetAddress(), it.GetNumPages());
-    }
-}
-
-bool KPageGroup::IsEquivalentTo(const KPageGroup& rhs) const {
-    auto lit = this->begin();
-    auto rit = rhs.begin();
-    auto lend = this->end();
-    auto rend = rhs.end();
-
-    while (lit != lend && rit != rend) {
-        if (*lit != *rit) {
-            return false;
-        }
-
-        ++lit;
-        ++rit;
-    }
-
-    return lit == lend && rit == rend;
-}
-
-} // namespace Kernel

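
For context, the deleted file stored blocks in an intrusive singly linked list of KBlockInfo nodes and coalesced at the tail via TryConcatenate before allocating a new node from the slab manager. A simplified standalone sketch of that AddBlock logic — the names are hypothetical and plain operator new stands in for the kernel's slab Allocate()/Free():

    #include <cassert>
    #include <cstdint>
    #include <new>

    constexpr std::uint64_t PageSize = 0x1000;

    struct BlockInfo {
        BlockInfo* next{};
        std::uint64_t addr{};
        std::uint64_t num_pages{};

        bool TryConcatenate(std::uint64_t a, std::uint64_t np) {
            // Merge only if the new range starts exactly at this block's end.
            if (a != 0 && a == addr + num_pages * PageSize) {
                num_pages += np;
                return true;
            }
            return false;
        }
    };

    struct PageGroup {
        BlockInfo* first{};
        BlockInfo* last{};

        bool AddBlock(std::uint64_t addr, std::uint64_t np) {
            if (np == 0) {
                return true; // adding no pages always succeeds
            }
            if (last != nullptr && last->TryConcatenate(addr, np)) {
                return true; // fast path: extended the tail block
            }
            BlockInfo* n = new (std::nothrow) BlockInfo{}; // slab Allocate() in the kernel
            if (n == nullptr) {
                return false; // maps to ResultOutOfResource
            }
            n->addr = addr;
            n->num_pages = np;
            if (last != nullptr) {
                last->next = n;
            } else {
                first = n;
            }
            last = n;
            return true;
        }

        ~PageGroup() { // Finalize(): walk the list and free every node
            for (BlockInfo* b = first; b != nullptr;) {
                BlockInfo* next = b->next;
                delete b;
                b = next;
            }
        }
    };

    int main() {
        PageGroup pg;
        assert(pg.AddBlock(0x1000, 1));
        assert(pg.AddBlock(0x2000, 2)); // contiguous, so coalesced into one block
        assert(pg.first == pg.last && pg.first->num_pages == 3);
    }
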
src/core/hle/kernel/k_page_group.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

 #pragma once
@@ -13,23 +13,24 @@

 namespace Kernel {

-class KBlockInfoManager;
 class KernelCore;
-class KPageGroup;

 class KBlockInfo {
-public:
-    constexpr explicit KBlockInfo() : m_next(nullptr) {}
-private:
-    friend class KPageGroup;
-
-    constexpr void Initialize(KPhysicalAddress addr, size_t np) {
+public:
+    constexpr KBlockInfo() = default;
+
+    constexpr void Initialize(PAddr addr, size_t np) {
         ASSERT(Common::IsAligned(addr, PageSize));
         ASSERT(static_cast<u32>(np) == np);

-        m_page_index = static_cast<u32>(addr / PageSize);
+        m_page_index = static_cast<u32>(addr) / PageSize;
         m_num_pages = static_cast<u32>(np);
     }

-    constexpr KPhysicalAddress GetAddress() const {
+    constexpr PAddr GetAddress() const {
         return m_page_index * PageSize;
     }
     constexpr size_t GetNumPages() const {
@@ -38,10 +39,10 @@ public:
     constexpr size_t GetSize() const {
         return this->GetNumPages() * PageSize;
     }
-    constexpr KPhysicalAddress GetEndAddress() const {
+    constexpr PAddr GetEndAddress() const {
         return (m_page_index + m_num_pages) * PageSize;
     }
-    constexpr KPhysicalAddress GetLastAddress() const {
+    constexpr PAddr GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }

@@ -61,8 +62,8 @@ public:
         return !(*this == rhs);
     }

-    constexpr bool IsStrictlyBefore(KPhysicalAddress addr) const {
-        const KPhysicalAddress end = this->GetEndAddress();
+    constexpr bool IsStrictlyBefore(PAddr addr) const {
+        const PAddr end = this->GetEndAddress();

         if (m_page_index != 0 && end == 0) {
             return false;
@@ -71,11 +72,11 @@ public:
         return end < addr;
     }

-    constexpr bool operator<(KPhysicalAddress addr) const {
+    constexpr bool operator<(PAddr addr) const {
         return this->IsStrictlyBefore(addr);
     }

-    constexpr bool TryConcatenate(KPhysicalAddress addr, size_t np) {
+    constexpr bool TryConcatenate(PAddr addr, size_t np) {
         if (addr != 0 && addr == this->GetEndAddress()) {
             m_num_pages += static_cast<u32>(np);
             return true;
@@ -89,118 +90,96 @@ private:
     }

 private:
     friend class KPageGroup;

     KBlockInfo* m_next{};
     u32 m_page_index{};
     u32 m_num_pages{};
 };
 static_assert(sizeof(KBlockInfo) <= 0x10);

-class KPageGroup {
+class KPageGroup final {
 public:
-    class Iterator {
-    public:
-        using iterator_category = std::forward_iterator_tag;
-        using value_type = const KBlockInfo;
-        using difference_type = std::ptrdiff_t;
-        using pointer = value_type*;
-        using reference = value_type&;
-
-        constexpr explicit Iterator(pointer n) : m_node(n) {}
-
-        constexpr bool operator==(const Iterator& rhs) const {
-            return m_node == rhs.m_node;
-        }
-        constexpr bool operator!=(const Iterator& rhs) const {
-            return !(*this == rhs);
-        }
-
-        constexpr pointer operator->() const {
-            return m_node;
-        }
-        constexpr reference operator*() const {
-            return *m_node;
-        }
-
-        constexpr Iterator& operator++() {
-            m_node = m_node->GetNext();
-            return *this;
-        }
-
-        constexpr Iterator operator++(int) {
-            const Iterator it{*this};
-            ++(*this);
-            return it;
-        }
-
-    private:
-        pointer m_node{};
-    };
-
-    explicit KPageGroup(KernelCore& kernel, KBlockInfoManager* m)
-        : m_kernel{kernel}, m_manager{m} {}
-    ~KPageGroup() {
-        this->Finalize();
-    }
-
-    void CloseAndReset();
-    void Finalize();
-
-    Iterator begin() const {
-        return Iterator{m_first_block};
-    }
-    Iterator end() const {
-        return Iterator{nullptr};
-    }
-    bool empty() const {
-        return m_first_block == nullptr;
-    }
-
-    Result AddBlock(KPhysicalAddress addr, size_t num_pages);
-    void Open() const;
-    void OpenFirst() const;
-    void Close() const;
-
-    size_t GetNumPages() const;
-
-    bool IsEquivalentTo(const KPageGroup& rhs) const;
-
-    bool operator==(const KPageGroup& rhs) const {
-        return this->IsEquivalentTo(rhs);
-    }
-
-    bool operator!=(const KPageGroup& rhs) const {
-        return !(*this == rhs);
-    }
-
-private:
-    KernelCore& m_kernel;
-    KBlockInfo* m_first_block{};
-    KBlockInfo* m_last_block{};
-    KBlockInfoManager* m_manager{};
-};
-
-class KScopedPageGroup {
-public:
-    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
-        if (m_pg) {
-            m_pg->Open();
-        }
-    }
-    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
-    ~KScopedPageGroup() {
-        if (m_pg) {
-            m_pg->Close();
-        }
-    }
-
-    void CancelClose() {
-        m_pg = nullptr;
-    }
+    class Node final {
+    public:
+        constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {}
+
+        constexpr u64 GetAddress() const {
+            return addr;
+        }
+
+        constexpr std::size_t GetNumPages() const {
+            return num_pages;
+        }
+
+        constexpr std::size_t GetSize() const {
+            return GetNumPages() * PageSize;
+        }
+
+    private:
+        u64 addr{};
+        std::size_t num_pages{};
+    };
+
+    KPageGroup() = default;
+    KPageGroup(u64 address, u64 num_pages) {
+        ASSERT(AddBlock(address, num_pages).IsSuccess());
+    }
+
+    constexpr std::list<Node>& Nodes() {
+        return nodes;
+    }
+
+    constexpr const std::list<Node>& Nodes() const {
+        return nodes;
+    }
+
+    std::size_t GetNumPages() const {
+        std::size_t num_pages = 0;
+        for (const Node& node : nodes) {
+            num_pages += node.GetNumPages();
+        }
+        return num_pages;
+    }
+
+    bool IsEqual(KPageGroup& other) const {
+        auto this_node = nodes.begin();
+        auto other_node = other.nodes.begin();
+        while (this_node != nodes.end() && other_node != other.nodes.end()) {
+            if (this_node->GetAddress() != other_node->GetAddress() ||
+                this_node->GetNumPages() != other_node->GetNumPages()) {
+                return false;
+            }
+            this_node = std::next(this_node);
+            other_node = std::next(other_node);
+        }
+
+        return this_node == nodes.end() && other_node == other.nodes.end();
+    }
+
+    Result AddBlock(u64 address, u64 num_pages) {
+        if (!num_pages) {
+            return ResultSuccess;
+        }
+        if (!nodes.empty()) {
+            const auto node = nodes.back();
+            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
+                address = node.GetAddress();
+                num_pages += node.GetNumPages();
+                nodes.pop_back();
+            }
+        }
+        nodes.push_back({address, num_pages});
+        return ResultSuccess;
+    }
+
+    bool Empty() const {
+        return nodes.empty();
+    }
+
+    void Finalize() {}

 private:
-    const KPageGroup* m_pg{};
+    std::list<Node> nodes;
 };

 } // namespace Kernel

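
The restored header keeps blocks in a std::list<Node> and coalesces by popping the tail node and pushing a widened replacement, rather than mutating an intrusive node in place as the deleted implementation did. A minimal standalone sketch of that behavior, using simplified stand-in types:

    #include <cassert>
    #include <cstdint>
    #include <list>

    constexpr std::uint64_t PageSize = 0x1000;

    struct Node {
        std::uint64_t addr;
        std::uint64_t num_pages;
    };

    struct PageGroup {
        std::list<Node> nodes;

        void AddBlock(std::uint64_t address, std::uint64_t num_pages) {
            if (num_pages == 0) {
                return;
            }
            if (!nodes.empty()) {
                const Node node = nodes.back();
                if (node.addr + node.num_pages * PageSize == address) {
                    // Adjacent to the tail: replace it with one wider node.
                    address = node.addr;
                    num_pages += node.num_pages;
                    nodes.pop_back();
                }
            }
            nodes.push_back({address, num_pages});
        }

        std::uint64_t GetNumPages() const {
            std::uint64_t total = 0;
            for (const Node& n : nodes) {
                total += n.num_pages;
            }
            return total;
        }
    };

    int main() {
        PageGroup pg;
        pg.AddBlock(0x10000, 4);
        pg.AddBlock(0x14000, 4); // contiguous with the tail, merged
        assert(pg.nodes.size() == 1 && pg.GetNumPages() == 8);
        pg.AddBlock(0x20000, 1); // gap, becomes a second node
        assert(pg.nodes.size() == 2);
    }
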
src/core/hle/kernel/k_page_table.cpp
@@ -100,7 +100,7 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a

KPageTable::KPageTable(Core::System& system_)
    : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}

KPageTable::~KPageTable() = default;

@@ -373,7 +373,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
                                                 m_memory_block_slab_manager);

    // Allocate and open.
-    KPageGroup pg{m_kernel, m_block_info_manager};
+    KPageGroup pg;
    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
        &pg, num_pages,
        KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
@@ -432,7 +432,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
        const size_t num_pages = size / PageSize;

        // Create page groups for the memory being mapped.
-        KPageGroup pg{m_kernel, m_block_info_manager};
+        KPageGroup pg;
        AddRegionToPages(src_address, num_pages, pg);

        // Reprotect the source as kernel-read/not mapped.
@@ -593,7 +593,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
    const size_t size = num_pages * PageSize;

    // We're making a new group, not adding to an existing one.
-    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
+    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);

    // Begin traversal.
    Common::PageTable::TraversalContext context;
@@ -640,10 +640,11 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
    R_SUCCEED();
}

-bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
    ASSERT(this->IsLockedByCurrentThread());

    const size_t size = num_pages * PageSize;
+    const auto& pg = pg_ll.Nodes();
    const auto& memory_layout = m_system.Kernel().MemoryLayout();

    // Empty groups are necessarily invalid.
@@ -941,6 +942,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add

    ON_RESULT_FAILURE {
        if (cur_mapped_addr != dst_addr) {
+            // HACK: Manually close the pages.
+            HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize);
+
            ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
                           KMemoryPermission::None, OperationType::Unmap)
                       .IsSuccess());
@@ -1016,6 +1020,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
        // Map the page.
        R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));

+        // HACK: Manually open the pages.
+        HACK_OpenPages(start_partial_page, 1);
+
        // Update tracking extents.
        cur_mapped_addr += PageSize;
        cur_block_addr += PageSize;
@@ -1044,6 +1051,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
            R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
                          cur_block_addr));

+            // HACK: Manually open the pages.
+            HACK_OpenPages(cur_block_addr, cur_block_size / PageSize);
+
            // Update tracking extents.
            cur_mapped_addr += cur_block_size;
            cur_block_addr = next_entry.phys_addr;
@@ -1063,6 +1073,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
        R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
                      cur_block_addr));

+        // HACK: Manually open the pages.
+        HACK_OpenPages(cur_block_addr, last_block_size / PageSize);
+
        // Update tracking extents.
        cur_mapped_addr += last_block_size;
        cur_block_addr += last_block_size;
@@ -1094,6 +1107,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add

        // Map the page.
        R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
+
+        // HACK: Manually open the pages.
+        HACK_OpenPages(end_partial_page, 1);
    }

    // Update memory blocks to reflect our changes
@@ -1195,6 +1211,9 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
    const size_t aligned_size = aligned_end - aligned_start;
    const size_t aligned_num_pages = aligned_size / PageSize;

+    // HACK: Manually close the pages.
+    HACK_ClosePages(aligned_start, aligned_num_pages);
+
    // Unmap the pages.
    R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));

@@ -1482,6 +1501,17 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
    }
}

+void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
+    m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
+}
+
+void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
+    for (size_t index = 0; index < num_pages; ++index) {
+        const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
+        m_system.Kernel().MemoryManager().Close(paddr, 1);
+    }
+}
+
Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
    // Lock the physical memory lock.
    KScopedLightLock phys_lk(m_map_physical_memory_lock);
@@ -1542,7 +1572,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
            R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

            // Allocate pages for the new memory.
-            KPageGroup pg{m_kernel, m_block_info_manager};
+            KPageGroup pg;
            R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
                &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));

@@ -1620,7 +1650,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                KScopedPageTableUpdater updater(this);

                // Prepare to iterate over the memory.
-                auto pg_it = pg.begin();
+                auto pg_it = pg.Nodes().begin();
                PAddr pg_phys_addr = pg_it->GetAddress();
                size_t pg_pages = pg_it->GetNumPages();

@@ -1650,6 +1680,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                                             last_unmap_address + 1 - cur_address) /
                                    PageSize;

+                                // HACK: Manually close the pages.
+                                HACK_ClosePages(cur_address, cur_pages);
+
                                // Unmap.
                                ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
                                               OperationType::Unmap)
@@ -1670,7 +1703,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                    // Release any remaining unmapped memory.
                    m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
                    m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
-                    for (++pg_it; pg_it != pg.end(); ++pg_it) {
+                    for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
                        m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
                                                                    pg_it->GetNumPages());
                        m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
@@ -1698,7 +1731,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                            // Check if we're at the end of the physical block.
                            if (pg_pages == 0) {
                                // Ensure there are more pages to map.
-                                ASSERT(pg_it != pg.end());
+                                ASSERT(pg_it != pg.Nodes().end());

                                // Advance our physical block.
                                ++pg_it;
@@ -1709,7 +1742,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                            // Map whatever we can.
                            const size_t cur_pages = std::min(pg_pages, map_pages);
                            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                                          OperationType::MapFirst, pg_phys_addr));
+                                          OperationType::Map, pg_phys_addr));
+
+                            // HACK: Manually open the pages.
+                            HACK_OpenPages(pg_phys_addr, cur_pages);

                            // Advance.
                            cur_address += cur_pages * PageSize;
@@ -1852,6 +1888,9 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
                                              last_address + 1 - cur_address) /
                                     PageSize;

+            // HACK: Manually close the pages.
+            HACK_ClosePages(cur_address, cur_pages);
+
            // Unmap.
            ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
                       .IsSuccess());
@@ -1916,7 +1955,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
    R_TRY(dst_allocator_result);

    // Map the memory.
-    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
+    KPageGroup page_linked_list;
    const size_t num_pages{size / PageSize};
    const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
        KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
@@ -1983,14 +2022,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                                     num_dst_allocator_blocks);
    R_TRY(dst_allocator_result);

-    KPageGroup src_pages{m_kernel, m_block_info_manager};
-    KPageGroup dst_pages{m_kernel, m_block_info_manager};
+    KPageGroup src_pages;
+    KPageGroup dst_pages;
    const size_t num_pages{size / PageSize};

    AddRegionToPages(src_address, num_pages, src_pages);
    AddRegionToPages(dst_address, num_pages, dst_pages);

-    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
+    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);

    {
        auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
@@ -2021,7 +2060,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,

    VAddr cur_addr{addr};

-    for (const auto& node : page_linked_list) {
+    for (const auto& node : page_linked_list.Nodes()) {
        if (const auto result{
                Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
            result.IsError()) {
@@ -2121,7 +2160,7 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {

    VAddr cur_addr{addr};

-    for (const auto& node : page_linked_list) {
+    for (const auto& node : page_linked_list.Nodes()) {
        if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                      OperationType::Unmap)};
            result.IsError()) {
@@ -2488,13 +2527,13 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

    // Allocate pages for the heap extension.
-    KPageGroup pg{m_kernel, m_block_info_manager};
+    KPageGroup pg;
    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
        &pg, allocation_size / PageSize,
        KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));

    // Clear all the newly allocated pages.
-    for (const auto& it : pg) {
+    for (const auto& it : pg.Nodes()) {
        std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                    it.GetSize());
    }
@@ -2571,23 +2610,11 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
    if (is_map_only) {
        R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
    } else {
-        // Create a page group to hold the pages we allocate.
-        KPageGroup pg{m_kernel, m_block_info_manager};
-
-        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
-            &pg, needed_num_pages,
-            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
-
-        // Ensure that the page group is closed when we're done working with it.
-        SCOPE_EXIT({ pg.Close(); });
-
-        // Clear all pages.
-        for (const auto& it : pg) {
-            std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
-                        m_heap_fill_value, it.GetSize());
-        }
-
-        R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
+        KPageGroup page_group;
+        R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
+            &page_group, needed_num_pages,
+            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
+        R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
    }

    // Update the blocks.
@@ -2768,30 +2795,21 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
    ASSERT(num_pages > 0);
    ASSERT(num_pages == page_group.GetNumPages());

-    switch (operation) {
-    case OperationType::MapGroup: {
-        // We want to maintain a new reference to every page in the group.
-        KScopedPageGroup spg(page_group);
-
-        for (const auto& node : page_group) {
-            const size_t size{node.GetNumPages() * PageSize};
-
-            // Map the pages.
-            m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
-
-            addr += size;
-        }
-
-        // We succeeded! We want to persist the reference to the pages.
-        spg.CancelClose();
-
-        break;
-    }
-    default:
-        ASSERT(false);
-        break;
-    }
+    for (const auto& node : page_group.Nodes()) {
+        const size_t size{node.GetNumPages() * PageSize};
+
+        switch (operation) {
+        case OperationType::MapGroup:
+            m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
+            break;
+        default:
+            ASSERT(false);
+            break;
+        }
+
+        addr += size;
+    }

    R_SUCCEED();
}
@@ -2804,29 +2822,13 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
    ASSERT(ContainsPages(addr, num_pages));

    switch (operation) {
-    case OperationType::Unmap: {
-        // Ensure that any pages we track close on exit.
-        KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
-        SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
-
-        this->AddRegionToPages(addr, num_pages, pages_to_close);
+    case OperationType::Unmap:
        m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
        break;
-    }
-    case OperationType::MapFirst:
    case OperationType::Map: {
        ASSERT(map_addr);
        ASSERT(Common::IsAligned(map_addr, PageSize));
        m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
-
-        // Open references to pages, if we should.
-        if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
-            if (operation == OperationType::MapFirst) {
-                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
-            } else {
-                m_kernel.MemoryManager().Open(map_addr, num_pages);
-            }
-        }
        break;
    }
    case OperationType::Separate: {

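
The HACK_OpenPages/HACK_ClosePages helpers reintroduced above bracket each map and unmap with explicit per-page reference-count updates, standing in for the KScopedPageGroup and OperationType::MapFirst machinery that the reverted commit used. A toy model of the counting they restore — the unordered_map counter is purely illustrative, and the real HACK_ClosePages also translates each virtual page to its physical address, which this sketch skips:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    constexpr std::uint64_t PageSize = 0x1000;

    struct MemoryManager {
        std::unordered_map<std::uint64_t, int> refcount; // per physical page

        void OpenFirst(std::uint64_t paddr, std::size_t num_pages) {
            for (std::size_t i = 0; i < num_pages; ++i) {
                refcount[paddr + i * PageSize] += 1; // take the first reference
            }
        }

        void Close(std::uint64_t paddr, std::size_t num_pages) {
            for (std::size_t i = 0; i < num_pages; ++i) {
                auto it = refcount.find(paddr + i * PageSize);
                assert(it != refcount.end() && it->second > 0);
                if (--it->second == 0) {
                    refcount.erase(it); // last reference: page returns to the pool
                }
            }
        }
    };

    int main() {
        MemoryManager mm;
        mm.OpenFirst(0x80000000, 2); // HACK_OpenPages after a successful map
        mm.Close(0x80000000, 2);     // HACK_ClosePages before the unmap
        assert(mm.refcount.empty());
    }
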
src/core/hle/kernel/k_page_table.h
@@ -107,10 +107,6 @@ public:
        return *m_page_table_impl;
    }

-    KBlockInfoManager* GetBlockInfoManager() {
-        return m_block_info_manager;
-    }
-
    bool CanContain(VAddr addr, size_t size, KMemoryState state) const;

protected:
@@ -265,6 +261,10 @@ private:
    void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
                                                 size_t size, KMemoryPermission prot_perm);

+    // HACK: These will be removed once we automatically manage page reference counts.
+    void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
+    void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
+
    mutable KLightLock m_general_lock;
    mutable KLightLock m_map_physical_memory_lock;

@@ -488,7 +488,6 @@ private:
    std::unique_ptr<Common::PageTable> m_page_table_impl;

    Core::System& m_system;
-    KernelCore& m_kernel;
};

} // namespace Kernel

src/core/hle/kernel/k_shared_memory.cpp
@@ -13,7 +13,10 @@
namespace Kernel {

KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
-KSharedMemory::~KSharedMemory() = default;
+
+KSharedMemory::~KSharedMemory() {
+    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size);
+}

Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
                                 Svc::MemoryPermission owner_permission_,
@@ -46,8 +49,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
    R_UNLESS(physical_address != 0, ResultOutOfMemory);

    //! Insert the result into our page group.
-    page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
-    page_group->AddBlock(physical_address, num_pages);
+    page_group.emplace(physical_address, num_pages);

    // Commit our reservation.
    memory_reservation.Commit();
@@ -60,7 +62,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
    is_initialized = true;

    // Clear all pages in the memory.
-    for (const auto& block : *page_group) {
+    for (const auto& block : page_group->Nodes()) {
        std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
    }

@@ -69,8 +71,13 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o

void KSharedMemory::Finalize() {
    // Close and finalize the page group.
-    page_group->Close();
-    page_group->Finalize();
+    // page_group->Close();
+    // page_group->Finalize();
+
+    //! HACK: Manually close.
+    for (const auto& block : page_group->Nodes()) {
+        kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages());
+    }

    // Release the memory reservation.
    resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);

src/core/hle/kernel/memory_types.h
@@ -14,7 +14,4 @@ constexpr std::size_t PageSize{1 << PageBits};

using Page = std::array<u8, PageSize>;

-using KPhysicalAddress = PAddr;
-using KProcessAddress = VAddr;
-
} // namespace Kernel

src/core/hle/kernel/svc.cpp
@@ -1485,7 +1485,7 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p
             ResultInvalidMemoryRegion);

    // Create a new page group.
-    KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
+    KPageGroup pg;
    R_TRY(src_pt.MakeAndOpenPageGroup(
        std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
        KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,