forked from eden-emu/eden

Revert "k_page_group: synchronize"
by gidoly

parent 22f12c976e
commit 118d57a8f0

11 changed files with 191 additions and 332 deletions
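This reverts the synchronized KPageGroup: the slab-backed, reference-counting implementation (an intrusive linked list of KBlockInfo nodes owned by a KBlockInfoManager) goes away, k_page_group.cpp is deleted, and the older std::list<Node> container comes back. Because Operate() no longer opens and closes page references itself, the revert also reintroduces the manual HACK_OpenPages/HACK_ClosePages helpers in KPageTable and drops the OperationType::MapFirst path, KScopedPageGroup, and the KPhysicalAddress/KProcessAddress aliases in k_types.h.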
src/core/CMakeLists.txt
@@ -226,7 +226,6 @@ add_library(core STATIC
     hle/kernel/k_page_buffer.h
     hle/kernel/k_page_heap.cpp
     hle/kernel/k_page_heap.h
-    hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
src/core/hle/kernel/k_code_memory.cpp
@@ -27,13 +27,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
+    m_page_group = {};
 
     // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
+    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
 
     // Clear the memory.
-    for (const auto& block : *m_page_group) {
+    for (const auto& block : m_page_group.Nodes()) {
         std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
     }
 
@@ -51,13 +51,12 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 void KCodeMemory::Finalize() {
     // Unlock.
     if (!m_is_mapped && !m_is_owner_mapped) {
-        const size_t size = m_page_group->GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group);
+        const size_t size = m_page_group.GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
     }
 
     // Close the page group.
-    m_page_group->Close();
-    m_page_group->Finalize();
+    m_page_group = {};
 
     // Close our reference to our owner.
     m_owner->Close();
@@ -65,7 +64,7 @@ void KCodeMemory::Finalize() {
 
 Result KCodeMemory::Map(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -75,7 +74,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
     // Map the memory.
     R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
-        address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
 
     // Mark ourselves as mapped.
     m_is_mapped = true;
@@ -85,13 +84,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
 Result KCodeMemory::Unmap(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
                                                           KMemoryState::CodeOut));
 
     // Mark ourselves as unmapped.
@@ -102,7 +101,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
 
 Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
     // Validate the size.
-    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -126,7 +125,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
     // Map the memory.
     R_TRY(
-        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
+        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));
 
     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -136,13 +135,13 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
 Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));
 
     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
src/core/hle/kernel/k_code_memory.h
@@ -3,8 +3,6 @@
 
 #pragma once
 
-#include <optional>
-
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
@@ -51,11 +49,11 @@ public:
         return m_address;
     }
     size_t GetSize() const {
-        return m_is_initialized ? m_page_group->GetNumPages() * PageSize : 0;
+        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
     }
 
 private:
-    std::optional<KPageGroup> m_page_group{};
+    KPageGroup m_page_group{};
     KProcess* m_owner{};
     VAddr m_address{};
     KLightLock m_lock;
src/core/hle/kernel/k_memory_manager.cpp
@@ -223,7 +223,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
 
     // Ensure that we don't leave anything un-freed.
     ON_RESULT_FAILURE {
-        for (const auto& it : *out) {
+        for (const auto& it : out->Nodes()) {
             auto& manager = this->GetManager(it.GetAddress());
             const size_t node_num_pages = std::min<u64>(
                 it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
@@ -285,7 +285,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
                                       m_has_optimized_process[static_cast<size_t>(pool)], true));
 
     // Open the first reference to the pages.
-    for (const auto& block : *out) {
+    for (const auto& block : out->Nodes()) {
         PAddr cur_address = block.GetAddress();
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
@@ -335,7 +335,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
     // Perform optimized memory tracking, if we should.
     if (optimized) {
         // Iterate over the allocated blocks.
-        for (const auto& block : *out) {
+        for (const auto& block : out->Nodes()) {
             // Get the block extents.
             const PAddr block_address = block.GetAddress();
             const size_t block_pages = block.GetNumPages();
@@ -391,7 +391,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
         }
     } else {
         // Set all the allocated memory.
-        for (const auto& block : *out) {
+        for (const auto& block : out->Nodes()) {
             std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                         block.GetSize());
         }
src/core/hle/kernel/k_page_group.cpp (deleted)
@@ -1,121 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "core/hle/kernel/k_dynamic_resource_manager.h"
-#include "core/hle/kernel/k_memory_manager.h"
-#include "core/hle/kernel/k_page_group.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/svc_results.h"
-
-namespace Kernel {
-
-void KPageGroup::Finalize() {
-    KBlockInfo* cur = m_first_block;
-    while (cur != nullptr) {
-        KBlockInfo* next = cur->GetNext();
-        m_manager->Free(cur);
-        cur = next;
-    }
-
-    m_first_block = nullptr;
-    m_last_block = nullptr;
-}
-
-void KPageGroup::CloseAndReset() {
-    auto& mm = m_kernel.MemoryManager();
-
-    KBlockInfo* cur = m_first_block;
-    while (cur != nullptr) {
-        KBlockInfo* next = cur->GetNext();
-        mm.Close(cur->GetAddress(), cur->GetNumPages());
-        m_manager->Free(cur);
-        cur = next;
-    }
-
-    m_first_block = nullptr;
-    m_last_block = nullptr;
-}
-
-size_t KPageGroup::GetNumPages() const {
-    size_t num_pages = 0;
-
-    for (const auto& it : *this) {
-        num_pages += it.GetNumPages();
-    }
-
-    return num_pages;
-}
-
-Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
-    // Succeed immediately if we're adding no pages.
-    R_SUCCEED_IF(num_pages == 0);
-
-    // Check for overflow.
-    ASSERT(addr < addr + num_pages * PageSize);
-
-    // Try to just append to the last block.
-    if (m_last_block != nullptr) {
-        R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
-    }
-
-    // Allocate a new block.
-    KBlockInfo* new_block = m_manager->Allocate();
-    R_UNLESS(new_block != nullptr, ResultOutOfResource);
-
-    // Initialize the block.
-    new_block->Initialize(addr, num_pages);
-
-    // Add the block to our list.
-    if (m_last_block != nullptr) {
-        m_last_block->SetNext(new_block);
-    } else {
-        m_first_block = new_block;
-    }
-    m_last_block = new_block;
-
-    R_SUCCEED();
-}
-
-void KPageGroup::Open() const {
-    auto& mm = m_kernel.MemoryManager();
-
-    for (const auto& it : *this) {
-        mm.Open(it.GetAddress(), it.GetNumPages());
-    }
-}
-
-void KPageGroup::OpenFirst() const {
-    auto& mm = m_kernel.MemoryManager();
-
-    for (const auto& it : *this) {
-        mm.OpenFirst(it.GetAddress(), it.GetNumPages());
-    }
-}
-
-void KPageGroup::Close() const {
-    auto& mm = m_kernel.MemoryManager();
-
-    for (const auto& it : *this) {
-        mm.Close(it.GetAddress(), it.GetNumPages());
-    }
-}
-
-bool KPageGroup::IsEquivalentTo(const KPageGroup& rhs) const {
-    auto lit = this->begin();
-    auto rit = rhs.begin();
-    auto lend = this->end();
-    auto rend = rhs.end();
-
-    while (lit != lend && rit != rend) {
-        if (*lit != *rit) {
-            return false;
-        }
-
-        ++lit;
-        ++rit;
-    }
-
-    return lit == lend && rit == rend;
-}
-
-} // namespace Kernel
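The deleted file implemented the page group as an intrusive singly linked list: blocks are KBlockInfo nodes drawn from a KBlockInfoManager slab, and AddBlock() first tries to extend the tail block in place before allocating a new node. A minimal sketch of that coalescing rule, using a plain struct in place of KBlockInfo (names here are illustrative, not from the sources):

// Minimal sketch (not the yuzu sources): the in-place coalescing that the
// deleted AddBlock() attempts before allocating a fresh slab node.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr std::size_t PageSize = 0x1000;

struct Block {
    std::uint64_t addr{};
    std::size_t num_pages{};
    Block* next{}; // link used by the intrusive list

    // Mirrors KBlockInfo::TryConcatenate: extend in place when the new range
    // starts exactly at this block's end address.
    bool TryConcatenate(std::uint64_t a, std::size_t np) {
        if (a != 0 && a == addr + num_pages * PageSize) {
            num_pages += np;
            return true;
        }
        return false;
    }
};

int main() {
    Block b{0x10000, 4, nullptr};
    assert(b.TryConcatenate(0x14000, 2));  // contiguous: coalesced into one block
    assert(!b.TryConcatenate(0x20000, 1)); // gap: caller must allocate a new node
    std::cout << "pages: " << b.num_pages << '\n'; // pages: 6
}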
src/core/hle/kernel/k_page_group.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #pragma once
@@ -13,23 +13,24 @@
 
 namespace Kernel {
 
-class KBlockInfoManager;
-class KernelCore;
 class KPageGroup;
 
 class KBlockInfo {
-public:
-    constexpr explicit KBlockInfo() : m_next(nullptr) {}
+private:
+    friend class KPageGroup;
 
-    constexpr void Initialize(KPhysicalAddress addr, size_t np) {
+public:
+    constexpr KBlockInfo() = default;
+
+    constexpr void Initialize(PAddr addr, size_t np) {
         ASSERT(Common::IsAligned(addr, PageSize));
         ASSERT(static_cast<u32>(np) == np);
 
-        m_page_index = static_cast<u32>(addr / PageSize);
+        m_page_index = static_cast<u32>(addr) / PageSize;
         m_num_pages = static_cast<u32>(np);
     }
 
-    constexpr KPhysicalAddress GetAddress() const {
+    constexpr PAddr GetAddress() const {
         return m_page_index * PageSize;
     }
     constexpr size_t GetNumPages() const {
@@ -38,10 +39,10 @@ public:
     constexpr size_t GetSize() const {
         return this->GetNumPages() * PageSize;
     }
-    constexpr KPhysicalAddress GetEndAddress() const {
+    constexpr PAddr GetEndAddress() const {
         return (m_page_index + m_num_pages) * PageSize;
     }
-    constexpr KPhysicalAddress GetLastAddress() const {
+    constexpr PAddr GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }
 
@@ -61,8 +62,8 @@ public:
         return !(*this == rhs);
     }
 
-    constexpr bool IsStrictlyBefore(KPhysicalAddress addr) const {
-        const KPhysicalAddress end = this->GetEndAddress();
+    constexpr bool IsStrictlyBefore(PAddr addr) const {
+        const PAddr end = this->GetEndAddress();
 
         if (m_page_index != 0 && end == 0) {
             return false;
@@ -71,11 +72,11 @@ public:
         return end < addr;
     }
 
-    constexpr bool operator<(KPhysicalAddress addr) const {
+    constexpr bool operator<(PAddr addr) const {
         return this->IsStrictlyBefore(addr);
    }
 
-    constexpr bool TryConcatenate(KPhysicalAddress addr, size_t np) {
+    constexpr bool TryConcatenate(PAddr addr, size_t np) {
         if (addr != 0 && addr == this->GetEndAddress()) {
             m_num_pages += static_cast<u32>(np);
             return true;
@@ -89,118 +90,96 @@ private:
     }
 
 private:
-    friend class KPageGroup;
-
     KBlockInfo* m_next{};
     u32 m_page_index{};
     u32 m_num_pages{};
 };
 static_assert(sizeof(KBlockInfo) <= 0x10);
 
-class KPageGroup {
+class KPageGroup final {
 public:
-    class Iterator {
+    class Node final {
     public:
-        using iterator_category = std::forward_iterator_tag;
-        using value_type = const KBlockInfo;
-        using difference_type = std::ptrdiff_t;
-        using pointer = value_type*;
-        using reference = value_type&;
+        constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {}
 
-        constexpr explicit Iterator(pointer n) : m_node(n) {}
-
-        constexpr bool operator==(const Iterator& rhs) const {
-            return m_node == rhs.m_node;
-        }
-        constexpr bool operator!=(const Iterator& rhs) const {
-            return !(*this == rhs);
-        }
+        constexpr u64 GetAddress() const {
+            return addr;
+        }
 
-        constexpr pointer operator->() const {
-            return m_node;
-        }
-        constexpr reference operator*() const {
-            return *m_node;
-        }
+        constexpr std::size_t GetNumPages() const {
+            return num_pages;
+        }
 
-        constexpr Iterator& operator++() {
-            m_node = m_node->GetNext();
-            return *this;
-        }
-
-        constexpr Iterator operator++(int) {
-            const Iterator it{*this};
-            ++(*this);
-            return it;
-        }
+        constexpr std::size_t GetSize() const {
+            return GetNumPages() * PageSize;
+        }
 
     private:
-        pointer m_node{};
+        u64 addr{};
+        std::size_t num_pages{};
     };
 
-    explicit KPageGroup(KernelCore& kernel, KBlockInfoManager* m)
-        : m_kernel{kernel}, m_manager{m} {}
-    ~KPageGroup() {
-        this->Finalize();
-    }
-
-    void CloseAndReset();
-    void Finalize();
-
-    Iterator begin() const {
-        return Iterator{m_first_block};
-    }
-    Iterator end() const {
-        return Iterator{nullptr};
-    }
-    bool empty() const {
-        return m_first_block == nullptr;
-    }
-
-    Result AddBlock(KPhysicalAddress addr, size_t num_pages);
-    void Open() const;
-    void OpenFirst() const;
-    void Close() const;
-
-    size_t GetNumPages() const;
-
-    bool IsEquivalentTo(const KPageGroup& rhs) const;
-
-    bool operator==(const KPageGroup& rhs) const {
-        return this->IsEquivalentTo(rhs);
-    }
-
-    bool operator!=(const KPageGroup& rhs) const {
-        return !(*this == rhs);
-    }
-
-private:
-    KernelCore& m_kernel;
-    KBlockInfo* m_first_block{};
-    KBlockInfo* m_last_block{};
-    KBlockInfoManager* m_manager{};
-};
-
-class KScopedPageGroup {
 public:
-    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
-        if (m_pg) {
-            m_pg->Open();
-        }
-    }
-    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
-    ~KScopedPageGroup() {
-        if (m_pg) {
-            m_pg->Close();
-        }
-    }
+    KPageGroup() = default;
+    KPageGroup(u64 address, u64 num_pages) {
+        ASSERT(AddBlock(address, num_pages).IsSuccess());
+    }
 
-    void CancelClose() {
-        m_pg = nullptr;
-    }
+    constexpr std::list<Node>& Nodes() {
+        return nodes;
+    }
 
+    constexpr const std::list<Node>& Nodes() const {
+        return nodes;
+    }
+
+    std::size_t GetNumPages() const {
+        std::size_t num_pages = 0;
+        for (const Node& node : nodes) {
+            num_pages += node.GetNumPages();
+        }
+        return num_pages;
+    }
+
+    bool IsEqual(KPageGroup& other) const {
+        auto this_node = nodes.begin();
+        auto other_node = other.nodes.begin();
+        while (this_node != nodes.end() && other_node != other.nodes.end()) {
+            if (this_node->GetAddress() != other_node->GetAddress() ||
+                this_node->GetNumPages() != other_node->GetNumPages()) {
+                return false;
+            }
+            this_node = std::next(this_node);
+            other_node = std::next(other_node);
+        }
+
+        return this_node == nodes.end() && other_node == other.nodes.end();
+    }
+
+    Result AddBlock(u64 address, u64 num_pages) {
+        if (!num_pages) {
+            return ResultSuccess;
+        }
+        if (!nodes.empty()) {
+            const auto node = nodes.back();
+            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
+                address = node.GetAddress();
+                num_pages += node.GetNumPages();
+                nodes.pop_back();
+            }
+        }
+        nodes.push_back({address, num_pages});
+        return ResultSuccess;
+    }
+
+    bool Empty() const {
+        return nodes.empty();
+    }
+
+    void Finalize() {}
+
 private:
-    const KPageGroup* m_pg{};
+    std::list<Node> nodes;
 };
 
 } // namespace Kernel
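The reverted header keeps the whole container inline: nodes live in a std::list, and AddBlock() coalesces by replacing the tail node rather than extending it in place. A self-contained sketch of that behavior (simplified types, not the actual kernel classes):

// Sketch of the reverted AddBlock(): merge with the tail node when the new
// range begins exactly where the tail ends, otherwise append a new node.
#include <cstdint>
#include <iostream>
#include <list>

constexpr std::uint64_t PageSize = 0x1000;

struct Node {
    std::uint64_t addr{};
    std::uint64_t num_pages{};
};

void AddBlock(std::list<Node>& nodes, std::uint64_t address, std::uint64_t num_pages) {
    if (num_pages == 0) {
        return;
    }
    if (!nodes.empty()) {
        const Node tail = nodes.back();
        if (tail.addr + tail.num_pages * PageSize == address) {
            // Contiguous: pop the tail and push one merged node in its place.
            address = tail.addr;
            num_pages += tail.num_pages;
            nodes.pop_back();
        }
    }
    nodes.push_back({address, num_pages});
}

int main() {
    std::list<Node> nodes;
    AddBlock(nodes, 0x10000, 4);
    AddBlock(nodes, 0x14000, 2); // contiguous: tail replaced by one merged node
    AddBlock(nodes, 0x20000, 1); // gap: a second node is appended
    std::cout << nodes.size() << '\n'; // 2
}

The practical difference is allocation strategy: std::list allocates each Node on the general heap, while the removed implementation drew fixed-size KBlockInfo nodes from a slab and reference-counted the underlying pages through Open()/Close(), which is why the revert has to reintroduce manual open/close calls elsewhere in this commit.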
src/core/hle/kernel/k_page_table.cpp
@@ -100,7 +100,7 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a
 
 KPageTable::KPageTable(Core::System& system_)
     : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
 
 KPageTable::~KPageTable() = default;
 
@@ -373,7 +373,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
                                                  m_memory_block_slab_manager);
 
     // Allocate and open.
-    KPageGroup pg{m_kernel, m_block_info_manager};
+    KPageGroup pg;
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, num_pages,
         KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
@@ -432,7 +432,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
         const size_t num_pages = size / PageSize;
 
         // Create page groups for the memory being mapped.
-        KPageGroup pg{m_kernel, m_block_info_manager};
+        KPageGroup pg;
         AddRegionToPages(src_address, num_pages, pg);
 
         // Reprotect the source as kernel-read/not mapped.
@@ -593,7 +593,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     const size_t size = num_pages * PageSize;
 
     // We're making a new group, not adding to an existing one.
-    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
+    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
 
     // Begin traversal.
     Common::PageTable::TraversalContext context;
@@ -640,10 +640,11 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_SUCCEED();
 }
 
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
     ASSERT(this->IsLockedByCurrentThread());
 
     const size_t size = num_pages * PageSize;
+    const auto& pg = pg_ll.Nodes();
     const auto& memory_layout = m_system.Kernel().MemoryLayout();
 
     // Empty groups are necessarily invalid.
@@ -941,6 +942,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
 
     ON_RESULT_FAILURE {
         if (cur_mapped_addr != dst_addr) {
+            // HACK: Manually close the pages.
+            HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize);
+
             ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
                            KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -1016,6 +1020,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
 
+        // HACK: Manually open the pages.
+        HACK_OpenPages(start_partial_page, 1);
+
         // Update tracking extents.
         cur_mapped_addr += PageSize;
         cur_block_addr += PageSize;
@@ -1044,6 +1051,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
             R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
                           cur_block_addr));
 
+            // HACK: Manually open the pages.
+            HACK_OpenPages(cur_block_addr, cur_block_size / PageSize);
+
             // Update tracking extents.
             cur_mapped_addr += cur_block_size;
             cur_block_addr = next_entry.phys_addr;
@@ -1063,6 +1073,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
                       cur_block_addr));
 
+        // HACK: Manually open the pages.
+        HACK_OpenPages(cur_block_addr, last_block_size / PageSize);
+
         // Update tracking extents.
         cur_mapped_addr += last_block_size;
         cur_block_addr += last_block_size;
@@ -1094,6 +1107,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
 
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
+
+        // HACK: Manually open the pages.
+        HACK_OpenPages(end_partial_page, 1);
     }
 
     // Update memory blocks to reflect our changes
@@ -1195,6 +1211,9 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
     const size_t aligned_size = aligned_end - aligned_start;
     const size_t aligned_num_pages = aligned_size / PageSize;
 
+    // HACK: Manually close the pages.
+    HACK_ClosePages(aligned_start, aligned_num_pages);
+
     // Unmap the pages.
     R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
 
@@ -1482,6 +1501,17 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
     }
 }
 
+void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
+    m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
+}
+
+void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
+    for (size_t index = 0; index < num_pages; ++index) {
+        const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
+        m_system.Kernel().MemoryManager().Close(paddr, 1);
+    }
+}
+
 Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
     KScopedLightLock phys_lk(m_map_physical_memory_lock);
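Two asymmetries in these helpers are worth spelling out. HACK_OpenPages takes a physical address because it runs right after a successful map, when the physical range is known; HACK_ClosePages takes a virtual address and translates one page at a time through GetPhysicalAddr(), because a virtually contiguous span can be backed by discontiguous physical pages. A toy model of the refcount invariant they maintain (hypothetical RefCounter type, illustrative only, not the KMemoryManager API):

// Hypothetical model: every mapped page must hold one reference in the memory
// manager, and every unmap must drop exactly that reference.
#include <cassert>
#include <cstdint>
#include <map>

struct RefCounter {
    std::map<std::uint64_t, int> refs; // physical page index -> reference count

    void OpenFirst(std::uint64_t page, int n) {
        for (int i = 0; i < n; ++i) {
            refs[page + i] += 1; // HACK_OpenPages after a successful Map
        }
    }
    void Close(std::uint64_t page, int n) {
        for (int i = 0; i < n; ++i) {
            assert(--refs[page + i] >= 0); // HACK_ClosePages before Unmap
        }
    }
};

int main() {
    RefCounter mm;
    mm.OpenFirst(100, 3);
    mm.Close(100, 3);
    assert(mm.refs[100] == 0 && mm.refs[102] == 0); // balanced open/close
}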
@@ -1542,7 +1572,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
             // Allocate pages for the new memory.
-            KPageGroup pg{m_kernel, m_block_info_manager};
+            KPageGroup pg;
             R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
                 &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
 
@@ -1620,7 +1650,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 KScopedPageTableUpdater updater(this);
 
                 // Prepare to iterate over the memory.
-                auto pg_it = pg.begin();
+                auto pg_it = pg.Nodes().begin();
                 PAddr pg_phys_addr = pg_it->GetAddress();
                 size_t pg_pages = pg_it->GetNumPages();
 
@@ -1650,6 +1680,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                                              last_unmap_address + 1 - cur_address) /
                                     PageSize;
 
+                                // HACK: Manually close the pages.
+                                HACK_ClosePages(cur_address, cur_pages);
+
                                 // Unmap.
                                 ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
                                                OperationType::Unmap)
@@ -1670,7 +1703,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                     // Release any remaining unmapped memory.
                     m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
                     m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
-                    for (++pg_it; pg_it != pg.end(); ++pg_it) {
+                    for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
                         m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
                                                                     pg_it->GetNumPages());
                         m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
@@ -1698,7 +1731,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                             // Check if we're at the end of the physical block.
                             if (pg_pages == 0) {
                                 // Ensure there are more pages to map.
-                                ASSERT(pg_it != pg.end());
+                                ASSERT(pg_it != pg.Nodes().end());
 
                                 // Advance our physical block.
                                 ++pg_it;
@@ -1709,7 +1742,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                             // Map whatever we can.
                             const size_t cur_pages = std::min(pg_pages, map_pages);
                             R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                                          OperationType::MapFirst, pg_phys_addr));
+                                          OperationType::Map, pg_phys_addr));
+
+                            // HACK: Manually open the pages.
+                            HACK_OpenPages(pg_phys_addr, cur_pages);
 
                             // Advance.
                             cur_address += cur_pages * PageSize;
@@ -1852,6 +1888,9 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
                                               last_address + 1 - cur_address) /
                                      PageSize;
 
+            // HACK: Manually close the pages.
+            HACK_ClosePages(cur_address, cur_pages);
+
             // Unmap.
             ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -1916,7 +1955,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
     R_TRY(dst_allocator_result);
 
     // Map the memory.
-    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
+    KPageGroup page_linked_list;
     const size_t num_pages{size / PageSize};
     const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
         KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
@@ -1983,14 +2022,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                                      num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);
 
-    KPageGroup src_pages{m_kernel, m_block_info_manager};
-    KPageGroup dst_pages{m_kernel, m_block_info_manager};
+    KPageGroup src_pages;
+    KPageGroup dst_pages;
     const size_t num_pages{size / PageSize};
 
     AddRegionToPages(src_address, num_pages, src_pages);
     AddRegionToPages(dst_address, num_pages, dst_pages);
 
-    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
+    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
 
     {
         auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
@@ -2021,7 +2060,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
 
     VAddr cur_addr{addr};
 
-    for (const auto& node : page_linked_list) {
+    for (const auto& node : page_linked_list.Nodes()) {
         if (const auto result{
                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
             result.IsError()) {
@@ -2121,7 +2160,7 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
 
     VAddr cur_addr{addr};
 
-    for (const auto& node : page_linked_list) {
+    for (const auto& node : page_linked_list.Nodes()) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
@@ -2488,13 +2527,13 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate pages for the heap extension.
-    KPageGroup pg{m_kernel, m_block_info_manager};
+    KPageGroup pg;
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, allocation_size / PageSize,
         KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
 
     // Clear all the newly allocated pages.
-    for (const auto& it : pg) {
+    for (const auto& it : pg.Nodes()) {
         std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                     it.GetSize());
     }
@@ -2571,23 +2610,11 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
     if (is_map_only) {
         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
     } else {
-        // Create a page group to hold the pages we allocate.
-        KPageGroup pg{m_kernel, m_block_info_manager};
-
-        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
-            &pg, needed_num_pages,
-            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
-
-        // Ensure that the page group is closed when we're done working with it.
-        SCOPE_EXIT({ pg.Close(); });
-
-        // Clear all pages.
-        for (const auto& it : pg) {
-            std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
-                        m_heap_fill_value, it.GetSize());
-        }
-
-        R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
+        KPageGroup page_group;
+        R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
+            &page_group, needed_num_pages,
+            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
+        R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
     }
 
     // Update the blocks.
@@ -2768,28 +2795,19 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
     ASSERT(num_pages > 0);
     ASSERT(num_pages == page_group.GetNumPages());
 
-    switch (operation) {
-    case OperationType::MapGroup: {
-        // We want to maintain a new reference to every page in the group.
-        KScopedPageGroup spg(page_group);
+    for (const auto& node : page_group.Nodes()) {
+        const size_t size{node.GetNumPages() * PageSize};
 
-        for (const auto& node : page_group) {
-            const size_t size{node.GetNumPages() * PageSize};
-
-            // Map the pages.
+        switch (operation) {
+        case OperationType::MapGroup:
             m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
-
-            addr += size;
+            break;
+        default:
+            ASSERT(false);
+            break;
         }
 
-        // We succeeded! We want to persist the reference to the pages.
-        spg.CancelClose();
-
-        break;
-    }
-    default:
-        ASSERT(false);
-        break;
+        addr += size;
     }
 
     R_SUCCEED();
@@ -2804,29 +2822,13 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
     ASSERT(ContainsPages(addr, num_pages));
 
     switch (operation) {
-    case OperationType::Unmap: {
-        // Ensure that any pages we track close on exit.
-        KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
-        SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
-
-        this->AddRegionToPages(addr, num_pages, pages_to_close);
+    case OperationType::Unmap:
         m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
         break;
-    }
-    case OperationType::MapFirst:
     case OperationType::Map: {
         ASSERT(map_addr);
         ASSERT(Common::IsAligned(map_addr, PageSize));
         m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
-
-        // Open references to pages, if we should.
-        if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
-            if (operation == OperationType::MapFirst) {
-                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
-            } else {
-                m_kernel.MemoryManager().Open(map_addr, num_pages);
-            }
-        }
         break;
     }
     case OperationType::Separate: {
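For contrast, the removed side of the Operate() hunk above relied on KScopedPageGroup, which this commit also deletes from k_page_group.h: open a reference to every page up front, close them automatically if the operation fails, and call CancelClose() to keep them on success. A minimal self-contained sketch of that scope-guard pattern (FakeGroup is a stand-in, not a kernel type):

// Sketch of the removed KScopedPageGroup RAII pattern: references are opened
// on entry and closed on scope exit unless the success path cancels the close.
#include <iostream>

template <typename Group>
class ScopedPageGroup {
public:
    explicit ScopedPageGroup(const Group& pg) : m_pg{&pg} {
        m_pg->Open();
    }
    ~ScopedPageGroup() {
        if (m_pg != nullptr) {
            m_pg->Close();
        }
    }
    // On success, keep the references we opened.
    void CancelClose() {
        m_pg = nullptr;
    }

private:
    const Group* m_pg;
};

struct FakeGroup {
    void Open() const { std::cout << "open\n"; }
    void Close() const { std::cout << "close\n"; }
};

int main() {
    FakeGroup g;
    {
        ScopedPageGroup<FakeGroup> spg(g); // prints "open"
        spg.CancelClose();                 // success path: no "close" at scope exit
    }
}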
src/core/hle/kernel/k_page_table.h
@@ -107,10 +107,6 @@ public:
         return *m_page_table_impl;
     }
 
-    KBlockInfoManager* GetBlockInfoManager() {
-        return m_block_info_manager;
-    }
-
     bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
 
 protected:
@@ -265,6 +261,10 @@ private:
     void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
                                                  size_t size, KMemoryPermission prot_perm);
 
+    // HACK: These will be removed once we automatically manage page reference counts.
+    void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
+    void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
+
     mutable KLightLock m_general_lock;
     mutable KLightLock m_map_physical_memory_lock;
 
@@ -488,7 +488,6 @@ private:
     std::unique_ptr<Common::PageTable> m_page_table_impl;
 
     Core::System& m_system;
-    KernelCore& m_kernel;
 };
 
 } // namespace Kernel
src/core/hle/kernel/k_shared_memory.cpp
@@ -13,7 +13,10 @@
 namespace Kernel {
 
 KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
-KSharedMemory::~KSharedMemory() = default;
+
+KSharedMemory::~KSharedMemory() {
+    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size);
+}
 
 Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
                                  Svc::MemoryPermission owner_permission_,
@@ -46,8 +49,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     R_UNLESS(physical_address != 0, ResultOutOfMemory);
 
     //! Insert the result into our page group.
-    page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
-    page_group->AddBlock(physical_address, num_pages);
+    page_group.emplace(physical_address, num_pages);
 
     // Commit our reservation.
     memory_reservation.Commit();
@@ -60,7 +62,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;
 
     // Clear all pages in the memory.
-    for (const auto& block : *page_group) {
+    for (const auto& block : page_group->Nodes()) {
         std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
     }
 
@@ -69,8 +71,13 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
 
 void KSharedMemory::Finalize() {
     // Close and finalize the page group.
-    page_group->Close();
-    page_group->Finalize();
+    // page_group->Close();
+    // page_group->Finalize();
+
+    //! HACK: Manually close.
+    for (const auto& block : page_group->Nodes()) {
+        kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages());
+    }
 
     // Release the memory reservation.
     resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);
src/core/hle/kernel/k_types.h
@@ -14,7 +14,4 @@ constexpr std::size_t PageSize{1 << PageBits};
 
 using Page = std::array<u8, PageSize>;
 
-using KPhysicalAddress = PAddr;
-using KProcessAddress = VAddr;
-
 } // namespace Kernel
src/core/hle/kernel/svc.cpp
@@ -1485,7 +1485,7 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p
              ResultInvalidMemoryRegion);
 
     // Create a new page group.
-    KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
+    KPageGroup pg;
     R_TRY(src_pt.MakeAndOpenPageGroup(
         std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
         KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,