Merge pull request #9173 from bunnei/kern-update-15

Kernel: Various updates for FW 15.0.x

commit c14f27ee5f
38 changed files with 2794 additions and 745 deletions
src/core/CMakeLists.txt

@@ -190,11 +190,13 @@ add_library(core STATIC
     hle/kernel/k_code_memory.h
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
+    hle/kernel/k_debug.h
     hle/kernel/k_dynamic_page_manager.h
     hle/kernel/k_dynamic_resource_manager.h
     hle/kernel/k_dynamic_slab_heap.h
     hle/kernel/k_event.cpp
     hle/kernel/k_event.h
+    hle/kernel/k_event_info.h
     hle/kernel/k_handle_table.cpp
     hle/kernel/k_handle_table.h
     hle/kernel/k_interrupt_manager.cpp
@@ -222,6 +224,8 @@ add_library(core STATIC
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
+    hle/kernel/k_page_table_manager.h
+    hle/kernel/k_page_table_slab_heap.h
     hle/kernel/k_port.cpp
     hle/kernel/k_port.h
     hle/kernel/k_priority_queue.h
@@ -254,6 +258,8 @@ add_library(core STATIC
     hle/kernel/k_synchronization_object.cpp
     hle/kernel/k_synchronization_object.h
     hle/kernel/k_system_control.h
+    hle/kernel/k_system_resource.cpp
+    hle/kernel/k_system_resource.h
     hle/kernel/k_thread.cpp
     hle/kernel/k_thread.h
     hle/kernel/k_thread_local_page.cpp
src/core/hle/kernel/board/nintendo/nx/k_system_control.h

@@ -8,6 +8,10 @@
 namespace Kernel::Board::Nintendo::Nx {

 class KSystemControl {
+public:
+    // This can be overridden as needed.
+    static constexpr size_t SecureAppletMemorySize = 4 * 1024 * 1024; // 4_MB
+
 public:
     class Init {
     public:
src/core/hle/kernel/init/init_slab_setup.cpp

@@ -10,7 +10,9 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/init/init_slab_setup.h"
 #include "core/hle/kernel/k_code_memory.h"
+#include "core/hle/kernel/k_debug.h"
 #include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_event_info.h"
 #include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/kernel/k_page_buffer.h"
@@ -22,6 +24,7 @@
 #include "core/hle/kernel/k_shared_memory.h"
 #include "core/hle/kernel/k_shared_memory_info.h"
 #include "core/hle/kernel/k_system_control.h"
+#include "core/hle/kernel/k_system_resource.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/k_thread_local_page.h"
 #include "core/hle/kernel/k_transfer_memory.h"
@@ -44,7 +47,10 @@ namespace Kernel::Init {
     HANDLER(KThreadLocalPage,                                                                      \
             (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8),             \
             ##__VA_ARGS__)                                                                         \
-    HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
+    HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)                           \
+    HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__)                 \
+    HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__)                                           \
+    HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__)

 namespace {

@@ -73,8 +79,20 @@ constexpr size_t SlabCountKResourceLimit = 5;
 constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
 constexpr size_t SlabCountKIoPool = 1;
 constexpr size_t SlabCountKIoRegion = 6;
+constexpr size_t SlabcountKSessionRequestMappings = 40;

-constexpr size_t SlabCountExtraKThread = 160;
+constexpr size_t SlabCountExtraKThread = (1024 + 256 + 256) - SlabCountKThread;
+
+namespace test {
+
+static_assert(KernelPageBufferHeapSize ==
+              2 * PageSize + (SlabCountKProcess + SlabCountKThread +
+                              (SlabCountKProcess + SlabCountKThread) / 8) *
+                                 PageSize);
+static_assert(KernelPageBufferAdditionalSize ==
+              (SlabCountExtraKThread + (SlabCountExtraKThread / 8)) * PageSize);
+
+} // namespace test

 /// Helper function to translate from the slab virtual address to the reserved location in physical
 /// memory.
@@ -109,7 +127,7 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
 }

 size_t CalculateSlabHeapGapSize() {
-    constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB;
+    constexpr size_t KernelSlabHeapGapSize = 2_MiB - 320_KiB;
     static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
     return KernelSlabHeapGapSize;
 }
@@ -134,6 +152,7 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
         .num_KDebug = SlabCountKDebug,
         .num_KIoPool = SlabCountKIoPool,
         .num_KIoRegion = SlabCountKIoRegion,
+        .num_KSessionRequestMappings = SlabcountKSessionRequestMappings,
     };
 }
@@ -164,29 +183,6 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
     return size;
 }

-void InitializeKPageBufferSlabHeap(Core::System& system) {
-    auto& kernel = system.Kernel();
-
-    const auto& counts = kernel.SlabResourceCounts();
-    const size_t num_pages =
-        counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
-    const size_t slab_size = num_pages * PageSize;
-
-    // Reserve memory from the system resource limit.
-    ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));
-
-    // Allocate memory for the slab.
-    constexpr auto AllocateOption = KMemoryManager::EncodeOption(
-        KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
-    const PAddr slab_address =
-        kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
-    ASSERT(slab_address != 0);
-
-    // Initialize the slabheap.
-    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
-                                    slab_size);
-}
-
 void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
     auto& kernel = system.Kernel();
@@ -258,3 +254,29 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
 }

 } // namespace Kernel::Init
+
+namespace Kernel {
+
+void KPageBufferSlabHeap::Initialize(Core::System& system) {
+    auto& kernel = system.Kernel();
+    const auto& counts = kernel.SlabResourceCounts();
+    const size_t num_pages =
+        counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
+    const size_t slab_size = num_pages * PageSize;
+
+    // Reserve memory from the system resource limit.
+    ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));
+
+    // Allocate memory for the slab.
+    constexpr auto AllocateOption = KMemoryManager::EncodeOption(
+        KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
+    const PAddr slab_address =
+        kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
+    ASSERT(slab_address != 0);
+
+    // Initialize the slabheap.
+    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
+                                    slab_size);
+}
+
+} // namespace Kernel
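The new `test` namespace pins the page-buffer heap sizes to the slab counts at compile time. The arithmetic can be checked standalone; the snippet below is a sketch using stand-in counts (the real `SlabCountKProcess`/`SlabCountKThread` values are defined earlier in this file and are not shown in this diff):

    #include <cstddef>

    // Stand-in values for illustration only; the real constants live in
    // init_slab_setup.cpp and may differ.
    constexpr std::size_t PageSize = 0x1000;
    constexpr std::size_t SlabCountKProcess = 80;
    constexpr std::size_t SlabCountKThread = 800;
    constexpr std::size_t SlabCountExtraKThread = (1024 + 256 + 256) - SlabCountKThread; // 736

    // Mirrors the static_asserts added above: one page per process and thread,
    // a 1/8 safety margin, and two extra pages for the base heap.
    constexpr std::size_t KernelPageBufferHeapSize =
        2 * PageSize + (SlabCountKProcess + SlabCountKThread +
                        (SlabCountKProcess + SlabCountKThread) / 8) * PageSize;
    constexpr std::size_t KernelPageBufferAdditionalSize =
        (SlabCountExtraKThread + (SlabCountExtraKThread / 8)) * PageSize;

    static_assert(KernelPageBufferHeapSize == (2 + 880 + 110) * PageSize);  // 992 pages
    static_assert(KernelPageBufferAdditionalSize == (736 + 92) * PageSize); // 828 pages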
src/core/hle/kernel/init/init_slab_setup.h

@@ -33,11 +33,11 @@ struct KSlabResourceCounts {
     size_t num_KDebug;
     size_t num_KIoPool;
     size_t num_KIoRegion;
+    size_t num_KSessionRequestMappings;
 };

 void InitializeSlabResourceCounts(KernelCore& kernel);
 size_t CalculateTotalSlabHeapSize(const KernelCore& kernel);
-void InitializeKPageBufferSlabHeap(Core::System& system);
 void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout);

 } // namespace Kernel::Init
src/core/hle/kernel/k_class_token.cpp

@@ -16,6 +16,7 @@
 #include "core/hle/kernel/k_session.h"
 #include "core/hle/kernel/k_shared_memory.h"
 #include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_system_resource.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/k_transfer_memory.h"

@@ -119,4 +120,6 @@ static_assert(std::is_final_v<KTransferMemory> && std::is_base_of_v<KAutoObject,
 // static_assert(std::is_final_v<KCodeMemory> &&
 //              std::is_base_of_v<KAutoObject, KCodeMemory>);

+static_assert(std::is_base_of_v<KAutoObject, KSystemResource>);
+
 } // namespace Kernel
src/core/hle/kernel/k_class_token.h

@@ -10,6 +10,8 @@ namespace Kernel {

 class KAutoObject;

+class KSystemResource;
+
 class KClassTokenGenerator {
 public:
     using TokenBaseType = u16;
@@ -58,7 +60,7 @@ private:
         if constexpr (std::is_same<T, KAutoObject>::value) {
             static_assert(T::ObjectType == ObjectType::KAutoObject);
             return 0;
-        } else if constexpr (!std::is_final<T>::value) {
+        } else if constexpr (!std::is_final<T>::value && !std::same_as<T, KSystemResource>) {
             static_assert(ObjectType::BaseClassesStart <= T::ObjectType &&
                           T::ObjectType < ObjectType::BaseClassesEnd);
             constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) -
@@ -108,6 +110,8 @@ public:
         KSessionRequest,
         KCodeMemory,

+        KSystemResource,
+
         // NOTE: True order for these has not been determined yet.
         KAlpha,
         KBeta,
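The `!std::same_as<T, KSystemResource>` clause is needed because KSystemResource is a base class (the slab setup above registers a derived KSecureSystemResource), yet it is assigned a token from the final-class list rather than a base-class bit. A minimal sketch of the rule, with mock types standing in for the real ones:

    #include <concepts>
    #include <type_traits>

    // Mock types for illustration; not the yuzu definitions.
    class KSystemResource {};                                      // deliberately non-final
    class KSecureSystemResource final : public KSystemResource {}; // derived class exists

    // The generator's new branch condition: anything final, plus
    // KSystemResource itself, is tokenized as a leaf class.
    template <typename T>
    constexpr bool TakesFinalClassToken =
        std::is_final_v<T> || std::same_as<T, KSystemResource>;

    static_assert(TakesFinalClassToken<KSystemResource>);
    static_assert(TakesFinalClassToken<KSecureSystemResource>);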
							
								
								
									
src/core/hle/kernel/k_debug.h (new file, 20 lines)
@@ -0,0 +1,20 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/slab_helpers.h"
+
+namespace Kernel {
+
+class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObjectWithList> {
+    KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);
+
+public:
+    explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+
+    static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+};
+
+} // namespace Kernel
src/core/hle/kernel/k_dynamic_page_manager.h

@@ -3,6 +3,8 @@

 #pragma once

+#include <vector>
+
 #include "common/alignment.h"
 #include "common/common_types.h"
 #include "core/hle/kernel/k_page_bitmap.h"
@@ -33,28 +35,36 @@ public:
         return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
     }

-    Result Initialize(VAddr addr, size_t sz) {
+    Result Initialize(VAddr memory, size_t size, size_t align) {
         // We need to have positive size.
-        R_UNLESS(sz > 0, ResultOutOfMemory);
-        m_backing_memory.resize(sz);
+        R_UNLESS(size > 0, ResultOutOfMemory);
+        m_backing_memory.resize(size);

-        // Calculate management overhead.
-        const size_t management_size =
-            KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
-        const size_t allocatable_size = sz - management_size;
+        // Set addresses.
+        m_address = memory;
+        m_aligned_address = Common::AlignDown(memory, align);
+
+        // Calculate extents.
+        const size_t managed_size = m_address + size - m_aligned_address;
+        const size_t overhead_size = Common::AlignUp(
+            KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)),
+            sizeof(PageBuffer));
+        R_UNLESS(overhead_size < size, ResultOutOfMemory);

         // Set tracking fields.
-        m_address = addr;
-        m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
-        m_count = allocatable_size / sizeof(PageBuffer);
-        R_UNLESS(m_count > 0, ResultOutOfMemory);
+        m_size = Common::AlignDown(size - overhead_size, sizeof(PageBuffer));
+        m_count = m_size / sizeof(PageBuffer);

         // Clear the management region.
-        u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
-        std::memset(management_ptr, 0, management_size);
+        u64* management_ptr = GetPointer<u64>(m_address + size - overhead_size);
+        std::memset(management_ptr, 0, overhead_size);

         // Initialize the bitmap.
-        m_page_bitmap.Initialize(management_ptr, m_count);
+        const size_t allocatable_region_size =
+            (m_address + size - overhead_size) - m_aligned_address;
+        ASSERT(allocatable_region_size >= sizeof(PageBuffer));
+
+        m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer));

         // Free the pages to the bitmap.
         for (size_t i = 0; i < m_count; i++) {
@@ -62,7 +72,8 @@ public:
             std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);

             // Set the bit for the free page.
-            m_page_bitmap.SetBit(i);
+            m_page_bitmap.SetBit((m_address + (i * sizeof(PageBuffer)) - m_aligned_address) /
+                                 sizeof(PageBuffer));
         }

         R_SUCCEED();
@@ -101,7 +112,28 @@ public:
         m_page_bitmap.ClearBit(offset);
         m_peak = std::max(m_peak, (++m_used));

-        return GetPointer<PageBuffer>(m_address) + offset;
+        return GetPointer<PageBuffer>(m_aligned_address) + offset;
+    }
+
+    PageBuffer* Allocate(size_t count) {
+        // Take the lock.
+        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+        KScopedSpinLock lk(m_lock);
+
+        // Find a random free block.
+        s64 soffset = m_page_bitmap.FindFreeRange(count);
+        if (soffset < 0) [[likely]] {
+            return nullptr;
+        }
+
+        const size_t offset = static_cast<size_t>(soffset);
+
+        // Update our tracking.
+        m_page_bitmap.ClearRange(offset, count);
+        m_used += count;
+        m_peak = std::max(m_peak, m_used);
+
+        return GetPointer<PageBuffer>(m_aligned_address) + offset;
     }

     void Free(PageBuffer* pb) {
@@ -113,7 +145,7 @@ public:
         KScopedSpinLock lk(m_lock);

         // Set the bit for the free page.
-        size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer);
+        size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer);
         m_page_bitmap.SetBit(offset);

         // Decrement our used count.
@@ -127,6 +159,7 @@ private:
     size_t m_peak{};
     size_t m_count{};
     VAddr m_address{};
+    VAddr m_aligned_address{};
     size_t m_size{};

     // TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
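The reworked `Initialize` now tracks two base addresses: `m_address` (where the backing pages actually start) and `m_aligned_address` (the same address rounded down to `align`). Bitmap bits are indexed relative to the aligned base, which is what lets the new multi-page `Allocate(count)` hand out blocks whose bitmap offsets reflect alignment. A standalone check of the index math, with illustrative values:

    #include <cstdint>

    // Illustrative values only; sizeof(PageBuffer) is the page size (0x1000).
    constexpr std::uint64_t PageSize = 0x1000;
    constexpr std::uint64_t memory = 0x123457000;            // Initialize() argument
    constexpr std::uint64_t align = 0x10000;
    constexpr std::uint64_t aligned = memory & ~(align - 1); // Common::AlignDown
    static_assert(aligned == 0x123450000);

    // Page i of the backing store maps to bitmap bit
    // (m_address + i * PageSize - m_aligned_address) / PageSize; here the
    // backing store begins 7 pages past the aligned base, so page 0 is bit 7.
    static_assert((memory + 0 * PageSize - aligned) / PageSize == 7);
    static_assert((memory + 1 * PageSize - aligned) / PageSize == 8);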
src/core/hle/kernel/k_dynamic_resource_manager.h

@@ -6,6 +6,7 @@
 #include "common/common_funcs.h"
 #include "core/hle/kernel/k_dynamic_slab_heap.h"
 #include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_page_group.h"

 namespace Kernel {

@@ -51,8 +52,10 @@ private:
     DynamicSlabType* m_slab_heap{};
 };

+class KBlockInfoManager : public KDynamicResourceManager<KBlockInfo> {};
 class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};

+using KBlockInfoSlabHeap = typename KBlockInfoManager::DynamicSlabType;
 using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;

 } // namespace Kernel
							
								
								
									
src/core/hle/kernel/k_event_info.h (new file, 64 lines)
@@ -0,0 +1,64 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <array>
+
+#include <boost/intrusive/list.hpp>
+
+#include "core/hle/kernel/slab_helpers.h"
+#include "core/hle/kernel/svc_types.h"
+
+namespace Kernel {
+
+class KEventInfo : public KSlabAllocated<KEventInfo>, public boost::intrusive::list_base_hook<> {
+public:
+    struct InfoCreateThread {
+        u32 thread_id{};
+        uintptr_t tls_address{};
+    };
+
+    struct InfoExitProcess {
+        Svc::ProcessExitReason reason{};
+    };
+
+    struct InfoExitThread {
+        Svc::ThreadExitReason reason{};
+    };
+
+    struct InfoException {
+        Svc::DebugException exception_type{};
+        s32 exception_data_count{};
+        uintptr_t exception_address{};
+        std::array<uintptr_t, 4> exception_data{};
+    };
+
+    struct InfoSystemCall {
+        s64 tick{};
+        s32 id{};
+    };
+
+public:
+    KEventInfo() = default;
+    ~KEventInfo() = default;
+
+public:
+    Svc::DebugEvent event{};
+    u32 thread_id{};
+    u32 flags{};
+    bool is_attached{};
+    bool continue_flag{};
+    bool ignore_continue{};
+    bool close_once{};
+    union {
+        InfoCreateThread create_thread;
+        InfoExitProcess exit_process;
+        InfoExitThread exit_thread;
+        InfoException exception;
+        InfoSystemCall system_call;
+    } info{};
+    KThread* debug_thread{};
+};
+
+} // namespace Kernel
src/core/hle/kernel/k_handle_table.cpp

@@ -5,14 +5,11 @@

 namespace Kernel {

-KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {}
-KHandleTable::~KHandleTable() = default;
-
 Result KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
-        KScopedDisableDispatch dd(kernel);
+        KScopedDisableDispatch dd{m_kernel};
         KScopedSpinLock lk(m_lock);

         std::swap(m_table_size, saved_table_size);
@@ -25,28 +22,28 @@ Result KHandleTable::Finalize() {
         }
     }

-    return ResultSuccess;
+    R_SUCCEED();
 }

 bool KHandleTable::Remove(Handle handle) {
     // Don't allow removal of a pseudo-handle.
-    if (Svc::IsPseudoHandle(handle)) {
+    if (Svc::IsPseudoHandle(handle)) [[unlikely]] {
         return false;
     }

     // Handles must not have reserved bits set.
     const auto handle_pack = HandlePack(handle);
-    if (handle_pack.reserved != 0) {
+    if (handle_pack.reserved != 0) [[unlikely]] {
         return false;
     }

     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
-        KScopedDisableDispatch dd(kernel);
+        KScopedDisableDispatch dd{m_kernel};
         KScopedSpinLock lk(m_lock);

-        if (this->IsValidHandle(handle)) {
+        if (this->IsValidHandle(handle)) [[likely]] {
             const auto index = handle_pack.index;

             obj = m_objects[index];
@@ -57,13 +54,13 @@ bool KHandleTable::Remove(Handle handle) {
     }

     // Close the object.
-    kernel.UnregisterInUseObject(obj);
+    m_kernel.UnregisterInUseObject(obj);
     obj->Close();
     return true;
 }

 Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
-    KScopedDisableDispatch dd(kernel);
+    KScopedDisableDispatch dd{m_kernel};
     KScopedSpinLock lk(m_lock);

     // Never exceed our capacity.
@@ -82,22 +79,22 @@ Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
         *out_handle = EncodeHandle(static_cast<u16>(index), linear_id);
     }

-    return ResultSuccess;
+    R_SUCCEED();
 }

 Result KHandleTable::Reserve(Handle* out_handle) {
-    KScopedDisableDispatch dd(kernel);
+    KScopedDisableDispatch dd{m_kernel};
     KScopedSpinLock lk(m_lock);

     // Never exceed our capacity.
     R_UNLESS(m_count < m_table_size, ResultOutOfHandles);

     *out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId());
-    return ResultSuccess;
+    R_SUCCEED();
 }

 void KHandleTable::Unreserve(Handle handle) {
-    KScopedDisableDispatch dd(kernel);
+    KScopedDisableDispatch dd{m_kernel};
     KScopedSpinLock lk(m_lock);

     // Unpack the handle.
@@ -108,7 +105,7 @@ void KHandleTable::Unreserve(Handle handle) {
     ASSERT(reserved == 0);
     ASSERT(linear_id != 0);

-    if (index < m_table_size) {
+    if (index < m_table_size) [[likely]] {
         // NOTE: This code does not check the linear id.
         ASSERT(m_objects[index] == nullptr);
         this->FreeEntry(index);
@@ -116,7 +113,7 @@
 }

 void KHandleTable::Register(Handle handle, KAutoObject* obj) {
-    KScopedDisableDispatch dd(kernel);
+    KScopedDisableDispatch dd{m_kernel};
     KScopedSpinLock lk(m_lock);

     // Unpack the handle.
@@ -127,7 +124,7 @@ void KHandleTable::Register(Handle handle, KAutoObject* obj) {
     ASSERT(reserved == 0);
     ASSERT(linear_id != 0);

-    if (index < m_table_size) {
+    if (index < m_table_size) [[likely]] {
         // Set the entry.
         ASSERT(m_objects[index] == nullptr);

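The .cpp changes are mechanical: the out-of-line constructor/destructor move into the header, the member renames from `kernel` to `m_kernel`, validation branches gain `[[likely]]`/`[[unlikely]]` hints, and `return ResultSuccess` becomes the result macro. The macro is equivalent in effect; a sketch of its shape (the real definition lives in core/hle/result.h and may differ in detail):

    // Sketch only; see core/hle/result.h for the real definition.
    #define R_SUCCEED() return ResultSuccess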
src/core/hle/kernel/k_handle_table.h

@@ -21,33 +21,38 @@
 class KernelCore;

 class KHandleTable {
-public:
     YUZU_NON_COPYABLE(KHandleTable);
     YUZU_NON_MOVEABLE(KHandleTable);

+public:
     static constexpr size_t MaxTableSize = 1024;

-    explicit KHandleTable(KernelCore& kernel_);
-    ~KHandleTable();
+public:
+    explicit KHandleTable(KernelCore& kernel) : m_kernel(kernel) {}

     Result Initialize(s32 size) {
+        // Check that the table size is valid.
         R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);

+        // Lock.
+        KScopedDisableDispatch dd{m_kernel};
+        KScopedSpinLock lk(m_lock);
+
         // Initialize all fields.
         m_max_count = 0;
-        m_table_size = static_cast<u16>((size <= 0) ? MaxTableSize : size);
+        m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size);
         m_next_linear_id = MinLinearId;
         m_count = 0;
         m_free_head_index = -1;

         // Free all entries.
-        for (s16 i = 0; i < static_cast<s16>(m_table_size); ++i) {
+        for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
             m_objects[i] = nullptr;
-            m_entry_infos[i].next_free_index = i - 1;
+            m_entry_infos[i].next_free_index = static_cast<s16>(i - 1);
             m_free_head_index = i;
         }

-        return ResultSuccess;
+        R_SUCCEED();
     }

     size_t GetTableSize() const {
@@ -66,13 +71,13 @@ public:
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
-        KScopedDisableDispatch dd(kernel);
+        KScopedDisableDispatch dd{m_kernel};
         KScopedSpinLock lk(m_lock);

         if constexpr (std::is_same_v<T, KAutoObject>) {
             return this->GetObjectImpl(handle);
         } else {
-            if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) {
+            if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] {
                 return obj->DynamicCast<T*>();
             } else {
                 return nullptr;
@@ -85,13 +90,13 @@ public:
         // Handle pseudo-handles.
         if constexpr (std::derived_from<KProcess, T>) {
             if (handle == Svc::PseudoHandle::CurrentProcess) {
-                auto* const cur_process = kernel.CurrentProcess();
+                auto* const cur_process = m_kernel.CurrentProcess();
                 ASSERT(cur_process != nullptr);
                 return cur_process;
             }
         } else if constexpr (std::derived_from<KThread, T>) {
             if (handle == Svc::PseudoHandle::CurrentThread) {
-                auto* const cur_thread = GetCurrentThreadPointer(kernel);
+                auto* const cur_thread = GetCurrentThreadPointer(m_kernel);
                 ASSERT(cur_thread != nullptr);
                 return cur_thread;
             }
@@ -100,6 +105,37 @@ public:
         return this->template GetObjectWithoutPseudoHandle<T>(handle);
     }

+    KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const {
+        // Lock and look up in table.
+        KScopedDisableDispatch dd{m_kernel};
+        KScopedSpinLock lk(m_lock);
+
+        return this->GetObjectImpl(handle);
+    }
+
+    KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, KThread* cur_thread) const {
+        // Handle pseudo-handles.
+        ASSERT(cur_thread != nullptr);
+        if (handle == Svc::PseudoHandle::CurrentProcess) {
+            auto* const cur_process =
+                static_cast<KAutoObject*>(static_cast<void*>(cur_thread->GetOwnerProcess()));
+            ASSERT(cur_process != nullptr);
+            return cur_process;
+        }
+        if (handle == Svc::PseudoHandle::CurrentThread) {
+            return static_cast<KAutoObject*>(cur_thread);
+        }
+
+        return GetObjectForIpcWithoutPseudoHandle(handle);
+    }
+
+    KScopedAutoObject<KAutoObject> GetObjectByIndex(Handle* out_handle, size_t index) const {
+        KScopedDisableDispatch dd{m_kernel};
+        KScopedSpinLock lk(m_lock);
+
+        return this->GetObjectByIndexImpl(out_handle, index);
+    }
+
     Result Reserve(Handle* out_handle);
     void Unreserve(Handle handle);

@@ -112,7 +148,7 @@ public:
         size_t num_opened;
         {
             // Lock the table.
-            KScopedDisableDispatch dd(kernel);
+            KScopedDisableDispatch dd{m_kernel};
             KScopedSpinLock lk(m_lock);
             for (num_opened = 0; num_opened < num_handles; num_opened++) {
                 // Get the current handle.
@@ -120,13 +156,13 @@

                 // Get the object for the current handle.
                 KAutoObject* cur_object = this->GetObjectImpl(cur_handle);
-                if (cur_object == nullptr) {
+                if (cur_object == nullptr) [[unlikely]] {
                     break;
                 }

                 // Cast the current object to the desired type.
                 T* cur_t = cur_object->DynamicCast<T*>();
-                if (cur_t == nullptr) {
+                if (cur_t == nullptr) [[unlikely]] {
                     break;
                 }

@@ -137,7 +173,7 @@
         }

         // If we converted every object, succeed.
-        if (num_opened == num_handles) {
+        if (num_opened == num_handles) [[likely]] {
             return true;
         }

@@ -191,21 +227,21 @@ private:
         ASSERT(reserved == 0);

         // Validate our indexing information.
-        if (raw_value == 0) {
+        if (raw_value == 0) [[unlikely]] {
             return false;
         }
-        if (linear_id == 0) {
+        if (linear_id == 0) [[unlikely]] {
             return false;
         }
-        if (index >= m_table_size) {
+        if (index >= m_table_size) [[unlikely]] {
             return false;
         }

         // Check that there's an object, and our serial id is correct.
-        if (m_objects[index] == nullptr) {
+        if (m_objects[index] == nullptr) [[unlikely]] {
             return false;
         }
-        if (m_entry_infos[index].GetLinearId() != linear_id) {
+        if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] {
             return false;
         }

@@ -215,11 +251,11 @@ private:
     KAutoObject* GetObjectImpl(Handle handle) const {
         // Handles must not have reserved bits set.
         const auto handle_pack = HandlePack(handle);
-        if (handle_pack.reserved != 0) {
+        if (handle_pack.reserved != 0) [[unlikely]] {
             return nullptr;
         }

-        if (this->IsValidHandle(handle)) {
+        if (this->IsValidHandle(handle)) [[likely]] {
             return m_objects[handle_pack.index];
         } else {
             return nullptr;
@@ -227,9 +263,8 @@ private:
     }

     KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const {
-
         // Index must be in bounds.
-        if (index >= m_table_size) {
+        if (index >= m_table_size) [[unlikely]] {
             return nullptr;
         }

@@ -244,18 +279,15 @@ private:
 private:
     union HandlePack {
-        HandlePack() = default;
-        HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}
+        constexpr HandlePack() = default;
+        constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}

-        u32 raw;
+        u32 raw{};
         BitField<0, 15, u32> index;
         BitField<15, 15, u32> linear_id;
         BitField<30, 2, u32> reserved;
     };

-    static constexpr u16 MinLinearId = 1;
-    static constexpr u16 MaxLinearId = 0x7FFF;
-
     static constexpr Handle EncodeHandle(u16 index, u16 linear_id) {
         HandlePack handle{};
         handle.index.Assign(index);
@@ -264,6 +296,10 @@ private:
         return handle.raw;
     }

+private:
+    static constexpr u16 MinLinearId = 1;
+    static constexpr u16 MaxLinearId = 0x7FFF;
+
     union EntryInfo {
         u16 linear_id;
         s16 next_free_index;
@@ -271,21 +307,21 @@ private:
         constexpr u16 GetLinearId() const {
             return linear_id;
         }
-        constexpr s16 GetNextFreeIndex() const {
+        constexpr s32 GetNextFreeIndex() const {
             return next_free_index;
         }
     };

 private:
+    KernelCore& m_kernel;
     std::array<EntryInfo, MaxTableSize> m_entry_infos{};
     std::array<KAutoObject*, MaxTableSize> m_objects{};
-    s32 m_free_head_index{-1};
+    mutable KSpinLock m_lock;
+    s32 m_free_head_index{};
     u16 m_table_size{};
     u16 m_max_count{};
-    u16 m_next_linear_id{MinLinearId};
+    u16 m_next_linear_id{};
     u16 m_count{};
-    mutable KSpinLock m_lock;
-    KernelCore& kernel;
 };

 } // namespace Kernel
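With HandlePack now constexpr, handle encoding is easy to verify by hand: bits 0..14 hold the table index, bits 15..29 the linear id, and bits 30..31 must stay zero. A standalone restatement of `EncodeHandle` for a worked example:

    #include <cstdint>

    // Standalone restatement of the HandlePack bit layout (illustration only).
    constexpr std::uint32_t Encode(std::uint16_t index, std::uint16_t linear_id) {
        return static_cast<std::uint32_t>(index & 0x7FFF) |            // bits 0..14: index
               (static_cast<std::uint32_t>(linear_id & 0x7FFF) << 15); // bits 15..29: linear id
    }

    static_assert(Encode(0, 1) == 0x8000);  // index 0 with MinLinearId = 1
    static_assert(Encode(5, 1) == 0x8005);
    static_assert(Encode(5, 2) == 0x10005); // bumping the linear id invalidates stale handles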
src/core/hle/kernel/k_memory_block.h

@@ -35,26 +35,32 @@ enum class KMemoryState : u32 {
     FlagCanMapProcess = (1 << 23),
     FlagCanChangeAttribute = (1 << 24),
     FlagCanCodeMemory = (1 << 25),
+    FlagLinearMapped = (1 << 26),

     FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
                 FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
                 FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer |
-                FlagReferenceCounted | FlagCanChangeAttribute,
+                FlagReferenceCounted | FlagCanChangeAttribute | FlagLinearMapped,

     FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
                 FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap |
-                FlagCanAlignedDeviceMap | FlagReferenceCounted,
+                FlagCanAlignedDeviceMap | FlagReferenceCounted | FlagLinearMapped,

-    FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap,
+    FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap |
+                FlagLinearMapped,

     Free = static_cast<u32>(Svc::MemoryState::Free),
-    Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped,
+    Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
+         FlagCanAlignedDeviceMap,
     Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
     Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
     CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
                FlagCanCodeMemory,
-    Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted,
     Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
+    Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted |
+             FlagLinearMapped,
+
+    // Alias was removed after 1.0.0.

     AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
                 FlagCanCodeAlias,
@@ -67,18 +73,18 @@ enum class KMemoryState : u32 {
     Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
             FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

-    ThreadLocal =
-        static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,
+    ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped,

-    Transfered = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc |
+    Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
                  FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
                  FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

-    SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc |
+    SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
                        FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

     SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
-                 FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
+                 FlagReferenceCounted | FlagLinearMapped | FlagCanUseNonSecureIpc |
+                 FlagCanUseNonDeviceIpc,

     Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible),

@@ -91,69 +97,69 @@ enum class KMemoryState : u32 {
     Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,

     GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
-                    FlagReferenceCounted | FlagCanDebug,
-    CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted,
+                    FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
+    CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted |
+              FlagLinearMapped,

     Coverage = static_cast<u32>(Svc::MemoryState::Coverage) | FlagMapped,
+
+    Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted |
+               FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
+               FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);

 static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
-static_assert(static_cast<u32>(KMemoryState::Io) == 0x00002001);
+static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);
 static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
-static_assert(static_cast<u32>(KMemoryState::Code) == 0x00DC7E03);
-static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x03FEBD04);
-static_assert(static_cast<u32>(KMemoryState::Normal) == 0x037EBD05);
-static_assert(static_cast<u32>(KMemoryState::Shared) == 0x00402006);
-static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x00DD7E08);
-static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x03FFBD09);
-static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x005C3C0A);
-static_assert(static_cast<u32>(KMemoryState::Stack) == 0x005C3C0B);
-static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0040200C);
-static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x015C3C0D);
-static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x005C380E);
-static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0040380F);
+static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
+static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04);
+static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
+static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);
+
+static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
+static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09);
+static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
+static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
+static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C);
+static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
+static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
+static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
 static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
-static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x005C3811);
+static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
| static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x004C2812); | static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812); | ||||||
| static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013); | static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013); | ||||||
| static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x00402214); | static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214); | ||||||
| static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015); | static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015); | ||||||
| static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016); | static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016); | ||||||
|  | static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817); | ||||||
| 
 | 
 | ||||||
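Each KMemoryState value in k_memory_block.h packs the raw Svc::MemoryState ID into its low byte and kernel-internal capability flags into the upper bits; every bumped assert constant above differs from its old value exactly by the new FlagLinearMapped bit. A minimal sketch, with the flag values copied from the definitions earlier in this header:

    // Illustrative only: flag values below mirror the definitions earlier in
    // this header, to show how the assert constants decompose.
    #include <cstdint>

    constexpr std::uint32_t FlagMapped = 1u << 13;
    constexpr std::uint32_t FlagReferenceCounted = 1u << 22;
    constexpr std::uint32_t FlagLinearMapped = 1u << 26; // new with this update

    // Svc::MemoryState::Shared is 0x06. OR-ing in the properties reproduces
    // the updated assert value; it differs from the old 0x00402006 exactly by
    // the FlagLinearMapped bit.
    constexpr std::uint32_t SharedState =
        0x06 | FlagMapped | FlagReferenceCounted | FlagLinearMapped;
    static_assert(SharedState == 0x04402006);

Pinning the exact encodings with static_asserts means any accidental change to a flag's bit position fails at compile time rather than silently corrupting state checks.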
 enum class KMemoryPermission : u8 {
     None = 0,
     All = static_cast<u8>(~None),
 
-    Read = 1 << 0,
-    Write = 1 << 1,
-    Execute = 1 << 2,
-
-    ReadAndWrite = Read | Write,
-    ReadAndExecute = Read | Execute,
-
-    UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
-                               Svc::MemoryPermission::Execute),
-
     KernelShift = 3,
 
-    KernelRead = Read << KernelShift,
-    KernelWrite = Write << KernelShift,
-    KernelExecute = Execute << KernelShift,
+    KernelRead = static_cast<u8>(Svc::MemoryPermission::Read) << KernelShift,
+    KernelWrite = static_cast<u8>(Svc::MemoryPermission::Write) << KernelShift,
+    KernelExecute = static_cast<u8>(Svc::MemoryPermission::Execute) << KernelShift,
 
     NotMapped = (1 << (2 * KernelShift)),
 
     KernelReadWrite = KernelRead | KernelWrite,
     KernelReadExecute = KernelRead | KernelExecute,
 
-    UserRead = Read | KernelRead,
-    UserWrite = Write | KernelWrite,
-    UserExecute = Execute,
+    UserRead = static_cast<u8>(Svc::MemoryPermission::Read) | KernelRead,
+    UserWrite = static_cast<u8>(Svc::MemoryPermission::Write) | KernelWrite,
+    UserExecute = static_cast<u8>(Svc::MemoryPermission::Execute),
 
     UserReadWrite = UserRead | UserWrite,
     UserReadExecute = UserRead | UserExecute,
 
-    IpcLockChangeMask = NotMapped | UserReadWrite
+    UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
+                               Svc::MemoryPermission::Execute),
+
+    IpcLockChangeMask = NotMapped | UserReadWrite,
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);
 
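KMemoryPermission now derives its user bits directly from Svc::MemoryPermission instead of local Read/Write/Execute enumerators, but the byte layout is unchanged: user R/W/X in bits 0-2, kernel mirrors in bits 3-5, and a not-mapped marker in bit 6. An illustrative, self-contained check:

    // Illustrative only: the u8 permission byte after this change.
    // Svc::MemoryPermission::Read/Write/Execute are 1/2/4; kernel copies of
    // the same bits sit three positions higher, and bit 6 marks "not mapped".
    #include <cstdint>

    constexpr std::uint8_t Read = 1, Write = 2, Execute = 4; // user bits 0-2
    constexpr int KernelShift = 3;
    constexpr std::uint8_t KernelRead = Read << KernelShift;   // 0x08
    constexpr std::uint8_t KernelWrite = Write << KernelShift; // 0x10
    constexpr std::uint8_t NotMapped = 1 << (2 * KernelShift); // 0x40

    // A user-RW mapping carries both the user bits and the kernel mirrors.
    constexpr std::uint8_t UserReadWrite =
        (Read | KernelRead) | (Write | KernelWrite); // 0x1B
    static_assert((NotMapped | UserReadWrite) == 0x5B); // == IpcLockChangeMask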
@@ -468,6 +474,7 @@ public:
 
     constexpr void UpdateDeviceDisableMergeStateForShareLeft(
         [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+        // New permission/right aren't used.
         if (left) {
             m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                 m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
@@ -478,6 +485,7 @@ public:
 
     constexpr void UpdateDeviceDisableMergeStateForShareRight(
         [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+        // New permission/left aren't used.
         if (right) {
             m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                 m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
@@ -494,6 +502,8 @@ public:
 
     constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
                                  bool right) {
+        // New permission isn't used.
+
         // We must either be shared or have a zero lock count.
         ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
                m_device_use_count == 0);
@@ -509,6 +519,7 @@ public:
 
     constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
         [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+        // New permission/right aren't used.
 
         if (left) {
             if (!m_device_disable_merge_left_count) {
@@ -528,6 +539,8 @@ public:
 
     constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
         [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+        // New permission/left aren't used.
+
         if (right) {
             const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
             ASSERT(old_device_disable_merge_right_count > 0);
@@ -546,6 +559,8 @@ public:
 
     constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
                                    bool right) {
+        // New permission isn't used.
+
         // We must be shared.
         ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
 
@@ -563,6 +578,7 @@ public:
 
     constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
                                         bool right) {
+        // New permission isn't used.
 
         // We must be shared.
         ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
@@ -613,6 +629,8 @@ public:
 
     constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
                                 [[maybe_unused]] bool right) {
+        // New permission isn't used.
+
         // We must be locked.
         ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);
 
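These KMemoryBlock helpers maintain a per-block device-use count plus separate left/right "disable merge" counters, so a block bordered by an active device mapping is never coalesced with its neighbours; the new comments only document the unused parameters. A minimal sketch of that counting discipline (the struct and its fields are hypothetical stand-ins, not yuzu's real types):

    // Hypothetical model of the share/unshare bookkeeping above.
    #include <cassert>
    #include <cstdint>

    struct BlockShareState {
        std::uint16_t device_use_count = 0;
        std::uint16_t left_count = 0;  // shared mapping borders the left edge
        std::uint16_t right_count = 0; // shared mapping borders the right edge

        void Share(bool left, bool right) {
            ++device_use_count; // one more device mapping over this block
            if (left) ++left_count;   // pin the left edge against merging
            if (right) ++right_count; // pin the right edge against merging
        }

        void Unshare(bool left, bool right) {
            assert(device_use_count > 0);
            --device_use_count;
            if (left) { assert(left_count > 0); --left_count; }
            if (right) { assert(right_count > 0); --right_count; }
        }

        bool CanMerge() const { // merging is safe only with no live edge pins
            return left_count == 0 && right_count == 0;
        }
    };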
@@ -153,13 +153,9 @@ void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_
     }
 }
 
-size_t KMemoryLayout::GetResourceRegionSizeForInit() {
-    // Calculate resource region size based on whether we allow extra threads.
-    const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
-    size_t resource_region_size =
-        KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0);
-
-    return resource_region_size;
+size_t KMemoryLayout::GetResourceRegionSizeForInit(bool use_extra_resource) {
+    return KernelResourceSize + KSystemControl::SecureAppletMemorySize +
+           (use_extra_resource ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize : 0);
 }
 
 } // namespace Kernel
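GetResourceRegionSizeForInit no longer consults KSystemControl itself: the caller now decides whether the extra slab/page-buffer space is wanted, and the secure-applet carveout is always included. A plausible call site (the surrounding init code is assumed; only the new signature is from this diff):

    // Sketch of a caller, under the assumption that init code forwards the
    // existing ShouldIncreaseThreadResourceLimit() decision.
    const bool use_extra_resource = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
    const size_t resource_region_size =
        KMemoryLayout::GetResourceRegionSizeForInit(use_extra_resource);
    // When enabled, the region grows by KernelSlabHeapAdditionalSize (0x148000)
    // + KernelPageBufferAdditionalSize (0x33C000) = 0x484000 bytes, on top of
    // the always-present 4 MiB KSystemControl::SecureAppletMemorySize carveout.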
@@ -60,10 +60,12 @@ constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB;
 constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax;
 
 // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860.
-constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000;
+constexpr size_t KernelPageBufferHeapSize = 0x3E0000;
+constexpr size_t KernelSlabHeapAdditionalSize = 0x148000;
+constexpr size_t KernelPageBufferAdditionalSize = 0x33C000;
 
-constexpr std::size_t KernelResourceSize =
-    KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;
+constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize +
+                                           KernelSlabHeapSize + KernelPageBufferHeapSize;
 
 constexpr bool IsKernelAddressKey(VAddr key) {
     return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
@@ -168,6 +170,11 @@ public:
             KMemoryRegionType_VirtualDramKernelTraceBuffer));
     }
 
+    const KMemoryRegion& GetSecureAppletMemoryRegion() {
+        return Dereference(GetVirtualMemoryRegionTree().FindByType(
+            KMemoryRegionType_VirtualDramKernelSecureAppletMemory));
+    }
+
     const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const {
         return Dereference(FindVirtualLinear(address));
     }
@@ -229,7 +236,7 @@ public:
 
     void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
                                            VAddr linear_virtual_start);
-    static size_t GetResourceRegionSizeForInit();
+    static size_t GetResourceRegionSizeForInit(bool use_extra_resource);
 
     auto GetKernelRegionExtents() const {
         return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel);
@@ -279,6 +286,10 @@ public:
         return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
             KMemoryRegionType_DramKernelSlab);
     }
+    auto GetKernelSecureAppletMemoryRegionPhysicalExtents() {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramKernelSecureAppletMemory);
+    }
     auto GetKernelPageTableHeapRegionPhysicalExtents() const {
         return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
             KMemoryRegionType_DramKernelPtHeap);
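With the secure-applet region type plumbed through both the virtual and physical trees, init code can locate the carveout by type. A usage sketch (the accessor names are from this diff; the surrounding kernel accessor and the assertion are assumptions):

    // Sketch: querying the new carveout once the memory layout is built.
    KMemoryLayout& memory_layout = kernel.MemoryLayout(); // assumed accessor
    const KMemoryRegion& secure_applet = memory_layout.GetSecureAppletMemoryRegion();
    ASSERT(secure_applet.GetSize() == KSystemControl::SecureAppletMemorySize); // 4 MiB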
@@ -29,43 +29,44 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
     } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
         return KMemoryManager::Pool::SystemNonSecure;
     } else {
-        ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
-        return {};
+        UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
     }
 }
 
 } // namespace
 
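GetPoolFromMemoryRegionType relies on the `(type | mask) == type` idiom: it holds exactly when every bit of the pool mask is already set in the region type, i.e. the region derives from that pool type. It is equivalent to `(type & mask) == mask`:

    // Illustrative only: the subset test used above.
    #include <cstdint>

    constexpr bool IsDerived(std::uint32_t type, std::uint32_t mask) {
        return (type | mask) == type; // OR adds nothing iff mask ⊆ type
    }
    static_assert(IsDerived(0b1011, 0b0011));  // both mask bits present
    static_assert(!IsDerived(0b1001, 0b0011)); // bit 1 missing
    static_assert(IsDerived(0b1011, 0b0011) == ((0b1011 & 0b0011) == 0b0011));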
-KMemoryManager::KMemoryManager(Core::System& system_)
-    : system{system_}, pool_locks{
-                           KLightLock{system_.Kernel()},
-                           KLightLock{system_.Kernel()},
-                           KLightLock{system_.Kernel()},
-                           KLightLock{system_.Kernel()},
-                       } {}
+KMemoryManager::KMemoryManager(Core::System& system)
+    : m_system{system}, m_memory_layout{system.Kernel().MemoryLayout()},
+      m_pool_locks{
+          KLightLock{system.Kernel()},
+          KLightLock{system.Kernel()},
+          KLightLock{system.Kernel()},
+          KLightLock{system.Kernel()},
+      } {}
 
 void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
 
     // Clear the management region to zero.
     const VAddr management_region_end = management_region + management_region_size;
+    // std::memset(GetVoidPointer(management_region), 0, management_region_size);
 
     // Reset our manager count.
-    num_managers = 0;
+    m_num_managers = 0;
 
     // Traverse the virtual memory layout tree, initializing each manager as appropriate.
-    while (num_managers != MaxManagerCount) {
+    while (m_num_managers != MaxManagerCount) {
         // Locate the region that should initialize the current manager.
         PAddr region_address = 0;
         size_t region_size = 0;
         Pool region_pool = Pool::Count;
-        for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
+        for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
             // We only care about regions that we need to create managers for.
             if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
                 continue;
             }
 
             // We want to initialize the managers in order.
-            if (it.GetAttributes() != num_managers) {
+            if (it.GetAttributes() != m_num_managers) {
                 continue;
             }
 
@@ -97,8 +98,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
         }
 
         // Initialize a new manager for the region.
-        Impl* manager = std::addressof(managers[num_managers++]);
-        ASSERT(num_managers <= managers.size());
+        Impl* manager = std::addressof(m_managers[m_num_managers++]);
+        ASSERT(m_num_managers <= m_managers.size());
 
         const size_t cur_size = manager->Initialize(region_address, region_size, management_region,
                                                     management_region_end, region_pool);
@@ -107,13 +108,13 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
 
         // Insert the manager into the pool list.
         const auto region_pool_index = static_cast<u32>(region_pool);
-        if (pool_managers_tail[region_pool_index] == nullptr) {
-            pool_managers_head[region_pool_index] = manager;
+        if (m_pool_managers_tail[region_pool_index] == nullptr) {
+            m_pool_managers_head[region_pool_index] = manager;
         } else {
-            pool_managers_tail[region_pool_index]->SetNext(manager);
-            manager->SetPrev(pool_managers_tail[region_pool_index]);
+            m_pool_managers_tail[region_pool_index]->SetNext(manager);
+            manager->SetPrev(m_pool_managers_tail[region_pool_index]);
         }
-        pool_managers_tail[region_pool_index] = manager;
+        m_pool_managers_tail[region_pool_index] = manager;
     }
 
     // Free each region to its corresponding heap.
@@ -121,11 +122,10 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
     const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
     const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
     const PAddr ini_last = ini_end - 1;
-    for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
+    for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
         if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
             // Get the manager for the region.
-            auto index = it.GetAttributes();
-            auto& manager = managers[index];
+            auto& manager = m_managers[it.GetAttributes()];
 
             const PAddr cur_start = it.GetAddress();
             const PAddr cur_last = it.GetLastAddress();
@@ -162,11 +162,19 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
     }
 
     // Update the used size for all managers.
-    for (size_t i = 0; i < num_managers; ++i) {
-        managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
+    for (size_t i = 0; i < m_num_managers; ++i) {
+        m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
     }
 }
 
+Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
+    UNREACHABLE();
+}
+
+void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
+    UNREACHABLE();
+}
+
 PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
     // Early return if we're allocating no pages.
     if (num_pages == 0) {
@@ -175,7 +183,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
 
     // Lock the pool that we're allocating from.
     const auto [pool, dir] = DecodeOption(option);
-    KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]);
+    KScopedLightLock lk(m_pool_locks[static_cast<std::size_t>(pool)]);
 
     // Choose a heap based on our page size request.
     const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
@@ -185,7 +193,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
     PAddr allocated_block = 0;
     for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
          chosen_manager = this->GetNextManager(chosen_manager, dir)) {
-        allocated_block = chosen_manager->AllocateBlock(heap_index, true);
+        allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
        if (allocated_block != 0) {
             break;
         }
@@ -196,10 +204,9 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
         return 0;
     }
 
-    // If we allocated more than we need, free some.
-    const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
-    if (allocated_pages > num_pages) {
-        chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
+    // Maintain the optimized memory bitmap, if we should.
+    if (m_has_optimized_process[static_cast<size_t>(pool)]) {
+        UNIMPLEMENTED();
     }
 
     // Open the first reference to the pages.
@@ -209,20 +216,21 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
 }
 
 Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
-                                             Direction dir, bool random) {
+                                             Direction dir, bool unoptimized, bool random) {
     // Choose a heap based on our page size request.
     const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
     R_UNLESS(0 <= heap_index, ResultOutOfMemory);
 
     // Ensure that we don't leave anything un-freed.
-    auto group_guard = SCOPE_GUARD({
+    ON_RESULT_FAILURE {
         for (const auto& it : out->Nodes()) {
-            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress());
-            const size_t num_pages_to_free =
+            auto& manager = this->GetManager(it.GetAddress());
+            const size_t node_num_pages =
                 std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
-            manager.Free(it.GetAddress(), num_pages_to_free);
+            manager.Free(it.GetAddress(), node_num_pages);
         }
-    });
+        out->Finalize();
+    };
 
     // Keep allocating until we've allocated all our pages.
     for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
@@ -236,12 +244,17 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
                     break;
                 }
 
-                // Safely add it to our group.
-                {
-                    auto block_guard =
-                        SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); });
-                    R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
-                    block_guard.Cancel();
+                // Ensure we don't leak the block if we fail.
+                ON_RESULT_FAILURE_2 {
+                    cur_manager->Free(allocated_block, pages_per_alloc);
+                };
+
+                // Add the block to our group.
+                R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
+
+                // Maintain the optimized memory bitmap, if we should.
+                if (unoptimized) {
+                    UNIMPLEMENTED();
                 }
 
                 num_pages -= pages_per_alloc;
@@ -253,8 +266,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
     R_UNLESS(num_pages == 0, ResultOutOfMemory);
 
     // We succeeded!
-    group_guard.Cancel();
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
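AllocatePageGroupImpl walks block orders from the largest one that fits downwards, taking blocks of each order while the remaining request still covers them, and only fails if pages remain after the smallest order is exhausted. A toy sketch of that greedy descent (the block orders here are hypothetical, not KPageHeap's real ones):

    // Toy model of the greedy descent performed above.
    #include <cstddef>
    #include <vector>

    std::vector<std::size_t> PlanBlocks(std::size_t num_pages) {
        constexpr std::size_t orders[] = {512, 64, 8, 1}; // pages per block, descending
        std::vector<std::size_t> plan;
        for (std::size_t order : orders) {
            while (num_pages >= order) { // "allocate while it still fits"
                plan.push_back(order);
                num_pages -= order;
            }
        }
        return plan; // e.g. 600 pages -> {512, 64, 8, 8, 8}
    }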
 Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
@@ -266,10 +278,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
 
     // Lock the pool that we're allocating from.
     const auto [pool, dir] = DecodeOption(option);
-    KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+    KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
 
     // Allocate the page group.
-    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
+    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir,
+                                      m_has_optimized_process[static_cast<size_t>(pool)], true));
 
     // Open the first reference to the pages.
     for (const auto& block : out->Nodes()) {
@@ -277,7 +290,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
             // Get the manager for the current address.
-            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
+            auto& manager = this->GetManager(cur_address);
 
             // Process part or all of the block.
             const size_t cur_pages =
@@ -290,11 +303,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
         }
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option,
-                                                 u64 process_id, u8 fill_pattern) {
+Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option,
+                                          u64 process_id, u8 fill_pattern) {
     ASSERT(out != nullptr);
     ASSERT(out->GetNumPages() == 0);
 
@@ -302,83 +315,89 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag
     const auto [pool, dir] = DecodeOption(option);
 
     // Allocate the memory.
+    bool optimized;
     {
         // Lock the pool that we're allocating from.
-        KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
+
+        // Check if we have an optimized process.
+        const bool has_optimized = m_has_optimized_process[static_cast<size_t>(pool)];
+        const bool is_optimized = m_optimized_process_ids[static_cast<size_t>(pool)] == process_id;
 
         // Allocate the page group.
-        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
+        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized,
+                                          false));
 
-        // Open the first reference to the pages.
-        for (const auto& block : out->Nodes()) {
-            PAddr cur_address = block.GetAddress();
-            size_t remaining_pages = block.GetNumPages();
-            while (remaining_pages > 0) {
-                // Get the manager for the current address.
-                auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
-
-                // Process part or all of the block.
-                const size_t cur_pages =
-                    std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
-                manager.OpenFirst(cur_address, cur_pages);
-
-                // Advance.
-                cur_address += cur_pages * PageSize;
-                remaining_pages -= cur_pages;
+        // Set whether we should optimize.
+        optimized = has_optimized && is_optimized;
+    }
+
+    // Perform optimized memory tracking, if we should.
+    if (optimized) {
+        // Iterate over the allocated blocks.
+        for (const auto& block : out->Nodes()) {
+            // Get the block extents.
+            const PAddr block_address = block.GetAddress();
+            const size_t block_pages = block.GetNumPages();
+
+            // If it has no pages, we don't need to do anything.
+            if (block_pages == 0) {
+                continue;
+            }
+
+            // Fill all the pages that we need to fill.
+            bool any_new = false;
+            {
+                PAddr cur_address = block_address;
+                size_t remaining_pages = block_pages;
+                while (remaining_pages > 0) {
+                    // Get the manager for the current address.
+                    auto& manager = this->GetManager(cur_address);
+
+                    // Process part or all of the block.
+                    const size_t cur_pages =
+                        std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+                    any_new =
+                        manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);
+
+                    // Advance.
+                    cur_address += cur_pages * PageSize;
+                    remaining_pages -= cur_pages;
+                }
+            }
+
+            // If there are new pages, update tracking for the allocation.
+            if (any_new) {
+                // Update tracking for the allocation.
+                PAddr cur_address = block_address;
+                size_t remaining_pages = block_pages;
+                while (remaining_pages > 0) {
+                    // Get the manager for the current address.
+                    auto& manager = this->GetManager(cur_address);
+
+                    // Lock the pool for the manager.
+                    KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+
+                    // Track some or all of the current pages.
+                    const size_t cur_pages =
+                        std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+                    manager.TrackOptimizedAllocation(cur_address, cur_pages);
+
+                    // Advance.
+                    cur_address += cur_pages * PageSize;
+                    remaining_pages -= cur_pages;
+                }
             }
         }
-    }
-
-    // Set all the allocated memory.
-    for (const auto& block : out->Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
-                    block.GetSize());
-    }
-
-    return ResultSuccess;
-}
-
-void KMemoryManager::Open(PAddr address, size_t num_pages) {
-    // Repeatedly open references until we've done so for all pages.
-    while (num_pages) {
-        auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
-        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
-
-        {
-            KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
-            manager.Open(address, cur_pages);
-        }
-
-        num_pages -= cur_pages;
-        address += cur_pages * PageSize;
-    }
-}
-
-void KMemoryManager::Close(PAddr address, size_t num_pages) {
-    // Repeatedly close references until we've done so for all pages.
-    while (num_pages) {
-        auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
-        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
-
-        {
-            KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
-            manager.Close(address, cur_pages);
-        }
-
-        num_pages -= cur_pages;
-        address += cur_pages * PageSize;
-    }
-}
-
-void KMemoryManager::Close(const KPageGroup& pg) {
-    for (const auto& node : pg.Nodes()) {
-        Close(node.GetAddress(), node.GetNumPages());
-    }
-}
-void KMemoryManager::Open(const KPageGroup& pg) {
-    for (const auto& node : pg.Nodes()) {
-        Open(node.GetAddress(), node.GetNumPages());
-    }
+    } else {
+        // Set all the allocated memory.
+        for (const auto& block : out->Nodes()) {
+            std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
+                        block.GetSize());
+        }
+    }
+
+    R_SUCCEED();
 }
 
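For a pool with an "optimized" process, AllocateForProcess now fills only the pages that process has not been handed before and then records them, instead of unconditionally memset-ting the whole group. A compact sketch of that two-pass pattern (the names and bitmap representation are hypothetical; yuzu currently stubs the per-manager pieces with UNREACHABLE/UNIMPLEMENTED):

    // Hypothetical model: pass 1 fills pages not previously handed out and
    // reports whether any were seen; pass 2 records them for next time.
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct OptimizedRange {
        static constexpr std::size_t PageSize = 0x1000;
        std::vector<bool> already_tracked;  // one flag per page, caller-sized
        std::vector<std::uint8_t> storage;  // PageSize bytes per page, caller-sized

        bool FillNewPages(std::size_t first_page, std::size_t count, std::uint8_t fill) {
            bool any_new = false;
            for (std::size_t i = first_page; i < first_page + count; ++i) {
                if (!already_tracked[i]) { // only newly seen pages get filled
                    std::memset(&storage[i * PageSize], fill, PageSize);
                    any_new = true;
                }
            }
            return any_new;
        }

        void Track(std::size_t first_page, std::size_t count) {
            for (std::size_t i = first_page; i < first_page + count; ++i) {
                already_tracked[i] = true; // remember for the next allocation
            }
        }
    };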
 size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
@@ -394,18 +413,31 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
     ASSERT(Common::IsAligned(total_management_size, PageSize));
 
     // Setup region.
-    pool = p;
-    management_region = management;
-    page_reference_counts.resize(
+    m_pool = p;
+    m_management_region = management;
+    m_page_reference_counts.resize(
         Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
-    ASSERT(Common::IsAligned(management_region, PageSize));
+    ASSERT(Common::IsAligned(m_management_region, PageSize));
 
     // Initialize the manager's KPageHeap.
-    heap.Initialize(address, size, management + manager_size, page_heap_size);
+    m_heap.Initialize(address, size, management + manager_size, page_heap_size);
 
     return total_management_size;
 }
 
+void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) {
+    UNREACHABLE();
+}
+
+void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) {
+    UNREACHABLE();
+}
+
+bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages,
+                                                      u8 fill_pattern) {
+    UNREACHABLE();
+}
+
 size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
     const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
     const size_t optimize_map_size =
@@ -21,11 +21,8 @@ namespace Kernel {
 
 class KPageGroup;
 
-class KMemoryManager final {
+class KMemoryManager {
 public:
-    YUZU_NON_COPYABLE(KMemoryManager);
-    YUZU_NON_MOVEABLE(KMemoryManager);
-
     enum class Pool : u32 {
         Application = 0,
         Applet = 1,
@@ -45,16 +42,85 @@ public:
     enum class Direction : u32 {
         FromFront = 0,
         FromBack = 1,
-
         Shift = 0,
         Mask = (0xF << Shift),
     };
 
-    explicit KMemoryManager(Core::System& system_);
+    static constexpr size_t MaxManagerCount = 10;
+
+    explicit KMemoryManager(Core::System& system);
 
     void Initialize(VAddr management_region, size_t management_region_size);
 
-    constexpr size_t GetSize(Pool pool) const {
+    Result InitializeOptimizedMemory(u64 process_id, Pool pool);
+    void FinalizeOptimizedMemory(u64 process_id, Pool pool);
+
+    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
+    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
+    Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
+                              u8 fill_pattern);
+
+    Pool GetPool(PAddr address) const {
+        return this->GetManager(address).GetPool();
+    }
+
+    void Open(PAddr address, size_t num_pages) {
+        // Repeatedly open references until we've done so for all pages.
+        while (num_pages) {
+            auto& manager = this->GetManager(address);
+            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+            {
+                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+                manager.Open(address, cur_pages);
+            }
+
+            num_pages -= cur_pages;
+            address += cur_pages * PageSize;
+        }
+    }
+
+    void OpenFirst(PAddr address, size_t num_pages) {
+        // Repeatedly open references until we've done so for all pages.
+        while (num_pages) {
+            auto& manager = this->GetManager(address);
+            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+            {
+                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+                manager.OpenFirst(address, cur_pages);
+            }
+
+            num_pages -= cur_pages;
+            address += cur_pages * PageSize;
+        }
+    }
+
+    void Close(PAddr address, size_t num_pages) {
+        // Repeatedly close references until we've done so for all pages.
+        while (num_pages) {
+            auto& manager = this->GetManager(address);
+            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+            {
+                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+                manager.Close(address, cur_pages);
+            }
+
+            num_pages -= cur_pages;
+            address += cur_pages * PageSize;
+        }
+    }
+
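Open, OpenFirst, and Close now live inline in the header; each walks the address range manager by manager, taking the owning pool's lock only around the per-manager call, so a range spanning two managers updates each under its own lock. Callers are expected to pair Open with Close (OpenFirst takes the initial reference after allocation); a hypothetical RAII wrapper makes the pairing explicit (yuzu calls these directly, this is illustration only):

    // Hypothetical helper, not part of yuzu: ties a page reference to a scope.
    class ScopedPageReference {
    public:
        ScopedPageReference(KMemoryManager& mm, PAddr address, size_t num_pages)
            : m_mm(mm), m_address(address), m_num_pages(num_pages) {
            m_mm.Open(m_address, m_num_pages); // take a reference on every page
        }
        ~ScopedPageReference() {
            m_mm.Close(m_address, m_num_pages); // drop the references on exit
        }

    private:
        KMemoryManager& m_mm;
        PAddr m_address;
        size_t m_num_pages;
    };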
+    size_t GetSize() {
+        size_t total = 0;
+        for (size_t i = 0; i < m_num_managers; i++) {
+            total += m_managers[i].GetSize();
+        }
+        return total;
+    }
+
+    size_t GetSize(Pool pool) {
         constexpr Direction GetSizeDirection = Direction::FromFront;
         size_t total = 0;
         for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
@@ -64,18 +130,36 @@ public:
         return total;
     }
 
-    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
-    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
-    Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
-                                     u8 fill_pattern);
-
-    static constexpr size_t MaxManagerCount = 10;
-
-    void Close(PAddr address, size_t num_pages);
-    void Close(const KPageGroup& pg);
-
-    void Open(PAddr address, size_t num_pages);
-    void Open(const KPageGroup& pg);
+    size_t GetFreeSize() {
+        size_t total = 0;
+        for (size_t i = 0; i < m_num_managers; i++) {
+            KScopedLightLock lk(m_pool_locks[static_cast<size_t>(m_managers[i].GetPool())]);
+            total += m_managers[i].GetFreeSize();
+        }
+        return total;
+    }
+
+    size_t GetFreeSize(Pool pool) {
+        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
+
+        constexpr Direction GetSizeDirection = Direction::FromFront;
+        size_t total = 0;
+        for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
+             manager = this->GetNextManager(manager, GetSizeDirection)) {
+            total += manager->GetFreeSize();
+        }
+        return total;
+    }
+
+    void DumpFreeList(Pool pool) {
+        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
+
+        constexpr Direction DumpDirection = Direction::FromFront;
+        for (auto* manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr;
+             manager = this->GetNextManager(manager, DumpDirection)) {
+            manager->DumpFreeList();
+        }
+    }
 
 public:
     static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -88,14 +172,13 @@ public:
     }
 
     static constexpr Pool GetPool(u32 option) {
-        return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >>
+        return static_cast<Pool>((option & static_cast<u32>(Pool::Mask)) >>
                                  static_cast<u32>(Pool::Shift));
     }
 
     static constexpr Direction GetDirection(u32 option) {
-        return static_cast<Direction>(
-            (static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >>
-            static_cast<u32>(Direction::Shift));
+        return static_cast<Direction>((option & static_cast<u32>(Direction::Mask)) >>
+                                      static_cast<u32>(Direction::Shift));
    }
 
     static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) {
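An allocation option is a Pool and a Direction packed into a single u32, with the direction in the low nibble. A round-trip sketch (Direction::Shift/Mask are from this diff; Pool::Shift = 4 is an assumption here, since Pool's Shift/Mask lie outside the shown hunks):

    // Illustrative only: how an option plausibly round-trips through
    // GetPool/GetDirection.
    #include <cstdint>

    constexpr std::uint32_t DirectionShift = 0, DirectionMask = 0xF << DirectionShift;
    constexpr std::uint32_t PoolShift = 4, PoolMask = 0xF << PoolShift; // assumed

    constexpr std::uint32_t Encode(std::uint32_t pool, std::uint32_t dir) {
        return (pool << PoolShift) | (dir << DirectionShift);
    }

    constexpr std::uint32_t option = Encode(/*Pool::Applet*/ 1, /*FromBack*/ 1);
    static_assert(((option & PoolMask) >> PoolShift) == 1);           // GetPool
    static_assert(((option & DirectionMask) >> DirectionShift) == 1); // GetDirection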
|  | @ -103,74 +186,88 @@ public: | ||||||
|     } |     } | ||||||
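GetPool and GetDirection now mask and shift the option word directly; option is already a u32, so the removed static_cast was redundant. The pool and direction occupy disjoint bit fields of the allocation option, so the decoders round-trip with any encoder that packs the same fields. A hedged compile-time sketch (the specific enumerators and this packing are assumptions based on the Shift/Mask values declared earlier in the header, not part of this diff):

    // Hypothetical: pack a pool and a direction the way the decoders expect.
    constexpr u32 option =
        (static_cast<u32>(KMemoryManager::Pool::Application)
         << static_cast<u32>(KMemoryManager::Pool::Shift)) |
        (static_cast<u32>(KMemoryManager::Direction::FromFront)
         << static_cast<u32>(KMemoryManager::Direction::Shift));
    static_assert(KMemoryManager::GetPool(option) == KMemoryManager::Pool::Application);
    static_assert(KMemoryManager::GetDirection(option) == KMemoryManager::Direction::FromFront);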
 
 private:
-    class Impl final {
+    class Impl {
     public:
-        YUZU_NON_COPYABLE(Impl);
-        YUZU_NON_MOVEABLE(Impl);
+        static size_t CalculateManagementOverheadSize(size_t region_size);
 
+        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
+            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
+                    Common::BitSize<u64>()) *
+                   sizeof(u64);
+        }
+
+    public:
         Impl() = default;
-        ~Impl() = default;
 
         size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
                           Pool p);
 
-        VAddr AllocateBlock(s32 index, bool random) {
-            return heap.AllocateBlock(index, random);
+        PAddr AllocateBlock(s32 index, bool random) {
+            return m_heap.AllocateBlock(index, random);
         }
-
-        void Free(VAddr addr, size_t num_pages) {
-            heap.Free(addr, num_pages);
+        PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+            return m_heap.AllocateAligned(index, num_pages, align_pages);
+        }
+        void Free(PAddr addr, size_t num_pages) {
+            m_heap.Free(addr, num_pages);
         }
 
         void SetInitialUsedHeapSize(size_t reserved_size) {
-            heap.SetInitialUsedSize(reserved_size);
+            m_heap.SetInitialUsedSize(reserved_size);
         }
 
+        void InitializeOptimizedMemory() {
+            UNIMPLEMENTED();
+        }
+
+        void TrackUnoptimizedAllocation(PAddr block, size_t num_pages);
+        void TrackOptimizedAllocation(PAddr block, size_t num_pages);
+
+        bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern);
+
         constexpr Pool GetPool() const {
-            return pool;
+            return m_pool;
         }
-
         constexpr size_t GetSize() const {
-            return heap.GetSize();
+            return m_heap.GetSize();
+        }
+        constexpr PAddr GetEndAddress() const {
+            return m_heap.GetEndAddress();
         }
 
-        constexpr VAddr GetAddress() const {
-            return heap.GetAddress();
+        size_t GetFreeSize() const {
+            return m_heap.GetFreeSize();
         }
 
-        constexpr VAddr GetEndAddress() const {
-            return heap.GetEndAddress();
+        void DumpFreeList() const {
+            UNIMPLEMENTED();
         }
 
         constexpr size_t GetPageOffset(PAddr address) const {
-            return heap.GetPageOffset(address);
+            return m_heap.GetPageOffset(address);
        }
-
         constexpr size_t GetPageOffsetToEnd(PAddr address) const {
-            return heap.GetPageOffsetToEnd(address);
+            return m_heap.GetPageOffsetToEnd(address);
         }
 
         constexpr void SetNext(Impl* n) {
-            next = n;
+            m_next = n;
         }
-
         constexpr void SetPrev(Impl* n) {
-            prev = n;
+            m_prev = n;
         }
-
         constexpr Impl* GetNext() const {
-            return next;
+            return m_next;
         }
-
         constexpr Impl* GetPrev() const {
-            return prev;
+            return m_prev;
         }
 
         void OpenFirst(PAddr address, size_t num_pages) {
             size_t index = this->GetPageOffset(address);
             const size_t end = index + num_pages;
             while (index < end) {
-                const RefCount ref_count = (++page_reference_counts[index]);
+                const RefCount ref_count = (++m_page_reference_counts[index]);
                 ASSERT(ref_count == 1);
 
                 index++;
@@ -181,7 +278,7 @@ private:
             size_t index = this->GetPageOffset(address);
             const size_t end = index + num_pages;
             while (index < end) {
-                const RefCount ref_count = (++page_reference_counts[index]);
+                const RefCount ref_count = (++m_page_reference_counts[index]);
                 ASSERT(ref_count > 1);
 
                 index++;
@@ -195,8 +292,8 @@ private:
             size_t free_start = 0;
             size_t free_count = 0;
             while (index < end) {
-                ASSERT(page_reference_counts[index] > 0);
-                const RefCount ref_count = (--page_reference_counts[index]);
+                ASSERT(m_page_reference_counts[index] > 0);
+                const RefCount ref_count = (--m_page_reference_counts[index]);
 
                 // Keep track of how many zero refcounts we see in a row, to minimize calls to free.
                 if (ref_count == 0) {
@@ -208,7 +305,7 @@ private:
                     }
                 } else {
                     if (free_count > 0) {
-                        this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+                        this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
                         free_count = 0;
                     }
                 }
@@ -217,44 +314,36 @@ private:
             }
 
             if (free_count > 0) {
-                this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+                this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
             }
         }
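Impl::Close decrements each page's refcount and coalesces consecutive pages that just reached zero, so the heap sees one Free per contiguous run instead of one per page. A standalone sketch of the same batching pattern (hypothetical types and names, not the kernel code itself):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Coalesce consecutive zero-refcount pages into single free_fn calls.
    template <typename FreeFn>
    void CloseRun(std::vector<std::uint16_t>& ref_counts, std::size_t first,
                  std::size_t count, FreeFn&& free_fn) {
        std::size_t free_start = 0; // first page of the current run
        std::size_t free_count = 0; // length of the current run
        for (std::size_t i = first; i < first + count; ++i) {
            if (--ref_counts[i] == 0) {
                if (free_count == 0) {
                    free_start = i; // a new run begins here
                }
                ++free_count;
            } else if (free_count > 0) {
                free_fn(free_start, free_count); // flush the finished run
                free_count = 0;
            }
        }
        if (free_count > 0) {
            free_fn(free_start, free_count); // flush the trailing run
        }
    }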
 
-        static size_t CalculateManagementOverheadSize(size_t region_size);
-
-        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
-            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
-                    Common::BitSize<u64>()) *
-                   sizeof(u64);
-        }
-
     private:
         using RefCount = u16;
 
-        KPageHeap heap;
-        std::vector<RefCount> page_reference_counts;
-        VAddr management_region{};
-        Pool pool{};
-        Impl* next{};
-        Impl* prev{};
+        KPageHeap m_heap;
+        std::vector<RefCount> m_page_reference_counts;
+        VAddr m_management_region{};
+        Pool m_pool{};
+        Impl* m_next{};
+        Impl* m_prev{};
     };
 
 private:
-    Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) {
-        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+    Impl& GetManager(PAddr address) {
+        return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
     }
 
-    const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const {
-        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+    const Impl& GetManager(PAddr address) const {
+        return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
     }
 
-    constexpr Impl* GetFirstManager(Pool pool, Direction dir) const {
-        return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)]
-                                          : pool_managers_head[static_cast<size_t>(pool)];
+    constexpr Impl* GetFirstManager(Pool pool, Direction dir) {
+        return dir == Direction::FromBack ? m_pool_managers_tail[static_cast<size_t>(pool)]
+                                          : m_pool_managers_head[static_cast<size_t>(pool)];
     }
 
-    constexpr Impl* GetNextManager(Impl* cur, Direction dir) const {
+    constexpr Impl* GetNextManager(Impl* cur, Direction dir) {
         if (dir == Direction::FromBack) {
             return cur->GetPrev();
         } else {
@@ -263,15 +352,21 @@ private:
     }
 
     Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir,
-                                 bool random);
+                                 bool unoptimized, bool random);
 
 private:
-    Core::System& system;
-    std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks;
-    std::array<Impl*, MaxManagerCount> pool_managers_head{};
-    std::array<Impl*, MaxManagerCount> pool_managers_tail{};
-    std::array<Impl, MaxManagerCount> managers;
-    size_t num_managers{};
+    template <typename T>
+    using PoolArray = std::array<T, static_cast<size_t>(Pool::Count)>;
+
+    Core::System& m_system;
+    const KMemoryLayout& m_memory_layout;
+    PoolArray<KLightLock> m_pool_locks;
+    std::array<Impl*, MaxManagerCount> m_pool_managers_head{};
+    std::array<Impl*, MaxManagerCount> m_pool_managers_tail{};
+    std::array<Impl, MaxManagerCount> m_managers;
+    size_t m_num_managers{};
+    PoolArray<u64> m_optimized_process_ids{};
+    PoolArray<bool> m_has_optimized_process{};
 };
 
 } // namespace Kernel
@@ -142,32 +142,38 @@ private:
 
 } // namespace impl
 
-constexpr auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue();
-constexpr auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2);
-constexpr auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2);
+constexpr inline auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue();
+
+constexpr inline auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2);
+constexpr inline auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2);
 static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1);
 static_assert(KMemoryRegionType_Dram.GetValue() == 0x2);
 
-constexpr auto KMemoryRegionType_DramKernelBase =
+// constexpr inline auto KMemoryRegionType_CoreLocalRegion =
+// KMemoryRegionType_None.DeriveInitial(2).Finalize();
+// static_assert(KMemoryRegionType_CoreLocalRegion.GetValue() == 0x4);
+
+constexpr inline auto KMemoryRegionType_DramKernelBase =
     KMemoryRegionType_Dram.DeriveSparse(0, 3, 0)
         .SetAttribute(KMemoryRegionAttr_NoUserMap)
         .SetAttribute(KMemoryRegionAttr_CarveoutProtected);
-constexpr auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1);
-constexpr auto KMemoryRegionType_DramHeapBase =
+constexpr inline auto KMemoryRegionType_DramReservedBase =
+    KMemoryRegionType_Dram.DeriveSparse(0, 3, 1);
+constexpr inline auto KMemoryRegionType_DramHeapBase =
     KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped);
 static_assert(KMemoryRegionType_DramKernelBase.GetValue() ==
               (0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16));
 static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped));
 
-constexpr auto KMemoryRegionType_DramKernelCode =
+constexpr inline auto KMemoryRegionType_DramKernelCode =
     KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0);
-constexpr auto KMemoryRegionType_DramKernelSlab =
+constexpr inline auto KMemoryRegionType_DramKernelSlab =
     KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1);
-constexpr auto KMemoryRegionType_DramKernelPtHeap =
+constexpr inline auto KMemoryRegionType_DramKernelPtHeap =
     KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute(
         KMemoryRegionAttr_LinearMapped);
-constexpr auto KMemoryRegionType_DramKernelInitPt =
+constexpr inline auto KMemoryRegionType_DramKernelInitPt =
     KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute(
         KMemoryRegionAttr_LinearMapped);
 static_assert(KMemoryRegionType_DramKernelCode.GetValue() ==
@@ -181,32 +187,40 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
               (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
                KMemoryRegionAttr_LinearMapped));
 
-constexpr auto KMemoryRegionType_DramReservedEarly =
+constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory =
+    KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(
+        KMemoryRegionAttr_LinearMapped);
+static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() ==
+              (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
+               KMemoryRegionAttr_LinearMapped));
+
+constexpr inline auto KMemoryRegionType_DramReservedEarly =
     KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
 static_assert(KMemoryRegionType_DramReservedEarly.GetValue() ==
               (0x16 | KMemoryRegionAttr_NoUserMap));
 
-constexpr auto KMemoryRegionType_KernelTraceBuffer =
+constexpr inline auto KMemoryRegionType_KernelTraceBuffer =
     KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0)
         .SetAttribute(KMemoryRegionAttr_LinearMapped)
         .SetAttribute(KMemoryRegionAttr_UserReadOnly);
-constexpr auto KMemoryRegionType_OnMemoryBootImage =
+constexpr inline auto KMemoryRegionType_OnMemoryBootImage =
     KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1);
-constexpr auto KMemoryRegionType_DTB = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2);
+constexpr inline auto KMemoryRegionType_DTB =
+    KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2);
 static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() ==
               (0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly));
 static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156);
 static_assert(KMemoryRegionType_DTB.GetValue() == 0x256);
 
-constexpr auto KMemoryRegionType_DramPoolPartition =
+constexpr inline auto KMemoryRegionType_DramPoolPartition =
     KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
 static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
               (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 
-constexpr auto KMemoryRegionType_DramPoolManagement =
+constexpr inline auto KMemoryRegionType_DramPoolManagement =
     KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(
         KMemoryRegionAttr_CarveoutProtected);
-constexpr auto KMemoryRegionType_DramUserPool =
+constexpr inline auto KMemoryRegionType_DramUserPool =
     KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
 static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
               (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
@@ -214,11 +228,13 @@ static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
 static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
               (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 
-constexpr auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0);
-constexpr auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1);
-constexpr auto KMemoryRegionType_DramSystemNonSecurePool =
+constexpr inline auto KMemoryRegionType_DramApplicationPool =
+    KMemoryRegionType_DramUserPool.Derive(4, 0);
+constexpr inline auto KMemoryRegionType_DramAppletPool =
+    KMemoryRegionType_DramUserPool.Derive(4, 1);
+constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =
     KMemoryRegionType_DramUserPool.Derive(4, 2);
-constexpr auto KMemoryRegionType_DramSystemPool =
+constexpr inline auto KMemoryRegionType_DramSystemPool =
     KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
 static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
               (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
@@ -230,50 +246,55 @@ static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
               (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
                KMemoryRegionAttr_CarveoutProtected));
 
-constexpr auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
-constexpr auto KMemoryRegionType_VirtualDramKernelPtHeap =
+constexpr inline auto KMemoryRegionType_VirtualDramHeapBase =
+    KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
+constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =
     KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
-constexpr auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
+constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
     KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
 static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
 static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
 static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
 
 // UNUSED: .DeriveSparse(2, 2, 0);
-constexpr auto KMemoryRegionType_VirtualDramUnknownDebug =
+constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug =
     KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
 static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
 
-constexpr auto KMemoryRegionType_VirtualDramKernelInitPt =
+constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
+    KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
+static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
+
+constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt =
     KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
-constexpr auto KMemoryRegionType_VirtualDramPoolManagement =
+constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement =
     KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
-constexpr auto KMemoryRegionType_VirtualDramUserPool =
+constexpr inline auto KMemoryRegionType_VirtualDramUserPool =
     KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
 static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);
 static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
 static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A);
 
-// NOTE: For unknown reason, the pools are derived out-of-order here. It's worth eventually trying
-// to understand why Nintendo made this choice.
+// NOTE: For unknown reason, the pools are derived out-of-order here.
+// It's worth eventually trying to understand why Nintendo made this choice.
 // UNUSED: .Derive(6, 0);
 // UNUSED: .Derive(6, 1);
-constexpr auto KMemoryRegionType_VirtualDramAppletPool =
+constexpr inline auto KMemoryRegionType_VirtualDramAppletPool =
     KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
-constexpr auto KMemoryRegionType_VirtualDramApplicationPool =
+constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool =
    KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
-constexpr auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
+constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
     KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
-constexpr auto KMemoryRegionType_VirtualDramSystemPool =
+constexpr inline auto KMemoryRegionType_VirtualDramSystemPool =
     KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
 static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);
 static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);
 static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
 static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A);
 
-constexpr auto KMemoryRegionType_ArchDeviceBase =
+constexpr inline auto KMemoryRegionType_ArchDeviceBase =
     KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
-constexpr auto KMemoryRegionType_BoardDeviceBase =
+constexpr inline auto KMemoryRegionType_BoardDeviceBase =
     KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly();
 static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5);
 static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5);
@@ -284,7 +305,7 @@ static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5);
 #error "Unimplemented"
 #else
 // Default to no architecture devices.
-constexpr auto NumArchitectureDeviceRegions = 0;
+constexpr inline auto NumArchitectureDeviceRegions = 0;
 #endif
 static_assert(NumArchitectureDeviceRegions >= 0);
 
@@ -292,34 +313,35 @@ static_assert(NumArchitectureDeviceRegions >= 0);
 #include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc"
 #else
 // Default to no board devices.
-constexpr auto NumBoardDeviceRegions = 0;
+constexpr inline auto NumBoardDeviceRegions = 0;
 #endif
 static_assert(NumBoardDeviceRegions >= 0);
 
-constexpr auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0);
-constexpr auto KMemoryRegionType_KernelStack = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1);
-constexpr auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2);
-constexpr auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3);
+constexpr inline auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0);
+constexpr inline auto KMemoryRegionType_KernelStack =
+    KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1);
+constexpr inline auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2);
+constexpr inline auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3);
 static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19);
 static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29);
 static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49);
 static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89);
 
-constexpr auto KMemoryRegionType_KernelMiscDerivedBase =
+constexpr inline auto KMemoryRegionType_KernelMiscDerivedBase =
     KMemoryRegionType_KernelMisc.DeriveTransition();
 static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149);
 
 // UNUSED: .Derive(7, 0);
-constexpr auto KMemoryRegionType_KernelMiscMainStack =
+constexpr inline auto KMemoryRegionType_KernelMiscMainStack =
     KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1);
-constexpr auto KMemoryRegionType_KernelMiscMappedDevice =
+constexpr inline auto KMemoryRegionType_KernelMiscMappedDevice =
     KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2);
-constexpr auto KMemoryRegionType_KernelMiscExceptionStack =
+constexpr inline auto KMemoryRegionType_KernelMiscExceptionStack =
     KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3);
-constexpr auto KMemoryRegionType_KernelMiscUnknownDebug =
+constexpr inline auto KMemoryRegionType_KernelMiscUnknownDebug =
     KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4);
 // UNUSED: .Derive(7, 5);
-constexpr auto KMemoryRegionType_KernelMiscIdleStack =
+constexpr inline auto KMemoryRegionType_KernelMiscIdleStack =
     KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6);
 static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49);
 static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49);
@@ -327,7 +349,8 @@ static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349);
 static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549);
 static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349);
 
-constexpr auto KMemoryRegionType_KernelTemp = KMemoryRegionType_Kernel.Advance(2).Derive(2, 0);
+constexpr inline auto KMemoryRegionType_KernelTemp =
+    KMemoryRegionType_Kernel.Advance(2).Derive(2, 0);
 static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);
 
 constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
@@ -335,6 +358,8 @@ constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
         return KMemoryRegionType_VirtualDramKernelTraceBuffer;
     } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
         return KMemoryRegionType_VirtualDramKernelPtHeap;
+    } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
+        return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
     } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
         return KMemoryRegionType_VirtualDramUnknownDebug;
     } else {
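The new secure applet carveout gets both a physical type (KMemoryRegionType_DramKernelSecureAppletMemory) and a virtual counterpart, and GetTypeForVirtualLinearMapping above routes the former to the latter ahead of the generic should-kernel-map fallback. Assuming the comparison and conversion operators KMemoryRegionTypeValue defines earlier in this header, the intended relationship can be spot-checked at compile time:

    // Sketch: the physical secure applet type maps to its virtual type.
    static_assert(GetTypeForVirtualLinearMapping(
                      KMemoryRegionType_DramKernelSecureAppletMemory.GetValue()) ==
                  KMemoryRegionType_VirtualDramKernelSecureAppletMemory);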
@@ -16,107 +16,126 @@
 namespace Kernel {
 
 class KPageBitmap {
-private:
+public:
     class RandomBitGenerator {
-    private:
-        Common::TinyMT rng{};
-        u32 entropy{};
-        u32 bits_available{};
-
-    private:
-        void RefreshEntropy() {
-            entropy = rng.GenerateRandomU32();
-            bits_available = static_cast<u32>(Common::BitSize<decltype(entropy)>());
-        }
-
-        bool GenerateRandomBit() {
-            if (bits_available == 0) {
-                this->RefreshEntropy();
-            }
-
-            const bool rnd_bit = (entropy & 1) != 0;
-            entropy >>= 1;
-            --bits_available;
-            return rnd_bit;
-        }
-
     public:
         RandomBitGenerator() {
-            rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
+            m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
         }
 
-        std::size_t SelectRandomBit(u64 bitmap) {
+        u64 SelectRandomBit(u64 bitmap) {
             u64 selected = 0;
 
-            u64 cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2;
-            u64 cur_mask = (1ULL << cur_num_bits) - 1;
+            for (size_t cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2; cur_num_bits != 0;
+                 cur_num_bits /= 2) {
+                const u64 high = (bitmap >> cur_num_bits);
+                const u64 low = (bitmap & (~(UINT64_C(0xFFFFFFFFFFFFFFFF) << cur_num_bits)));
 
-            while (cur_num_bits) {
-                const u64 low = (bitmap >> 0) & cur_mask;
-                const u64 high = (bitmap >> cur_num_bits) & cur_mask;
-
-                bool choose_low;
-                if (high == 0) {
-                    // If only low val is set, choose low.
-                    choose_low = true;
-                } else if (low == 0) {
-                    // If only high val is set, choose high.
-                    choose_low = false;
-                } else {
-                    // If both are set, choose random.
-                    choose_low = this->GenerateRandomBit();
-                }
-
-                // If we chose low, proceed with low.
-                if (choose_low) {
-                    bitmap = low;
-                    selected += 0;
-                } else {
+                // Choose high if we have high and (don't have low or select high randomly).
+                if (high && (low == 0 || this->GenerateRandomBit())) {
                     bitmap = high;
                     selected += cur_num_bits;
+                } else {
+                    bitmap = low;
+                    selected += 0;
                 }
-
-                // Proceed.
-                cur_num_bits /= 2;
-                cur_mask >>= cur_num_bits;
             }
 
             return selected;
         }
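The rewritten SelectRandomBit walks the 64-bit word by halves: each round splits the remaining window into a high and a low part, keeps a non-empty part (spending one random bit only when both parts contain set bits), and accumulates the chosen offset, so it finishes in six rounds. A standalone sketch of the same descent (std::mt19937_64 stands in for the kernel's TinyMT-backed generator; bitmap must be nonzero):

    #include <cstddef>
    #include <cstdint>
    #include <random>

    std::uint64_t SelectRandomSetBit(std::uint64_t bitmap, std::mt19937_64& rng) {
        std::uint64_t selected = 0;
        for (std::size_t cur_num_bits = 32; cur_num_bits != 0; cur_num_bits /= 2) {
            const std::uint64_t high = bitmap >> cur_num_bits;
            const std::uint64_t low = bitmap & ((std::uint64_t{1} << cur_num_bits) - 1);
            // Keep the high half only if it is non-empty and either the low
            // half is empty or a random bit says so.
            if (high != 0 && (low == 0 || (rng() & 1) != 0)) {
                bitmap = high;
                selected += cur_num_bits;
            } else {
                bitmap = low;
            }
        }
        return selected; // index of one set bit of the original word
    }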
+
+        u64 GenerateRandom(u64 max) {
+            // Determine the number of bits we need.
+            const u64 bits_needed = 1 + (Common::BitSize<decltype(max)>() - std::countl_zero(max));
+
+            // Generate a random value of the desired bitwidth.
+            const u64 rnd = this->GenerateRandomBits(static_cast<u32>(bits_needed));
+
+            // Adjust the value to be in range.
+            return rnd - ((rnd / max) * max);
+        }
+
+    private:
+        void RefreshEntropy() {
+            m_entropy = m_rng.GenerateRandomU32();
+            m_bits_available = static_cast<u32>(Common::BitSize<decltype(m_entropy)>());
+        }
+
+        bool GenerateRandomBit() {
+            if (m_bits_available == 0) {
+                this->RefreshEntropy();
+            }
+
+            const bool rnd_bit = (m_entropy & 1) != 0;
+            m_entropy >>= 1;
+            --m_bits_available;
+            return rnd_bit;
+        }
+
+        u64 GenerateRandomBits(u32 num_bits) {
+            u64 result = 0;
+
+            // Iteratively add random bits to our result.
+            while (num_bits > 0) {
+                // Ensure we have random bits to take from.
+                if (m_bits_available == 0) {
+                    this->RefreshEntropy();
+                }
+
+                // Determine how many bits to take this round.
+                const auto cur_bits = std::min(num_bits, m_bits_available);
+
+                // Generate mask for our current bits.
+                const u64 mask = (static_cast<u64>(1) << cur_bits) - 1;
+
+                // Add bits to output from our entropy.
+                result <<= cur_bits;
+                result |= (m_entropy & mask);
+
+                // Remove bits from our entropy.
+                m_entropy >>= cur_bits;
+                m_bits_available -= cur_bits;
+
+                // Advance.
+                num_bits -= cur_bits;
+            }
+
+            return result;
+        }
+
+    private:
+        Common::TinyMT m_rng;
+        u32 m_entropy{};
+        u32 m_bits_available{};
     };
 
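GenerateRandom draws just enough random bits to cover max and then reduces into range; the last line is integer modulo written out, since rnd - ((rnd / max) * max) == rnd % max for any nonzero max (as with any modulo reduction, the result is only exactly uniform when the drawn range is a multiple of max). A trivial standalone check of that identity:

    #include <cassert>
    #include <cstdint>

    // The range adjustment used by GenerateRandom is plain modulo.
    void CheckModuloIdentity(std::uint64_t rnd, std::uint64_t max) {
        assert(max != 0);
        assert(rnd - ((rnd / max) * max) == rnd % max);
    }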
 public:
-    static constexpr std::size_t MaxDepth = 4;
-
-private:
-    std::array<u64*, MaxDepth> bit_storages{};
-    RandomBitGenerator rng{};
-    std::size_t num_bits{};
-    std::size_t used_depths{};
+    static constexpr size_t MaxDepth = 4;
 
 public:
     KPageBitmap() = default;
 
-    constexpr std::size_t GetNumBits() const {
-        return num_bits;
+    constexpr size_t GetNumBits() const {
+        return m_num_bits;
     }
     constexpr s32 GetHighestDepthIndex() const {
-        return static_cast<s32>(used_depths) - 1;
+        return static_cast<s32>(m_used_depths) - 1;
     }
 
-    u64* Initialize(u64* storage, std::size_t size) {
+    u64* Initialize(u64* storage, size_t size) {
         // Initially, everything is un-set.
-        num_bits = 0;
+        m_num_bits = 0;
 
         // Calculate the needed bitmap depth.
-        used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
-        ASSERT(used_depths <= MaxDepth);
+        m_used_depths = static_cast<size_t>(GetRequiredDepth(size));
+        ASSERT(m_used_depths <= MaxDepth);
 
         // Set the bitmap pointers.
         for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) {
-            bit_storages[depth] = storage;
+            m_bit_storages[depth] = storage;
             size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>();
             storage += size;
+            m_end_storages[depth] = storage;
         }
 
         return storage;
@@ -128,19 +147,19 @@ public:
 
         if (random) {
             do {
-                const u64 v = bit_storages[depth][offset];
+                const u64 v = m_bit_storages[depth][offset];
                 if (v == 0) {
                     // If depth is bigger than zero, then a previous level indicated a block was
                     // free.
                     ASSERT(depth == 0);
                     return -1;
                 }
-                offset = offset * Common::BitSize<u64>() + rng.SelectRandomBit(v);
+                offset = offset * Common::BitSize<u64>() + m_rng.SelectRandomBit(v);
                 ++depth;
-            } while (depth < static_cast<s32>(used_depths));
+            } while (depth < static_cast<s32>(m_used_depths));
         } else {
             do {
-                const u64 v = bit_storages[depth][offset];
+                const u64 v = m_bit_storages[depth][offset];
                 if (v == 0) {
                     // If depth is bigger than zero, then a previous level indicated a block was
                     // free.
@@ -149,28 +168,69 @@ public:
                 }
                 offset = offset * Common::BitSize<u64>() + std::countr_zero(v);
                 ++depth;
-            } while (depth < static_cast<s32>(used_depths));
+            } while (depth < static_cast<s32>(m_used_depths));
         }
 
         return static_cast<s64>(offset);
     }
 
-    void SetBit(std::size_t offset) {
+    s64 FindFreeRange(size_t count) {
+        // Check that it is possible to find a range.
+        const u64* const storage_start = m_bit_storages[m_used_depths - 1];
+        const u64* const storage_end = m_end_storages[m_used_depths - 1];
+
+        // If we don't have a storage to iterate (or want more blocks than fit in a single storage),
+        // we can't find a free range.
+        if (!(storage_start < storage_end && count <= Common::BitSize<u64>())) {
+            return -1;
+        }
+
+        // Walk the storages to select a random free range.
+        const size_t options_per_storage = std::max<size_t>(Common::BitSize<u64>() / count, 1);
+        const size_t num_entries = std::max<size_t>(storage_end - storage_start, 1);
+
+        const u64 free_mask = (static_cast<u64>(1) << count) - 1;
+
+        size_t num_valid_options = 0;
+        s64 chosen_offset = -1;
+        for (size_t storage_index = 0; storage_index < num_entries; ++storage_index) {
+            u64 storage = storage_start[storage_index];
+            for (size_t option = 0; option < options_per_storage; ++option) {
+                if ((storage & free_mask) == free_mask) {
+                    // We've found a new valid option.
+                    ++num_valid_options;
+
+                    // Select the Kth valid option with probability 1/K. This leads to an overall
+                    // uniform distribution.
+                    if (num_valid_options == 1 || m_rng.GenerateRandom(num_valid_options) == 0) {
+                        // This is our first option, so select it.
+                        chosen_offset = storage_index * Common::BitSize<u64>() + option * count;
+                    }
+                }
+                storage >>= count;
+            }
+        }
+
+        // Return the random offset we chose.
+        return chosen_offset;
+    }
+
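FindFreeRange scans every aligned count-bit window of the deepest bitmap level once and still picks uniformly among all fully-free windows: the Kth valid window replaces the previous choice with probability 1/K (GenerateRandom(num_valid_options) == 0), the classic reservoir-sampling step that leaves each of N candidates selected with probability 1/N. The same selection pattern in isolation (std::mt19937_64 standing in for the kernel RNG):

    #include <cstdint>
    #include <random>
    #include <vector>

    // Pick one element uniformly in a single pass, reservoir-sampling style.
    std::int64_t PickUniformly(const std::vector<std::int64_t>& candidates,
                               std::mt19937_64& rng) {
        std::int64_t chosen = -1;
        std::uint64_t num_seen = 0;
        for (const std::int64_t candidate : candidates) {
            ++num_seen;
            // Probability 1/num_seen of replacing the current choice.
            if (num_seen == 1 || (rng() % num_seen) == 0) {
                chosen = candidate;
            }
        }
        return chosen; // -1 if there were no candidates
    }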
+    void SetBit(size_t offset) {
         this->SetBit(this->GetHighestDepthIndex(), offset);
-        num_bits++;
+        m_num_bits++;
     }
 
-    void ClearBit(std::size_t offset) {
+    void ClearBit(size_t offset) {
         this->ClearBit(this->GetHighestDepthIndex(), offset);
-        num_bits--;
+        m_num_bits--;
     }
 
-    bool ClearRange(std::size_t offset, std::size_t count) {
+    bool ClearRange(size_t offset, size_t count) {
         s32 depth = this->GetHighestDepthIndex();
-        u64* bits = bit_storages[depth];
-        std::size_t bit_ind = offset / Common::BitSize<u64>();
-        if (count < Common::BitSize<u64>()) {
-            const std::size_t shift = offset % Common::BitSize<u64>();
+        u64* bits = m_bit_storages[depth];
+        size_t bit_ind = offset / Common::BitSize<u64>();
+        if (count < Common::BitSize<u64>()) [[likely]] {
+            const size_t shift = offset % Common::BitSize<u64>();
             ASSERT(shift + count <= Common::BitSize<u64>());
             // Check that all the bits are set.
             const u64 mask = ((u64(1) << count) - 1) << shift;
@@ -189,8 +249,8 @@ public:
             ASSERT(offset % Common::BitSize<u64>() == 0);
             ASSERT(count % Common::BitSize<u64>() == 0);
             // Check that all the bits are set.
-            std::size_t remaining = count;
-            std::size_t i = 0;
+            size_t remaining = count;
+            size_t i = 0;
             do {
                 if (bits[bit_ind + i++] != ~u64(0)) {
                     return false;
@@ -209,18 +269,18 @@ public:
             } while (remaining > 0);
         }
 
-        num_bits -= count;
+        m_num_bits -= count;
         return true;
     }
 
 private:
-    void SetBit(s32 depth, std::size_t offset) {
+    void SetBit(s32 depth, size_t offset) {
         while (depth >= 0) {
-            std::size_t ind = offset / Common::BitSize<u64>();
-            std::size_t which = offset % Common::BitSize<u64>();
+            size_t ind = offset / Common::BitSize<u64>();
+            size_t which = offset % Common::BitSize<u64>();
             const u64 mask = u64(1) << which;
 
-            u64* bit = std::addressof(bit_storages[depth][ind]);
+            u64* bit = std::addressof(m_bit_storages[depth][ind]);
             u64 v = *bit;
             ASSERT((v & mask) == 0);
             *bit = v | mask;
@@ -232,13 +292,13 @@ private:
         }
     }
 
-    void ClearBit(s32 depth, std::size_t offset) {
+    void ClearBit(s32 depth, size_t offset) {
         while (depth >= 0) {
-            std::size_t ind = offset / Common::BitSize<u64>();
-            std::size_t which = offset % Common::BitSize<u64>();
+            size_t ind = offset / Common::BitSize<u64>();
+            size_t which = offset % Common::BitSize<u64>();
             const u64 mask = u64(1) << which;
 
-            u64* bit = std::addressof(bit_storages[depth][ind]);
+            u64* bit = std::addressof(m_bit_storages[depth][ind]);
             u64 v = *bit;
             ASSERT((v & mask) != 0);
             v &= ~mask;
@@ -252,7 +312,7 @@ private:
     }
 
 private:
-    static constexpr s32 GetRequiredDepth(std::size_t region_size) {
+    static constexpr s32 GetRequiredDepth(size_t region_size) {
         s32 depth = 0;
         while (true) {
             region_size /= Common::BitSize<u64>();
@@ -264,8 +324,8 @@ private:
     }
 
 public:
-    static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
-        std::size_t overhead_bits = 0;
+    static constexpr size_t CalculateManagementOverheadSize(size_t region_size) {
+        size_t overhead_bits = 0;
         for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) {
             region_size =
                 Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>();
@@ -273,6 +333,13 @@ public:
         }
         return overhead_bits * sizeof(u64);
    }
|  | 
 | ||||||
|  | private: | ||||||
|  |     std::array<u64*, MaxDepth> m_bit_storages{}; | ||||||
|  |     std::array<u64*, MaxDepth> m_end_storages{}; | ||||||
|  |     RandomBitGenerator m_rng; | ||||||
|  |     size_t m_num_bits{}; | ||||||
|  |     size_t m_used_depths{}; | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| } // namespace Kernel
 | } // namespace Kernel
 | ||||||
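The overhead math above is compact enough to check by hand (note that the accumulator, despite being named overhead_bits, counts u64 words). A minimal standalone sketch, assuming Common::BitSize<u64>() == 64 and that the loop bodies elided by this diff terminate on zero and accumulate one word count per level:

    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
        return (value + align - 1) / align * align;
    }

    constexpr int GetRequiredDepth(std::size_t region_size) {
        int depth = 0;
        do {
            region_size /= 64; // one u64 covers 64 bits of the level below
            ++depth;
        } while (region_size != 0);
        return depth;
    }

    constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
        std::size_t overhead_words = 0;
        for (int depth = GetRequiredDepth(region_size) - 1; depth >= 0; --depth) {
            region_size = AlignUp(region_size, 64) / 64; // u64 words at this level
            overhead_words += region_size;
        }
        return overhead_words * sizeof(std::uint64_t);
    }

    // 1 GiB of 4 KiB pages = 262,144 bits: 4 levels of 4096 + 64 + 1 + 1 words.
    static_assert(CalculateManagementOverheadSize(262144) == 33296);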
|  |  | ||||||
|  | @ -11,6 +11,16 @@ | ||||||
| 
 | 
 | ||||||
| namespace Kernel { | namespace Kernel { | ||||||
| 
 | 
 | ||||||
|  | class KernelCore; | ||||||
|  | 
 | ||||||
|  | class KPageBufferSlabHeap : protected impl::KSlabHeapImpl { | ||||||
|  | public: | ||||||
|  |     static constexpr size_t BufferSize = PageSize; | ||||||
|  | 
 | ||||||
|  | public: | ||||||
|  |     void Initialize(Core::System& system); | ||||||
|  | }; | ||||||
|  | 
 | ||||||
| class KPageBuffer final : public KSlabAllocated<KPageBuffer> { | class KPageBuffer final : public KSlabAllocated<KPageBuffer> { | ||||||
| public: | public: | ||||||
|     explicit KPageBuffer(KernelCore&) {} |     explicit KPageBuffer(KernelCore&) {} | ||||||
|  | @ -21,8 +31,6 @@ public: | ||||||
| private: | private: | ||||||
|     [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{}; |     [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{}; | ||||||
| }; | }; | ||||||
| 
 | static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize); | ||||||
| static_assert(sizeof(KPageBuffer) == PageSize); |  | ||||||
| static_assert(alignof(KPageBuffer) == PageSize); |  | ||||||
| 
 | 
 | ||||||
| } // namespace Kernel
 | } // namespace Kernel
 | ||||||
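The new static_assert ties sizeof(KPageBuffer) to KPageBufferSlabHeap::BufferSize instead of PageSize directly, but the invariant is unchanged: a page-granular slab object must exactly tile pages. A sketch of that invariant in isolation; the concept name and example type are illustrative, not part of the patch:

    #include <array>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t PageSize = 0x1000; // 4 KiB, as on Horizon

    template <typename T>
    concept PageSizedSlabObject = (sizeof(T) == PageSize) && (alignof(T) == PageSize);

    struct alignas(PageSize) ExamplePageBuffer {
        std::array<std::uint8_t, PageSize> buffer{};
    };
    static_assert(PageSizedSlabObject<ExamplePageBuffer>);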
|  |  | ||||||
|  | @ -5,6 +5,7 @@ | ||||||
| 
 | 
 | ||||||
| #include <list> | #include <list> | ||||||
| 
 | 
 | ||||||
|  | #include "common/alignment.h" | ||||||
| #include "common/assert.h" | #include "common/assert.h" | ||||||
| #include "common/common_types.h" | #include "common/common_types.h" | ||||||
| #include "core/hle/kernel/memory_types.h" | #include "core/hle/kernel/memory_types.h" | ||||||
|  | @ -12,6 +13,89 @@ | ||||||
| 
 | 
 | ||||||
| namespace Kernel { | namespace Kernel { | ||||||
| 
 | 
 | ||||||
|  | class KPageGroup; | ||||||
|  | 
 | ||||||
|  | class KBlockInfo { | ||||||
|  | private: | ||||||
|  |     friend class KPageGroup; | ||||||
|  | 
 | ||||||
|  | public: | ||||||
|  |     constexpr KBlockInfo() = default; | ||||||
|  | 
 | ||||||
|  |     constexpr void Initialize(PAddr addr, size_t np) { | ||||||
|  |         ASSERT(Common::IsAligned(addr, PageSize)); | ||||||
|  |         ASSERT(static_cast<u32>(np) == np); | ||||||
|  | 
 | ||||||
|  |         m_page_index = static_cast<u32>(addr / PageSize); | ||||||
|  |         m_num_pages = static_cast<u32>(np); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     constexpr PAddr GetAddress() const { | ||||||
|  |         return m_page_index * PageSize; | ||||||
|  |     } | ||||||
|  |     constexpr size_t GetNumPages() const { | ||||||
|  |         return m_num_pages; | ||||||
|  |     } | ||||||
|  |     constexpr size_t GetSize() const { | ||||||
|  |         return this->GetNumPages() * PageSize; | ||||||
|  |     } | ||||||
|  |     constexpr PAddr GetEndAddress() const { | ||||||
|  |         return (m_page_index + m_num_pages) * PageSize; | ||||||
|  |     } | ||||||
|  |     constexpr PAddr GetLastAddress() const { | ||||||
|  |         return this->GetEndAddress() - 1; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     constexpr KBlockInfo* GetNext() const { | ||||||
|  |         return m_next; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     constexpr bool IsEquivalentTo(const KBlockInfo& rhs) const { | ||||||
|  |         return m_page_index == rhs.m_page_index && m_num_pages == rhs.m_num_pages; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     constexpr bool operator==(const KBlockInfo& rhs) const { | ||||||
|  |         return this->IsEquivalentTo(rhs); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     constexpr bool operator!=(const KBlockInfo& rhs) const { | ||||||
|  |         return !(*this == rhs); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     constexpr bool IsStrictlyBefore(PAddr addr) const { | ||||||
|  |         const PAddr end = this->GetEndAddress(); | ||||||
|  | 
 | ||||||
|  |         if (m_page_index != 0 && end == 0) { | ||||||
|  |             return false; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         return end < addr; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     constexpr bool operator<(PAddr addr) const { | ||||||
|  |         return this->IsStrictlyBefore(addr); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     constexpr bool TryConcatenate(PAddr addr, size_t np) { | ||||||
|  |         if (addr != 0 && addr == this->GetEndAddress()) { | ||||||
|  |             m_num_pages += static_cast<u32>(np); | ||||||
|  |             return true; | ||||||
|  |         } | ||||||
|  |         return false; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  | private: | ||||||
|  |     constexpr void SetNext(KBlockInfo* next) { | ||||||
|  |         m_next = next; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  | private: | ||||||
|  |     KBlockInfo* m_next{}; | ||||||
|  |     u32 m_page_index{}; | ||||||
|  |     u32 m_num_pages{}; | ||||||
|  | }; | ||||||
|  | static_assert(sizeof(KBlockInfo) <= 0x10); | ||||||
|  | 
 | ||||||
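With a pointer, a 32-bit page index, and a 32-bit page count, KBlockInfo fits in sixteen bytes, which is what the static_assert pins down. A hypothetical caller showing how adjacent ranges coalesce through TryConcatenate (addresses are arbitrary page-aligned examples):

    void BlockInfoCoalescingExample() {
        KBlockInfo info{};
        info.Initialize(0x80000000, 2);              // pages [0x80000000, 0x80002000)
        ASSERT(info.TryConcatenate(0x80002000, 3));  // adjacent: grows to 5 pages
        ASSERT(!info.TryConcatenate(0x80006000, 1)); // gap after 0x80005000: rejected
        ASSERT(info.GetSize() == 5 * PageSize);
    }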
| class KPageGroup final { | class KPageGroup final { | ||||||
| public: | public: | ||||||
|     class Node final { |     class Node final { | ||||||
|  | @ -92,6 +176,8 @@ public: | ||||||
|         return nodes.empty(); |         return nodes.empty(); | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  |     void Finalize() {} | ||||||
|  | 
 | ||||||
| private: | private: | ||||||
|     std::list<Node> nodes; |     std::list<Node> nodes; | ||||||
| }; | }; | ||||||
|  |  | ||||||
|  | @ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const { | ||||||
|     return num_free; |     return num_free; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| PAddr KPageHeap::AllocateBlock(s32 index, bool random) { | PAddr KPageHeap::AllocateByLinearSearch(s32 index) { | ||||||
|     const size_t needed_size = m_blocks[index].GetSize(); |     const size_t needed_size = m_blocks[index].GetSize(); | ||||||
| 
 | 
 | ||||||
|     for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { |     for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { | ||||||
|         if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) { |         if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) { | ||||||
|             if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { |             if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { | ||||||
|                 this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); |                 this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); | ||||||
|             } |             } | ||||||
|  | @ -59,6 +59,88 @@ PAddr KPageHeap::AllocateBlock(s32 index, bool random) { | ||||||
|     return 0; |     return 0; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) { | ||||||
|  |     // Get the size and required alignment.
 | ||||||
|  |     const size_t needed_size = num_pages * PageSize; | ||||||
|  |     const size_t align_size = align_pages * PageSize; | ||||||
|  | 
 | ||||||
|  |     // Determine meta-alignment of our desired alignment size.
 | ||||||
|  |     const size_t align_shift = std::countr_zero(align_size); | ||||||
|  | 
 | ||||||
|  |     // Decide on a block to allocate from.
 | ||||||
|  |     constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4; | ||||||
|  |     { | ||||||
|  |         // By default, we'll want to look at all blocks larger than our current one.
 | ||||||
|  |         s32 max_blocks = static_cast<s32>(m_num_blocks); | ||||||
|  | 
 | ||||||
|  |         // Determine the maximum block we should try to allocate from.
 | ||||||
|  |         size_t possible_alignments = 0; | ||||||
|  |         for (s32 i = index; i < max_blocks; ++i) { | ||||||
|  |             // Add the possible alignments from blocks at the current size.
 | ||||||
|  |             possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * | ||||||
|  |                                    m_blocks[i].GetNumFreeBlocks(); | ||||||
|  | 
 | ||||||
|  |             // If there are enough possible alignments, we don't need to look at larger blocks.
 | ||||||
|  |             if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) { | ||||||
|  |                 max_blocks = i + 1; | ||||||
|  |                 break; | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         // If we have any possible alignments which require a larger block, we need to pick one.
 | ||||||
|  |         if (possible_alignments > 0 && index + 1 < max_blocks) { | ||||||
|  |             // Select a random alignment from the possibilities.
 | ||||||
|  |             const size_t rnd = m_rng.GenerateRandom(possible_alignments); | ||||||
|  | 
 | ||||||
|  |             // Determine which block corresponds to the random alignment we chose.
 | ||||||
|  |             possible_alignments = 0; | ||||||
|  |             for (s32 i = index; i < max_blocks; ++i) { | ||||||
|  |                 // Add the possible alignments from blocks at the current size.
 | ||||||
|  |                 possible_alignments += | ||||||
|  |                     (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * | ||||||
|  |                     m_blocks[i].GetNumFreeBlocks(); | ||||||
|  | 
 | ||||||
|  |                 // If the current block gets us to our random choice, use the current block.
 | ||||||
|  |                 if (rnd < possible_alignments) { | ||||||
|  |                     index = i; | ||||||
|  |                     break; | ||||||
|  |                 } | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     // Pop a block from the index we selected.
 | ||||||
|  |     if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) { | ||||||
|  |         // Determine how much size we have left over.
 | ||||||
|  |         if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size; | ||||||
|  |             leftover_size > 0) { | ||||||
|  |             // Determine how many valid alignments we can have.
 | ||||||
|  |             const size_t possible_alignments = 1 + (leftover_size >> align_shift); | ||||||
|  | 
 | ||||||
|  |             // Select a random valid alignment.
 | ||||||
|  |             const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift; | ||||||
|  | 
 | ||||||
|  |             // Free memory before the random offset.
 | ||||||
|  |             if (random_offset != 0) { | ||||||
|  |                 this->Free(addr, random_offset / PageSize); | ||||||
|  |             } | ||||||
|  | 
 | ||||||
|  |             // Advance our block by the random offset.
 | ||||||
|  |             addr += random_offset; | ||||||
|  | 
 | ||||||
|  |             // Free memory after our allocated block.
 | ||||||
|  |             if (random_offset != leftover_size) { | ||||||
|  |                 this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize); | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         // Return the block we allocated.
 | ||||||
|  |         return addr; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
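The weighting above counts legal placements rather than free blocks: a free block of size B can host an allocation of size N at alignment A in 1 + (B - N) / A positions, so larger blocks contribute proportionally more candidates to the random draw. A worked instance, assuming 4 KiB pages:

    #include <cstddef>

    // 16 KiB needed at 16 KiB alignment (align_shift = 14) from a 256 KiB block:
    constexpr std::size_t needed_size = 16 * 1024;
    constexpr std::size_t block_size = 256 * 1024;
    constexpr std::size_t per_block = 1 + ((block_size - needed_size) >> 14);
    static_assert(per_block == 16); // with 3 free blocks, this class adds 48 candidates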
| void KPageHeap::FreeBlock(PAddr block, s32 index) { | void KPageHeap::FreeBlock(PAddr block, s32 index) { | ||||||
|     do { |     do { | ||||||
|         block = m_blocks[index++].PushBlock(block); |         block = m_blocks[index++].PushBlock(block); | ||||||
|  |  | ||||||
|  | @ -14,13 +14,9 @@ | ||||||
| 
 | 
 | ||||||
| namespace Kernel { | namespace Kernel { | ||||||
| 
 | 
 | ||||||
| class KPageHeap final { | class KPageHeap { | ||||||
| public: | public: | ||||||
|     YUZU_NON_COPYABLE(KPageHeap); |  | ||||||
|     YUZU_NON_MOVEABLE(KPageHeap); |  | ||||||
| 
 |  | ||||||
|     KPageHeap() = default; |     KPageHeap() = default; | ||||||
|     ~KPageHeap() = default; |  | ||||||
| 
 | 
 | ||||||
|     constexpr PAddr GetAddress() const { |     constexpr PAddr GetAddress() const { | ||||||
|         return m_heap_address; |         return m_heap_address; | ||||||
|  | @ -57,7 +53,20 @@ public: | ||||||
|         m_initial_used_size = m_heap_size - free_size - reserved_size; |         m_initial_used_size = m_heap_size - free_size - reserved_size; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     PAddr AllocateBlock(s32 index, bool random); |     PAddr AllocateBlock(s32 index, bool random) { | ||||||
|  |         if (random) { | ||||||
|  |             const size_t block_pages = m_blocks[index].GetNumPages(); | ||||||
|  |             return this->AllocateByRandom(index, block_pages, block_pages); | ||||||
|  |         } else { | ||||||
|  |             return this->AllocateByLinearSearch(index); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { | ||||||
|  |         // TODO: linear search support?
 | ||||||
|  |         return this->AllocateByRandom(index, num_pages, align_pages); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
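A hypothetical call site for the new aligned path, assuming heap is an already-initialized KPageHeap; GetAlignedBlockIndex (defined just below) picks the smallest block class that fits max(num_pages, align_pages):

    // Sketch: allocate 6 pages at 4-page alignment, then return them.
    const s32 index = KPageHeap::GetAlignedBlockIndex(6, 4);
    if (const PAddr addr = heap.AllocateAligned(index, 6, 4); addr != 0) {
        // ... use [addr, addr + 6 * PageSize) ...
        heap.Free(addr, 6);
    }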
|     void Free(PAddr addr, size_t num_pages); |     void Free(PAddr addr, size_t num_pages); | ||||||
| 
 | 
 | ||||||
|     static size_t CalculateManagementOverheadSize(size_t region_size) { |     static size_t CalculateManagementOverheadSize(size_t region_size) { | ||||||
|  | @ -68,7 +77,7 @@ public: | ||||||
|     static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) { |     static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) { | ||||||
|         const size_t target_pages = std::max(num_pages, align_pages); |         const size_t target_pages = std::max(num_pages, align_pages); | ||||||
|         for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { |         for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { | ||||||
|             if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { |             if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { | ||||||
|                 return static_cast<s32>(i); |                 return static_cast<s32>(i); | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
|  | @ -77,7 +86,7 @@ public: | ||||||
| 
 | 
 | ||||||
|     static constexpr s32 GetBlockIndex(size_t num_pages) { |     static constexpr s32 GetBlockIndex(size_t num_pages) { | ||||||
|         for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { |         for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { | ||||||
|             if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { |             if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { | ||||||
|                 return i; |                 return i; | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
|  | @ -85,7 +94,7 @@ public: | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     static constexpr size_t GetBlockSize(size_t index) { |     static constexpr size_t GetBlockSize(size_t index) { | ||||||
|         return size_t(1) << MemoryBlockPageShifts[index]; |         return static_cast<size_t>(1) << MemoryBlockPageShifts[index]; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     static constexpr size_t GetBlockNumPages(size_t index) { |     static constexpr size_t GetBlockNumPages(size_t index) { | ||||||
|  | @ -93,13 +102,9 @@ public: | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
| private: | private: | ||||||
|     class Block final { |     class Block { | ||||||
|     public: |     public: | ||||||
|         YUZU_NON_COPYABLE(Block); |  | ||||||
|         YUZU_NON_MOVEABLE(Block); |  | ||||||
| 
 |  | ||||||
|         Block() = default; |         Block() = default; | ||||||
|         ~Block() = default; |  | ||||||
| 
 | 
 | ||||||
|         constexpr size_t GetShift() const { |         constexpr size_t GetShift() const { | ||||||
|             return m_block_shift; |             return m_block_shift; | ||||||
|  | @ -201,6 +206,9 @@ private: | ||||||
|     }; |     }; | ||||||
| 
 | 
 | ||||||
| private: | private: | ||||||
|  |     PAddr AllocateByLinearSearch(s32 index); | ||||||
|  |     PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages); | ||||||
|  | 
 | ||||||
|     static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, |     static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, | ||||||
|                                                   size_t num_block_shifts); |                                                   size_t num_block_shifts); | ||||||
| 
 | 
 | ||||||
|  | @ -209,7 +217,8 @@ private: | ||||||
|     size_t m_heap_size{}; |     size_t m_heap_size{}; | ||||||
|     size_t m_initial_used_size{}; |     size_t m_initial_used_size{}; | ||||||
|     size_t m_num_blocks{}; |     size_t m_num_blocks{}; | ||||||
|     std::array<Block, NumMemoryBlockPageShifts> m_blocks{}; |     std::array<Block, NumMemoryBlockPageShifts> m_blocks; | ||||||
|  |     KPageBitmap::RandomBitGenerator m_rng; | ||||||
|     std::vector<u64> m_management_data; |     std::vector<u64> m_management_data; | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
(File diff suppressed because it is too large.)
							|  | @ -16,6 +16,7 @@ | ||||||
| #include "core/hle/kernel/k_memory_layout.h" | #include "core/hle/kernel/k_memory_layout.h" | ||||||
| #include "core/hle/kernel/k_memory_manager.h" | #include "core/hle/kernel/k_memory_manager.h" | ||||||
| #include "core/hle/result.h" | #include "core/hle/result.h" | ||||||
|  | #include "core/memory.h" | ||||||
| 
 | 
 | ||||||
| namespace Core { | namespace Core { | ||||||
| class System; | class System; | ||||||
|  | @ -23,7 +24,10 @@ class System; | ||||||
| 
 | 
 | ||||||
| namespace Kernel { | namespace Kernel { | ||||||
| 
 | 
 | ||||||
|  | class KBlockInfoManager; | ||||||
| class KMemoryBlockManager; | class KMemoryBlockManager; | ||||||
|  | class KResourceLimit; | ||||||
|  | class KSystemResource; | ||||||
| 
 | 
 | ||||||
| class KPageTable final { | class KPageTable final { | ||||||
| public: | public: | ||||||
|  | @ -36,9 +40,9 @@ public: | ||||||
|     ~KPageTable(); |     ~KPageTable(); | ||||||
| 
 | 
 | ||||||
|     Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, |     Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, | ||||||
|                                 VAddr code_addr, size_t code_size, |                                 bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, | ||||||
|                                 KMemoryBlockSlabManager* mem_block_slab_manager, |                                 VAddr code_addr, size_t code_size, KSystemResource* system_resource, | ||||||
|                                 KMemoryManager::Pool pool); |                                 KResourceLimit* resource_limit); | ||||||
| 
 | 
 | ||||||
|     void Finalize(); |     void Finalize(); | ||||||
| 
 | 
 | ||||||
|  | @ -74,12 +78,20 @@ public: | ||||||
|                                           KMemoryState state, KMemoryPermission perm, |                                           KMemoryState state, KMemoryPermission perm, | ||||||
|                                           PAddr map_addr = 0); |                                           PAddr map_addr = 0); | ||||||
| 
 | 
 | ||||||
|     Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, |     Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size, | ||||||
|                                         bool is_aligned); |                                         KMemoryPermission perm, bool is_aligned, bool check_heap); | ||||||
|     Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size); |     Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap); | ||||||
| 
 | 
 | ||||||
|     Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); |     Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); | ||||||
| 
 | 
 | ||||||
|  |     Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size); | ||||||
|  |     Result UnlockForIpcUserBuffer(VAddr address, size_t size); | ||||||
|  | 
 | ||||||
|  |     Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table, | ||||||
|  |                        KMemoryPermission test_perm, KMemoryState dst_state, bool send); | ||||||
|  |     Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state); | ||||||
|  |     Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state); | ||||||
|  | 
 | ||||||
|     Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); |     Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); | ||||||
|     Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); |     Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); | ||||||
|     Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, |     Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, | ||||||
|  | @ -97,13 +109,54 @@ public: | ||||||
| 
 | 
 | ||||||
|     bool CanContain(VAddr addr, size_t size, KMemoryState state) const; |     bool CanContain(VAddr addr, size_t size, KMemoryState state) const; | ||||||
| 
 | 
 | ||||||
|  | protected: | ||||||
|  |     struct PageLinkedList { | ||||||
|  |     private: | ||||||
|  |         struct Node { | ||||||
|  |             Node* m_next; | ||||||
|  |             std::array<u8, PageSize - sizeof(Node*)> m_buffer; | ||||||
|  |         }; | ||||||
|  | 
 | ||||||
|  |     public: | ||||||
|  |         constexpr PageLinkedList() = default; | ||||||
|  | 
 | ||||||
|  |         void Push(Node* n) { | ||||||
|  |             ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize)); | ||||||
|  |             n->m_next = m_root; | ||||||
|  |             m_root = n; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         void Push(Core::Memory::Memory& memory, VAddr addr) { | ||||||
|  |             this->Push(memory.GetPointer<Node>(addr)); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         Node* Peek() const { | ||||||
|  |             return m_root; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         Node* Pop() { | ||||||
|  |             Node* const r = m_root; | ||||||
|  | 
 | ||||||
|  |             m_root = r->m_next; | ||||||
|  |             r->m_next = nullptr; | ||||||
|  | 
 | ||||||
|  |             return r; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |     private: | ||||||
|  |         Node* m_root{}; | ||||||
|  |     }; | ||||||
|  |     static_assert(std::is_trivially_destructible<PageLinkedList>::value); | ||||||
|  | 
 | ||||||
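PageLinkedList threads its free list through the very pages it tracks: the first sizeof(Node*) bytes of each free page hold the link, so the bookkeeping needs no side allocations. The same technique in miniature, with illustrative names:

    #include <cstddef>

    struct FreePage {
        FreePage* next; // overlays the first bytes of a page while it is free
    };

    inline FreePage* free_list_head = nullptr;

    inline void PushFreePage(void* page) {
        auto* const node = static_cast<FreePage*>(page);
        node->next = free_list_head;
        free_list_head = node;
    }

    inline void* PopFreePage() {
        FreePage* const node = free_list_head;
        if (node != nullptr) {
            free_list_head = node->next;
            node->next = nullptr;
        }
        return node;
    }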
| private: | private: | ||||||
|     enum class OperationType : u32 { |     enum class OperationType : u32 { | ||||||
|         Map, |         Map = 0, | ||||||
|         MapGroup, |         MapFirst = 1, | ||||||
|         Unmap, |         MapGroup = 2, | ||||||
|         ChangePermissions, |         Unmap = 3, | ||||||
|         ChangePermissionsAndRefresh, |         ChangePermissions = 4, | ||||||
|  |         ChangePermissionsAndRefresh = 5, | ||||||
|  |         Separate = 6, | ||||||
|     }; |     }; | ||||||
| 
 | 
 | ||||||
|     static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = |     static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = | ||||||
|  | @ -123,6 +176,7 @@ private: | ||||||
|                    OperationType operation); |                    OperationType operation); | ||||||
|     Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, |     Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, | ||||||
|                    PAddr map_addr = 0); |                    PAddr map_addr = 0); | ||||||
|  |     void FinalizeUpdate(PageLinkedList* page_list); | ||||||
|     VAddr GetRegionAddress(KMemoryState state) const; |     VAddr GetRegionAddress(KMemoryState state) const; | ||||||
|     size_t GetRegionSize(KMemoryState state) const; |     size_t GetRegionSize(KMemoryState state) const; | ||||||
| 
 | 
 | ||||||
|  | @ -199,6 +253,18 @@ private: | ||||||
|         return *out != 0; |         return *out != 0; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  |     Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address, | ||||||
|  |                              size_t size, KMemoryPermission test_perm, KMemoryState dst_state); | ||||||
|  |     Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr, | ||||||
|  |                              KMemoryPermission test_perm, KMemoryState dst_state, | ||||||
|  |                              KPageTable& src_page_table, bool send); | ||||||
|  |     void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address, | ||||||
|  |                                                  size_t size, KMemoryPermission prot_perm); | ||||||
|  | 
 | ||||||
|  |     // HACK: These will be removed once we automatically manage page reference counts.
 | ||||||
|  |     void HACK_OpenPages(PAddr phys_addr, size_t num_pages); | ||||||
|  |     void HACK_ClosePages(VAddr virt_addr, size_t num_pages); | ||||||
|  | 
 | ||||||
|     mutable KLightLock m_general_lock; |     mutable KLightLock m_general_lock; | ||||||
|     mutable KLightLock m_map_physical_memory_lock; |     mutable KLightLock m_map_physical_memory_lock; | ||||||
| 
 | 
 | ||||||
|  | @ -316,6 +382,31 @@ public: | ||||||
|                addr + size - 1 <= m_address_space_end - 1; |                addr + size - 1 <= m_address_space_end - 1; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  | public: | ||||||
|  |     static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) { | ||||||
|  |         return layout.GetLinearVirtualAddress(addr); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) { | ||||||
|  |         return layout.GetLinearPhysicalAddress(addr); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) { | ||||||
|  |         return GetLinearMappedVirtualAddress(layout, addr); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) { | ||||||
|  |         return GetLinearMappedPhysicalAddress(layout, addr); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) { | ||||||
|  |         return GetLinearMappedVirtualAddress(layout, addr); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) { | ||||||
|  |         return GetLinearMappedPhysicalAddress(layout, addr); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
| private: | private: | ||||||
|     constexpr bool IsKernel() const { |     constexpr bool IsKernel() const { | ||||||
|         return m_is_kernel; |         return m_is_kernel; | ||||||
|  | @ -330,6 +421,24 @@ private: | ||||||
|                (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); |                (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  | private: | ||||||
|  |     class KScopedPageTableUpdater { | ||||||
|  |     private: | ||||||
|  |         KPageTable* m_pt{}; | ||||||
|  |         PageLinkedList m_ll; | ||||||
|  | 
 | ||||||
|  |     public: | ||||||
|  |         explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {} | ||||||
|  |         explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {} | ||||||
|  |         ~KScopedPageTableUpdater() { | ||||||
|  |             m_pt->FinalizeUpdate(this->GetPageList()); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         PageLinkedList* GetPageList() { | ||||||
|  |             return &m_ll; | ||||||
|  |         } | ||||||
|  |     }; | ||||||
|  | 
 | ||||||
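The assumed usage pattern for the RAII updater: an operation stacks one, pushes any table pages it releases onto the updater's list, and the destructor hands the batch to FinalizeUpdate even on early-return paths. A sketch; ExampleOperation is a hypothetical member, not part of the patch:

    Result KPageTable::ExampleOperation(VAddr addr, size_t size) {
        KScopedPageTableUpdater updater(this);

        // ... map/unmap work goes here; pages freed along the way are pushed
        // with updater.GetPageList()->Push(...) as they are released ...

        return ResultSuccess; // FinalizeUpdate runs here and on every early return
    }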
| private: | private: | ||||||
|     VAddr m_address_space_start{}; |     VAddr m_address_space_start{}; | ||||||
|     VAddr m_address_space_end{}; |     VAddr m_address_space_end{}; | ||||||
|  | @ -347,20 +456,27 @@ private: | ||||||
|     VAddr m_alias_code_region_start{}; |     VAddr m_alias_code_region_start{}; | ||||||
|     VAddr m_alias_code_region_end{}; |     VAddr m_alias_code_region_end{}; | ||||||
| 
 | 
 | ||||||
|     size_t m_mapped_physical_memory_size{}; |  | ||||||
|     size_t m_max_heap_size{}; |     size_t m_max_heap_size{}; | ||||||
|     size_t m_max_physical_memory_size{}; |     size_t m_mapped_physical_memory_size{}; | ||||||
|  |     size_t m_mapped_unsafe_physical_memory{}; | ||||||
|  |     size_t m_mapped_insecure_memory{}; | ||||||
|  |     size_t m_mapped_ipc_server_memory{}; | ||||||
|     size_t m_address_space_width{}; |     size_t m_address_space_width{}; | ||||||
| 
 | 
 | ||||||
|     KMemoryBlockManager m_memory_block_manager; |     KMemoryBlockManager m_memory_block_manager; | ||||||
|  |     u32 m_allocate_option{}; | ||||||
| 
 | 
 | ||||||
|     bool m_is_kernel{}; |     bool m_is_kernel{}; | ||||||
|     bool m_enable_aslr{}; |     bool m_enable_aslr{}; | ||||||
|     bool m_enable_device_address_space_merge{}; |     bool m_enable_device_address_space_merge{}; | ||||||
| 
 | 
 | ||||||
|     KMemoryBlockSlabManager* m_memory_block_slab_manager{}; |     KMemoryBlockSlabManager* m_memory_block_slab_manager{}; | ||||||
|  |     KBlockInfoManager* m_block_info_manager{}; | ||||||
|  |     KResourceLimit* m_resource_limit{}; | ||||||
| 
 | 
 | ||||||
|     u32 m_heap_fill_value{}; |     u32 m_heap_fill_value{}; | ||||||
|  |     u32 m_ipc_fill_value{}; | ||||||
|  |     u32 m_stack_fill_value{}; | ||||||
|     const KMemoryRegion* m_cached_physical_heap_region{}; |     const KMemoryRegion* m_cached_physical_heap_region{}; | ||||||
| 
 | 
 | ||||||
|     KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; |     KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; | ||||||
|  |  | ||||||
src/core/hle/kernel/k_page_table_manager.h (new file, 55 lines)
							|  | @ -0,0 +1,55 @@ | ||||||
|  | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 | ||||||
|  | // SPDX-License-Identifier: GPL-2.0-or-later
 | ||||||
|  | 
 | ||||||
|  | #pragma once | ||||||
|  | 
 | ||||||
|  | #include <atomic> | ||||||
|  | 
 | ||||||
|  | #include "common/common_types.h" | ||||||
|  | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||||||
|  | #include "core/hle/kernel/k_page_table_slab_heap.h" | ||||||
|  | 
 | ||||||
|  | namespace Kernel { | ||||||
|  | 
 | ||||||
|  | class KPageTableManager : public KDynamicResourceManager<impl::PageTablePage, true> { | ||||||
|  | public: | ||||||
|  |     using RefCount = KPageTableSlabHeap::RefCount; | ||||||
|  |     static constexpr size_t PageTableSize = KPageTableSlabHeap::PageTableSize; | ||||||
|  | 
 | ||||||
|  | public: | ||||||
|  |     KPageTableManager() = default; | ||||||
|  | 
 | ||||||
|  |     void Initialize(KDynamicPageManager* page_allocator, KPageTableSlabHeap* pt_heap) { | ||||||
|  |         m_pt_heap = pt_heap; | ||||||
|  | 
 | ||||||
|  |         static_assert(std::derived_from<KPageTableSlabHeap, DynamicSlabType>); | ||||||
|  |         BaseHeap::Initialize(page_allocator, pt_heap); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     VAddr Allocate() { | ||||||
|  |         return VAddr(BaseHeap::Allocate()); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     RefCount GetRefCount(VAddr addr) const { | ||||||
|  |         return m_pt_heap->GetRefCount(addr); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     void Open(VAddr addr, int count) { | ||||||
|  |         return m_pt_heap->Open(addr, count); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     bool Close(VAddr addr, int count) { | ||||||
|  |         return m_pt_heap->Close(addr, count); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     bool IsInPageTableHeap(VAddr addr) const { | ||||||
|  |         return m_pt_heap->IsInRange(addr); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  | private: | ||||||
|  |     using BaseHeap = KDynamicResourceManager<impl::PageTablePage, true>; | ||||||
|  | 
 | ||||||
|  |     KPageTableSlabHeap* m_pt_heap{}; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | } // namespace Kernel
 | ||||||
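The manager layers reference counting over the slab heap: Allocate hands out a page, Open/Close adjust its count, and Close's return value signals when the page may be reclaimed. A sketch of the assumed lifecycle, with a KernelCore& kernel in scope and using the KSystemResource accessor introduced later in this commit:

    KPageTableManager& manager = kernel.GetSystemSystemResource().GetPageTableManager();
    const VAddr table = manager.Allocate(); // fresh page-table page
    manager.Open(table, 1);                 // first referencing entry
    manager.Open(table, 1);                 // second referencing entry
    manager.Close(table, 1);                // still referenced
    const bool now_unreferenced = manager.Close(table, 1);
    ASSERT(now_unreferenced);               // refcount hit zero; page may be reclaimed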
src/core/hle/kernel/k_page_table_slab_heap.h (new file, 93 lines)
							|  | @ -0,0 +1,93 @@ | ||||||
|  | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 | ||||||
|  | // SPDX-License-Identifier: GPL-2.0-or-later
 | ||||||
|  | 
 | ||||||
|  | #pragma once | ||||||
|  | 
 | ||||||
|  | #include <array> | ||||||
|  | #include <vector> | ||||||
|  | 
 | ||||||
|  | #include "common/common_types.h" | ||||||
|  | #include "core/hle/kernel/k_dynamic_slab_heap.h" | ||||||
|  | #include "core/hle/kernel/slab_helpers.h" | ||||||
|  | 
 | ||||||
|  | namespace Kernel { | ||||||
|  | 
 | ||||||
|  | namespace impl { | ||||||
|  | 
 | ||||||
|  | class PageTablePage { | ||||||
|  | public: | ||||||
|  |     // Do not initialize anything.
 | ||||||
|  |     PageTablePage() = default; | ||||||
|  | 
 | ||||||
|  | private: | ||||||
|  |     std::array<u8, PageSize> m_buffer{}; | ||||||
|  | }; | ||||||
|  | static_assert(sizeof(PageTablePage) == PageSize); | ||||||
|  | 
 | ||||||
|  | } // namespace impl
 | ||||||
|  | 
 | ||||||
|  | class KPageTableSlabHeap : public KDynamicSlabHeap<impl::PageTablePage, true> { | ||||||
|  | public: | ||||||
|  |     using RefCount = u16; | ||||||
|  |     static constexpr size_t PageTableSize = sizeof(impl::PageTablePage); | ||||||
|  |     static_assert(PageTableSize == PageSize); | ||||||
|  | 
 | ||||||
|  | public: | ||||||
|  |     KPageTableSlabHeap() = default; | ||||||
|  | 
 | ||||||
|  |     static constexpr size_t CalculateReferenceCountSize(size_t size) { | ||||||
|  |         return (size / PageSize) * sizeof(RefCount); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     void Initialize(KDynamicPageManager* page_allocator, size_t object_count, RefCount* rc) { | ||||||
|  |         BaseHeap::Initialize(page_allocator, object_count); | ||||||
|  |         this->Initialize(rc); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     RefCount GetRefCount(VAddr addr) { | ||||||
|  |         ASSERT(this->IsInRange(addr)); | ||||||
|  |         return *this->GetRefCountPointer(addr); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     void Open(VAddr addr, int count) { | ||||||
|  |         ASSERT(this->IsInRange(addr)); | ||||||
|  | 
 | ||||||
|  |         *this->GetRefCountPointer(addr) += static_cast<RefCount>(count); | ||||||
|  | 
 | ||||||
|  |         ASSERT(this->GetRefCount(addr) > 0); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     bool Close(VAddr addr, int count) { | ||||||
|  |         ASSERT(this->IsInRange(addr)); | ||||||
|  |         ASSERT(this->GetRefCount(addr) >= count); | ||||||
|  | 
 | ||||||
|  |         *this->GetRefCountPointer(addr) -= static_cast<RefCount>(count); | ||||||
|  |         return this->GetRefCount(addr) == 0; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     bool IsInPageTableHeap(VAddr addr) const { | ||||||
|  |         return this->IsInRange(addr); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  | private: | ||||||
|  |     void Initialize([[maybe_unused]] RefCount* rc) { | ||||||
|  |         // TODO(bunnei): Use rc once we support kernel virtual memory allocations.
 | ||||||
|  |         const auto count = this->GetSize() / PageSize; | ||||||
|  |         m_ref_counts.resize(count); | ||||||
|  | 
 | ||||||
|  |         for (size_t i = 0; i < count; i++) { | ||||||
|  |             m_ref_counts[i] = 0; | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     RefCount* GetRefCountPointer(VAddr addr) { | ||||||
|  |         return m_ref_counts.data() + ((addr - this->GetAddress()) / PageSize); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  | private: | ||||||
|  |     using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>; | ||||||
|  | 
 | ||||||
|  |     std::vector<RefCount> m_ref_counts; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | } // namespace Kernel
 | ||||||
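Worked numbers for CalculateReferenceCountSize, assuming 4 KiB pages and the u16 RefCount above: a 64 MiB page-table heap spans 16,384 pages, so it needs 32 KiB of reference counts.

    static_assert(KPageTableSlabHeap::CalculateReferenceCountSize(64 * 1024 * 1024) ==
                  16384 * sizeof(KPageTableSlabHeap::RefCount)); // 32 KiB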
|  | @ -358,8 +358,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | ||||||
|     } |     } | ||||||
|     // Initialize process address space
 |     // Initialize process address space
 | ||||||
|     if (const Result result{page_table.InitializeForProcess( |     if (const Result result{page_table.InitializeForProcess( | ||||||
|             metadata.GetAddressSpaceType(), false, 0x8000000, code_size, |             metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application, | ||||||
|             &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)}; |             0x8000000, code_size, &kernel.GetSystemSystemResource(), resource_limit)}; | ||||||
|         result.IsError()) { |         result.IsError()) { | ||||||
|         R_RETURN(result); |         R_RETURN(result); | ||||||
|     } |     } | ||||||
|  |  | ||||||
src/core/hle/kernel/k_system_resource.cpp (new file, 26 lines)
							|  | @ -0,0 +1,26 @@ | ||||||
|  | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 | ||||||
|  | // SPDX-License-Identifier: GPL-2.0-or-later
 | ||||||
|  | 
 | ||||||
|  | #include "core/hle/kernel/k_system_resource.h" | ||||||
|  | 
 | ||||||
|  | namespace Kernel { | ||||||
|  | 
 | ||||||
|  | Result KSecureSystemResource::Initialize([[maybe_unused]] size_t size, | ||||||
|  |                                          [[maybe_unused]] KResourceLimit* resource_limit, | ||||||
|  |                                          [[maybe_unused]] KMemoryManager::Pool pool) { | ||||||
|  |     // Unimplemented
 | ||||||
|  |     UNREACHABLE(); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | void KSecureSystemResource::Finalize() { | ||||||
|  |     // Unimplemented
 | ||||||
|  |     UNREACHABLE(); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | size_t KSecureSystemResource::CalculateRequiredSecureMemorySize( | ||||||
|  |     [[maybe_unused]] size_t size, [[maybe_unused]] KMemoryManager::Pool pool) { | ||||||
|  |     // Unimplemented
 | ||||||
|  |     UNREACHABLE(); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | } // namespace Kernel
 | ||||||
src/core/hle/kernel/k_system_resource.h (new file, 137 lines)
							|  | @ -0,0 +1,137 @@ | ||||||
|  | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 | ||||||
|  | // SPDX-License-Identifier: GPL-2.0-or-later
 | ||||||
|  | 
 | ||||||
|  | #pragma once | ||||||
|  | 
 | ||||||
|  | #include "common/assert.h" | ||||||
|  | #include "common/common_types.h" | ||||||
|  | #include "core/hle/kernel/k_auto_object.h" | ||||||
|  | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||||||
|  | #include "core/hle/kernel/k_memory_manager.h" | ||||||
|  | #include "core/hle/kernel/k_page_table_manager.h" | ||||||
|  | #include "core/hle/kernel/k_resource_limit.h" | ||||||
|  | #include "core/hle/kernel/slab_helpers.h" | ||||||
|  | 
 | ||||||
|  | namespace Kernel { | ||||||
|  | 
 | ||||||
|  | // NOTE: Nintendo's implementation does not have the "is_secure_resource" field, and instead uses
 | ||||||
|  | // virtual IsSecureResource().
 | ||||||
|  | 
 | ||||||
|  | class KSystemResource : public KAutoObject { | ||||||
|  |     KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject); | ||||||
|  | 
 | ||||||
|  | public: | ||||||
|  |     explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {} | ||||||
|  | 
 | ||||||
|  | protected: | ||||||
|  |     void SetSecureResource() { | ||||||
|  |         m_is_secure_resource = true; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  | public: | ||||||
|  |     virtual void Destroy() override { | ||||||
|  |         UNREACHABLE_MSG("KSystemResource::Destroy() was called"); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     bool IsSecureResource() const { | ||||||
|  |         return m_is_secure_resource; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     void SetManagers(KMemoryBlockSlabManager& mb, KBlockInfoManager& bi, KPageTableManager& pt) { | ||||||
|  |         ASSERT(m_p_memory_block_slab_manager == nullptr); | ||||||
|  |         ASSERT(m_p_block_info_manager == nullptr); | ||||||
|  |         ASSERT(m_p_page_table_manager == nullptr); | ||||||
|  | 
 | ||||||
|  |         m_p_memory_block_slab_manager = std::addressof(mb); | ||||||
|  |         m_p_block_info_manager = std::addressof(bi); | ||||||
|  |         m_p_page_table_manager = std::addressof(pt); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const { | ||||||
|  |         return *m_p_memory_block_slab_manager; | ||||||
|  |     } | ||||||
|  |     const KBlockInfoManager& GetBlockInfoManager() const { | ||||||
|  |         return *m_p_block_info_manager; | ||||||
|  |     } | ||||||
|  |     const KPageTableManager& GetPageTableManager() const { | ||||||
|  |         return *m_p_page_table_manager; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     KMemoryBlockSlabManager& GetMemoryBlockSlabManager() { | ||||||
|  |         return *m_p_memory_block_slab_manager; | ||||||
|  |     } | ||||||
|  |     KBlockInfoManager& GetBlockInfoManager() { | ||||||
|  |         return *m_p_block_info_manager; | ||||||
|  |     } | ||||||
|  |     KPageTableManager& GetPageTableManager() { | ||||||
|  |         return *m_p_page_table_manager; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     KMemoryBlockSlabManager* GetMemoryBlockSlabManagerPointer() { | ||||||
|  |         return m_p_memory_block_slab_manager; | ||||||
|  |     } | ||||||
|  |     KBlockInfoManager* GetBlockInfoManagerPointer() { | ||||||
|  |         return m_p_block_info_manager; | ||||||
|  |     } | ||||||
|  |     KPageTableManager* GetPageTableManagerPointer() { | ||||||
|  |         return m_p_page_table_manager; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  | private: | ||||||
|  |     KMemoryBlockSlabManager* m_p_memory_block_slab_manager{}; | ||||||
|  |     KBlockInfoManager* m_p_block_info_manager{}; | ||||||
|  |     KPageTableManager* m_p_page_table_manager{}; | ||||||
|  |     bool m_is_secure_resource{false}; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | class KSecureSystemResource final | ||||||
|  |     : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> { | ||||||
|  | public: | ||||||
|  |     explicit KSecureSystemResource(KernelCore& kernel_) | ||||||
|  |         : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) { | ||||||
|  |         // Mark ourselves as being a secure resource.
 | ||||||
|  |         this->SetSecureResource(); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     Result Initialize(size_t size, KResourceLimit* resource_limit, KMemoryManager::Pool pool); | ||||||
|  |     void Finalize(); | ||||||
|  | 
 | ||||||
|  |     bool IsInitialized() const { | ||||||
|  |         return m_is_initialized; | ||||||
|  |     } | ||||||
|  |     static void PostDestroy([[maybe_unused]] uintptr_t arg) {} | ||||||
|  | 
 | ||||||
|  |     size_t CalculateRequiredSecureMemorySize() const { | ||||||
|  |         return CalculateRequiredSecureMemorySize(m_resource_size, m_resource_pool); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     size_t GetSize() const { | ||||||
|  |         return m_resource_size; | ||||||
|  |     } | ||||||
|  |     size_t GetUsedSize() const { | ||||||
|  |         return m_dynamic_page_manager.GetUsed() * PageSize; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     const KDynamicPageManager& GetDynamicPageManager() const { | ||||||
|  |         return m_dynamic_page_manager; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  | public: | ||||||
|  |     static size_t CalculateRequiredSecureMemorySize(size_t size, KMemoryManager::Pool pool); | ||||||
|  | 
 | ||||||
|  | private: | ||||||
|  |     bool m_is_initialized{}; | ||||||
|  |     KMemoryManager::Pool m_resource_pool{}; | ||||||
|  |     KDynamicPageManager m_dynamic_page_manager; | ||||||
|  |     KMemoryBlockSlabManager m_memory_block_slab_manager; | ||||||
|  |     KBlockInfoManager m_block_info_manager; | ||||||
|  |     KPageTableManager m_page_table_manager; | ||||||
|  |     KMemoryBlockSlabHeap m_memory_block_heap; | ||||||
|  |     KBlockInfoSlabHeap m_block_info_heap; | ||||||
|  |     KPageTableSlabHeap m_page_table_heap; | ||||||
|  |     KResourceLimit* m_resource_limit{}; | ||||||
|  |     VAddr m_resource_address{}; | ||||||
|  |     size_t m_resource_size{}; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | } // namespace Kernel
 | ||||||
|  | @ -28,10 +28,12 @@ | ||||||
| #include "core/hle/kernel/k_handle_table.h" | #include "core/hle/kernel/k_handle_table.h" | ||||||
| #include "core/hle/kernel/k_memory_layout.h" | #include "core/hle/kernel/k_memory_layout.h" | ||||||
| #include "core/hle/kernel/k_memory_manager.h" | #include "core/hle/kernel/k_memory_manager.h" | ||||||
|  | #include "core/hle/kernel/k_page_buffer.h" | ||||||
| #include "core/hle/kernel/k_process.h" | #include "core/hle/kernel/k_process.h" | ||||||
| #include "core/hle/kernel/k_resource_limit.h" | #include "core/hle/kernel/k_resource_limit.h" | ||||||
| #include "core/hle/kernel/k_scheduler.h" | #include "core/hle/kernel/k_scheduler.h" | ||||||
| #include "core/hle/kernel/k_shared_memory.h" | #include "core/hle/kernel/k_shared_memory.h" | ||||||
|  | #include "core/hle/kernel/k_system_resource.h" | ||||||
| #include "core/hle/kernel/k_thread.h" | #include "core/hle/kernel/k_thread.h" | ||||||
| #include "core/hle/kernel/k_worker_task_manager.h" | #include "core/hle/kernel/k_worker_task_manager.h" | ||||||
| #include "core/hle/kernel/kernel.h" | #include "core/hle/kernel/kernel.h" | ||||||
|  | @ -47,6 +49,11 @@ MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70)); | ||||||
| namespace Kernel { | namespace Kernel { | ||||||
| 
 | 
 | ||||||
| struct KernelCore::Impl { | struct KernelCore::Impl { | ||||||
|  |     static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; | ||||||
|  |     static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000; | ||||||
|  |     static constexpr size_t BlockInfoSlabHeapSize = 4000; | ||||||
|  |     static constexpr size_t ReservedDynamicPageCount = 64; | ||||||
|  | 
 | ||||||
|     explicit Impl(Core::System& system_, KernelCore& kernel_) |     explicit Impl(Core::System& system_, KernelCore& kernel_) | ||||||
|         : time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"}, |         : time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"}, | ||||||
|           service_thread_barrier{2}, system{system_} {} |           service_thread_barrier{2}, system{system_} {} | ||||||
|  | @ -71,7 +78,6 @@ struct KernelCore::Impl { | ||||||
|         // Initialize kernel memory and resources.
 |         // Initialize kernel memory and resources.
 | ||||||
|         InitializeSystemResourceLimit(kernel, system.CoreTiming()); |         InitializeSystemResourceLimit(kernel, system.CoreTiming()); | ||||||
|         InitializeMemoryLayout(); |         InitializeMemoryLayout(); | ||||||
|         Init::InitializeKPageBufferSlabHeap(system); |  | ||||||
|         InitializeShutdownThreads(); |         InitializeShutdownThreads(); | ||||||
|         InitializePhysicalCores(); |         InitializePhysicalCores(); | ||||||
|         InitializePreemption(kernel); |         InitializePreemption(kernel); | ||||||
|  | @ -81,7 +87,8 @@ struct KernelCore::Impl { | ||||||
|             const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion(); |             const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion(); | ||||||
|             ASSERT(pt_heap_region.GetEndAddress() != 0); |             ASSERT(pt_heap_region.GetEndAddress() != 0); | ||||||
| 
 | 
 | ||||||
|             InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize()); |             InitializeResourceManagers(kernel, pt_heap_region.GetAddress(), | ||||||
|  |                                        pt_heap_region.GetSize()); | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|         RegisterHostThread(); |         RegisterHostThread(); | ||||||
|  | @ -253,16 +260,82 @@ struct KernelCore::Impl { | ||||||
|         system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); |         system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     void InitializeResourceManagers(VAddr address, size_t size) { |     void InitializeResourceManagers(KernelCore& kernel, VAddr address, size_t size) { | ||||||
|         dynamic_page_manager = std::make_unique<KDynamicPageManager>(); |         // Ensure that the buffer is suitable for our use.
 | ||||||
|         memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); |         ASSERT(Common::IsAligned(address, PageSize)); | ||||||
|         app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); |         ASSERT(Common::IsAligned(size, PageSize)); | ||||||
| 
 | 
 | ||||||
|         dynamic_page_manager->Initialize(address, size); |         // Ensure that we have space for our reference counts.
 | ||||||
|         static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; |         const size_t rc_size = | ||||||
|         memory_block_heap->Initialize(dynamic_page_manager.get(), |             Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(size), PageSize); | ||||||
|                                       ApplicationMemoryBlockSlabHeapSize); |         ASSERT(rc_size < size); | ||||||
|         app_memory_block_manager->Initialize(nullptr, memory_block_heap.get()); |         size -= rc_size; | ||||||
|  | 
 | ||||||
|  |         // Initialize the resource managers' shared page manager.
 | ||||||
|  |         resource_manager_page_manager = std::make_unique<KDynamicPageManager>(); | ||||||
|  |         resource_manager_page_manager->Initialize( | ||||||
|  |             address, size, std::max<size_t>(PageSize, KPageBufferSlabHeap::BufferSize)); | ||||||
|  | 
 | ||||||
|  |         // Initialize the KPageBuffer slab heap.
 | ||||||
|  |         page_buffer_slab_heap.Initialize(system); | ||||||
|  | 
 | ||||||
|  |         // Initialize the fixed-size slab heaps.
 | ||||||
|  |         app_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); | ||||||
|  |         sys_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); | ||||||
|  |         block_info_heap = std::make_unique<KBlockInfoSlabHeap>(); | ||||||
|  |         app_memory_block_heap->Initialize(resource_manager_page_manager.get(), | ||||||
|  |                                           ApplicationMemoryBlockSlabHeapSize); | ||||||
|  |         sys_memory_block_heap->Initialize(resource_manager_page_manager.get(), | ||||||
|  |                                           SystemMemoryBlockSlabHeapSize); | ||||||
|  |         block_info_heap->Initialize(resource_manager_page_manager.get(), BlockInfoSlabHeapSize); | ||||||
|  | 
 | ||||||
|  |         // Reserve all but a fixed number of remaining pages for the page table heap.
 | ||||||
|  |         const size_t num_pt_pages = resource_manager_page_manager->GetCount() - | ||||||
|  |                                     resource_manager_page_manager->GetUsed() - | ||||||
|  |                                     ReservedDynamicPageCount; | ||||||
|  |         page_table_heap = std::make_unique<KPageTableSlabHeap>(); | ||||||
|  | 
 | ||||||
|  |         // TODO(bunnei): Pass in address once we support kernel virtual memory allocations.
 | ||||||
|  |         page_table_heap->Initialize( | ||||||
|  |             resource_manager_page_manager.get(), num_pt_pages, | ||||||
|  |             /*GetPointer<KPageTableManager::RefCount>(address + size)*/ nullptr); | ||||||
|  | 
 | ||||||
|  |         // Setup the slab managers.
 | ||||||
|  |         KDynamicPageManager* const app_dynamic_page_manager = nullptr; | ||||||
|  |         KDynamicPageManager* const sys_dynamic_page_manager = | ||||||
|  |             /*KTargetSystem::IsDynamicResourceLimitsEnabled()*/ true | ||||||
|  |                 ? resource_manager_page_manager.get() | ||||||
|  |                 : nullptr; | ||||||
|  |         app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); | ||||||
|  |         sys_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); | ||||||
|  |         app_block_info_manager = std::make_unique<KBlockInfoManager>(); | ||||||
|  |         sys_block_info_manager = std::make_unique<KBlockInfoManager>(); | ||||||
|  |         app_page_table_manager = std::make_unique<KPageTableManager>(); | ||||||
|  |         sys_page_table_manager = std::make_unique<KPageTableManager>(); | ||||||
|  | 
 | ||||||
|  |         app_memory_block_manager->Initialize(app_dynamic_page_manager, app_memory_block_heap.get()); | ||||||
|  |         sys_memory_block_manager->Initialize(sys_dynamic_page_manager, sys_memory_block_heap.get()); | ||||||
|  | 
 | ||||||
|  |         app_block_info_manager->Initialize(app_dynamic_page_manager, block_info_heap.get()); | ||||||
|  |         sys_block_info_manager->Initialize(sys_dynamic_page_manager, block_info_heap.get()); | ||||||
|  | 
 | ||||||
|  |         app_page_table_manager->Initialize(app_dynamic_page_manager, page_table_heap.get()); | ||||||
|  |         sys_page_table_manager->Initialize(sys_dynamic_page_manager, page_table_heap.get()); | ||||||
|  | 
 | ||||||
|  |         // Check that we have the correct number of dynamic pages available.
 | ||||||
|  |         ASSERT(resource_manager_page_manager->GetCount() - | ||||||
|  |                    resource_manager_page_manager->GetUsed() == | ||||||
|  |                ReservedDynamicPageCount); | ||||||
|  | 
 | ||||||
|  |         // Create the system page table managers.
 | ||||||
|  |         app_system_resource = std::make_unique<KSystemResource>(kernel); | ||||||
|  |         sys_system_resource = std::make_unique<KSystemResource>(kernel); | ||||||
|  | 
 | ||||||
|  |         // Set the managers for the system resources.
 | ||||||
|  |         app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager, | ||||||
|  |                                          *app_page_table_manager); | ||||||
|  |         sys_system_resource->SetManagers(*sys_memory_block_manager, *sys_block_info_manager, | ||||||
|  |                                          *sys_page_table_manager); | ||||||
|     } |     } | ||||||
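The rewritten initializer carves a page-aligned reference-count tail off the end of the supplied buffer before handing the remainder to the shared KDynamicPageManager; the two alignment ASSERTs above are what make the subtraction safe. A minimal standalone sketch of that partitioning arithmetic, with kPageSize and the helper names invented for illustration:

    #include <cassert>
    #include <cstddef>

    // Illustrative value; the real constant comes from the kernel headers.
    constexpr std::size_t kPageSize = 0x1000;

    constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    struct ResourcePartition {
        std::size_t manager_size; // bytes handed to the shared page manager
        std::size_t rc_size;      // tail kept for page-table reference counts
    };

    // Mirrors the flow above: round the reference-count storage up to a
    // page boundary, then shrink the managed region by that amount.
    ResourcePartition PartitionResourceBuffer(std::size_t size, std::size_t rc_bytes_needed) {
        const std::size_t rc_size = AlignUp(rc_bytes_needed, kPageSize);
        assert(rc_size < size); // same invariant as the ASSERT in the diff
        return {size - rc_size, rc_size};
    }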
|  |  | ||||||
|     void InitializeShutdownThreads() { |     void InitializeShutdownThreads() { | ||||||
|  | @ -446,6 +519,9 @@ struct KernelCore::Impl { | ||||||
|         ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( |         ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( | ||||||
|             misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); |             misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); | ||||||
|  |  | ||||||
|  |         // Determine if we'll use extra thread resources. | ||||||
|  |         const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); | ||||||
|  |  | ||||||
|         // Setup the stack region. |         // Setup the stack region. | ||||||
|         constexpr size_t StackRegionSize = 14_MiB; |         constexpr size_t StackRegionSize = 14_MiB; | ||||||
|         constexpr size_t StackRegionAlign = KernelAslrAlignment; |         constexpr size_t StackRegionAlign = KernelAslrAlignment; | ||||||
|  | @ -456,7 +532,8 @@ struct KernelCore::Impl { | ||||||
|             stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); |             stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); | ||||||
|  |  | ||||||
|         // Determine the size of the resource region. |         // Determine the size of the resource region. | ||||||
|         const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit(); |         const size_t resource_region_size = | ||||||
|  |             memory_layout->GetResourceRegionSizeForInit(use_extra_resources); | ||||||
|  |  | ||||||
|         // Determine the size of the slab region. |         // Determine the size of the slab region. | ||||||
|         const size_t slab_region_size = |         const size_t slab_region_size = | ||||||
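GetResourceRegionSizeForInit() now takes the flag computed above, so the resource region grows only when the board requests the larger thread resource limit. A hedged sketch of the shape of that decision; both constants are placeholders, not the function's real figures:

    #include <cstddef>

    constexpr std::size_t kBaseResourceRegionSize  = 0x1FE00000; // placeholder
    constexpr std::size_t kExtraThreadResourceSize = 0x100000;   // placeholder

    std::size_t ResourceRegionSizeForInit(bool use_extra_resources) {
        // One fixed base, plus an optional bump gated on the board's
        // ShouldIncreaseThreadResourceLimit() answer.
        return kBaseResourceRegionSize +
               (use_extra_resources ? kExtraThreadResourceSize : 0);
    }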
|  | @ -751,6 +828,8 @@ struct KernelCore::Impl { | ||||||
|     Init::KSlabResourceCounts slab_resource_counts{}; |     Init::KSlabResourceCounts slab_resource_counts{}; | ||||||
|     KResourceLimit* system_resource_limit{}; |     KResourceLimit* system_resource_limit{}; | ||||||
|  |  | ||||||
|  |     KPageBufferSlabHeap page_buffer_slab_heap; | ||||||
|  |  | ||||||
|     std::shared_ptr<Core::Timing::EventType> preemption_event; |     std::shared_ptr<Core::Timing::EventType> preemption_event; | ||||||
|  |  | ||||||
|     // This is the kernel's handle table or supervisor handle table which |     // This is the kernel's handle table or supervisor handle table which | ||||||
|  | @ -776,10 +855,20 @@ struct KernelCore::Impl { | ||||||
|     // Kernel memory management |     // Kernel memory management | ||||||
|     std::unique_ptr<KMemoryManager> memory_manager; |     std::unique_ptr<KMemoryManager> memory_manager; | ||||||
|  |  | ||||||
|     // Dynamic slab managers |     // Resource managers | ||||||
|     std::unique_ptr<KDynamicPageManager> dynamic_page_manager; |     std::unique_ptr<KDynamicPageManager> resource_manager_page_manager; | ||||||
|     std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap; |     std::unique_ptr<KPageTableSlabHeap> page_table_heap; | ||||||
|  |     std::unique_ptr<KMemoryBlockSlabHeap> app_memory_block_heap; | ||||||
|  |     std::unique_ptr<KMemoryBlockSlabHeap> sys_memory_block_heap; | ||||||
|  |     std::unique_ptr<KBlockInfoSlabHeap> block_info_heap; | ||||||
|  |     std::unique_ptr<KPageTableManager> app_page_table_manager; | ||||||
|  |     std::unique_ptr<KPageTableManager> sys_page_table_manager; | ||||||
|     std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager; |     std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager; | ||||||
|  |     std::unique_ptr<KMemoryBlockSlabManager> sys_memory_block_manager; | ||||||
|  |     std::unique_ptr<KBlockInfoManager> app_block_info_manager; | ||||||
|  |     std::unique_ptr<KBlockInfoManager> sys_block_info_manager; | ||||||
|  |     std::unique_ptr<KSystemResource> app_system_resource; | ||||||
|  |     std::unique_ptr<KSystemResource> sys_system_resource; | ||||||
|  |  | ||||||
|     // Shared memory for services |     // Shared memory for services | ||||||
|     Kernel::KSharedMemory* hid_shared_mem{}; |     Kernel::KSharedMemory* hid_shared_mem{}; | ||||||
|  | @ -1057,12 +1146,12 @@ const KMemoryManager& KernelCore::MemoryManager() const { | ||||||
|     return *impl->memory_manager; |     return *impl->memory_manager; | ||||||
| } | } | ||||||
|  |  | ||||||
| KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() { | KSystemResource& KernelCore::GetSystemSystemResource() { | ||||||
|     return *impl->app_memory_block_manager; |     return *impl->sys_system_resource; | ||||||
| } | } | ||||||
|  |  | ||||||
| const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const { | const KSystemResource& KernelCore::GetSystemSystemResource() const { | ||||||
|     return *impl->app_memory_block_manager; |     return *impl->sys_system_resource; | ||||||
| } | } | ||||||
|  |  | ||||||
| Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { | Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { | ||||||
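The accessor swap above replaces the bare application memory block manager with a KSystemResource, which carries the memory-block, block-info, and page-table managers wired together by SetManagers(). A hypothetical consumer, assuming nothing beyond what the diff shows:

    // Hypothetical call site: one handle now stands in for the three
    // managers a process page table needs.
    void InitProcessResources(Kernel::KernelCore& kernel) {
        Kernel::KSystemResource& resource = kernel.GetSystemSystemResource();
        // Pass 'resource' along instead of threading each manager through
        // individually, as GetApplicationMemoryBlockManager() used to force.
        (void)resource;
    }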
|  |  | ||||||
|  | @ -34,13 +34,16 @@ class KClientPort; | ||||||
| class GlobalSchedulerContext; | class GlobalSchedulerContext; | ||||||
| class KAutoObjectWithListContainer; | class KAutoObjectWithListContainer; | ||||||
| class KClientSession; | class KClientSession; | ||||||
|  | class KDebug; | ||||||
|  | class KDynamicPageManager; | ||||||
| class KEvent; | class KEvent; | ||||||
|  | class KEventInfo; | ||||||
| class KHandleTable; | class KHandleTable; | ||||||
| class KLinkedListNode; | class KLinkedListNode; | ||||||
| class KMemoryBlockSlabManager; |  | ||||||
| class KMemoryLayout; | class KMemoryLayout; | ||||||
| class KMemoryManager; | class KMemoryManager; | ||||||
| class KPageBuffer; | class KPageBuffer; | ||||||
|  | class KPageBufferSlabHeap; | ||||||
| class KPort; | class KPort; | ||||||
| class KProcess; | class KProcess; | ||||||
| class KResourceLimit; | class KResourceLimit; | ||||||
|  | @ -51,6 +54,7 @@ class KSession; | ||||||
| class KSessionRequest; | class KSessionRequest; | ||||||
| class KSharedMemory; | class KSharedMemory; | ||||||
| class KSharedMemoryInfo; | class KSharedMemoryInfo; | ||||||
|  | class KSecureSystemResource; | ||||||
| class KThread; | class KThread; | ||||||
| class KThreadLocalPage; | class KThreadLocalPage; | ||||||
| class KTransferMemory; | class KTransferMemory; | ||||||
|  | @ -244,11 +248,11 @@ public: | ||||||
|     /// Gets the virtual memory manager for the kernel. |     /// Gets the virtual memory manager for the kernel. | ||||||
|     const KMemoryManager& MemoryManager() const; |     const KMemoryManager& MemoryManager() const; | ||||||
|  |  | ||||||
|     /// Gets the application memory block manager for the kernel. |     /// Gets the system resource manager. | ||||||
|     KMemoryBlockSlabManager& GetApplicationMemoryBlockManager(); |     KSystemResource& GetSystemSystemResource(); | ||||||
|  |  | ||||||
|     /// Gets the application memory block manager for the kernel. |     /// Gets the system resource manager. | ||||||
|     const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const; |     const KSystemResource& GetSystemSystemResource() const; | ||||||
|  |  | ||||||
|     /// Gets the shared memory object for HID services. |     /// Gets the shared memory object for HID services. | ||||||
|     Kernel::KSharedMemory& GetHidSharedMem(); |     Kernel::KSharedMemory& GetHidSharedMem(); | ||||||
|  | @ -364,6 +368,12 @@ public: | ||||||
|             return slab_heap_container->thread_local_page; |             return slab_heap_container->thread_local_page; | ||||||
|         } else if constexpr (std::is_same_v<T, KSessionRequest>) { |         } else if constexpr (std::is_same_v<T, KSessionRequest>) { | ||||||
|             return slab_heap_container->session_request; |             return slab_heap_container->session_request; | ||||||
|  |         } else if constexpr (std::is_same_v<T, KSecureSystemResource>) { | ||||||
|  |             return slab_heap_container->secure_system_resource; | ||||||
|  |         } else if constexpr (std::is_same_v<T, KEventInfo>) { | ||||||
|  |             return slab_heap_container->event_info; | ||||||
|  |         } else if constexpr (std::is_same_v<T, KDebug>) { | ||||||
|  |             return slab_heap_container->debug; | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|  |  | ||||||
|  | @ -427,6 +437,9 @@ private: | ||||||
|         KSlabHeap<KPageBuffer> page_buffer; |         KSlabHeap<KPageBuffer> page_buffer; | ||||||
|         KSlabHeap<KThreadLocalPage> thread_local_page; |         KSlabHeap<KThreadLocalPage> thread_local_page; | ||||||
|         KSlabHeap<KSessionRequest> session_request; |         KSlabHeap<KSessionRequest> session_request; | ||||||
|  |         KSlabHeap<KSecureSystemResource> secure_system_resource; | ||||||
|  |         KSlabHeap<KEventInfo> event_info; | ||||||
|  |         KSlabHeap<KDebug> debug; | ||||||
|     }; |     }; | ||||||
|  |  | ||||||
|     std::unique_ptr<SlabHeapContainer> slab_heap_container; |     std::unique_ptr<SlabHeapContainer> slab_heap_container; | ||||||
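The new branches extend KernelCore's compile-time slab lookup: each SlabHeap<T>() instantiation collapses to a single member access on the container above. A toy version of the same if-constexpr dispatch, with invented types:

    #include <type_traits>

    struct Apple {};
    struct Orange {};

    struct Container {
        Apple apple;
        Orange orange;
    };

    // Compile-time type switch: the chosen branch is the only code that
    // survives instantiation. As in the kernel helper, an unsupported T
    // simply has no matching branch.
    template <typename T>
    T& Get(Container& c) {
        if constexpr (std::is_same_v<T, Apple>) {
            return c.apple;
        } else if constexpr (std::is_same_v<T, Orange>) {
            return c.orange;
        }
    }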
|  |  | ||||||
|  | @ -52,6 +52,84 @@ public: | ||||||
|     } |     } | ||||||
| }; | }; | ||||||
|  |  | ||||||
|  | template <typename Derived, typename Base> | ||||||
|  | class KAutoObjectWithSlabHeap : public Base { | ||||||
|  |     static_assert(std::is_base_of<KAutoObject, Base>::value); | ||||||
|  |  | ||||||
|  | private: | ||||||
|  |     static Derived* Allocate(KernelCore& kernel) { | ||||||
|  |         return kernel.SlabHeap<Derived>().Allocate(kernel); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     static void Free(KernelCore& kernel, Derived* obj) { | ||||||
|  |         kernel.SlabHeap<Derived>().Free(obj); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  | public: | ||||||
|  |     explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {} | ||||||
|  |     virtual ~KAutoObjectWithSlabHeap() = default; | ||||||
|  |  | ||||||
|  |     virtual void Destroy() override { | ||||||
|  |         const bool is_initialized = this->IsInitialized(); | ||||||
|  |         uintptr_t arg = 0; | ||||||
|  |         if (is_initialized) { | ||||||
|  |             arg = this->GetPostDestroyArgument(); | ||||||
|  |             this->Finalize(); | ||||||
|  |         } | ||||||
|  |         Free(kernel, static_cast<Derived*>(this)); | ||||||
|  |         if (is_initialized) { | ||||||
|  |             Derived::PostDestroy(arg); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     virtual bool IsInitialized() const { | ||||||
|  |         return true; | ||||||
|  |     } | ||||||
|  |     virtual uintptr_t GetPostDestroyArgument() const { | ||||||
|  |         return 0; | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     size_t GetSlabIndex() const { | ||||||
|  |         return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this)); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  | public: | ||||||
|  |     static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) { | ||||||
|  |         kernel.SlabHeap<Derived>().Initialize(memory, memory_size); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     static Derived* Create(KernelCore& kernel) { | ||||||
|  |         Derived* obj = Allocate(kernel); | ||||||
|  |         if (obj != nullptr) { | ||||||
|  |             KAutoObject::Create(obj); | ||||||
|  |         } | ||||||
|  |         return obj; | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     static size_t GetObjectSize(KernelCore& kernel) { | ||||||
|  |         return kernel.SlabHeap<Derived>().GetObjectSize(); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     static size_t GetSlabHeapSize(KernelCore& kernel) { | ||||||
|  |         return kernel.SlabHeap<Derived>().GetSlabHeapSize(); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     static size_t GetPeakIndex(KernelCore& kernel) { | ||||||
|  |         return kernel.SlabHeap<Derived>().GetPeakIndex(); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     static uintptr_t GetSlabHeapAddress(KernelCore& kernel) { | ||||||
|  |         return kernel.SlabHeap<Derived>().GetSlabHeapAddress(); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     static size_t GetNumRemaining(KernelCore& kernel) { | ||||||
|  |         return kernel.SlabHeap<Derived>().GetNumRemaining(); | ||||||
|  |     } | ||||||
|  |  | ||||||
|  | protected: | ||||||
|  |     KernelCore& kernel; | ||||||
|  | }; | ||||||
|  |  | ||||||
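A hypothetical derived type shows the contract the new helper expects: objects come out of their slab via Create() and go back through Destroy(), with PostDestroy() running only after the memory has been freed. KMyEvent and its body are invented for illustration:

    // Invented example type; only the base-class contract is real.
    class KMyEvent final : public KAutoObjectWithSlabHeap<KMyEvent, KAutoObject> {
    public:
        explicit KMyEvent(KernelCore& kernel_) : KAutoObjectWithSlabHeap(kernel_) {}

        static void PostDestroy(uintptr_t arg) {
            // Runs after Free(); 'arg' is whatever GetPostDestroyArgument()
            // captured before Finalize() tore the object down.
        }
    };

    // Typical lifecycle (sketch):
    //   KMyEvent* obj = KMyEvent::Create(kernel); // slab allocation
    //   ...
    //   obj->Close(); // last reference -> Destroy() -> slab Free()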
| template <typename Derived, typename Base> | template <typename Derived, typename Base> | ||||||
| class KAutoObjectWithSlabHeapAndContainer : public Base { | class KAutoObjectWithSlabHeapAndContainer : public Base { | ||||||
|     static_assert(std::is_base_of<KAutoObjectWithList, Base>::value); |     static_assert(std::is_base_of<KAutoObjectWithList, Base>::value); | ||||||
|  |  | ||||||
|  | @ -2247,7 +2247,7 @@ static u64 GetSystemTick(Core::System& system) { | ||||||
|     auto& core_timing = system.CoreTiming(); |     auto& core_timing = system.CoreTiming(); | ||||||
|  |  | ||||||
|     // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) |     // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) | ||||||
|     const u64 result{system.CoreTiming().GetClockTicks()}; |     const u64 result{core_timing.GetClockTicks()}; | ||||||
|  |  | ||||||
|     if (!system.Kernel().IsMulticore()) { |     if (!system.Kernel().IsMulticore()) { | ||||||
|         core_timing.AddTicks(400U); |         core_timing.AddTicks(400U); | ||||||
|  |  | ||||||
|  | @ -37,6 +37,7 @@ constexpr Result ResultInvalidState{ErrorModule::Kernel, 125}; | ||||||
| constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126}; | constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126}; | ||||||
| constexpr Result ResultPortClosed{ErrorModule::Kernel, 131}; | constexpr Result ResultPortClosed{ErrorModule::Kernel, 131}; | ||||||
| constexpr Result ResultLimitReached{ErrorModule::Kernel, 132}; | constexpr Result ResultLimitReached{ErrorModule::Kernel, 132}; | ||||||
|  | constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259}; | ||||||
| constexpr Result ResultInvalidId{ErrorModule::Kernel, 519}; | constexpr Result ResultInvalidId{ErrorModule::Kernel, 519}; | ||||||
|  |  | ||||||
| } // namespace Kernel | } // namespace Kernel | ||||||
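The new code gives address-space exhaustion its own kernel Result. A sketch of a call site, assuming the R_UNLESS/R_SUCCEED helpers from result.h are in scope; the function and its search are invented:

    #include "common/common_types.h"
    #include "core/hle/kernel/svc_results.h"
    #include "core/hle/result.h"

    // Invented example: fail a reservation with the new error code.
    Result ReserveRegion(VAddr* out_addr, std::size_t size) {
        const VAddr found = 0; // stand-in for a real free-region search
        R_UNLESS(found != 0, Kernel::ResultOutOfAddressSpace);
        *out_addr = found;
        R_SUCCEED();
    }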
|  |  | ||||||
|  | @ -22,8 +22,8 @@ enum class MemoryState : u32 { | ||||||
|     Ipc = 0x0A, |     Ipc = 0x0A, | ||||||
|     Stack = 0x0B, |     Stack = 0x0B, | ||||||
|     ThreadLocal = 0x0C, |     ThreadLocal = 0x0C, | ||||||
|     Transferred = 0x0D, |     Transfered = 0x0D, | ||||||
|     SharedTransferred = 0x0E, |     SharedTransfered = 0x0E, | ||||||
|     SharedCode = 0x0F, |     SharedCode = 0x0F, | ||||||
|     Inaccessible = 0x10, |     Inaccessible = 0x10, | ||||||
|     NonSecureIpc = 0x11, |     NonSecureIpc = 0x11, | ||||||
|  | @ -32,6 +32,7 @@ enum class MemoryState : u32 { | ||||||
|     GeneratedCode = 0x14, |     GeneratedCode = 0x14, | ||||||
|     CodeOut = 0x15, |     CodeOut = 0x15, | ||||||
|     Coverage = 0x16, |     Coverage = 0x16, | ||||||
|  |     Insecure = 0x17, | ||||||
| }; | }; | ||||||
| DECLARE_ENUM_FLAG_OPERATORS(MemoryState); | DECLARE_ENUM_FLAG_OPERATORS(MemoryState); | ||||||
|  |  | ||||||
|  | @ -83,6 +84,13 @@ enum class YieldType : s64 { | ||||||
|     ToAnyThread = -2, |     ToAnyThread = -2, | ||||||
| }; | }; | ||||||
|  |  | ||||||
|  | enum class ThreadExitReason : u32 { | ||||||
|  |     ExitThread = 0, | ||||||
|  |     TerminateThread = 1, | ||||||
|  |     ExitProcess = 2, | ||||||
|  |     TerminateProcess = 3, | ||||||
|  | }; | ||||||
|  |  | ||||||
| enum class ThreadActivity : u32 { | enum class ThreadActivity : u32 { | ||||||
|     Runnable = 0, |     Runnable = 0, | ||||||
|     Paused = 1, |     Paused = 1, | ||||||
|  | @ -108,6 +116,34 @@ enum class ProcessState : u32 { | ||||||
|     DebugBreak = 7, |     DebugBreak = 7, | ||||||
| }; | }; | ||||||
|  |  | ||||||
|  | enum class ProcessExitReason : u32 { | ||||||
|  |     ExitProcess = 0, | ||||||
|  |     TerminateProcess = 1, | ||||||
|  |     Exception = 2, | ||||||
|  | }; | ||||||
|  |  | ||||||
| constexpr inline size_t ThreadLocalRegionSize = 0x200; | constexpr inline size_t ThreadLocalRegionSize = 0x200; | ||||||
|  |  | ||||||
|  | // Debug types. | ||||||
|  | enum class DebugEvent : u32 { | ||||||
|  |     CreateProcess = 0, | ||||||
|  |     CreateThread = 1, | ||||||
|  |     ExitProcess = 2, | ||||||
|  |     ExitThread = 3, | ||||||
|  |     Exception = 4, | ||||||
|  | }; | ||||||
|  |  | ||||||
|  | enum class DebugException : u32 { | ||||||
|  |     UndefinedInstruction = 0, | ||||||
|  |     InstructionAbort = 1, | ||||||
|  |     DataAbort = 2, | ||||||
|  |     AlignmentFault = 3, | ||||||
|  |     DebuggerAttached = 4, | ||||||
|  |     BreakPoint = 5, | ||||||
|  |     UserBreak = 6, | ||||||
|  |     DebuggerBreak = 7, | ||||||
|  |     UndefinedSystemCall = 8, | ||||||
|  |     MemorySystemError = 9, | ||||||
|  | }; | ||||||
|  |  | ||||||
| } // namespace Kernel::Svc | } // namespace Kernel::Svc | ||||||
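The new debug enums back the KDebug plumbing referenced elsewhere in this change. When surfacing events in logs, a translation helper like the following keeps output readable; it is illustrative only and not part of the diff:

    #include <string_view>

    #include "core/hle/kernel/svc_types.h"

    // Illustrative helper, not present in the codebase.
    constexpr std::string_view ToString(Kernel::Svc::DebugEvent event) {
        switch (event) {
        case Kernel::Svc::DebugEvent::CreateProcess: return "CreateProcess";
        case Kernel::Svc::DebugEvent::CreateThread:  return "CreateThread";
        case Kernel::Svc::DebugEvent::ExitProcess:   return "ExitProcess";
        case Kernel::Svc::DebugEvent::ExitThread:    return "ExitThread";
        case Kernel::Svc::DebugEvent::Exception:     return "Exception";
        }
        return "Unknown";
    }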
|  |  | ||||||
|  | @ -423,16 +423,17 @@ constexpr void UpdateCurrentResultReference<const Result>(Result result_referenc | ||||||
| } // namespace ResultImpl | } // namespace ResultImpl | ||||||
|  |  | ||||||
| #define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE)                                \ | #define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE)                                \ | ||||||
|     [[maybe_unused]] constexpr bool HasPrevRef_##COUNTER_VALUE =                                   \ |     [[maybe_unused]] constexpr bool CONCAT2(HasPrevRef_, COUNTER_VALUE) =                          \ | ||||||
|         std::same_as<decltype(__TmpCurrentResultReference), Result&>;                              \ |         std::same_as<decltype(__TmpCurrentResultReference), Result&>;                              \ | ||||||
|     [[maybe_unused]] auto& PrevRef_##COUNTER_VALUE = __TmpCurrentResultReference;                  \ |     [[maybe_unused]] Result CONCAT2(PrevRef_, COUNTER_VALUE) = __TmpCurrentResultReference;        \ | ||||||
|     [[maybe_unused]] Result __tmp_result_##COUNTER_VALUE = ResultSuccess;                          \ |     [[maybe_unused]] Result CONCAT2(__tmp_result_, COUNTER_VALUE) = ResultSuccess;                 \ | ||||||
|     Result& __TmpCurrentResultReference =                                                          \ |     Result& __TmpCurrentResultReference = CONCAT2(HasPrevRef_, COUNTER_VALUE)                      \ | ||||||
|         HasPrevRef_##COUNTER_VALUE ? PrevRef_##COUNTER_VALUE : __tmp_result_##COUNTER_VALUE |                                               ? CONCAT2(PrevRef_, COUNTER_VALUE)                   \ | ||||||
|  |                                               : CONCAT2(__tmp_result_, COUNTER_VALUE) | ||||||
|  |  | ||||||
| #define ON_RESULT_RETURN_IMPL(...)                                                                 \ | #define ON_RESULT_RETURN_IMPL(...)                                                                 \ | ||||||
|     static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>);                   \ |     static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>);                   \ | ||||||
|     auto RESULT_GUARD_STATE_##__COUNTER__ =                                                        \ |     auto CONCAT2(RESULT_GUARD_STATE_, __COUNTER__) =                                               \ | ||||||
|         ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>(                              \ |         ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>(                              \ | ||||||
|             __TmpCurrentResultReference) +                                                         \ |             __TmpCurrentResultReference) +                                                         \ | ||||||
|         [&]() |         [&]() | ||||||
|  |  | ||||||
|  | @ -126,10 +126,12 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) | ||||||
|         LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); |         LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); | ||||||
|         return result; |         return result; | ||||||
|     } |     } | ||||||
|  |     bool is_out_io{}; | ||||||
|     ASSERT(system.CurrentProcess() |     ASSERT(system.CurrentProcess() | ||||||
|                ->PageTable() |                ->PageTable() | ||||||
|                .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size, |                .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address, | ||||||
|                                              Kernel::KMemoryPermission::None, true) |                                              handle_description->size, | ||||||
|  |                                              Kernel::KMemoryPermission::None, true, false) | ||||||
|                .IsSuccess()); |                .IsSuccess()); | ||||||
|     std::memcpy(output.data(), &params, sizeof(params)); |     std::memcpy(output.data(), &params, sizeof(params)); | ||||||
|     return result; |     return result; | ||||||
|  |  | ||||||