forked from eden-emu/eden
		
hle: kernel: Use host memory allocations for KSlabHeap.

- There are some issues with the current workaround, so we will just use host memory until we have a complete kernel memory implementation.

Author: bunnei
Parent: 7331bb9d8d
Commit: b4fc2e52a2

4 changed files with 20 additions and 174 deletions
@@ -70,14 +70,22 @@ constexpr size_t SlabCountExtraKThread = 160;
 template <typename T>
 VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
                          size_t num_objects) {
+    // TODO(bunnei): This is just a placeholder. We should initialize the appropriate KSlabHeap for
+    // kernel object type T with the backing kernel memory pointer once we emulate kernel memory.
+
     const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
     VAddr start = Common::AlignUp(address, alignof(T));
 
+    // This is intentionally empty. Once KSlabHeap is fully implemented, we can replace this with
+    // the pointer to emulated memory to pass along. Until then, KSlabHeap will just allocate/free
+    // host memory.
+    void* backing_kernel_memory{};
+
     if (size > 0) {
         const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
         ASSERT(region != nullptr);
         ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab));
-        T::InitializeSlabHeap(system.Kernel(), system.Memory().GetKernelBuffer(start, size), size);
+        T::InitializeSlabHeap(system.Kernel(), backing_kernel_memory, size);
     }
 
     return start + size;
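Note that InitializeSlabHeap still computes the guest-address layout exactly as before: the region size is sizeof(T) * num_objects rounded up to pointer alignment, and the start is the incoming address rounded up to alignof(T), so consecutive slab regions stay densely packed even though the backing storage is now host memory. Below is a minimal standalone sketch of that arithmetic; the type size, alignment, and base address are made-up numbers, not values from the emulator.

#include <cstdint>
#include <cstdio>

// Same rounding behavior as Common::AlignUp, reproduced here so the
// sketch is self-contained.
constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
    return (value + align - 1) / align * align;
}

int main() {
    // Hypothetical object type: 0x28 bytes, 8-byte alignment, 800 objects.
    constexpr std::uint64_t sizeof_T = 0x28;
    constexpr std::uint64_t alignof_T = 8;
    constexpr std::uint64_t num_objects = 800;
    constexpr std::uint64_t address = 0xFFFF'0000'1234'5679;

    // Mirrors InitializeSlabHeap: round the region size up to pointer
    // alignment and the start address up to the object's alignment.
    constexpr std::uint64_t size = AlignUp(sizeof_T * num_objects, alignof(void*));
    constexpr std::uint64_t start = AlignUp(address, alignof_T);

    // The next slab heap begins where this one ends.
    std::printf("start=0x%llx size=0x%llx next=0x%llx\n",
                static_cast<unsigned long long>(start),
                static_cast<unsigned long long>(size),
                static_cast<unsigned long long>(start + size));
}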
@@ -4,165 +4,33 @@
 
 #pragma once
 
-#include <atomic>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-
 namespace Kernel {
 
-namespace impl {
-
-class KSlabHeapImpl final : NonCopyable {
-public:
-    struct Node {
-        Node* next{};
-    };
-
-    constexpr KSlabHeapImpl() = default;
-
-    void Initialize(std::size_t size) {
-        ASSERT(head == nullptr);
-        obj_size = size;
-    }
-
-    constexpr std::size_t GetObjectSize() const {
-        return obj_size;
-    }
-
-    Node* GetHead() const {
-        return head;
-    }
-
-    void* Allocate() {
-        Node* ret = head.load();
-
-        do {
-            if (ret == nullptr) {
-                break;
-            }
-        } while (!head.compare_exchange_weak(ret, ret->next));
-
-        return ret;
-    }
-
-    void Free(void* obj) {
-        Node* node = static_cast<Node*>(obj);
-
-        Node* cur_head = head.load();
-        do {
-            node->next = cur_head;
-        } while (!head.compare_exchange_weak(cur_head, node));
-    }
-
-private:
-    std::atomic<Node*> head{};
-    std::size_t obj_size{};
-};
-
-} // namespace impl
-
-class KSlabHeapBase : NonCopyable {
-public:
-    constexpr KSlabHeapBase() = default;
-
-    constexpr bool Contains(uintptr_t addr) const {
-        return start <= addr && addr < end;
-    }
-
-    constexpr std::size_t GetSlabHeapSize() const {
-        return (end - start) / GetObjectSize();
-    }
-
-    constexpr std::size_t GetObjectSize() const {
-        return impl.GetObjectSize();
-    }
-
-    constexpr uintptr_t GetSlabHeapAddress() const {
-        return start;
-    }
-
-    std::size_t GetObjectIndexImpl(const void* obj) const {
-        return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
-    }
-
-    std::size_t GetPeakIndex() const {
-        return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
-    }
-
-    void* AllocateImpl() {
-        return impl.Allocate();
-    }
-
-    void FreeImpl(void* obj) {
-        // Don't allow freeing an object that wasn't allocated from this heap
-        ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
-
-        impl.Free(obj);
-    }
-
-    void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
-        // Ensure we don't initialize a slab using null memory
-        ASSERT(memory != nullptr);
-
-        // Initialize the base allocator
-        impl.Initialize(obj_size);
-
-        // Set our tracking variables
-        const std::size_t num_obj = (memory_size / obj_size);
-        start = reinterpret_cast<uintptr_t>(memory);
-        end = start + num_obj * obj_size;
-        peak = start;
-
-        // Free the objects
-        u8* cur = reinterpret_cast<u8*>(end);
-
-        for (std::size_t i{}; i < num_obj; i++) {
-            cur -= obj_size;
-            impl.Free(cur);
-        }
-    }
-
-private:
-    using Impl = impl::KSlabHeapImpl;
-
-    Impl impl;
-    uintptr_t peak{};
-    uintptr_t start{};
-    uintptr_t end{};
-};
-
+class KernelCore;
+
+/// This is a placeholder class to manage slab heaps for kernel objects. For now, we just allocate
+/// these with new/delete, but this can be re-implemented later to allocate these in emulated
+/// memory.
 template <typename T>
-class KSlabHeap final : public KSlabHeapBase {
+class KSlabHeap final : NonCopyable {
 public:
-    constexpr KSlabHeap() : KSlabHeapBase() {}
+    KSlabHeap() = default;
 
-    void Initialize(void* memory, std::size_t memory_size) {
-        InitializeImpl(sizeof(T), memory, memory_size);
+    void Initialize([[maybe_unused]] void* memory, [[maybe_unused]] std::size_t memory_size) {
+        // Placeholder that should initialize the backing slab heap implementation.
     }
 
     T* Allocate() {
-        T* obj = static_cast<T*>(AllocateImpl());
-        if (obj != nullptr) {
-            new (obj) T();
-        }
-        return obj;
+        return new T();
     }
 
     T* AllocateWithKernel(KernelCore& kernel) {
-        T* obj = static_cast<T*>(AllocateImpl());
-        if (obj != nullptr) {
-            new (obj) T(kernel);
-        }
-        return obj;
+        return new T(kernel);
     }
 
     void Free(T* obj) {
-        FreeImpl(obj);
-    }
-
-    constexpr std::size_t GetObjectIndex(const T* obj) const {
-        return GetObjectIndexImpl(obj);
+        delete obj;
     }
 };
 
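For reference, the deleted impl::KSlabHeapImpl was a lock-free intrusive free list (a Treiber stack): Allocate pops the head with compare_exchange_weak, Free pushes a node back, and InitializeImpl seeded the list by Free()ing every slot of the backing region. A condensed, self-contained version of that algorithm follows; the FreeList name and the driver in main are mine, not the emulator's.

#include <atomic>
#include <cassert>

// Condensed copy of the removed impl::KSlabHeapImpl free list.
class FreeList {
public:
    struct Node {
        Node* next{};
    };

    // Pop the head node, retrying if another thread won the race.
    // compare_exchange_weak reloads the current head into ret on failure.
    void* Allocate() {
        Node* ret = head.load();
        do {
            if (ret == nullptr) {
                break; // list exhausted
            }
        } while (!head.compare_exchange_weak(ret, ret->next));
        return ret;
    }

    // Push a block back, retrying until the head swap succeeds.
    void Free(void* obj) {
        Node* node = static_cast<Node*>(obj);
        Node* cur_head = head.load();
        do {
            node->next = cur_head;
        } while (!head.compare_exchange_weak(cur_head, node));
    }

private:
    std::atomic<Node*> head{};
};

int main() {
    // Seed the list with two raw blocks, as InitializeImpl did by
    // Free()ing each slot of the backing region.
    alignas(void*) unsigned char storage[2][64];
    FreeList list;
    list.Free(storage[0]);
    list.Free(storage[1]);

    void* a = list.Allocate();
    void* b = list.Allocate();
    assert(a != nullptr && b != nullptr && a != b);
    assert(list.Allocate() == nullptr); // exhausted
    list.Free(a);
}

As with any Treiber stack, the bare pop is exposed to the classic ABA problem if nodes can be freed and reallocated concurrently; whether that was among the issues motivating this commit is not stated in the message.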
@@ -82,22 +82,6 @@ struct Memory::Impl {
         return nullptr;
     }
 
-    u8* GetKernelBuffer(VAddr start_vaddr, size_t size) {
-        // TODO(bunnei): This is just a workaround until we have kernel memory layout mapped &
-        // managed. Until then, we use this to allocate and access kernel memory regions.
-
-        auto search = kernel_memory_regions.find(start_vaddr);
-        if (search != kernel_memory_regions.end()) {
-            return search->second.get();
-        }
-
-        std::unique_ptr<u8[]> new_memory_region{new u8[size]};
-        u8* raw_ptr = new_memory_region.get();
-        kernel_memory_regions[start_vaddr] = std::move(new_memory_region);
-
-        return raw_ptr;
-    }
-
     u8 Read8(const VAddr addr) {
         return Read<u8>(addr);
     }
@@ -727,7 +711,6 @@ struct Memory::Impl {
     }
 
     Common::PageTable* current_page_table = nullptr;
-    std::unordered_map<VAddr, std::unique_ptr<u8[]>> kernel_memory_regions;
     Core::System& system;
 };
 
@@ -765,10 +748,6 @@ u8* Memory::GetPointer(VAddr vaddr) {
     return impl->GetPointer(vaddr);
 }
 
-u8* Memory::GetKernelBuffer(VAddr start_vaddr, size_t size) {
-    return impl->GetKernelBuffer(start_vaddr, size);
-}
-
 const u8* Memory::GetPointer(VAddr vaddr) const {
     return impl->GetPointer(vaddr);
 }
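The commit message only says "there are some issues with the current workaround", but one weakness is visible in the removed code itself: the map is keyed solely by start_vaddr, so the size argument is ignored whenever a region already exists, and overlapping ranges receive unrelated host buffers rather than aliasing the same memory. The reduced sketch below demonstrates both behaviors; the addresses and sizes are hypothetical.

#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_map>

using VAddr = std::uint64_t;

// Reduced copy of the removed workaround: host buffers keyed only by the
// region's start address.
std::unordered_map<VAddr, std::unique_ptr<std::uint8_t[]>> regions;

std::uint8_t* GetKernelBuffer(VAddr start, std::size_t size) {
    if (auto it = regions.find(start); it != regions.end()) {
        return it->second.get(); // note: size is ignored on this path
    }
    auto buffer = std::make_unique<std::uint8_t[]>(size);
    std::uint8_t* raw = buffer.get();
    regions[start] = std::move(buffer);
    return raw;
}

int main() {
    // A second request at the same start but with a larger size silently
    // returns the original, smaller allocation.
    std::uint8_t* small = GetKernelBuffer(0x1000, 0x100);
    std::uint8_t* same = GetKernelBuffer(0x1000, 0x10000);

    // Overlapping ranges get unrelated host buffers, so this "kernel
    // memory" is not aliased the way real guest memory would be.
    std::uint8_t* overlap = GetKernelBuffer(0x1080, 0x100);

    return (same == small && overlap != small + 0x80) ? 0 : 1;
}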
@@ -121,15 +121,6 @@ public:
      */
     u8* GetPointer(VAddr vaddr);
 
-    /**
-     * Gets a pointer to the start of a kernel heap allocated memory region. Will allocate one if
-     * it does not already exist.
-     *
-     * @param start_vaddr Start virtual address for the memory region.
-     * @param size Size of the memory region.
-     */
-    u8* GetKernelBuffer(VAddr start_vaddr, size_t size);
-
     template <typename T>
     T* GetPointer(VAddr vaddr) {
         return reinterpret_cast<T*>(GetPointer(vaddr));
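Taken together, the change swaps placement construction into a Memory-owned buffer for plain host-heap allocation. The contrast can be modeled in isolation; the sketch below is illustrative only (OldSlabHeap and NewSlabHeap are hypothetical stand-ins, and a fixed slot index replaces the real free list shown earlier).

#include <cassert>
#include <cstddef>
#include <new>

template <typename T>
class OldSlabHeap {
public:
    // Before: objects were placement-new'd into a backing buffer that
    // Memory::GetKernelBuffer had allocated for the guest address range.
    void Initialize(void* backing, std::size_t size) {
        memory = static_cast<unsigned char*>(backing);
        capacity = size / sizeof(T);
    }
    T* Allocate(std::size_t slot) {
        assert(slot < capacity);
        return new (memory + slot * sizeof(T)) T();
    }
    void Free(T* obj) {
        obj->~T(); // storage stays with the backing buffer
    }

private:
    unsigned char* memory{};
    std::size_t capacity{};
};

template <typename T>
class NewSlabHeap {
public:
    // After: the backing pointer is null and ignored; objects live on the
    // host heap until kernel memory is emulated.
    T* Allocate() { return new T(); }
    void Free(T* obj) { delete obj; }
};

int main() {
    alignas(int) unsigned char buffer[16 * sizeof(int)];

    OldSlabHeap<int> old_heap;
    old_heap.Initialize(buffer, sizeof(buffer));
    old_heap.Free(old_heap.Allocate(0));

    NewSlabHeap<int> new_heap;
    new_heap.Free(new_heap.Allocate());
}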