forked from eden-emu/eden

hle: kernel: Refactor out various KThread std::shared_ptr usage.

parent 0cff50a7b5
commit 07939c59a6

10 changed files with 30 additions and 58 deletions
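The change repeated through these hunks is mechanical: interfaces that previously received a KThread via std::shared_ptr now take a non-owning KThread* raw pointer, on the assumption that thread lifetime is owned elsewhere in the kernel. A rough, hypothetical sketch of that ownership split (simplified names, not the actual yuzu types):

// Hypothetical illustration only: a registry owns the KThread objects, while the
// scheduler keeps non-owning raw pointers, mirroring the new AddThread(KThread*)
// style signatures in this commit.
#include <memory>
#include <vector>

class KThread {};

class ThreadRegistry {
public:
    // Owns every thread object; raw pointers handed out stay valid
    // until the registry destroys the thread.
    KThread* Create() {
        owned.push_back(std::make_unique<KThread>());
        return owned.back().get();
    }

private:
    std::vector<std::unique_ptr<KThread>> owned;
};

class Scheduler {
public:
    // Non-owning: stores the pointer, never manages its lifetime.
    void AddThread(KThread* thread) {
        threads.push_back(thread);
    }

private:
    std::vector<KThread*> threads;
};

int main() {
    ThreadRegistry registry;
    Scheduler scheduler;
    scheduler.AddThread(registry.Create());
}

The trade-off is the usual one for raw pointers: every component holding a KThread* must be outlived by whatever owns the thread, which is why registration and removal stay paired with thread creation and teardown.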
				
			
@@ -38,8 +38,7 @@ ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kern
     return MakeResult(std::move(client_session));
 }
 
-ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread,
-                                          Core::Memory::Memory& memory,
+ResultCode ClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                                           Core::Timing::CoreTiming& core_timing) {
     // Keep ServerSession alive until we're done working with it.
     if (!parent->Server()) {
@@ -46,7 +46,7 @@ public:
         return HANDLE_TYPE;
     }
 
-    ResultCode SendSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
+    ResultCode SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                                Core::Timing::CoreTiming& core_timing);
 
     bool IsSignaled() const override;
@@ -17,12 +17,12 @@ GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
 
 GlobalSchedulerContext::~GlobalSchedulerContext() = default;
 
-void GlobalSchedulerContext::AddThread(std::shared_ptr<KThread> thread) {
+void GlobalSchedulerContext::AddThread(KThread* thread) {
     std::scoped_lock lock{global_list_guard};
-    thread_list.push_back(std::move(thread));
+    thread_list.push_back(thread);
 }
 
-void GlobalSchedulerContext::RemoveThread(std::shared_ptr<KThread> thread) {
+void GlobalSchedulerContext::RemoveThread(KThread* thread) {
     std::scoped_lock lock{global_list_guard};
     thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                       thread_list.end());
@@ -38,13 +38,13 @@ public:
     ~GlobalSchedulerContext();
 
     /// Adds a new thread to the scheduler
-    void AddThread(std::shared_ptr<KThread> thread);
+    void AddThread(KThread* thread);
 
     /// Removes a thread from the scheduler
-    void RemoveThread(std::shared_ptr<KThread> thread);
+    void RemoveThread(KThread* thread);
 
     /// Returns a list of all threads managed by the scheduler
-    [[nodiscard]] const std::vector<std::shared_ptr<KThread>>& GetThreadList() const {
+    [[nodiscard]] const std::vector<KThread*>& GetThreadList() const {
         return thread_list;
     }
 
@@ -79,7 +79,7 @@ private:
     LockType scheduler_lock;
 
     /// Lists all thread ids that aren't deleted/etc.
-    std::vector<std::shared_ptr<KThread>> thread_list;
+    std::vector<KThread*> thread_list;
     Common::SpinLock global_list_guard{};
 };
 
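For reference, the container pattern GlobalSchedulerContext ends up with is a lock-guarded vector of raw pointers, with removal via the erase-remove idiom. A minimal standalone sketch, assuming a plain std::mutex in place of Common::SpinLock and generic names:

// Simplified sketch of the thread-list pattern above; not the real class.
#include <algorithm>
#include <mutex>
#include <vector>

class KThread {};

class ThreadList {
public:
    void Add(KThread* thread) {
        std::scoped_lock lock{guard};
        threads.push_back(thread);
    }

    void Remove(KThread* thread) {
        std::scoped_lock lock{guard};
        // Erase-remove idiom: compact matching pointers, then drop the tail.
        threads.erase(std::remove(threads.begin(), threads.end(), thread), threads.end());
    }

private:
    std::vector<KThread*> threads;
    std::mutex guard; // the diff above uses Common::SpinLock
};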
@@ -46,11 +46,11 @@ void SessionRequestHandler::ClientDisconnected(
     boost::range::remove_erase(connected_sessions, server_session);
 }
 
-HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
-                                     std::shared_ptr<ServerSession> server_session,
-                                     std::shared_ptr<KThread> thread)
-    : server_session(std::move(server_session)),
-      thread(std::move(thread)), kernel{kernel}, memory{memory} {
+HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
+                                     std::shared_ptr<ServerSession> server_session_,
+                                     KThread* thread_)
+    : server_session(std::move(server_session_)),
+      thread(thread_), kernel{kernel_}, memory{memory_} {
     cmd_buf[0] = 0;
 }
 
@@ -109,8 +109,7 @@ protected:
 class HLERequestContext {
 public:
     explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
-                               std::shared_ptr<ServerSession> session,
-                               std::shared_ptr<KThread> thread);
+                               std::shared_ptr<ServerSession> session, KThread* thread);
     ~HLERequestContext();
 
     /// Returns a pointer to the IPC command buffer for this request.
@@ -276,10 +275,6 @@ public:
         return *thread;
     }
 
-    const KThread& GetThread() const {
-        return *thread;
-    }
-
     bool IsThreadWaiting() const {
         return is_thread_waiting;
     }
@@ -291,7 +286,8 @@ private:
 
     std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
     std::shared_ptr<Kernel::ServerSession> server_session;
-    std::shared_ptr<KThread> thread;
+    KThread* thread;
 
     // TODO(yuriks): Check common usage of this and optimize size accordingly
     boost::container::small_vector<Handle, 8> move_handles;
     boost::container::small_vector<Handle, 8> copy_handles;
@@ -60,8 +60,6 @@ struct KernelCore::Impl {
     void Initialize(KernelCore& kernel) {
         global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
 
-        RegisterHostThread();
-
         service_thread_manager =
             std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
         is_phantom_mode_for_singlecore = false;
@@ -77,6 +75,8 @@ struct KernelCore::Impl {
         InitializeSchedulers();
         InitializeSuspendThreads();
         InitializePreemption(kernel);
+
+        RegisterHostThread();
     }
 
     void InitializeCores() {
@@ -44,12 +44,7 @@ ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kern
 
 bool ServerSession::IsSignaled() const {
     // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
-    if (!parent->Client()) {
-        return true;
-    }
-
-    // Wait if we have no pending requests, or if we're currently handling a request.
-    return !pending_requesting_threads.empty() && currently_handling == nullptr;
+    return !parent->Client();
 }
 
 void ServerSession::ClientDisconnected() {
@@ -62,11 +57,6 @@ void ServerSession::ClientDisconnected() {
         // invalidated (set to null).
         handler->ClientDisconnected(SharedFrom(this));
     }
-
-    // Clean up the list of client threads with pending requests, they are unneeded now that the
-    // client endpoint is closed.
-    pending_requesting_threads.clear();
-    currently_handling = nullptr;
 }
 
 void ServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) {
@@ -116,11 +106,9 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
     return RESULT_SUCCESS;
 }
 
-ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<KThread> thread,
-                                           Core::Memory::Memory& memory) {
+ResultCode ServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
     u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
-    auto context =
-        std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread));
+    auto context = std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), thread);
 
     context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
 
@@ -161,10 +149,9 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
     return result;
 }
 
-ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<KThread> thread,
-                                            Core::Memory::Memory& memory,
+ResultCode ServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                                             Core::Timing::CoreTiming& core_timing) {
-    return QueueSyncRequest(std::move(thread), memory);
+    return QueueSyncRequest(thread, memory);
 }
 
 } // namespace Kernel
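Alongside the pointer change, ServerSession drops its pending_requesting_threads / currently_handling bookkeeping, so IsSignaled() reduces to "is the client endpoint gone?". A hypothetical, stripped-down model of that check (illustrative names only, not the real session classes):

// Illustrative only: signaled == the paired client endpoint no longer exists.
#include <cassert>

class SessionPair {
public:
    bool HasClient() const { return client_alive; }
    void CloseClient() { client_alive = false; }

private:
    bool client_alive{true};
};

class ServerEndpoint {
public:
    explicit ServerEndpoint(const SessionPair* parent_) : parent{parent_} {}

    // Mirrors the simplified ServerSession::IsSignaled() above: no per-request state.
    bool IsSignaled() const { return !parent->HasClient(); }

private:
    const SessionPair* parent; // non-owning
};

int main() {
    SessionPair pair;
    ServerEndpoint server{&pair};
    assert(!server.IsSignaled()); // client still connected
    pair.CloseClient();
    assert(server.IsSignaled());  // closed client signals the server side
}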
@@ -95,7 +95,7 @@ public:
      *
      * @returns ResultCode from the operation.
      */
-    ResultCode HandleSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
+    ResultCode HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                                  Core::Timing::CoreTiming& core_timing);
 
     /// Called when a client disconnection occurs.
@@ -130,7 +130,7 @@ public:
 
 private:
     /// Queues a sync request from the emulated application.
-    ResultCode QueueSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory);
+    ResultCode QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
 
     /// Completes a sync request from the emulated application.
     ResultCode CompleteSyncRequest(HLERequestContext& context);
@@ -148,16 +148,6 @@ private:
     /// This is the list of domain request handlers (after conversion to a domain)
     std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;
 
-    /// List of threads that are pending a response after a sync request. This list is processed in
-    /// a LIFO manner, thus, the last request will be dispatched first.
-    /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
-    std::vector<std::shared_ptr<KThread>> pending_requesting_threads;
-
-    /// Thread whose request is currently being handled. A request is considered "handled" when a
-    /// response is sent via svcReplyAndReceive.
-    /// TODO(Subv): Find a better name for this.
-    std::shared_ptr<KThread> currently_handling;
-
    /// When set to True, converts the session to a domain at the end of the command
     bool convert_to_domain{};
 
@@ -60,7 +60,7 @@ private:
         const auto process_id = rp.PopRaw<u64>();
 
         const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [ctx] {
+        const auto data2 = [&ctx] {
             if (ctx.CanReadBuffer(1)) {
                 return ctx.ReadBuffer(1);
             }
@@ -87,7 +87,7 @@ private:
         const auto process_id = rp.PopRaw<u64>();
 
         const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [ctx] {
+        const auto data2 = [&ctx] {
             if (ctx.CanReadBuffer(1)) {
                 return ctx.ReadBuffer(1);
             }
@@ -139,7 +139,7 @@ private:
         const auto title_id = rp.PopRaw<u64>();
 
         const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [ctx] {
+        const auto data2 = [&ctx] {
             if (ctx.CanReadBuffer(1)) {
                 return ctx.ReadBuffer(1);
             }
@@ -163,7 +163,7 @@ private:
         const auto title_id = rp.PopRaw<u64>();
 
         const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [ctx] {
+        const auto data2 = [&ctx] {
             if (ctx.CanReadBuffer(1)) {
                 return ctx.ReadBuffer(1);
             }
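The last file switches the data2 lambdas from capturing ctx by value to by reference: a by-value capture operates on a copy of the request context (or fails to compile outright if the type is non-copyable), so buffer reads would no longer go through the caller's live HLERequestContext. A generic illustration of the difference, using stand-in types rather than the yuzu classes:

// Generic illustration of why the capture changed from [ctx] to [&ctx]:
// a by-value capture works on a copy, so the lambda never affects (or observes
// changes to) the caller's object.
#include <cassert>

struct Context {
    int reads = 0;
    int Read() { return ++reads; }
};

int main() {
    Context ctx;

    auto by_value = [ctx]() mutable { return ctx.Read(); }; // mutates a private copy
    auto by_ref = [&ctx] { return ctx.Read(); };            // mutates the original

    by_value();
    assert(ctx.reads == 0); // the copy absorbed the read

    by_ref();
    assert(ctx.reads == 1); // the caller's context was actually used
}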
bunnei