From b49c0dab8772afb06358e5d19af092226b3a59bb Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow <fsahmkow27@gmail.com>
Date: Tue, 10 Sep 2019 11:04:40 -0400
Subject: [PATCH] Kernel: Initial implementation of thread preemption.

---
 src/core/hle/kernel/kernel.cpp    | 16 ++++++++++++++++
 src/core/hle/kernel/scheduler.cpp | 10 ++++++++++
 src/core/hle/kernel/scheduler.h   |  4 ++++
 3 files changed, 30 insertions(+)

diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 600d6ec74..7a913520d 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -12,6 +12,7 @@
 
 #include "core/core.h"
 #include "core/core_timing.h"
+#include "core/core_timing_util.h"
 #include "core/hle/kernel/address_arbiter.h"
 #include "core/hle/kernel/client_port.h"
 #include "core/hle/kernel/handle_table.h"
@@ -96,6 +97,7 @@ struct KernelCore::Impl {
 
         InitializeSystemResourceLimit(kernel);
         InitializeThreads();
+        InitializePreemption();
     }
 
     void Shutdown() {
@@ -111,6 +113,7 @@ struct KernelCore::Impl {
 
         thread_wakeup_callback_handle_table.Clear();
         thread_wakeup_event_type = nullptr;
+        preemption_event = nullptr;
 
         named_ports.clear();
     }
@@ -133,6 +136,18 @@ struct KernelCore::Impl {
         system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback);
     }
 
+    void InitializePreemption() {
+        preemption_event = system.CoreTiming().RegisterEvent(
+            "PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
+                global_scheduler.PreemptThreads();
+                s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+                system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+            });
+
+        s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+        system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+    }
+
     std::atomic<u32> next_object_id{0};
     std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
     std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
@@ -146,6 +161,7 @@ struct KernelCore::Impl {
     SharedPtr<ResourceLimit> system_resource_limit;
 
     Core::Timing::EventType* thread_wakeup_event_type = nullptr;
+    Core::Timing::EventType* preemption_event = nullptr;
     // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future,
     // allowing us to simply use a pool index or similar.
     Kernel::HandleTable thread_wakeup_callback_handle_table;
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 451fd8077..0d45307cd 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -238,6 +238,16 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
     return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
+void GlobalScheduler::PreemptThreads() {
+    for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
+        const u64 priority = preemption_priorities[core_id];
+        if (scheduled_queue[core_id].size(priority) > 1) {
+            scheduled_queue[core_id].yield(priority);
+            reselection_pending.store(true, std::memory_order_release);
+        }
+    }
+}
+
 void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
     ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority);
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 8fcc86bae..c13a368fd 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -133,6 +133,8 @@ public:
      */
     bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
 
+    void PreemptThreads();
+
     u32 CpuCoresCount() const {
         return NUM_CPU_CORES;
     }
@@ -153,6 +155,8 @@ private:
     std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
     std::atomic<bool> reselection_pending;
 
+    std::array<u64, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
+
     /// Lists all thread ids that aren't deleted/etc.
     std::vector<SharedPtr<Thread>> thread_list;
     Core::System& system;