yuzu-emu/yuzu

Merge pull request #7462 from bunnei/kernel-improve-scheduling

Kernel: Improve threading & scheduling V3
bunnei authored 2021-12-12 22:43:25 -08:00, committed by GitHub
commit 280c779898
32 changed files with 897 additions and 636 deletions

View File

@@ -187,6 +187,7 @@ add_library(core STATIC
     hle/kernel/k_event.h
     hle/kernel/k_handle_table.cpp
     hle/kernel/k_handle_table.h
+    hle/kernel/k_light_condition_variable.cpp
     hle/kernel/k_light_condition_variable.h
     hle/kernel/k_light_lock.cpp
     hle/kernel/k_light_lock.h
@@ -239,6 +240,7 @@ add_library(core STATIC
     hle/kernel/k_system_control.h
     hle/kernel/k_thread.cpp
     hle/kernel/k_thread.h
+    hle/kernel/k_thread_queue.cpp
     hle/kernel/k_thread_queue.h
     hle/kernel/k_trace.h
     hle/kernel/k_transfer_memory.cpp

View File

@@ -521,12 +521,6 @@ const ARM_Interface& System::CurrentArmInterface() const {
     return impl->kernel.CurrentPhysicalCore().ArmInterface();
 }

-std::size_t System::CurrentCoreIndex() const {
-    std::size_t core = impl->kernel.GetCurrentHostThreadID();
-    ASSERT(core < Core::Hardware::NUM_CPU_CORES);
-    return core;
-}
-
 Kernel::PhysicalCore& System::CurrentPhysicalCore() {
     return impl->kernel.CurrentPhysicalCore();
 }

View File

@@ -208,9 +208,6 @@ public:
     /// Gets an ARM interface to the CPU core that is currently running
     [[nodiscard]] const ARM_Interface& CurrentArmInterface() const;

-    /// Gets the index of the currently running CPU core
-    [[nodiscard]] std::size_t CurrentCoreIndex() const;
-
     /// Gets the physical core for the CPU core that is currently running
     [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();

View File

@@ -117,17 +117,18 @@ void CpuManager::MultiCoreRunGuestLoop() {
             physical_core = &kernel.CurrentPhysicalCore();
         }
         system.ExitDynarmicProfile();
+        {
+            Kernel::KScopedDisableDispatch dd(kernel);
             physical_core->ArmInterface().ClearExclusiveState();
-        kernel.CurrentScheduler()->RescheduleCurrentCore();
+        }
     }
 }

 void CpuManager::MultiCoreRunIdleThread() {
     auto& kernel = system.Kernel();
     while (true) {
-        auto& physical_core = kernel.CurrentPhysicalCore();
-        physical_core.Idle();
-        kernel.CurrentScheduler()->RescheduleCurrentCore();
+        Kernel::KScopedDisableDispatch dd(kernel);
+        kernel.CurrentPhysicalCore().Idle();
     }
 }

@@ -135,12 +136,12 @@ void CpuManager::MultiCoreRunSuspendThread() {
     auto& kernel = system.Kernel();
     kernel.CurrentScheduler()->OnThreadStart();
     while (true) {
-        auto core = kernel.GetCurrentHostThreadID();
+        auto core = kernel.CurrentPhysicalCoreIndex();
         auto& scheduler = *kernel.CurrentScheduler();
         Kernel::KThread* current_thread = scheduler.GetCurrentThread();
         Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
         ASSERT(scheduler.ContextSwitchPending());
-        ASSERT(core == kernel.GetCurrentHostThreadID());
+        ASSERT(core == kernel.CurrentPhysicalCoreIndex());
         scheduler.RescheduleCurrentCore();
     }
 }
@@ -346,13 +347,9 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
         sc_sync_first_use = false;
     }

-    // Abort if emulation was killed before the session really starts
-    if (!system.IsPoweredOn()) {
-        return;
-    }
-
+    // Emulation was stopped
     if (stop_token.stop_requested()) {
-        break;
+        return;
     }

     auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
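The guest and idle loops above now bracket their work in a Kernel::KScopedDisableDispatch scope instead of calling RescheduleCurrentCore() by hand: any reschedule happens, at most once, when the guard is destroyed. A self-contained sketch of the pattern, with illustrative names (yuzu's real guard lives in the kernel and operates on the current KThread's disable count):

#include <cassert>

// Per-thread counter that an RAII guard must keep balanced, mirroring how
// KScopedDisableDispatch brackets the idle loop and the exclusive-state
// clearing above. Illustrative only, not yuzu's implementation.
thread_local int g_disable_dispatch_count = 0;

class ScopedDisableDispatch {
public:
    ScopedDisableDispatch() { ++g_disable_dispatch_count; }
    ~ScopedDisableDispatch() {
        assert(g_disable_dispatch_count > 0);
        --g_disable_dispatch_count;
        // In the kernel, returning to the enabled level here is the point
        // where a pending reschedule would actually run.
    }
    ScopedDisableDispatch(const ScopedDisableDispatch&) = delete;
    ScopedDisableDispatch& operator=(const ScopedDisableDispatch&) = delete;
};

bool CanSchedule() {
    // Mirrors KScheduler::CanSchedule in this PR: dispatch is possible only
    // while the current thread's disable count is at its base level.
    return g_disable_dispatch_count <= 1;
}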

View File

@@ -8,6 +8,7 @@
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/time_manager.h"
@@ -28,7 +29,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
 bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();

     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -58,7 +59,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
 bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();

     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -85,6 +86,27 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
     return true;
 }

+class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
+public:
+    explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
+        : KThreadQueue(kernel_), m_tree(t) {}
+
+    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
+                    bool cancel_timer_task) override {
+        // If the thread is waiting on an address arbiter, remove it from the tree.
+        if (waiting_thread->IsWaitingForAddressArbiter()) {
+            m_tree->erase(m_tree->iterator_to(*waiting_thread));
+            waiting_thread->ClearAddressArbiter();
+        }
+
+        // Invoke the base cancel wait handler.
+        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
+    }
+
+private:
+    KAddressArbiter::ThreadTree* m_tree;
+};
+
 } // namespace

 ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
@@ -96,14 +118,14 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
         auto it = thread_tree.nfind_light({addr, -1});
         while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
+            // End the thread's wait.
             KThread* target_thread = std::addressof(*it);
-            target_thread->SetSyncedObject(nullptr, ResultSuccess);
+            target_thread->EndWait(ResultSuccess);

             ASSERT(target_thread->IsWaitingForAddressArbiter());
-            target_thread->Wakeup();
+            target_thread->ClearAddressArbiter();

             it = thread_tree.erase(it);
-            target_thread->ClearAddressArbiter();
             ++num_waiters;
         }
     }
@@ -129,14 +151,14 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
         auto it = thread_tree.nfind_light({addr, -1});
         while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
+            // End the thread's wait.
             KThread* target_thread = std::addressof(*it);
-            target_thread->SetSyncedObject(nullptr, ResultSuccess);
+            target_thread->EndWait(ResultSuccess);

             ASSERT(target_thread->IsWaitingForAddressArbiter());
-            target_thread->Wakeup();
+            target_thread->ClearAddressArbiter();

             it = thread_tree.erase(it);
-            target_thread->ClearAddressArbiter();
             ++num_waiters;
         }
     }
@@ -197,14 +219,14 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
         while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
+            // End the thread's wait.
             KThread* target_thread = std::addressof(*it);
-            target_thread->SetSyncedObject(nullptr, ResultSuccess);
+            target_thread->EndWait(ResultSuccess);

             ASSERT(target_thread->IsWaitingForAddressArbiter());
-            target_thread->Wakeup();
+            target_thread->ClearAddressArbiter();

             it = thread_tree.erase(it);
-            target_thread->ClearAddressArbiter();
             ++num_waiters;
         }
     }
@@ -214,6 +236,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
 ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
     // Prepare to wait.
     KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));

     {
         KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
@@ -224,9 +247,6 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
             return ResultTerminationRequested;
         }

-        // Set the synced object.
-        cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
-
         // Read the value from userspace.
         s32 user_value{};
         bool succeeded{};
@@ -256,31 +276,20 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
         // Set the arbiter.
         cur_thread->SetAddressArbiter(&thread_tree, addr);
         thread_tree.insert(*cur_thread);
-        cur_thread->SetState(ThreadState::Waiting);
+
+        // Wait for the thread to finish.
+        cur_thread->BeginWait(std::addressof(wait_queue));
         cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
     }

-    // Cancel the timer wait.
-    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
-
-    // Remove from the address arbiter.
-    {
-        KScopedSchedulerLock sl(kernel);
-
-        if (cur_thread->IsWaitingForAddressArbiter()) {
-            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
-            cur_thread->ClearAddressArbiter();
-        }
-    }
-
     // Get the result.
-    KSynchronizationObject* dummy{};
-    return cur_thread->GetWaitResult(&dummy);
+    return cur_thread->GetWaitResult();
 }

 ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
     // Prepare to wait.
     KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));

     {
         KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
@@ -291,9 +300,6 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
             return ResultTerminationRequested;
         }

-        // Set the synced object.
-        cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
-
         // Read the value from userspace.
         s32 user_value{};
         if (!ReadFromUser(system, &user_value, addr)) {
@@ -316,26 +322,14 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
         // Set the arbiter.
         cur_thread->SetAddressArbiter(&thread_tree, addr);
         thread_tree.insert(*cur_thread);
-        cur_thread->SetState(ThreadState::Waiting);
+
+        // Wait for the thread to finish.
+        cur_thread->BeginWait(std::addressof(wait_queue));
         cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
     }

-    // Cancel the timer wait.
-    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
-
-    // Remove from the address arbiter.
-    {
-        KScopedSchedulerLock sl(kernel);
-
-        if (cur_thread->IsWaitingForAddressArbiter()) {
-            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
-            cur_thread->ClearAddressArbiter();
-        }
-    }
-
     // Get the result.
-    KSynchronizationObject* dummy{};
-    return cur_thread->GetWaitResult(&dummy);
+    return cur_thread->GetWaitResult();
 }

 } // namespace Kernel
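The arbiter now parks and wakes threads exclusively through the BeginWait/EndWait/CancelWait protocol: the waiter registers a queue object before sleeping, the signaller calls EndWait(result), and timeouts or termination funnel through CancelWait, which is where the queue unlinks the thread from the arbiter tree (replacing the old manual UnscheduleTimeEvent-plus-erase epilogue). A minimal, self-contained model of that control flow; an illustration only, not yuzu's code:

struct Result {
    int code;
};
constexpr Result ResultSuccess{0};

struct Thread;

struct ThreadQueue {
    virtual ~ThreadQueue() = default;
    virtual void EndWait(Thread& t, Result r);
    virtual void CancelWait(Thread& t, Result r);
};

struct Thread {
    ThreadQueue* queue = nullptr;
    Result wait_result{0};
    bool waiting = false;

    void BeginWait(ThreadQueue* q) {
        // Kernel analogue: state -> Waiting; the sleep scope arms the timer.
        queue = q;
        waiting = true;
    }
};

void ThreadQueue::EndWait(Thread& t, Result r) {
    // Kernel analogue: state -> Runnable, wait result recorded, timer freed.
    t.wait_result = r;
    t.waiting = false;
    t.queue = nullptr;
}

void ThreadQueue::CancelWait(Thread& t, Result r) {
    // Derived queues (like ThreadQueueImplForKAddressArbiter) first unlink
    // the thread from their own container, then delegate here.
    EndWait(t, r);
}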

View File

@@ -170,6 +170,10 @@ public:
         }
     }

+    const std::string& GetName() const {
+        return name;
+    }
+
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();

View File

@@ -11,6 +11,7 @@
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_common.h"
 #include "core/hle/kernel/svc_results.h"
@@ -33,7 +34,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
 bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                       u32 new_orr_mask) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();

     // Load the value from the address.
     const auto expected = monitor.ExclusiveRead32(current_core, address);
@@ -57,6 +58,48 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero
     return true;
 }

+class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
+public:
+    explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
+        : KThreadQueue(kernel_) {}
+
+    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
+                    bool cancel_timer_task) override {
+        // Remove the thread as a waiter from its owner.
+        waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
+
+        // Invoke the base cancel wait handler.
+        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
+    }
+};
+
+class ThreadQueueImplForKConditionVariableWaitConditionVariable final : public KThreadQueue {
+private:
+    KConditionVariable::ThreadTree* m_tree;
+
+public:
+    explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
+        KernelCore& kernel_, KConditionVariable::ThreadTree* t)
+        : KThreadQueue(kernel_), m_tree(t) {}
+
+    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
+                    bool cancel_timer_task) override {
+        // Remove the thread as a waiter from its owner.
+        if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
+            owner->RemoveWaiter(waiting_thread);
+        }
+
+        // If the thread is waiting on a condvar, remove it from the tree.
+        if (waiting_thread->IsWaitingForConditionVariable()) {
+            m_tree->erase(m_tree->iterator_to(*waiting_thread));
+            waiting_thread->ClearConditionVariable();
+        }
+
+        // Invoke the base cancel wait handler.
+        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
+    }
+};
+
 } // namespace

 KConditionVariable::KConditionVariable(Core::System& system_)
@@ -78,84 +121,77 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
         // Determine the next tag.
         u32 next_value{};
-        if (next_owner_thread) {
+        if (next_owner_thread != nullptr) {
             next_value = next_owner_thread->GetAddressKeyValue();
             if (num_waiters > 1) {
                 next_value |= Svc::HandleWaitMask;
             }

-            next_owner_thread->SetSyncedObject(nullptr, ResultSuccess);
-            next_owner_thread->Wakeup();
-        }
-
-        // Write the value to userspace.
-        if (!WriteToUser(system, addr, std::addressof(next_value))) {
-            if (next_owner_thread) {
-                next_owner_thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
+            // Write the value to userspace.
+            ResultCode result{ResultSuccess};
+            if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
+                result = ResultSuccess;
+            } else {
+                result = ResultInvalidCurrentMemory;
             }

-            return ResultInvalidCurrentMemory;
-        }
-    }
+            // Signal the next owner thread.
+            next_owner_thread->EndWait(result);
+            return result;
+        } else {
+            // Just write the value to userspace.
+            R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)),
+                     ResultInvalidCurrentMemory);

-    return ResultSuccess;
+            return ResultSuccess;
+        }
+    }
 }

 ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
     KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+    ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);

     // Wait for the address.
+    KThread* owner_thread{};
     {
-        KScopedAutoObject<KThread> owner_thread;
-        ASSERT(owner_thread.IsNull());
-        {
-            KScopedSchedulerLock sl(kernel);
-            cur_thread->SetSyncedObject(nullptr, ResultSuccess);
+        KScopedSchedulerLock sl(kernel);

-            // Check if the thread should terminate.
-            R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
+        // Check if the thread should terminate.
+        R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);

-            {
-                // Read the tag from userspace.
-                u32 test_tag{};
-                R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
-                         ResultInvalidCurrentMemory);
+        // Read the tag from userspace.
+        u32 test_tag{};
+        R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);

-                // If the tag isn't the handle (with wait mask), we're done.
-                R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), ResultSuccess);
+        // If the tag isn't the handle (with wait mask), we're done.
+        R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));

-                // Get the lock owner thread.
-                owner_thread =
-                    kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(
-                        handle);
-                R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle);
+        // Get the lock owner thread.
+        owner_thread = kernel.CurrentProcess()
+                           ->GetHandleTable()
+                           .GetObjectWithoutPseudoHandle<KThread>(handle)
+                           .ReleasePointerUnsafe();
+        R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);

-                // Update the lock.
-                cur_thread->SetAddressKey(addr, value);
-                owner_thread->AddWaiter(cur_thread);
-                cur_thread->SetState(ThreadState::Waiting);
-                cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
-                cur_thread->SetMutexWaitAddressForDebugging(addr);
-            }
-        }
-        ASSERT(owner_thread.IsNotNull());
-    }
+        // Update the lock.
+        cur_thread->SetAddressKey(addr, value);
+        owner_thread->AddWaiter(cur_thread);

-    // Remove the thread as a waiter from the lock owner.
-    {
-        KScopedSchedulerLock sl(kernel);
-        KThread* owner_thread = cur_thread->GetLockOwner();
-        if (owner_thread != nullptr) {
-            owner_thread->RemoveWaiter(cur_thread);
-        }
-    }
+        // Begin waiting.
+        cur_thread->BeginWait(std::addressof(wait_queue));
+        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
+        cur_thread->SetMutexWaitAddressForDebugging(addr);
+    }
+
+    // Close our reference to the owner thread, now that the wait is over.
+    owner_thread->Close();

     // Get the wait result.
-    KSynchronizationObject* dummy{};
-    return cur_thread->GetWaitResult(std::addressof(dummy));
+    return cur_thread->GetWaitResult();
 }

-KThread* KConditionVariable::SignalImpl(KThread* thread) {
+void KConditionVariable::SignalImpl(KThread* thread) {
     // Check pre-conditions.
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
@@ -169,18 +205,16 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
         // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
         // TODO(bunnei): We should call CanAccessAtomic(..) here.
         can_access = true;
-        if (can_access) {
+        if (can_access) [[likely]] {
             UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
                              Svc::HandleWaitMask);
         }
     }

-    KThread* thread_to_close = nullptr;
-    if (can_access) {
+    if (can_access) [[likely]] {
         if (prev_tag == Svc::InvalidHandle) {
             // If nobody held the lock previously, we're all good.
-            thread->SetSyncedObject(nullptr, ResultSuccess);
-            thread->Wakeup();
+            thread->EndWait(ResultSuccess);
         } else {
             // Get the previous owner.
             KThread* owner_thread = kernel.CurrentProcess()
@@ -189,33 +223,22 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
                                         static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
                                         .ReleasePointerUnsafe();

-            if (owner_thread) {
+            if (owner_thread) [[likely]] {
                 // Add the thread as a waiter on the owner.
                 owner_thread->AddWaiter(thread);
-                thread_to_close = owner_thread;
+                owner_thread->Close();
             } else {
                 // The lock was tagged with a thread that doesn't exist.
-                thread->SetSyncedObject(nullptr, ResultInvalidState);
-                thread->Wakeup();
+                thread->EndWait(ResultInvalidState);
             }
         }
     } else {
         // If the address wasn't accessible, note so.
-        thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
-        thread->Wakeup();
+        thread->EndWait(ResultInvalidCurrentMemory);
     }
-
-    return thread_to_close;
 }

 void KConditionVariable::Signal(u64 cv_key, s32 count) {
-    // Prepare for signaling.
-    constexpr int MaxThreads = 16;
-    KLinkedList<KThread> thread_list{kernel};
-    std::array<KThread*, MaxThreads> thread_array;
-    s32 num_to_close{};
-
     // Perform signaling.
     s32 num_waiters{};
     {
@@ -226,14 +249,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
                (it->GetConditionVariableKey() == cv_key)) {
             KThread* target_thread = std::addressof(*it);

-            if (KThread* thread = SignalImpl(target_thread); thread != nullptr) {
-                if (num_to_close < MaxThreads) {
-                    thread_array[num_to_close++] = thread;
-                } else {
-                    thread_list.push_back(*thread);
-                }
-            }
+            this->SignalImpl(target_thread);

             it = thread_tree.erase(it);
             target_thread->ClearConditionVariable();
             ++num_waiters;
@@ -245,27 +261,16 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
             WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
         }
     }
-
-    // Close threads in the array.
-    for (auto i = 0; i < num_to_close; ++i) {
-        thread_array[i]->Close();
-    }
-
-    // Close threads in the list.
-    for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
-        (*it).Close();
-    }
 }

 ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
     // Prepare to wait.
-    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+    ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
+        kernel, std::addressof(thread_tree));

     {
-        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
-
-        // Set the synced object.
-        cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
+        KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout);

         // Check that the thread isn't terminating.
         if (cur_thread->IsTerminationRequested()) {
@@ -290,8 +295,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
             }

             // Wake up the next owner.
-            next_owner_thread->SetSyncedObject(nullptr, ResultSuccess);
-            next_owner_thread->Wakeup();
+            next_owner_thread->EndWait(ResultSuccess);
         }

         // Write to the cv key.
@@ -308,40 +312,21 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
             }
         }

+        // If timeout is zero, time out.
+        R_UNLESS(timeout != 0, ResultTimedOut);
+
         // Update condition variable tracking.
-        {
-            cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
-            thread_tree.insert(*cur_thread);
-        }
+        cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
+        thread_tree.insert(*cur_thread);

-        // If the timeout is non-zero, set the thread as waiting.
-        if (timeout != 0) {
-            cur_thread->SetState(ThreadState::Waiting);
-            cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
-            cur_thread->SetMutexWaitAddressForDebugging(addr);
-        }
-    }
-
-    // Cancel the timer wait.
-    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
-
-    // Remove from the condition variable.
-    {
-        KScopedSchedulerLock sl(kernel);
-
-        if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
-            owner->RemoveWaiter(cur_thread);
-        }
-
-        if (cur_thread->IsWaitingForConditionVariable()) {
-            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
-            cur_thread->ClearConditionVariable();
-        }
-    }
-
-    // Get the result.
-    KSynchronizationObject* dummy{};
-    return cur_thread->GetWaitResult(std::addressof(dummy));
+        // Begin waiting.
+        cur_thread->BeginWait(std::addressof(wait_queue));
+        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
+        cur_thread->SetMutexWaitAddressForDebugging(addr);
+    }
+
+    // Get the wait result.
+    return cur_thread->GetWaitResult();
 }

 } // namespace Kernel

View File

@@ -34,7 +34,7 @@ public:
     [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);

 private:
-    [[nodiscard]] KThread* SignalImpl(KThread* thread);
+    void SignalImpl(KThread* thread);

     ThreadTree thread_tree;

View File

@@ -13,6 +13,7 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);

         std::swap(m_table_size, saved_table_size);
@@ -43,6 +44,7 @@ bool KHandleTable::Remove(Handle handle) {
     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);

         if (this->IsValidHandle(handle)) {
@@ -62,6 +64,7 @@ bool KHandleTable::Remove(Handle handle) {
 }

 ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);

     // Never exceed our capacity.
@@ -84,6 +87,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
 }

 ResultCode KHandleTable::Reserve(Handle* out_handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);

     // Never exceed our capacity.
@@ -94,6 +98,7 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
 }

 void KHandleTable::Unreserve(Handle handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);

     // Unpack the handle.
@@ -112,6 +117,7 @@ void KHandleTable::Unreserve(Handle handle) {
 }

 void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);

     // Unpack the handle.

View File

@@ -68,6 +68,7 @@ public:
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);

         if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -122,6 +123,7 @@ public:
         size_t num_opened;
         {
             // Lock the table.
+            KScopedDisableDispatch dd(kernel);
             KScopedSpinLock lk(m_lock);
             for (num_opened = 0; num_opened < num_handles; num_opened++) {
                 // Get the current handle.
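Every handle-table operation now takes the same two-step prologue: dispatch is disabled before the spin lock is acquired, so the holder cannot be switched out mid-operation while other host threads spin on the lock. A toy illustration of the ordering, with std::mutex standing in for the table's spin lock and stand-in types only:

#include <mutex>

std::mutex table_lock;                 // stands in for the table's KSpinLock
thread_local int dispatch_disabled = 0;

struct DispatchGuard {
    DispatchGuard() { ++dispatch_disabled; }
    ~DispatchGuard() { --dispatch_disabled; }
};

template <typename F>
void WithTableLocked(F&& op) {
    DispatchGuard dd;                            // 1. pin this thread first
    std::lock_guard<std::mutex> lk(table_lock);  // 2. then acquire the lock
    op();                                        // 3. bounded critical section
}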

View File

@@ -0,0 +1,80 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_light_condition_variable.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+namespace {
+
+class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue {
+public:
+    ThreadQueueImplForKLightConditionVariable(KernelCore& kernel_, KThread::WaiterList* wl,
+                                              bool term)
+        : KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}
+
+    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
+                    bool cancel_timer_task) override {
+        // Only process waits if we're allowed to.
+        if (ResultTerminationRequested == wait_result && m_allow_terminating_thread) {
+            return;
+        }
+
+        // Remove the waiting thread from the light condition variable.
+        m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
+
+        // Invoke the base cancel wait handler.
+        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
+    }
+
+private:
+    KThread::WaiterList* m_wait_list;
+    bool m_allow_terminating_thread;
+};
+
+} // namespace
+
+void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
+    // Create thread queue.
+    KThread* owner = GetCurrentThreadPointer(kernel);
+    ThreadQueueImplForKLightConditionVariable wait_queue(kernel, std::addressof(wait_list),
+                                                         allow_terminating_thread);
+
+    // Sleep the thread.
+    {
+        KScopedSchedulerLockAndSleep lk(kernel, owner, timeout);
+
+        if (!allow_terminating_thread && owner->IsTerminationRequested()) {
+            lk.CancelSleep();
+            return;
+        }
+
+        lock->Unlock();
+
+        // Add the thread to the queue.
+        wait_list.push_back(*owner);
+
+        // Begin waiting.
+        owner->BeginWait(std::addressof(wait_queue));
+    }
+
+    // Re-acquire the lock.
+    lock->Lock();
+}
+
+void KLightConditionVariable::Broadcast() {
+    KScopedSchedulerLock lk(kernel);
+
+    // Signal all threads.
+    for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) {
+        it->EndWait(ResultSuccess);
+    }
+}
+
+} // namespace Kernel
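For reference, the contract of the now out-of-line API: Wait() must be entered holding the lock; it drops the lock, parks the thread under the scheduler lock (with the timeout armed by KScopedSchedulerLockAndSleep), and re-acquires the lock on the way out, while Broadcast() simply ends every parked wait with ResultSuccess. A hypothetical caller, assuming the kernel headers above; the surrounding identifiers are assumptions for the example:

#include <memory>

#include "core/hle/kernel/k_light_condition_variable.h"
#include "core/hle/kernel/k_light_lock.h"

// Hypothetical consumer/producer pair showing the lock hand-off.
void ConsumeWhenReady(Kernel::KLightLock& lock, Kernel::KLightConditionVariable& cv,
                      bool& ready) {
    lock.Lock();
    while (!ready) {
        cv.Wait(std::addressof(lock)); // drops and re-takes `lock` internally
    }
    // ... consume the state under the lock ...
    lock.Unlock();
}

void PublishReady(Kernel::KLightLock& lock, Kernel::KLightConditionVariable& cv,
                  bool& ready) {
    lock.Lock();
    ready = true;
    lock.Unlock();
    cv.Broadcast(); // EndWait(ResultSuccess) on every parked thread
}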

View File

@@ -2,72 +2,24 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.

-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
-
 #pragma once

 #include "common/common_types.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
-#include "core/hle/kernel/time_manager.h"
+#include "core/hle/kernel/k_thread.h"

 namespace Kernel {
 class KernelCore;
+class KLightLock;

 class KLightConditionVariable {
 public:
     explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {}

-    void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true) {
-        WaitImpl(lock, timeout, allow_terminating_thread);
-    }
-
-    void Broadcast() {
-        KScopedSchedulerLock lk{kernel};
-
-        // Signal all threads.
-        for (auto& thread : wait_list) {
-            thread.SetState(ThreadState::Runnable);
-        }
-    }
+    void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true);
+    void Broadcast();

 private:
-    void WaitImpl(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
-        KThread* owner = GetCurrentThreadPointer(kernel);
-
-        // Sleep the thread.
-        {
-            KScopedSchedulerLockAndSleep lk{kernel, owner, timeout};
-
-            if (!allow_terminating_thread && owner->IsTerminationRequested()) {
-                lk.CancelSleep();
-                return;
-            }
-
-            lock->Unlock();
-
-            // Set the thread as waiting.
-            GetCurrentThread(kernel).SetState(ThreadState::Waiting);
-
-            // Add the thread to the queue.
-            wait_list.push_back(GetCurrentThread(kernel));
-        }
-
-        // Remove the thread from the wait list.
-        {
-            KScopedSchedulerLock sl{kernel};
-
-            wait_list.erase(wait_list.iterator_to(GetCurrentThread(kernel)));
-        }
-
-        // Cancel the task that the sleep setup.
-        kernel.TimeManager().UnscheduleTimeEvent(owner);
-
-        // Re-acquire the lock.
-        lock->Lock();
-    }
-
     KernelCore& kernel;
     KThread::WaiterList wait_list{};
 };

View File

@@ -5,44 +5,59 @@
 #include "core/hle/kernel/k_light_lock.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/kernel.h"

 namespace Kernel {

+namespace {
+
+class ThreadQueueImplForKLightLock final : public KThreadQueue {
+public:
+    explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}
+
+    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
+                    bool cancel_timer_task) override {
+        // Remove the thread as a waiter from its owner.
+        if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
+            owner->RemoveWaiter(waiting_thread);
+        }
+
+        // Invoke the base cancel wait handler.
+        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
+    }
+};
+
+} // namespace
+
 void KLightLock::Lock() {
     const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
-    const uintptr_t cur_thread_tag = (cur_thread | 1);

     while (true) {
         uintptr_t old_tag = tag.load(std::memory_order_relaxed);

-        while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1,
+        while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
                                           std::memory_order_acquire)) {
-            if ((old_tag | 1) == cur_thread_tag) {
-                return;
-            }
         }

-        if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) {
+        if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) {
             break;
         }
-
-        LockSlowPath(old_tag | 1, cur_thread);
     }
 }

 void KLightLock::Unlock() {
     const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));

     uintptr_t expected = cur_thread;
-    do {
-        if (expected != cur_thread) {
-            return UnlockSlowPath(cur_thread);
-        }
-    } while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release));
+    if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
+        this->UnlockSlowPath(cur_thread);
+    }
 }

-void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
+bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
     KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
+    ThreadQueueImplForKLightLock wait_queue(kernel);

     // Pend the current thread waiting on the owner thread.
     {
@@ -50,7 +65,7 @@ bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {

         // Ensure we actually have locking to do.
         if (tag.load(std::memory_order_relaxed) != _owner) {
-            return;
+            return false;
         }

         // Add the current thread as a waiter on the owner.
@@ -58,22 +73,15 @@ bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
         cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
         owner_thread->AddWaiter(cur_thread);

-        // Set thread states.
-        cur_thread->SetState(ThreadState::Waiting);
+        // Begin waiting to hold the lock.
+        cur_thread->BeginWait(std::addressof(wait_queue));

         if (owner_thread->IsSuspended()) {
             owner_thread->ContinueIfHasKernelWaiters();
         }
     }

-    // We're no longer waiting on the lock owner.
-    {
-        KScopedSchedulerLock sl{kernel};
-
-        if (KThread* owner_thread = cur_thread->GetLockOwner(); owner_thread != nullptr) {
-            owner_thread->RemoveWaiter(cur_thread);
-        }
-    }
+    return true;
 }

 void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
@@ -81,22 +89,20 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {

     // Unlock.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl(kernel);

         // Get the next owner.
-        s32 num_waiters = 0;
+        s32 num_waiters;
         KThread* next_owner = owner_thread->RemoveWaiterByKey(
             std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));

         // Pass the lock to the next owner.
         uintptr_t next_tag = 0;
         if (next_owner != nullptr) {
-            next_tag = reinterpret_cast<uintptr_t>(next_owner);
-            if (num_waiters > 1) {
-                next_tag |= 0x1;
-            }
+            next_tag =
+                reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1);

-            next_owner->SetState(ThreadState::Runnable);
+            next_owner->EndWait(ResultSuccess);

             if (next_owner->IsSuspended()) {
                 next_owner->ContinueIfHasKernelWaiters();
@@ -110,7 +116,7 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
     }

     // Write the new tag value.
-    tag.store(next_tag);
+    tag.store(next_tag, std::memory_order_release);
 }
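The rewritten slow paths lean on the lock word's encoding: `tag` holds the owner KThread pointer with bit 0 repurposed as a "contended" flag, which is why `old_tag | 1` names the contended form of an owner and why `next_tag` can be built as pointer | (num_waiters > 1). Illustrative helpers for the packing, assuming (as the code does) that thread objects are at least 2-byte aligned so bit 0 of their address is always free:

#include <cstdint>

struct ThreadStub {
    alignas(4) int dummy; // real KThread objects are far more aligned than this
};

constexpr uintptr_t kContendedBit = 1;

// Owner pointer + "others are waiting" flag packed into one word.
uintptr_t MakeTag(ThreadStub* owner, bool contended) {
    return reinterpret_cast<uintptr_t>(owner) | static_cast<uintptr_t>(contended);
}

ThreadStub* TagOwner(uintptr_t tag) {
    return reinterpret_cast<ThreadStub*>(tag & ~kContendedBit);
}

bool TagContended(uintptr_t tag) {
    return (tag & kContendedBit) != 0;
}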

View File

@@ -20,7 +20,7 @@ public:
     void Unlock();

-    void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
+    bool LockSlowPath(uintptr_t owner, uintptr_t cur_thread);

     void UnlockSlowPath(uintptr_t cur_thread);

View File

@@ -60,6 +60,7 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
+    thread->DisableDispatch();

     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
@@ -227,6 +228,8 @@ void KProcess::PinCurrentThread() {
     const s32 core_id = GetCurrentCoreId(kernel);
     KThread* cur_thread = GetCurrentThreadPointer(kernel);

+    // If the thread isn't terminated, pin it.
+    if (!cur_thread->IsTerminationRequested()) {
         // Pin it.
         PinThread(core_id, cur_thread);
         cur_thread->Pin();
@@ -234,6 +237,7 @@ void KProcess::PinCurrentThread() {
         // An update is needed.
         KScheduler::SetSchedulerUpdateNeeded(kernel);
     }
+}

 void KProcess::UnpinCurrentThread() {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
@@ -250,6 +254,20 @@ void KProcess::UnpinCurrentThread() {
     KScheduler::SetSchedulerUpdateNeeded(kernel);
 }

+void KProcess::UnpinThread(KThread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+    // Get the thread's core id.
+    const auto core_id = thread->GetActiveCore();
+
+    // Unpin it.
+    UnpinThread(core_id, thread);
+    thread->Unpin();
+
+    // An update is needed.
+    KScheduler::SetSchedulerUpdateNeeded(kernel);
+}
+
 ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
                                      [[maybe_unused]] size_t size) {
     // Lock ourselves, to prevent concurrent access.

View File

@@ -347,6 +347,7 @@ public:
     void PinCurrentThread();
     void UnpinCurrentThread();
+    void UnpinThread(KThread* thread);

     KLightLock& GetStateLock() {
         return state_lock;

View File

@@ -240,8 +240,8 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3
     // If the thread is runnable, we want to change its priority in the queue.
     if (thread->GetRawState() == ThreadState::Runnable) {
-        GetPriorityQueue(kernel).ChangePriority(
-            old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
+        GetPriorityQueue(kernel).ChangePriority(old_priority,
+                                                thread == kernel.GetCurrentEmuThread(), thread);
         IncrementScheduledCount(thread);
         SetSchedulerUpdateNeeded(kernel);
     }
@@ -360,7 +360,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
 }

 bool KScheduler::CanSchedule(KernelCore& kernel) {
-    return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
+    return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1;
 }

 bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
@@ -376,21 +376,31 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
 }

 void KScheduler::DisableScheduling(KernelCore& kernel) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
-        scheduler->GetCurrentThread()->DisableDispatch();
+    // If we are shutting down the kernel, none of this is relevant anymore.
+    if (kernel.IsShuttingDown()) {
+        return;
     }
+
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
+    GetCurrentThreadPointer(kernel)->DisableDispatch();
 }

 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
-        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
-            scheduler->GetCurrentThread()->EnableDispatch();
-        }
+    // If we are shutting down the kernel, none of this is relevant anymore.
+    if (kernel.IsShuttingDown()) {
+        return;
     }
-    RescheduleCores(kernel, cores_needing_scheduling);
+
+    auto* current_thread = GetCurrentThreadPointer(kernel);
+
+    ASSERT(current_thread->GetDisableDispatchCount() >= 1);
+
+    if (current_thread->GetDisableDispatchCount() > 1) {
+        current_thread->EnableDispatch();
+    } else {
+        RescheduleCores(kernel, cores_needing_scheduling);
+    }
 }

 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
     if (IsSchedulerUpdateNeeded(kernel)) {
@@ -617,13 +627,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
     state.highest_priority_thread = nullptr;
 }

-KScheduler::~KScheduler() {
+void KScheduler::Finalize() {
     if (idle_thread) {
         idle_thread->Close();
         idle_thread = nullptr;
     }
 }

+KScheduler::~KScheduler() {
+    ASSERT(!idle_thread);
+}
+
 KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
@@ -642,10 +656,12 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
+
     guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
+        GetCurrentThread()->EnableDispatch();
         guard.Unlock();
     }
 }
@@ -655,27 +671,34 @@ void KScheduler::OnThreadStart() {
 }

 void KScheduler::Unload(KThread* thread) {
+    ASSERT(thread);
+
     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");

-    if (thread) {
     if (thread->IsCallingSvc()) {
         thread->ClearIsCallingSvc();
     }
-        if (!thread->IsTerminationRequested()) {
-            prev_thread = thread;

-            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+    auto& physical_core = system.Kernel().PhysicalCore(core_id);
+    if (!physical_core.IsInitialized()) {
+        return;
+    }
+
+    Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
     cpu_core.SaveContext(thread->GetContext32());
     cpu_core.SaveContext(thread->GetContext64());
     // Save the TPIDR_EL0 system register in case it was modified.
     thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
     cpu_core.ClearExclusiveState();
+
+    if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
+        prev_thread = thread;
     } else {
         prev_thread = nullptr;
     }
+
     thread->context_guard.Unlock();
 }
-}

 void KScheduler::Reload(KThread* thread) {
     LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
@@ -683,11 +706,6 @@ void KScheduler::Reload(KThread* thread) {
     if (thread) {
         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");

-        auto* const thread_owner_process = thread->GetOwnerProcess();
-        if (thread_owner_process != nullptr) {
-            system.Kernel().MakeCurrentProcess(thread_owner_process);
-        }
-
         Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
         cpu_core.LoadContext(thread->GetContext32());
         cpu_core.LoadContext(thread->GetContext64());
@@ -705,7 +723,7 @@ void KScheduler::SwitchContextStep2() {
 }

 void KScheduler::ScheduleImpl() {
-    KThread* previous_thread = current_thread.load();
+    KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;

     state.needs_scheduling = false;
@@ -717,10 +735,15 @@ void KScheduler::ScheduleImpl() {

     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
+        previous_thread->EnableDispatch();
         guard.Unlock();
         return;
     }

+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     current_thread.store(next_thread);

     KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -731,11 +754,7 @@ void KScheduler::ScheduleImpl() {
     Unload(previous_thread);

     std::shared_ptr<Common::Fiber>* old_context;
-    if (previous_thread != nullptr) {
-        old_context = &previous_thread->GetHostContext();
-    } else {
-        old_context = &idle_thread->GetHostContext();
-    }
+    old_context = &previous_thread->GetHostContext();
     guard.Unlock();

     Common::Fiber::YieldTo(*old_context, *switch_fiber);
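DisableScheduling/EnableScheduling now behave as a strictly nested pair on the current thread's dispatch-disable counter: inner Enable calls only unwind one level, and only the outermost one (the count back at its baseline, matching CanSchedule's <= 1 test) reaches the RescheduleCores branch. A small model of that invariant; the baseline value is an assumption of the model, and the real counter lives on KThread:

#include <cassert>

struct ThreadModel {
    int disable_dispatch_count = 0;
};

void DisableScheduling(ThreadModel& t) {
    assert(t.disable_dispatch_count >= 0);
    ++t.disable_dispatch_count;
}

void EnableScheduling(ThreadModel& t) {
    assert(t.disable_dispatch_count >= 1);
    if (t.disable_dispatch_count > 1) {
        // Still nested: just unwind one level.
        --t.disable_dispatch_count;
    } else {
        // Outermost level: mirror of the RescheduleCores() branch above, which
        // performs the deferred context switch and re-enables dispatch as part
        // of rescheduling.
        --t.disable_dispatch_count;
    }
}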

View File

@@ -33,6 +33,8 @@ public:
     explicit KScheduler(Core::System& system_, s32 core_id_);
     ~KScheduler();

+    void Finalize();
+
     /// Reschedules to the next available thread (call after current thread is suspended)
     void RescheduleCurrentCore();

View File

@@ -23,6 +23,11 @@ public:
     }

     void Lock() {
+        // If we are shutting down the kernel, none of this is relevant anymore.
+        if (kernel.IsShuttingDown()) {
+            return;
+        }
+
         if (IsLockedByCurrentThread()) {
             // If we already own the lock, we can just increment the count.
             ASSERT(lock_count > 0);
@@ -43,6 +48,11 @@ public:
     }

     void Unlock() {
+        // If we are shutting down the kernel, none of this is relevant anymore.
+        if (kernel.IsShuttingDown()) {
+            return;
+        }
+
         ASSERT(IsLockedByCurrentThread());
         ASSERT(lock_count > 0);

View File

@@ -8,6 +8,7 @@
 #pragma once

 #include "common/common_types.h"
+#include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/time_manager.h"

View File

@@ -175,8 +175,7 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
     {
         KScopedSchedulerLock lock(kernel);
         if (!context.IsThreadWaiting()) {
-            context.GetThread().Wakeup();
-            context.GetThread().SetSyncedObject(nullptr, result);
+            context.GetThread().EndWait(result);
         }
     }

View File

@ -8,11 +8,66 @@
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/svc_results.h"
namespace Kernel { namespace Kernel {
namespace {
class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
public:
ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o,
KSynchronizationObject::ThreadListNode* n, s32 c)
: KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {}
void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
ResultCode wait_result) override {
// Determine the sync index, and unlink all nodes.
s32 sync_index = -1;
for (auto i = 0; i < m_count; ++i) {
// Check if this is the signaled object.
if (m_objects[i] == signaled_object && sync_index == -1) {
sync_index = i;
}
// Unlink the current node from the current object.
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
}
// Set the waiting thread's sync index.
waiting_thread->SetSyncedIndex(sync_index);
// Set the waiting thread as not cancellable.
waiting_thread->ClearCancellable();
// Invoke the base end wait handler.
KThreadQueue::EndWait(waiting_thread, wait_result);
}
void CancelWait(KThread* waiting_thread, ResultCode wait_result,
bool cancel_timer_task) override {
// Remove all nodes from our list.
for (auto i = 0; i < m_count; ++i) {
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
}
// Set the waiting thread as not cancellable.
waiting_thread->ClearCancellable();
// Invoke the base cancel wait handler.
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
private:
KSynchronizationObject** m_objects;
KSynchronizationObject::ThreadListNode* m_nodes;
s32 m_count;
};
} // namespace
void KSynchronizationObject::Finalize() { void KSynchronizationObject::Finalize() {
this->OnFinalizeSynchronizationObject(); this->OnFinalizeSynchronizationObject();
KAutoObject::Finalize(); KAutoObject::Finalize();
@ -25,11 +80,19 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
    std::vector<ThreadListNode> thread_nodes(num_objects);

    // Prepare for wait.
-   KThread* thread = kernel_ctx.CurrentScheduler()->GetCurrentThread();
+   KThread* thread = GetCurrentThreadPointer(kernel_ctx);
+   ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects,
+                                                           thread_nodes.data(), num_objects);

    {
        // Setup the scheduling lock and sleep.
-       KScopedSchedulerLockAndSleep slp{kernel_ctx, thread, timeout};
+       KScopedSchedulerLockAndSleep slp(kernel_ctx, thread, timeout);
+
+       // Check if the thread should terminate.
+       if (thread->IsTerminationRequested()) {
+           slp.CancelSleep();
+           return ResultTerminationRequested;
+       }

        // Check if any of the objects are already signaled.
        for (auto i = 0; i < num_objects; ++i) {
@ -48,12 +111,6 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
            return ResultTimedOut;
        }

-       // Check if the thread should terminate.
-       if (thread->IsTerminationRequested()) {
-           slp.CancelSleep();
-           return ResultTerminationRequested;
-       }
-
        // Check if waiting was canceled.
        if (thread->IsWaitCancelled()) {
            slp.CancelSleep();
@ -66,73 +123,25 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
            thread_nodes[i].thread = thread;
            thread_nodes[i].next = nullptr;

-           if (objects[i]->thread_list_tail == nullptr) {
-               objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
-           } else {
-               objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
-           }
-
-           objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
+           objects[i]->LinkNode(std::addressof(thread_nodes[i]));
        }

-       // For debugging only
-       thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});
-
-       // Mark the thread as waiting.
+       // Mark the thread as cancellable.
        thread->SetCancellable();
-       thread->SetSyncedObject(nullptr, ResultTimedOut);
-       thread->SetState(ThreadState::Waiting);
+
+       // Clear the thread's synced index.
+       thread->SetSyncedIndex(-1);
+
+       // Wait for an object to be signaled.
+       thread->BeginWait(std::addressof(wait_queue));
        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
    }

-   // The lock/sleep is done, so we should be able to get our result.
-
-   // Thread is no longer cancellable.
-   thread->ClearCancellable();
-
-   // For debugging only
-   thread->SetWaitObjectsForDebugging({});
-
-   // Cancel the timer as needed.
-   kernel_ctx.TimeManager().UnscheduleTimeEvent(thread);
+   // Set the output index.
+   *out_index = thread->GetSyncedIndex();

    // Get the wait result.
-   ResultCode wait_result{ResultSuccess};
-   s32 sync_index = -1;
-   {
-       KScopedSchedulerLock lock(kernel_ctx);
-       KSynchronizationObject* synced_obj;
-       wait_result = thread->GetWaitResult(std::addressof(synced_obj));
-
-       for (auto i = 0; i < num_objects; ++i) {
-           // Unlink the object from the list.
-           ThreadListNode* prev_ptr =
-               reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
-           ThreadListNode* prev_val = nullptr;
-           ThreadListNode *prev, *tail_prev;
-
-           do {
-               prev = prev_ptr;
-               prev_ptr = prev_ptr->next;
-               tail_prev = prev_val;
-               prev_val = prev_ptr;
-           } while (prev_ptr != std::addressof(thread_nodes[i]));
-
-           if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
-               objects[i]->thread_list_tail = tail_prev;
-           }
-
-           prev->next = thread_nodes[i].next;
-
-           if (objects[i] == synced_obj) {
-               sync_index = i;
-           }
-       }
-   }
-
-   // Set output.
-   *out_index = sync_index;
-
-   return wait_result;
+   return thread->GetWaitResult();
}
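The rewrite above replaces the ad-hoc wakeup bookkeeping with a queue object: the waiter links one node per object, parks itself once, and the queue's NotifyAvailable callback records which object fired before unlinking every node. A portable sketch of that shape, with hypothetical names and a condition variable standing in for the scheduler lock:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    struct MultiWaiter {
        std::mutex m;
        std::condition_variable cv;
        int synced_index = -1; // -1 means "timed out / not signaled yet".

        // Called by the signaling side, identifying itself by index.
        void NotifyAvailable(int index) {
            {
                std::scoped_lock lk{m};
                if (synced_index == -1) {
                    synced_index = index;
                }
            }
            cv.notify_one();
        }

        // Called by the waiting side; returns the index that fired, or -1.
        int Wait(std::chrono::milliseconds timeout) {
            std::unique_lock lk{m};
            cv.wait_for(lk, timeout, [this] { return synced_index != -1; });
            return synced_index;
        }
    };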
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_) KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
@ -141,7 +150,7 @@ KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
KSynchronizationObject::~KSynchronizationObject() = default; KSynchronizationObject::~KSynchronizationObject() = default;
void KSynchronizationObject::NotifyAvailable(ResultCode result) { void KSynchronizationObject::NotifyAvailable(ResultCode result) {
-   KScopedSchedulerLock lock(kernel);
+   KScopedSchedulerLock sl(kernel);
// If we're not signaled, we've nothing to notify. // If we're not signaled, we've nothing to notify.
if (!this->IsSignaled()) { if (!this->IsSignaled()) {
@ -150,11 +159,7 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) {
// Iterate over each thread. // Iterate over each thread.
    for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
-       KThread* thread = cur_node->thread;
-       if (thread->GetState() == ThreadState::Waiting) {
-           thread->SetSyncedObject(this, result);
-           thread->SetState(ThreadState::Runnable);
-       }
+       cur_node->thread->NotifyAvailable(this, result);
    }
} }

View File

@ -35,6 +35,38 @@ public:
[[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const; [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
void LinkNode(ThreadListNode* node_) {
// Link the node to the list.
if (thread_list_tail == nullptr) {
thread_list_head = node_;
} else {
thread_list_tail->next = node_;
}
thread_list_tail = node_;
}
void UnlinkNode(ThreadListNode* node_) {
// Unlink the node from the list.
ThreadListNode* prev_ptr =
reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head));
ThreadListNode* prev_val = nullptr;
ThreadListNode *prev, *tail_prev;
do {
prev = prev_ptr;
prev_ptr = prev_ptr->next;
tail_prev = prev_val;
prev_val = prev_ptr;
} while (prev_ptr != node_);
if (thread_list_tail == node_) {
thread_list_tail = tail_prev;
}
prev->next = node_->next;
}
protected: protected:
explicit KSynchronizationObject(KernelCore& kernel); explicit KSynchronizationObject(KernelCore& kernel);
~KSynchronizationObject() override; ~KSynchronizationObject() override;

View File

@ -13,6 +13,9 @@
#include "common/common_types.h" #include "common/common_types.h"
#include "common/fiber.h" #include "common/fiber.h"
#include "common/logging/log.h" #include "common/logging/log.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "common/thread_queue_list.h"
#include "core/core.h" #include "core/core.h"
#include "core/cpu_manager.h" #include "core/cpu_manager.h"
#include "core/hardware_properties.h" #include "core/hardware_properties.h"
@ -56,6 +59,34 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
namespace Kernel { namespace Kernel {
namespace {
class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
public:
explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_)
: KThreadQueueWithoutEndWait(kernel_) {}
};
class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
public:
explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
: KThreadQueue(kernel_), m_wait_list(wl) {}
void CancelWait(KThread* waiting_thread, ResultCode wait_result,
bool cancel_timer_task) override {
// Remove the thread from the wait list.
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
// Invoke the base cancel wait handler.
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
private:
KThread::WaiterList* m_wait_list;
};
} // namespace
KThread::KThread(KernelCore& kernel_) KThread::KThread(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {} : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
KThread::~KThread() = default; KThread::~KThread() = default;
@ -82,6 +113,8 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
[[fallthrough]]; [[fallthrough]];
case ThreadType::HighPriority: case ThreadType::HighPriority:
[[fallthrough]]; [[fallthrough]];
case ThreadType::Dummy:
[[fallthrough]];
case ThreadType::User: case ThreadType::User:
ASSERT(((owner == nullptr) || ASSERT(((owner == nullptr) ||
(owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask())); (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
@ -127,11 +160,8 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
priority = prio; priority = prio;
base_priority = prio; base_priority = prio;
-   // Set sync object and waiting lock to null.
-   synced_object = nullptr;
-
    // Initialize sleeping queue.
-   sleeping_queue = nullptr;
+   wait_queue = nullptr;
// Set suspend flags. // Set suspend flags.
suspend_request_flags = 0; suspend_request_flags = 0;
@ -184,7 +214,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
// Setup the stack parameters. // Setup the stack parameters.
StackParameters& sp = GetStackParameters(); StackParameters& sp = GetStackParameters();
sp.cur_thread = this; sp.cur_thread = this;
-   sp.disable_count = 1;
+   sp.disable_count = 0;
SetInExceptionHandler(); SetInExceptionHandler();
// Set thread ID. // Set thread ID.
@ -211,15 +241,16 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
// Initialize the thread. // Initialize the thread.
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
-   // Initialize host context.
+   // Initialize emulation parameters.
    thread->host_context =
        std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
+   thread->is_single_core = !Settings::values.use_multi_core.GetValue();

    return ResultSuccess;
}

ResultCode KThread::InitializeDummyThread(KThread* thread) {
-   return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Main);
+   return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Dummy);
}
ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
@ -273,11 +304,14 @@ void KThread::Finalize() {
auto it = waiter_list.begin(); auto it = waiter_list.begin();
while (it != waiter_list.end()) { while (it != waiter_list.end()) {
-       // The thread shouldn't be a kernel waiter.
+       // Clear the lock owner
        it->SetLockOwner(nullptr);
-       it->SetSyncedObject(nullptr, ResultInvalidState);
-       it->Wakeup();

+       // Erase the waiter from our list.
        it = waiter_list.erase(it);
+
+       // Cancel the thread's wait.
+       it->CancelWait(ResultInvalidState, true);
    }
} }
@ -294,15 +328,12 @@ bool KThread::IsSignaled() const {
return signaled; return signaled;
} }
-void KThread::Wakeup() {
-   KScopedSchedulerLock sl{kernel};
+void KThread::OnTimer() {
+   ASSERT(kernel.GlobalSchedulerContext().IsLocked());

+   // If we're waiting, cancel the wait.
    if (GetState() == ThreadState::Waiting) {
-       if (sleeping_queue != nullptr) {
-           sleeping_queue->WakeupThread(this);
-       } else {
-           SetState(ThreadState::Runnable);
-       }
+       wait_queue->CancelWait(this, ResultTimedOut, false);
    }
}
@ -327,7 +358,7 @@ void KThread::StartTermination() {
// Signal. // Signal.
signaled = true; signaled = true;
-   NotifyAvailable();
+   KSynchronizationObject::NotifyAvailable();
// Clear previous thread in KScheduler. // Clear previous thread in KScheduler.
KScheduler::ClearPreviousThread(kernel, this); KScheduler::ClearPreviousThread(kernel, this);
@ -475,30 +506,32 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m
return ResultSuccess; return ResultSuccess;
} }
-ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
+ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
    ASSERT(parent != nullptr);
    ASSERT(v_affinity_mask != 0);
-   KScopedLightLock lk{activity_pause_lock};
+   KScopedLightLock lk(activity_pause_lock);

    // Set the core mask.
    u64 p_affinity_mask = 0;
    {
-       KScopedSchedulerLock sl{kernel};
+       KScopedSchedulerLock sl(kernel);
        ASSERT(num_core_migration_disables >= 0);

-       // If the core id is no-update magic, preserve the ideal core id.
-       if (cpu_core_id == Svc::IdealCoreNoUpdate) {
-           cpu_core_id = virtual_ideal_core_id;
-           R_UNLESS(((1ULL << cpu_core_id) & v_affinity_mask) != 0, ResultInvalidCombination);
+       // If we're updating, set our ideal virtual core.
+       if (core_id_ != Svc::IdealCoreNoUpdate) {
+           virtual_ideal_core_id = core_id_;
+       } else {
+           // Preserve our ideal core id.
+           core_id_ = virtual_ideal_core_id;
+           R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination);
        }

-       // Set the virtual core/affinity mask.
-       virtual_ideal_core_id = cpu_core_id;
+       // Set our affinity mask.
        virtual_affinity_mask = v_affinity_mask;

        // Translate the virtual core to a physical core.
-       if (cpu_core_id >= 0) {
-           cpu_core_id = Core::Hardware::VirtualToPhysicalCoreMap[cpu_core_id];
+       if (core_id_ >= 0) {
+           core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_];
        }

        // Translate the virtual affinity mask to a physical one.
@ -513,7 +546,7 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
        const KAffinityMask old_mask = physical_affinity_mask;

        // Set our new ideals.
-       physical_ideal_core_id = cpu_core_id;
+       physical_ideal_core_id = core_id_;
        physical_affinity_mask.SetAffinityMask(p_affinity_mask);

        if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
@ -531,18 +564,18 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
        }
    } else {
        // Otherwise, we edit the original affinity for restoration later.
-       original_physical_ideal_core_id = cpu_core_id;
+       original_physical_ideal_core_id = core_id_;
        original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
    }
}

    // Update the pinned waiter list.
+   ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list));
    {
        bool retry_update{};
-       bool thread_is_pinned{};
        do {
            // Lock the scheduler.
-           KScopedSchedulerLock sl{kernel};
+           KScopedSchedulerLock sl(kernel);

            // Don't do any further management if our termination has been requested.
            R_SUCCEED_IF(IsTerminationRequested());
@ -570,12 +603,9 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
                R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
                         ResultTerminationRequested);

-               // Note that the thread was pinned.
-               thread_is_pinned = true;
-
                // Wait until the thread isn't pinned any more.
                pinned_waiter_list.push_back(GetCurrentThread(kernel));
-               GetCurrentThread(kernel).SetState(ThreadState::Waiting);
+               GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
            } else {
                // If the thread isn't pinned, release the scheduler lock and retry until it's
                // not current.
@ -583,16 +613,6 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
            }
        } while (retry_update);

-       // If the thread was pinned, it no longer is, and we should remove the current thread from
-       // our waiter list.
-       if (thread_is_pinned) {
-           // Lock the scheduler.
-           KScopedSchedulerLock sl{kernel};
-
-           // Remove from the list.
-           pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
-       }
    }
return ResultSuccess; return ResultSuccess;
@ -641,15 +661,9 @@ void KThread::WaitCancel() {
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{kernel};
// Check if we're waiting and cancellable. // Check if we're waiting and cancellable.
-   if (GetState() == ThreadState::Waiting && cancellable) {
-       if (sleeping_queue != nullptr) {
-           sleeping_queue->WakeupThread(this);
-           wait_cancelled = true;
-       } else {
-           SetSyncedObject(nullptr, ResultCancelled);
-           SetState(ThreadState::Runnable);
-           wait_cancelled = false;
-       }
+   if (this->GetState() == ThreadState::Waiting && cancellable) {
+       wait_cancelled = false;
+       wait_queue->CancelWait(this, ResultCancelled, true);
} else { } else {
// Otherwise, note that we cancelled a wait. // Otherwise, note that we cancelled a wait.
wait_cancelled = true; wait_cancelled = true;
@ -700,60 +714,59 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
// Set the activity. // Set the activity.
{ {
// Lock the scheduler. // Lock the scheduler.
-       KScopedSchedulerLock sl{kernel};
+       KScopedSchedulerLock sl(kernel);

        // Verify our state.
-       const auto cur_state = GetState();
+       const auto cur_state = this->GetState();
        R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
                 ResultInvalidState);

        // Either pause or resume.
        if (activity == Svc::ThreadActivity::Paused) {
            // Verify that we're not suspended.
-           R_UNLESS(!IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+           R_UNLESS(!this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);

            // Suspend.
-           RequestSuspend(SuspendType::Thread);
+           this->RequestSuspend(SuspendType::Thread);
        } else {
            ASSERT(activity == Svc::ThreadActivity::Runnable);

            // Verify that we're suspended.
-           R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+           R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);

            // Resume.
-           Resume(SuspendType::Thread);
+           this->Resume(SuspendType::Thread);
        }
    }

    // If the thread is now paused, update the pinned waiter list.
    if (activity == Svc::ThreadActivity::Paused) {
-       bool thread_is_pinned{};
-       bool thread_is_current{};
+       ThreadQueueImplForKThreadSetProperty wait_queue_(kernel,
+                                                        std::addressof(pinned_waiter_list));
+
+       bool thread_is_current;
        do {
            // Lock the scheduler.
-           KScopedSchedulerLock sl{kernel};
+           KScopedSchedulerLock sl(kernel);

            // Don't do any further management if our termination has been requested.
-           R_SUCCEED_IF(IsTerminationRequested());
+           R_SUCCEED_IF(this->IsTerminationRequested());
+
+           // By default, treat the thread as not current.
+           thread_is_current = false;

            // Check whether the thread is pinned.
-           if (GetStackParameters().is_pinned) {
+           if (this->GetStackParameters().is_pinned) {
                // Verify that the current thread isn't terminating.
                R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
                         ResultTerminationRequested);

-               // Note that the thread was pinned and not current.
-               thread_is_pinned = true;
-               thread_is_current = false;
-
                // Wait until the thread isn't pinned any more.
                pinned_waiter_list.push_back(GetCurrentThread(kernel));
-               GetCurrentThread(kernel).SetState(ThreadState::Waiting);
+               GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
            } else {
                // Check if the thread is currently running.
                // If it is, we'll need to retry.
-               thread_is_current = false;
                for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
                    if (kernel.Scheduler(i).GetCurrentThread() == this) {
                        thread_is_current = true;
@ -762,16 +775,6 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
                }
            }
        } while (thread_is_current);

-       // If the thread was pinned, it no longer is, and we should remove the current thread from
-       // our waiter list.
-       if (thread_is_pinned) {
-           // Lock the scheduler.
-           KScopedSchedulerLock sl{kernel};
-
-           // Remove from the list.
-           pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
-       }
    }
return ResultSuccess; return ResultSuccess;
@ -966,6 +969,9 @@ ResultCode KThread::Run() {
// Set our state and finish. // Set our state and finish.
SetState(ThreadState::Runnable); SetState(ThreadState::Runnable);
DisableDispatch();
return ResultSuccess; return ResultSuccess;
} }
} }
@ -996,29 +1002,63 @@ ResultCode KThread::Sleep(s64 timeout) {
ASSERT(this == GetCurrentThreadPointer(kernel)); ASSERT(this == GetCurrentThreadPointer(kernel));
ASSERT(timeout > 0); ASSERT(timeout > 0);
+   ThreadQueueImplForKThreadSleep wait_queue_(kernel);
+
    {
        // Setup the scheduling lock and sleep.
-       KScopedSchedulerLockAndSleep slp{kernel, this, timeout};
+       KScopedSchedulerLockAndSleep slp(kernel, this, timeout);

        // Check if the thread should terminate.
-       if (IsTerminationRequested()) {
+       if (this->IsTerminationRequested()) {
            slp.CancelSleep();
            return ResultTerminationRequested;
        }

-       // Mark the thread as waiting.
-       SetState(ThreadState::Waiting);
+       // Wait for the sleep to end.
+       this->BeginWait(std::addressof(wait_queue_));
        SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
    }

-   // The lock/sleep is done.
-
-   // Cancel the timer.
-   kernel.TimeManager().UnscheduleTimeEvent(this);

    return ResultSuccess;
}
void KThread::BeginWait(KThreadQueue* queue) {
// Set our state as waiting.
SetState(ThreadState::Waiting);
// Set our wait queue.
wait_queue = queue;
}
void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
// If we're waiting, notify our queue that we're available.
if (GetState() == ThreadState::Waiting) {
wait_queue->NotifyAvailable(this, signaled_object, wait_result_);
}
}
void KThread::EndWait(ResultCode wait_result_) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
// If we're waiting, notify our queue that we're available.
if (GetState() == ThreadState::Waiting) {
wait_queue->EndWait(this, wait_result_);
}
}
void KThread::CancelWait(ResultCode wait_result_, bool cancel_timer_task) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
// If we're waiting, notify our queue that we're available.
if (GetState() == ThreadState::Waiting) {
wait_queue->CancelWait(this, wait_result_, cancel_timer_task);
}
}
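BeginWait, EndWait, and CancelWait above form a small state machine: every wake path takes the scheduler lock and fires only while the thread is still Waiting, so a stale or duplicate wakeup is a harmless no-op. A condensed model under hypothetical types:

    enum class State { Runnable, Waiting };

    struct Thread;

    struct WaitQueue {
        void EndWait(Thread& t, int result);
    };

    struct Thread {
        State state = State::Runnable;
        WaitQueue* wait_queue = nullptr;
        int wait_result = 0;

        void BeginWait(WaitQueue* q) {
            state = State::Waiting;
            wait_queue = q;
        }

        void EndWait(int result) {
            if (state == State::Waiting) { // Stale wakeups fall through harmlessly.
                wait_queue->EndWait(*this, result);
            }
        }
    };

    void WaitQueue::EndWait(Thread& t, int result) {
        t.wait_result = result;
        t.state = State::Runnable;
        t.wait_queue = nullptr;
    }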
void KThread::SetState(ThreadState state) { void KThread::SetState(ThreadState state) {
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{kernel};
@ -1050,4 +1090,26 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
return GetCurrentThread(kernel).GetCurrentCore(); return GetCurrentThread(kernel).GetCurrentCore();
} }
KScopedDisableDispatch::~KScopedDisableDispatch() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (kernel.IsShuttingDown()) {
return;
}
// Skip the reschedule if single-core, as dispatch tracking is disabled here.
if (!Settings::values.use_multi_core.GetValue()) {
return;
}
if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
auto scheduler = kernel.CurrentScheduler();
if (scheduler) {
scheduler->RescheduleCurrentCore();
}
} else {
GetCurrentThread(kernel).EnableDispatch();
}
}
} // namespace Kernel } // namespace Kernel

View File

@ -48,6 +48,7 @@ enum class ThreadType : u32 {
Kernel = 1, Kernel = 1,
HighPriority = 2, HighPriority = 2,
User = 3, User = 3,
Dummy = 100, // Special thread type for emulation purposes only
}; };
DECLARE_ENUM_FLAG_OPERATORS(ThreadType); DECLARE_ENUM_FLAG_OPERATORS(ThreadType);
@ -161,8 +162,6 @@ public:
} }
} }
void Wakeup();
void SetBasePriority(s32 value); void SetBasePriority(s32 value);
[[nodiscard]] ResultCode Run(); [[nodiscard]] ResultCode Run();
@ -197,13 +196,19 @@ public:
void Suspend(); void Suspend();
-   void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
-       synced_object = obj;
+   constexpr void SetSyncedIndex(s32 index) {
+       synced_index = index;
+   }
+
+   [[nodiscard]] constexpr s32 GetSyncedIndex() const {
+       return synced_index;
+   }
+
+   constexpr void SetWaitResult(ResultCode wait_res) {
        wait_result = wait_res;
    }

-   [[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const {
-       *out = synced_object;
+   [[nodiscard]] constexpr ResultCode GetWaitResult() const {
        return wait_result;
    }
@ -374,6 +379,8 @@ public:
[[nodiscard]] bool IsSignaled() const override; [[nodiscard]] bool IsSignaled() const override;
void OnTimer();
static void PostDestroy(uintptr_t arg); static void PostDestroy(uintptr_t arg);
[[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread); [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
@ -446,20 +453,39 @@ public:
return per_core_priority_queue_entry[core]; return per_core_priority_queue_entry[core];
} }
-   void SetSleepingQueue(KThreadQueue* q) {
-       sleeping_queue = q;
+   [[nodiscard]] bool IsKernelThread() const {
+       return GetActiveCore() == 3;
+   }
+
+   [[nodiscard]] bool IsDispatchTrackingDisabled() const {
+       return is_single_core || IsKernelThread();
    }

    [[nodiscard]] s32 GetDisableDispatchCount() const {
+       if (IsDispatchTrackingDisabled()) {
+           // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+           return 1;
+       }
+
        return this->GetStackParameters().disable_count;
    }

    void DisableDispatch() {
+       if (IsDispatchTrackingDisabled()) {
+           // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+           return;
+       }
+
        ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
        this->GetStackParameters().disable_count++;
    }

    void EnableDispatch() {
+       if (IsDispatchTrackingDisabled()) {
+           // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+           return;
+       }
+
        ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
        this->GetStackParameters().disable_count--;
    }
@ -573,6 +599,15 @@ public:
address_key_value = val; address_key_value = val;
} }
void ClearWaitQueue() {
wait_queue = nullptr;
}
void BeginWait(KThreadQueue* queue);
void NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_);
void EndWait(ResultCode wait_result_);
void CancelWait(ResultCode wait_result_, bool cancel_timer_task);
[[nodiscard]] bool HasWaiters() const { [[nodiscard]] bool HasWaiters() const {
return !waiter_list.empty(); return !waiter_list.empty();
} }
@ -667,7 +702,6 @@ private:
KAffinityMask physical_affinity_mask{}; KAffinityMask physical_affinity_mask{};
u64 thread_id{}; u64 thread_id{};
std::atomic<s64> cpu_time{}; std::atomic<s64> cpu_time{};
KSynchronizationObject* synced_object{};
VAddr address_key{}; VAddr address_key{};
KProcess* parent{}; KProcess* parent{};
VAddr kernel_stack_top{}; VAddr kernel_stack_top{};
@ -677,13 +711,14 @@ private:
s64 schedule_count{}; s64 schedule_count{};
s64 last_scheduled_tick{}; s64 last_scheduled_tick{};
std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{}; std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
-   KThreadQueue* sleeping_queue{};
+   KThreadQueue* wait_queue{};
WaiterList waiter_list{}; WaiterList waiter_list{};
WaiterList pinned_waiter_list{}; WaiterList pinned_waiter_list{};
KThread* lock_owner{}; KThread* lock_owner{};
u32 address_key_value{}; u32 address_key_value{};
u32 suspend_request_flags{}; u32 suspend_request_flags{};
u32 suspend_allowed_flags{}; u32 suspend_allowed_flags{};
s32 synced_index{};
ResultCode wait_result{ResultSuccess}; ResultCode wait_result{ResultSuccess};
s32 base_priority{}; s32 base_priority{};
s32 physical_ideal_core_id{}; s32 physical_ideal_core_id{};
@ -708,6 +743,7 @@ private:
// For emulation // For emulation
std::shared_ptr<Common::Fiber> host_context{}; std::shared_ptr<Common::Fiber> host_context{};
bool is_single_core{};
// For debugging // For debugging
std::vector<KSynchronizationObject*> wait_objects_for_debugging; std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@ -752,4 +788,20 @@ public:
} }
}; };
class KScopedDisableDispatch {
public:
[[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
// If we are shutting down the kernel, none of this is relevant anymore.
if (kernel.IsShuttingDown()) {
return;
}
GetCurrentThread(kernel).DisableDispatch();
}
~KScopedDisableDispatch();
private:
KernelCore& kernel;
};
} // namespace Kernel } // namespace Kernel
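A hedged usage sketch for the new guard: dispatch stays disabled for the enclosing scope, and the destructor defined in k_thread.cpp above may reschedule the current core once the count drops back. The function name here is hypothetical:

    void DoWorkWithoutDispatch(Kernel::KernelCore& kernel) {
        Kernel::KScopedDisableDispatch dd(kernel);
        // ... work that must not be preempted by the emulated scheduler ...
    }   // ~KScopedDisableDispatch() may reschedule the current core here.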

View File

@ -0,0 +1,51 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/time_manager.h"
namespace Kernel {
void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread,
[[maybe_unused]] KSynchronizationObject* signaled_object,
[[maybe_unused]] ResultCode wait_result) {}
void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
// Set the thread's wait result.
waiting_thread->SetWaitResult(wait_result);
// Set the thread as runnable.
waiting_thread->SetState(ThreadState::Runnable);
// Clear the thread's wait queue.
waiting_thread->ClearWaitQueue();
// Cancel the thread task.
kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
}
void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
bool cancel_timer_task) {
// Set the thread's wait result.
waiting_thread->SetWaitResult(wait_result);
// Set the thread as runnable.
waiting_thread->SetState(ThreadState::Runnable);
// Clear the thread's wait queue.
waiting_thread->ClearWaitQueue();
// Cancel the thread task.
if (cancel_timer_task) {
kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
}
}
void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread,
[[maybe_unused]] ResultCode wait_result) {}
} // namespace Kernel
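Note that EndWait and CancelWait above differ only in whether the timer task is unconditionally cancelled, and KThreadQueueWithoutEndWait stubs out EndWait for waits that can only finish by cancellation or timeout (thread sleep, synchronization waits). A minimal sketch of that override pattern, with hypothetical names:

    struct Waiter {}; // Stand-in for KThread.

    struct WaitQueue {
        virtual ~WaitQueue() = default;
        virtual void EndWait(Waiter*) { /* mark runnable, always cancel timer */ }
        virtual void CancelWait(Waiter*, bool cancel_timer_task) { /* ... */ }
    };

    // Waits that can only end via cancellation make EndWait a deliberate no-op.
    struct WaitQueueWithoutEndWait final : WaitQueue {
        void EndWait(Waiter*) override {}
    };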

View File

@ -4,6 +4,7 @@
#pragma once #pragma once
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
namespace Kernel { namespace Kernel {
@ -11,71 +12,24 @@ namespace Kernel {
class KThreadQueue { class KThreadQueue {
public: public:
explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {} explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {}
+   virtual ~KThreadQueue() = default;

-   bool IsEmpty() const {
-       return wait_list.empty();
-   }
-
-   KThread::WaiterList::iterator begin() {
-       return wait_list.begin();
-   }
-
-   KThread::WaiterList::iterator end() {
-       return wait_list.end();
-   }
-
-   bool SleepThread(KThread* t) {
-       KScopedSchedulerLock sl{kernel};
-
-       // If the thread needs terminating, don't enqueue it.
-       if (t->IsTerminationRequested()) {
-           return false;
-       }
-
-       // Set the thread's queue and mark it as waiting.
-       t->SetSleepingQueue(this);
-       t->SetState(ThreadState::Waiting);
-
-       // Add the thread to the queue.
-       wait_list.push_back(*t);
-
-       return true;
-   }
-
-   void WakeupThread(KThread* t) {
-       KScopedSchedulerLock sl{kernel};
-
-       // Remove the thread from the queue.
-       wait_list.erase(wait_list.iterator_to(*t));
-
-       // Mark the thread as no longer sleeping.
-       t->SetState(ThreadState::Runnable);
-       t->SetSleepingQueue(nullptr);
-   }
-
-   KThread* WakeupFrontThread() {
-       KScopedSchedulerLock sl{kernel};
-
-       if (wait_list.empty()) {
-           return nullptr;
-       } else {
-           // Remove the thread from the queue.
-           auto it = wait_list.begin();
-           KThread* thread = std::addressof(*it);
-           wait_list.erase(it);
-
-           ASSERT(thread->GetState() == ThreadState::Waiting);
-
-           // Mark the thread as no longer sleeping.
-           thread->SetState(ThreadState::Runnable);
-           thread->SetSleepingQueue(nullptr);
-
-           return thread;
-       }
-   }
+   virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
+                                ResultCode wait_result);
+   virtual void EndWait(KThread* waiting_thread, ResultCode wait_result);
+   virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
+                           bool cancel_timer_task);

private:
    KernelCore& kernel;
    KThread::WaiterList wait_list{};
};

+class KThreadQueueWithoutEndWait : public KThreadQueue {
+public:
+   explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}
+
+   void EndWait(KThread* waiting_thread, ResultCode wait_result) override final;
+};
} // namespace Kernel } // namespace Kernel

View File

@ -14,6 +14,7 @@
#include "common/assert.h" #include "common/assert.h"
#include "common/logging/log.h" #include "common/logging/log.h"
#include "common/microprofile.h" #include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/thread.h" #include "common/thread.h"
#include "common/thread_worker.h" #include "common/thread_worker.h"
#include "core/arm/arm_interface.h" #include "core/arm/arm_interface.h"
@ -83,12 +84,16 @@ struct KernelCore::Impl {
} }
void InitializeCores() { void InitializeCores() {
-   for (auto& core : cores) {
-       core.Initialize(current_process->Is64BitProcess());
+   for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+       cores[core_id].Initialize(current_process->Is64BitProcess());
+       system.Memory().SetCurrentPageTable(*current_process, core_id);
    }
} }
void Shutdown() { void Shutdown() {
is_shutting_down.store(true, std::memory_order_relaxed);
SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
process_list.clear(); process_list.clear();
// Close all open server ports. // Close all open server ports.
@ -123,15 +128,6 @@ struct KernelCore::Impl {
next_user_process_id = KProcess::ProcessIDMin; next_user_process_id = KProcess::ProcessIDMin;
next_thread_id = 1; next_thread_id = 1;
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
if (suspend_threads[core_id]) {
suspend_threads[core_id]->Close();
suspend_threads[core_id] = nullptr;
}
schedulers[core_id].reset();
}
cores.clear(); cores.clear();
global_handle_table->Finalize(); global_handle_table->Finalize();
@ -159,6 +155,16 @@ struct KernelCore::Impl {
CleanupObject(time_shared_mem); CleanupObject(time_shared_mem);
CleanupObject(system_resource_limit); CleanupObject(system_resource_limit);
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
if (suspend_threads[core_id]) {
suspend_threads[core_id]->Close();
suspend_threads[core_id] = nullptr;
}
schedulers[core_id]->Finalize();
schedulers[core_id].reset();
}
// Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
next_host_thread_id = Core::Hardware::NUM_CPU_CORES; next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
@ -245,13 +251,11 @@ struct KernelCore::Impl {
KScopedSchedulerLock lock(kernel); KScopedSchedulerLock lock(kernel);
global_scheduler_context->PreemptThreads(); global_scheduler_context->PreemptThreads();
} }
-           const auto time_interval = std::chrono::nanoseconds{
-               Core::Timing::msToCycles(std::chrono::milliseconds(10))};
+           const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
            system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
        });

-   const auto time_interval =
-       std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
+   const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
system.CoreTiming().ScheduleEvent(time_interval, preemption_event); system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
} }
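The preemption callback above re-arms itself, so dropping the msToCycles conversion turns the event into a plain 10ms wall-clock tick. The same self-rescheduling shape in standard C++, with a hypothetical Scheduler type standing in for yuzu's CoreTiming:

    #include <chrono>
    #include <functional>

    // Hypothetical timer interface: assume it runs `cb` once after `delay`.
    struct Scheduler {
        void ScheduleEvent(std::chrono::nanoseconds delay, std::function<void()> cb);
    };

    // The callback re-arms itself, producing a periodic 10ms preemption tick.
    void PreemptTick(Scheduler& timing) {
        // ... preempt threads under the scheduler lock ...
        timing.ScheduleEvent(std::chrono::nanoseconds{std::chrono::milliseconds(10)},
                             [&timing] { PreemptTick(timing); });
    }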
@ -267,14 +271,6 @@ struct KernelCore::Impl {
void MakeCurrentProcess(KProcess* process) { void MakeCurrentProcess(KProcess* process) {
current_process = process; current_process = process;
if (process == nullptr) {
return;
}
const u32 core_id = GetCurrentHostThreadID();
if (core_id < Core::Hardware::NUM_CPU_CORES) {
system.Memory().SetCurrentPageTable(*process, core_id);
}
} }
static inline thread_local u32 host_thread_id = UINT32_MAX; static inline thread_local u32 host_thread_id = UINT32_MAX;
@ -344,7 +340,16 @@ struct KernelCore::Impl {
is_phantom_mode_for_singlecore = value; is_phantom_mode_for_singlecore = value;
} }
bool IsShuttingDown() const {
return is_shutting_down.load(std::memory_order_relaxed);
}
KThread* GetCurrentEmuThread() { KThread* GetCurrentEmuThread() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (IsShuttingDown()) {
return {};
}
const auto thread_id = GetCurrentHostThreadID(); const auto thread_id = GetCurrentHostThreadID();
if (thread_id >= Core::Hardware::NUM_CPU_CORES) { if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
return GetHostDummyThread(); return GetHostDummyThread();
@ -760,6 +765,7 @@ struct KernelCore::Impl {
std::vector<std::unique_ptr<KThread>> dummy_threads; std::vector<std::unique_ptr<KThread>> dummy_threads;
bool is_multicore{}; bool is_multicore{};
std::atomic_bool is_shutting_down{};
bool is_phantom_mode_for_singlecore{}; bool is_phantom_mode_for_singlecore{};
u32 single_core_thread_id{}; u32 single_core_thread_id{};
@ -845,16 +851,20 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
return impl->cores[id]; return impl->cores[id];
} }
size_t KernelCore::CurrentPhysicalCoreIndex() const {
const u32 core_id = impl->GetCurrentHostThreadID();
if (core_id >= Core::Hardware::NUM_CPU_CORES) {
return Core::Hardware::NUM_CPU_CORES - 1;
}
return core_id;
}
Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-   u32 core_id = impl->GetCurrentHostThreadID();
-   ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-   return impl->cores[core_id];
+   return impl->cores[CurrentPhysicalCoreIndex()];
}

const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-   u32 core_id = impl->GetCurrentHostThreadID();
-   ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-   return impl->cores[core_id];
+   return impl->cores[CurrentPhysicalCoreIndex()];
}
Kernel::KScheduler* KernelCore::CurrentScheduler() { Kernel::KScheduler* KernelCore::CurrentScheduler() {
@ -1057,6 +1067,9 @@ void KernelCore::Suspend(bool in_suspention) {
impl->suspend_threads[core_id]->SetState(state); impl->suspend_threads[core_id]->SetState(state);
impl->suspend_threads[core_id]->SetWaitReasonForDebugging( impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
ThreadWaitReasonForDebugging::Suspended); ThreadWaitReasonForDebugging::Suspended);
if (!should_suspend) {
impl->suspend_threads[core_id]->DisableDispatch();
}
} }
} }
} }
@ -1065,19 +1078,21 @@ bool KernelCore::IsMulticore() const {
return impl->is_multicore; return impl->is_multicore;
} }
bool KernelCore::IsShuttingDown() const {
return impl->IsShuttingDown();
}
void KernelCore::ExceptionalExit() { void KernelCore::ExceptionalExit() {
exception_exited = true; exception_exited = true;
Suspend(true); Suspend(true);
} }
void KernelCore::EnterSVCProfile() {
-   std::size_t core = impl->GetCurrentHostThreadID();
-   impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+   impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
}

void KernelCore::ExitSVCProfile() {
-   std::size_t core = impl->GetCurrentHostThreadID();
-   MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
+   MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
}
std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {

View File

@ -149,6 +149,9 @@ public:
/// Gets the an instance of the respective physical CPU core. /// Gets the an instance of the respective physical CPU core.
const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
/// Gets the current physical core index for the running host thread.
std::size_t CurrentPhysicalCoreIndex() const;
/// Gets the sole instance of the Scheduler at the current running core. /// Gets the sole instance of the Scheduler at the current running core.
Kernel::KScheduler* CurrentScheduler(); Kernel::KScheduler* CurrentScheduler();
@ -272,6 +275,8 @@ public:
bool IsMulticore() const; bool IsMulticore() const;
bool IsShuttingDown() const;
void EnterSVCProfile(); void EnterSVCProfile();
void ExitSVCProfile(); void ExitSVCProfile();

View File

@ -25,24 +25,27 @@ public:
void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context); void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
private: private:
-   std::vector<std::thread> threads;
+   std::vector<std::jthread> threads;
    std::queue<std::function<void()>> requests;
    std::mutex queue_mutex;
-   std::condition_variable condition;
+   std::condition_variable_any condition;
    const std::string service_name;
-   bool stop{};
}; };
ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name) ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
: service_name{name} { : service_name{name} {
-   for (std::size_t i = 0; i < num_threads; ++i)
-       threads.emplace_back([this, &kernel] {
+   for (std::size_t i = 0; i < num_threads; ++i) {
+       threads.emplace_back([this, &kernel](std::stop_token stop_token) {
            Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str());

            // Wait for first request before trying to acquire a render context
            {
                std::unique_lock lock{queue_mutex};
-               condition.wait(lock, [this] { return stop || !requests.empty(); });
+               condition.wait(lock, stop_token, [this] { return !requests.empty(); });
+           }
+
+           if (stop_token.stop_requested()) {
+               return;
            }
kernel.RegisterHostThread(); kernel.RegisterHostThread();
@ -52,10 +55,16 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
{ {
std::unique_lock lock{queue_mutex}; std::unique_lock lock{queue_mutex};
-               condition.wait(lock, [this] { return stop || !requests.empty(); });
-               if (stop || requests.empty()) {
+               condition.wait(lock, stop_token, [this] { return !requests.empty(); });
+
+               if (stop_token.stop_requested()) {
                    return;
                }
+
+               if (requests.empty()) {
+                   continue;
+               }
+
                task = std::move(requests.front());
requests.pop(); requests.pop();
} }
@ -64,6 +73,7 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
} }
}); });
} }
}
void ServiceThread::Impl::QueueSyncRequest(KSession& session, void ServiceThread::Impl::QueueSyncRequest(KSession& session,
std::shared_ptr<HLERequestContext>&& context) { std::shared_ptr<HLERequestContext>&& context) {
@ -88,12 +98,9 @@ void ServiceThread::Impl::QueueSyncRequest(KSession& session,
} }
ServiceThread::Impl::~Impl() { ServiceThread::Impl::~Impl() {
-   {
-       std::unique_lock lock{queue_mutex};
-       stop = true;
-   }
    condition.notify_all();
-   for (std::thread& thread : threads) {
+   for (auto& thread : threads) {
+       thread.request_stop();
        thread.join();
    }
} }
} }
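The jthread migration removes the manual stop flag entirely: request_stop() both trips the std::stop_token and wakes any condition_variable_any::wait that was given the token, and std::jthread's destructor requests stop and joins automatically. A minimal, self-contained C++20 example of the same pattern, independent of yuzu's types:

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <queue>
    #include <stop_token>
    #include <thread>

    int main() {
        std::mutex m;
        std::condition_variable_any cv;
        std::queue<int> jobs;

        std::jthread worker([&](std::stop_token st) {
            while (true) {
                std::unique_lock lk{m};
                // Wakes on notify, on new work, or when a stop is requested.
                cv.wait(lk, st, [&] { return !jobs.empty(); });
                if (st.stop_requested()) {
                    return;
                }
                std::cout << "job " << jobs.front() << '\n';
                jobs.pop();
            }
        });

        {
            std::scoped_lock lk{m};
            jobs.push(1);
        }
        cv.notify_one();
    }   // ~jthread() calls request_stop() and join() automatically.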

View File

@ -32,6 +32,7 @@
#include "core/hle/kernel/k_shared_memory.h" #include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/k_transfer_memory.h" #include "core/hle/kernel/k_transfer_memory.h"
#include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
@ -308,26 +309,29 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
/// Makes a blocking IPC call to an OS service. /// Makes a blocking IPC call to an OS service.
static ResultCode SendSyncRequest(Core::System& system, Handle handle) { static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
auto& kernel = system.Kernel(); auto& kernel = system.Kernel();
+   // Create the wait queue.
+   KThreadQueue wait_queue(kernel);
+
+   // Get the client session from its handle.
+   KScopedAutoObject session =
+       kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
+   R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
+
+   LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());

    auto thread = kernel.CurrentScheduler()->GetCurrentThread();
    {
        KScopedSchedulerLock lock(kernel);
-       thread->SetState(ThreadState::Waiting);
-       thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
-
-       {
-           KScopedAutoObject session =
-               kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
-           R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
-           LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
-           session->SendSyncRequest(thread, system.Memory(), system.CoreTiming());
-       }
+
+       // This is a synchronous request, so we should wait for our request to complete.
+       GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue));
+       GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
+       session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
    }

-   KSynchronizationObject* dummy{};
-   return thread->GetWaitResult(std::addressof(dummy));
+   return thread->GetWaitResult();
}
static ResultCode SendSyncRequest32(Core::System& system, Handle handle) { static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@ -874,7 +878,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
const u64 thread_ticks = current_thread->GetCpuTime(); const u64 thread_ticks = current_thread->GetCpuTime();
out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks); out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-   } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
+   } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks; out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
} }
@ -888,7 +892,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
return ResultInvalidHandle; return ResultInvalidHandle;
} }
-   if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id != system.CurrentCoreIndex()) {
+   if (info_sub_id != 0xFFFFFFFFFFFFFFFF &&
+       info_sub_id != system.Kernel().CurrentPhysicalCoreIndex()) {
LOG_ERROR(Kernel_SVC, "Core is not the current core, got {}", info_sub_id); LOG_ERROR(Kernel_SVC, "Core is not the current core, got {}", info_sub_id);
return ResultInvalidCombination; return ResultInvalidCombination;
} }

View File

@ -5,6 +5,7 @@
#include "common/assert.h" #include "common/assert.h"
#include "core/core.h" #include "core/core.h"
#include "core/core_timing.h" #include "core/core_timing.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/time_manager.h" #include "core/hle/kernel/time_manager.h"
@ -15,7 +16,10 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
Core::Timing::CreateEvent("Kernel::TimeManagerCallback", Core::Timing::CreateEvent("Kernel::TimeManagerCallback",
[this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
KThread* thread = reinterpret_cast<KThread*>(thread_handle); KThread* thread = reinterpret_cast<KThread*>(thread_handle);
-                       thread->Wakeup();
+                       {
+                           KScopedSchedulerLock sl(system.Kernel());
+                           thread->OnTimer();
+                       }
}); });
} }