yuzu-emu / yuzu-android

Scheduler: Correct yields.

Fernando Sahmkow 2020-03-08 11:25:50 -04:00
parent 445b4342b3
commit 4217e58a10
2 changed files with 25 additions and 7 deletions

View File

@@ -147,9 +147,11 @@ bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
     const u32 priority = yielding_thread->GetPriority();
 
     // Yield the thread
-    const Thread* const winner = scheduled_queue[core_id].front(priority);
-    ASSERT_MSG(yielding_thread == winner, "Thread yielding without being in front");
-    scheduled_queue[core_id].yield(priority);
+    Reschedule(priority, core_id, yielding_thread);
+    const Thread* const winner = scheduled_queue[core_id].front();
+    if (kernel.GetCurrentHostThreadID() != core_id) {
+        is_reselection_pending.store(true, std::memory_order_release);
+    }
 
     return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
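Both this hunk and the next replace the front-of-queue assertion and the queue's yield(priority) call with a single Reschedule(priority, core_id, yielding_thread) call. Reschedule() itself is not part of this diff; the sketch below only illustrates the move-to-back-of-priority-level behaviour it is assumed to have, with a plain std::list standing in for the scheduler's multilevel queue. PriorityQueueModel and its members are illustrative names, not yuzu code.

    // Hypothetical stand-in for a per-core ready queue. Reschedule() models the
    // assumed behaviour: move the yielding thread to the back of its priority
    // level so equal-priority threads get a turn, then read the new front as the
    // candidate winner.
    #include <array>
    #include <cstdint>
    #include <list>

    struct Thread;

    struct PriorityQueueModel {
        static constexpr std::uint32_t NumPriorities = 64;
        std::array<std::list<Thread*>, NumPriorities> levels;

        // Assumed Reschedule() semantics: re-queue the thread at the back of its level.
        void Reschedule(std::uint32_t priority, Thread* thread) {
            levels[priority].remove(thread);
            levels[priority].push_back(thread);
        }

        // Highest-priority ready thread, i.e. the winner the yield paths compare against.
        Thread* front() const {
            for (const auto& level : levels) {
                if (!level.empty()) {
                    return level.front();
                }
            }
            return nullptr;
        }
    };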
@@ -162,9 +164,7 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
     const u32 priority = yielding_thread->GetPriority();
 
     // Yield the thread
-    ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
-               "Thread yielding without being in front");
-    scheduled_queue[core_id].yield(priority);
+    Reschedule(priority, core_id, yielding_thread);
 
     std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
     for (std::size_t i = 0; i < current_threads.size(); i++) {
@@ -200,6 +200,10 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
         winner = next_thread;
     }
 
+    if (kernel.GetCurrentHostThreadID() != core_id) {
+        is_reselection_pending.store(true, std::memory_order_release);
+    }
+
     return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
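Each yield path now also checks whether it is running on the host thread that owns the target core; if not, it publishes is_reselection_pending with release semantics so the owning core re-runs thread selection. A minimal sketch of that release/acquire hand-off follows; the class and method names are illustrative rather than the actual Scheduler interface, and the consuming side is assumed to pair the store with an acquire read.

    // Minimal model of the cross-core "please reselect" flag set in these hunks.
    #include <atomic>

    class ReselectionFlag {
    public:
        // Yielding side, possibly running on a different host thread than the core.
        void MarkPending() {
            is_reselection_pending.store(true, std::memory_order_release);
        }

        // Owning core's scheduler loop: observe and clear the request.
        bool ConsumePending() {
            return is_reselection_pending.exchange(false, std::memory_order_acquire);
        }

    private:
        std::atomic<bool> is_reselection_pending{false};
    };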
@@ -239,6 +243,12 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
         } else {
             winner = yielding_thread;
         }
+    } else {
+        winner = scheduled_queue[i].front();
+    }
+
+    if (kernel.GetCurrentHostThreadID() != core_id) {
+        is_reselection_pending.store(true, std::memory_order_release);
     }
 
     return AskForReselectionOrMarkRedundant(yielding_thread, winner);
@@ -687,7 +697,11 @@ void Scheduler::SwitchToCurrent() {
         while (!is_context_switch_pending) {
             if (current_thread != nullptr && !current_thread->IsHLEThread()) {
                 current_thread->context_guard.lock();
-                if (current_thread->GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
+                if (!current_thread->IsRunnable()) {
+                    current_thread->context_guard.unlock();
+                    break;
+                }
+                if (current_thread->GetProcessorID() != core_id) {
                     current_thread->context_guard.unlock();
                     break;
                 }
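SwitchToCurrent() now takes the thread's context guard and then re-validates, while holding it, that the thread is still runnable and still assigned to this core before switching; otherwise it releases the guard and bails out. Below is a standalone sketch of that lock-recheck-back-off pattern with stand-in types; ThreadModel and TryAcquireForSwitch are illustrative names only.

    #include <mutex>

    struct ThreadModel {
        std::mutex context_guard;
        bool runnable = true;
        int processor_id = 0;
    };

    // Returns true only if the thread is still safe to switch to on this core.
    // Mirrors the added checks: the state may have changed before the guard was taken.
    bool TryAcquireForSwitch(ThreadModel& thread, int core_id) {
        thread.context_guard.lock();
        if (!thread.runnable || thread.processor_id != core_id) {
            thread.context_guard.unlock();
            return false; // Caller breaks out of the switch loop.
        }
        return true; // Caller performs the context switch and unlocks later.
    }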

View File

@@ -524,6 +524,10 @@ public:
                                   static_cast<u32>(ThreadSchedMasks::LowMask));
     }
 
+    bool IsRunnable() const {
+        return scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable);
+    }
+
     bool IsRunning() const {
         return is_running;
     }
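The new IsRunnable() helper folds the scheduling-state comparison that SwitchToCurrent() used to spell out into the Thread interface. A self-contained sketch of the shape of that helper follows; the enum values here are placeholders, only Runnable's role matters.

    #include <cstdint>

    enum class ThreadSchedStatus : std::uint32_t { None, Paused, Runnable, Exited };

    class ThreadSketch {
    public:
        bool IsRunnable() const {
            return scheduling_state == static_cast<std::uint32_t>(ThreadSchedStatus::Runnable);
        }

    private:
        // Scheduler-visible state kept as a raw u32, as the diff's comparison implies.
        std::uint32_t scheduling_state = static_cast<std::uint32_t>(ThreadSchedStatus::Runnable);
    };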