Merge pull request #9215 from liamwhite/swordfight
Ensure correctness of atomic store ordering
commit 3161b34ff6
@@ -384,7 +384,8 @@ void KScheduler::SwitchThread(KThread* next_thread) {
 
 void KScheduler::ScheduleImpl() {
     // First, clear the needs scheduling bool.
-    m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
+    m_state.needs_scheduling.store(false, std::memory_order_relaxed);
+    std::atomic_thread_fence(std::memory_order_seq_cst);
 
     // Load the appropriate thread pointers for scheduling.
     KThread* const cur_thread{GetCurrentThreadPointer(kernel)};
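The hunk above trades one seq_cst store for a relaxed store plus a seq_cst fence. What the code actually needs is StoreLoad ordering: the cleared needs_scheduling flag has to become visible before highest_priority_thread is read a few lines later. A seq_cst store alone does not order later non-seq_cst loads (on AArch64 it lowers to stlr, which a subsequent plain load may pass), whereas the standalone fence lowers to a full dmb ish. A minimal sketch of the pattern follows; only the two variable names mirror the diff, the rest is illustrative and not yuzu's actual code:

#include <atomic>

namespace sketch {

std::atomic<bool> needs_scheduling{false};
std::atomic<int> highest_priority_thread{0};

int begin_schedule() {
    // Clear the flag with a cheap relaxed store...
    needs_scheduling.store(false, std::memory_order_relaxed);
    // ...then issue a full barrier so the store above is ordered before
    // the load below; neither operation needs to be seq_cst on its own.
    std::atomic_thread_fence(std::memory_order_seq_cst);

    // Only now is it safe to sample the scheduling decision.
    return highest_priority_thread.load(std::memory_order_relaxed);
}

} // namespace sketch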
@@ -400,7 +401,8 @@ void KScheduler::ScheduleImpl() {
     // If there aren't, we want to check if the highest priority thread is the same as the current
     // thread.
     if (highest_priority_thread == cur_thread) {
-        // If they're the same, then we can just return.
+        // If they're the same, then we can just issue a memory barrier and return.
+        std::atomic_thread_fence(std::memory_order_seq_cst);
         return;
     }
 
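The extra fence on this early-return path keeps the needs_scheduling clear ordered before the function gives its answer, even when no switch is performed. It pairs with whichever core requests a reschedule by publishing a thread and then raising the flag; the intent is that a request is never silently dropped: either the fenced read observes the new thread, or a later check of the flag observes true. A simplified, assumed sketch of that requesting side, reusing the illustrative names from the sketch above (the real update goes through KScheduler's state machine, which this does not reproduce):

#include <atomic>

namespace sketch {

extern std::atomic<bool> needs_scheduling;
extern std::atomic<int> highest_priority_thread;

// Hypothetical requesting side: publish the decision, then raise the flag.
void request_schedule(int thread) {
    highest_priority_thread.store(thread, std::memory_order_relaxed);
    // seq_cst is also a release store: any core that loads the flag with
    // acquire or stronger and sees true also sees the thread stored above.
    needs_scheduling.store(true, std::memory_order_seq_cst);
}

} // namespace sketch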
@@ -476,7 +478,8 @@ void KScheduler::ScheduleImplFiber() {
 
     // We failed to successfully do the context switch, and need to retry.
     // Clear needs_scheduling.
-    m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
+    m_state.needs_scheduling.store(false, std::memory_order_relaxed);
+    std::atomic_thread_fence(std::memory_order_seq_cst);
 
     // Refresh the highest priority thread.
     highest_priority_thread = m_state.highest_priority_thread;
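ScheduleImplFiber gets the identical relaxed-store-plus-fence treatment inside its retry path: after a failed context switch, clear the flag, fence, and only then re-read the highest-priority thread. A rough sketch of that loop shape, again with illustrative names; try_switch_to is a hypothetical stand-in for the real switch attempt:

#include <atomic>

namespace sketch {

extern std::atomic<bool> needs_scheduling;
extern std::atomic<int> highest_priority_thread;

// Placeholder: always succeeds here; the real attempt can fail and loop.
bool try_switch_to(int /*thread*/) { return true; }

void schedule_fiber_loop() {
    int target = highest_priority_thread.load(std::memory_order_relaxed);
    while (!try_switch_to(target)) {
        // Failed switch: clear the flag, fence, then re-read the decision,
        // mirroring the top of ScheduleImpl.
        needs_scheduling.store(false, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_seq_cst);
        target = highest_priority_thread.load(std::memory_order_relaxed);
    }
}

} // namespace sketch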
@@ -60,6 +60,9 @@ public:
 
         // Release an instance of the lock.
         if ((--lock_count) == 0) {
+            // Perform a memory barrier here.
+            std::atomic_thread_fence(std::memory_order_seq_cst);
+
             // We're no longer going to hold the lock. Take note of what cores need scheduling.
             const u64 cores_needing_scheduling =
                 SchedulerType::UpdateHighestPriorityThreads(kernel);
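The last hunk issues a full barrier on the final release of the scheduler lock, before computing which cores need scheduling. Assuming a recursive count that only the current owner ever touches, the release path has roughly the following shape; apart from the fence, every name here is an illustrative stand-in (e.g. update_highest_priority_threads for SchedulerType::UpdateHighestPriorityThreads):

#include <atomic>
#include <cstdint>

namespace sketch {

int lock_count = 1;  // owner-only recursive count; needs no atomicity itself

// Hypothetical stand-in returning a mask of cores that need scheduling.
std::uint64_t update_highest_priority_threads() { return 0; }

std::uint64_t unlock() {
    if ((--lock_count) == 0) {
        // Order every write made while the lock was held before the
        // scheduling re-evaluation that other cores will act on.
        std::atomic_thread_fence(std::memory_order_seq_cst);
        return update_highest_priority_threads();
    }
    return 0;
}

} // namespace sketch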