hle: kernel: Separate KScheduler from GlobalSchedulerContext class.
parent 9e29e36a78
commit 8d3e06349e
@@ -141,7 +141,6 @@ add_library(common STATIC
    microprofile.h
    microprofileui.h
    misc.cpp
    multi_level_queue.h
    page_table.cpp
    page_table.h
    param_package.cpp

@@ -1,345 +0,0 @@
// Copyright 2019 TuxSH
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <iterator>
#include <list>
#include <utility>

#include "common/bit_util.h"
#include "common/common_types.h"

namespace Common {

/**
 * A MultiLevelQueue is a type of priority queue which has the following characteristics:
 * - iterable through each of its elements.
 * - back can be obtained.
 * - O(1) add, lookup (both front and back)
 * - discrete priorities and a max of 64 priorities (limited domain)
 * This type of priority queue is normally used for managing threads within a scheduler.
 */
template <typename T, std::size_t Depth>
class MultiLevelQueue {
public:
    using value_type = T;
    using reference = value_type&;
    using const_reference = const value_type&;
    using pointer = value_type*;
    using const_pointer = const value_type*;

    using difference_type = typename std::pointer_traits<pointer>::difference_type;
    using size_type = std::size_t;

    template <bool is_constant>
    class iterator_impl {
    public:
        using iterator_category = std::bidirectional_iterator_tag;
        using value_type = T;
        using pointer = std::conditional_t<is_constant, T*, const T*>;
        using reference = std::conditional_t<is_constant, const T&, T&>;
        using difference_type = typename std::pointer_traits<pointer>::difference_type;

        friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) {
            if (lhs.IsEnd() && rhs.IsEnd())
                return true;
            return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it);
        }

        friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) {
            return !operator==(lhs, rhs);
        }

        reference operator*() const {
            return *it;
        }

        pointer operator->() const {
            return it.operator->();
        }

        iterator_impl& operator++() {
            if (IsEnd()) {
                return *this;
            }

            ++it;

            if (it == GetEndItForPrio()) {
                u64 prios = mlq.used_priorities;
                prios &= ~((1ULL << (current_priority + 1)) - 1);
                if (prios == 0) {
                    current_priority = static_cast<u32>(mlq.depth());
                } else {
                    current_priority = CountTrailingZeroes64(prios);
                    it = GetBeginItForPrio();
                }
            }
            return *this;
        }

        iterator_impl& operator--() {
            if (IsEnd()) {
                if (mlq.used_priorities != 0) {
                    current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities);
                    it = GetEndItForPrio();
                    --it;
                }
            } else if (it == GetBeginItForPrio()) {
                u64 prios = mlq.used_priorities;
                prios &= (1ULL << current_priority) - 1;
                if (prios != 0) {
                    current_priority = CountTrailingZeroes64(prios);
                    it = GetEndItForPrio();
                    --it;
                }
            } else {
                --it;
            }
            return *this;
        }

        iterator_impl operator++(int) {
            const iterator_impl v{*this};
            ++(*this);
            return v;
        }

        iterator_impl operator--(int) {
            const iterator_impl v{*this};
            --(*this);
            return v;
        }

        // allow implicit const->non-const
        iterator_impl(const iterator_impl<false>& other)
            : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}

        iterator_impl(const iterator_impl<true>& other)
            : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}

        iterator_impl& operator=(const iterator_impl<false>& other) {
            mlq = other.mlq;
            it = other.it;
            current_priority = other.current_priority;
            return *this;
        }

        friend class iterator_impl<true>;
        iterator_impl() = default;

    private:
        friend class MultiLevelQueue;
        using container_ref =
            std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>;
        using list_iterator = std::conditional_t<is_constant, typename std::list<T>::const_iterator,
                                                 typename std::list<T>::iterator>;

        explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority)
            : mlq(mlq), it(it), current_priority(current_priority) {}
        explicit iterator_impl(container_ref mlq, u32 current_priority)
            : mlq(mlq), it(), current_priority(current_priority) {}

        bool IsEnd() const {
            return current_priority == mlq.depth();
        }

        list_iterator GetBeginItForPrio() const {
            return mlq.levels[current_priority].begin();
        }

        list_iterator GetEndItForPrio() const {
            return mlq.levels[current_priority].end();
        }

        container_ref mlq;
        list_iterator it;
        u32 current_priority;
    };

    using iterator = iterator_impl<false>;
    using const_iterator = iterator_impl<true>;

    void add(const T& element, u32 priority, bool send_back = true) {
        if (send_back)
            levels[priority].push_back(element);
        else
            levels[priority].push_front(element);
        used_priorities |= 1ULL << priority;
    }

    void remove(const T& element, u32 priority) {
        auto it = ListIterateTo(levels[priority], element);
        if (it == levels[priority].end())
            return;
        levels[priority].erase(it);
        if (levels[priority].empty()) {
            used_priorities &= ~(1ULL << priority);
        }
    }

    void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) {
        remove(element, old_priority);
        add(element, new_priority, !adjust_front);
    }
    void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) {
        adjust(*it, old_priority, new_priority, adjust_front);
    }

    void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) {
        ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority],
                   ListIterateTo(levels[priority], element));

        other.used_priorities |= 1ULL << priority;

        if (levels[priority].empty()) {
            used_priorities &= ~(1ULL << priority);
        }
    }

    void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) {
        transfer_to_front(*it, priority, other);
    }

    void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) {
        ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority],
                   ListIterateTo(levels[priority], element));

        other.used_priorities |= 1ULL << priority;

        if (levels[priority].empty()) {
            used_priorities &= ~(1ULL << priority);
        }
    }

    void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) {
        transfer_to_back(*it, priority, other);
    }

    void yield(u32 priority, std::size_t n = 1) {
        ListShiftForward(levels[priority], n);
    }

    [[nodiscard]] std::size_t depth() const {
        return Depth;
    }

    [[nodiscard]] std::size_t size(u32 priority) const {
        return levels[priority].size();
    }

    [[nodiscard]] std::size_t size() const {
        u64 priorities = used_priorities;
        std::size_t size = 0;
        while (priorities != 0) {
            const u64 current_priority = CountTrailingZeroes64(priorities);
            size += levels[current_priority].size();
            priorities &= ~(1ULL << current_priority);
        }
        return size;
    }

    [[nodiscard]] bool empty() const {
        return used_priorities == 0;
    }

    [[nodiscard]] bool empty(u32 priority) const {
        return (used_priorities & (1ULL << priority)) == 0;
    }

    [[nodiscard]] u32 highest_priority_set(u32 max_priority = 0) const {
        const u64 priorities =
            max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1));
        return priorities == 0 ? Depth : static_cast<u32>(CountTrailingZeroes64(priorities));
    }

    [[nodiscard]] u32 lowest_priority_set(u32 min_priority = Depth - 1) const {
        const u64 priorities = min_priority >= Depth - 1
                                   ? used_priorities
                                   : (used_priorities & ((1ULL << (min_priority + 1)) - 1));
        return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities);
    }

    [[nodiscard]] const_iterator cbegin(u32 max_prio = 0) const {
        const u32 priority = highest_priority_set(max_prio);
        return priority == Depth ? cend()
                                 : const_iterator{*this, levels[priority].cbegin(), priority};
    }
    [[nodiscard]] const_iterator begin(u32 max_prio = 0) const {
        return cbegin(max_prio);
    }
    [[nodiscard]] iterator begin(u32 max_prio = 0) {
        const u32 priority = highest_priority_set(max_prio);
        return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority};
    }

    [[nodiscard]] const_iterator cend(u32 min_prio = Depth - 1) const {
        return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1);
    }
    [[nodiscard]] const_iterator end(u32 min_prio = Depth - 1) const {
        return cend(min_prio);
    }
    [[nodiscard]] iterator end(u32 min_prio = Depth - 1) {
        return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1);
    }

    [[nodiscard]] T& front(u32 max_priority = 0) {
        const u32 priority = highest_priority_set(max_priority);
        return levels[priority == Depth ? 0 : priority].front();
    }
    [[nodiscard]] const T& front(u32 max_priority = 0) const {
        const u32 priority = highest_priority_set(max_priority);
        return levels[priority == Depth ? 0 : priority].front();
    }

    [[nodiscard]] T& back(u32 min_priority = Depth - 1) {
        const u32 priority = lowest_priority_set(min_priority); // intended
        return levels[priority == Depth ? 63 : priority].back();
    }
    [[nodiscard]] const T& back(u32 min_priority = Depth - 1) const {
        const u32 priority = lowest_priority_set(min_priority); // intended
        return levels[priority == Depth ? 63 : priority].back();
    }

    void clear() {
        used_priorities = 0;
        for (std::size_t i = 0; i < Depth; i++) {
            levels[i].clear();
        }
    }

private:
    using const_list_iterator = typename std::list<T>::const_iterator;

    static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) {
        if (shift >= list.size()) {
            return;
        }

        const auto begin_range = list.begin();
        const auto end_range = std::next(begin_range, shift);
        list.splice(list.end(), list, begin_range, end_range);
    }

    static void ListSplice(std::list<T>& in_list, const_list_iterator position,
                           std::list<T>& out_list, const_list_iterator element) {
        in_list.splice(position, out_list, element);
    }

    [[nodiscard]] static const_list_iterator ListIterateTo(const std::list<T>& list,
                                                           const T& element) {
        auto it = list.cbegin();
        while (it != list.cend() && *it != element) {
            ++it;
        }
        return it;
    }

    std::array<std::list<T>, Depth> levels;
    u64 used_priorities = 0;
};

} // namespace Common

@@ -148,6 +148,8 @@ add_library(core STATIC
    hle/kernel/code_set.cpp
    hle/kernel/code_set.h
    hle/kernel/errors.h
    hle/kernel/global_scheduler_context.cpp
    hle/kernel/global_scheduler_context.h
    hle/kernel/handle_table.cpp
    hle/kernel/handle_table.h
    hle/kernel/hle_ipc.cpp

@@ -0,0 +1,55 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <mutex>

#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/global_scheduler_context.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"

namespace Kernel {

GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
    : kernel{kernel}, scheduler_lock{kernel} {}

GlobalSchedulerContext::~GlobalSchedulerContext() = default;

void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.push_back(std::move(thread));
}

void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                      thread_list.end());
}

void GlobalSchedulerContext::PreemptThreads() {
    // The priority levels at which the global scheduler preempts threads every 10 ms. They are
    // ordered from Core 0 to Core 3.
    std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 63};

    ASSERT(IsLocked());
    for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
        const u32 priority = preemption_priorities[core_id];
        kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
    }
}

bool GlobalSchedulerContext::IsLocked() const {
    return scheduler_lock.IsLockedByCurrentThread();
}

void GlobalSchedulerContext::Lock() {
    scheduler_lock.Lock();
}

void GlobalSchedulerContext::Unlock() {
    scheduler_lock.Unlock();
}

} // namespace Kernel

@@ -0,0 +1,79 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <vector>

#include "common/common_types.h"
#include "common/spin_lock.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {

class KernelCore;
class SchedulerLock;

using KSchedulerPriorityQueue =
    KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
static constexpr s32 HighestCoreMigrationAllowedPriority = 2;

class GlobalSchedulerContext final {
    friend class KScheduler;

public:
    explicit GlobalSchedulerContext(KernelCore& kernel);
    ~GlobalSchedulerContext();

    /// Adds a new thread to the scheduler
    void AddThread(std::shared_ptr<Thread> thread);

    /// Removes a thread from the scheduler
    void RemoveThread(std::shared_ptr<Thread> thread);

    /// Returns a list of all threads managed by the scheduler
    const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
        return thread_list;
    }

    /**
     * Rotates the scheduling queues of threads at a preemption priority and then does
     * some core rebalancing. Preemption priorities can be found in the array
     * 'preemption_priorities'.
     *
     * @note This operation happens every 10ms.
     */
    void PreemptThreads();

    /// Returns true if the global scheduler lock is acquired
    bool IsLocked() const;

private:
    friend class SchedulerLock;

    /// Lock the scheduler to the current thread.
    void Lock();

    /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
    /// and reschedules current core if needed.
    void Unlock();

    using LockType = KAbstractSchedulerLock<KScheduler>;

    KernelCore& kernel;

    std::atomic_bool scheduler_update_needed{};
    KSchedulerPriorityQueue priority_queue;
    LockType scheduler_lock;

    /// Lists all thread ids that aren't deleted/etc.
    std::vector<std::shared_ptr<Thread>> thread_list;
    Common::SpinLock global_list_guard{};
};

} // namespace Kernel

@@ -5,12 +5,6 @@
// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#include <algorithm>
#include <mutex>
#include <set>
#include <unordered_set>
#include <utility>

#include "common/assert.h"
#include "common/bit_util.h"
#include "common/fiber.h"

@@ -19,10 +13,10 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/cpu_manager.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"

@@ -34,11 +28,6 @@ static void IncrementScheduledCount(Kernel::Thread* thread) {
    }
}

GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
    : kernel{kernel}, scheduler_lock{kernel} {}

GlobalSchedulerContext::~GlobalSchedulerContext() = default;

/*static*/ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
                                            Core::EmuThreadHandle global_thread) {
    u32 current_core = global_thread.host_handle;

@@ -205,33 +194,6 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
    return cores_needing_scheduling;
}

void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.push_back(std::move(thread));
}

void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                      thread_list.end());
}

void GlobalSchedulerContext::PreemptThreads() {
    // The priority levels at which the global scheduler preempts threads every 10 ms. They are
    // ordered from Core 0 to Core 3.
    std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 63};

    ASSERT(IsLocked());
    for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
        const u32 priority = preemption_priorities[core_id];
        kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
    }
}

bool GlobalSchedulerContext::IsLocked() const {
    return scheduler_lock.IsLockedByCurrentThread();
}

/*static*/ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread,
                                                 u32 old_state) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

@@ -635,14 +597,6 @@ void KScheduler::YieldToAnyThread() {
    }
}

void GlobalSchedulerContext::Lock() {
    scheduler_lock.Lock();
}

void GlobalSchedulerContext::Unlock() {
    scheduler_lock.Unlock();
}

KScheduler::KScheduler(Core::System& system, std::size_t core_id)
    : system(system), core_id(core_id) {
    switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);

@@ -8,94 +8,27 @@
#pragma once

#include <atomic>
#include <memory>
#include <mutex>
#include <vector>

#include "common/common_types.h"
#include "common/multi_level_queue.h"
#include "common/scope_exit.h"
#include "common/spin_lock.h"
#include "core/core_timing.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/global_scheduler_context.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
#include "core/hle/kernel/thread.h"

namespace Common {
class Fiber;
}

namespace Core {
class ARM_Interface;
class System;
} // namespace Core
}

namespace Kernel {

class KernelCore;
class Process;
class SchedulerLock;

using KSchedulerPriorityQueue =
    KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
static constexpr s32 HighestCoreMigrationAllowedPriority = 2;

class GlobalSchedulerContext final {
    friend class KScheduler;

public:
    explicit GlobalSchedulerContext(KernelCore& kernel);
    ~GlobalSchedulerContext();

    /// Adds a new thread to the scheduler
    void AddThread(std::shared_ptr<Thread> thread);

    /// Removes a thread from the scheduler
    void RemoveThread(std::shared_ptr<Thread> thread);

    /// Returns a list of all threads managed by the scheduler
    const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
        return thread_list;
    }

    /**
     * Rotates the scheduling queues of threads at a preemption priority and then does
     * some core rebalancing. Preemption priorities can be found in the array
     * 'preemption_priorities'.
     *
     * @note This operation happens every 10ms.
     */
    void PreemptThreads();

    u32 CpuCoresCount() const {
        return Core::Hardware::NUM_CPU_CORES;
    }

    bool IsLocked() const;

private:
    friend class SchedulerLock;

    /// Lock the scheduler to the current thread.
    void Lock();

    /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
    /// and reschedules current core if needed.
    void Unlock();

    using LockType = KAbstractSchedulerLock<KScheduler>;

    KernelCore& kernel;

    std::atomic_bool scheduler_update_needed{};
    KSchedulerPriorityQueue priority_queue;
    LockType scheduler_lock;

    /// Lists all thread ids that aren't deleted/etc.
    std::vector<std::shared_ptr<Thread>> thread_list;
    Common::SpinLock global_list_guard{};
};
class Thread;

class KScheduler final {
public:

@@ -221,7 +154,6 @@ private:

    /// Switches the CPU's active thread context to that of the specified thread
    void ScheduleImpl();
    void SwitchThread(Thread* next_thread);

    /// When a thread wakes up, it must run this through its new scheduler
    void SwitchContextStep2();

@@ -2,7 +2,6 @@ add_executable(tests
    common/bit_field.cpp
    common/bit_utils.cpp
    common/fibers.cpp
    common/multi_level_queue.cpp
    common/param_package.cpp
    common/ring_buffer.cpp
    core/arm/arm_test_common.cpp

@@ -1,55 +0,0 @@
// Copyright 2019 Yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <catch2/catch.hpp>
#include <math.h>
#include "common/common_types.h"
#include "common/multi_level_queue.h"

namespace Common {

TEST_CASE("MultiLevelQueue", "[common]") {
    std::array<f32, 8> values = {0.0, 5.0, 1.0, 9.0, 8.0, 2.0, 6.0, 7.0};
    Common::MultiLevelQueue<f32, 64> mlq;
    REQUIRE(mlq.empty());
    mlq.add(values[2], 2);
    mlq.add(values[7], 7);
    mlq.add(values[3], 3);
    mlq.add(values[4], 4);
    mlq.add(values[0], 0);
    mlq.add(values[5], 5);
    mlq.add(values[6], 6);
    mlq.add(values[1], 1);
    u32 index = 0;
    bool all_set = true;
    for (auto& f : mlq) {
        all_set &= (f == values[index]);
        index++;
    }
    REQUIRE(all_set);
    REQUIRE(!mlq.empty());
    f32 v = 8.0;
    mlq.add(v, 2);
    v = -7.0;
    mlq.add(v, 2, false);
    REQUIRE(mlq.front(2) == -7.0);
    mlq.yield(2);
    REQUIRE(mlq.front(2) == values[2]);
    REQUIRE(mlq.back(2) == -7.0);
    REQUIRE(mlq.empty(8));
    v = 10.0;
    mlq.add(v, 8);
    mlq.adjust(v, 8, 9);
    REQUIRE(mlq.front(9) == v);
    REQUIRE(mlq.empty(8));
    REQUIRE(!mlq.empty(9));
    mlq.adjust(values[0], 0, 9);
    REQUIRE(mlq.highest_priority_set() == 1);
    REQUIRE(mlq.lowest_priority_set() == 9);
    mlq.remove(values[1], 1);
    REQUIRE(mlq.highest_priority_set() == 2);
    REQUIRE(mlq.empty(1));
}

} // namespace Common