yuzu-emu/yuzu

kernel: convert miscellaneous

Liam 2023-03-06 23:08:53 -05:00
parent d1b53c8d82
commit 57f1d8ef8d
7 changed files with 80 additions and 93 deletions

View File

@@ -71,26 +71,26 @@ void KSynchronizationObject::Finalize() {
     KAutoObject::Finalize();
 }

-Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
-                                    KSynchronizationObject** objects, const s32 num_objects,
-                                    s64 timeout) {
+Result KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
+                                    KSynchronizationObject** objects, const s32 num_objects,
+                                    s64 timeout) {
     // Allocate space on stack for thread nodes.
     std::vector<ThreadListNode> thread_nodes(num_objects);

     // Prepare for wait.
-    KThread* thread = GetCurrentThreadPointer(kernel_ctx);
+    KThread* thread = GetCurrentThreadPointer(kernel);
     KHardwareTimer* timer{};
-    ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects,
-                                                            thread_nodes.data(), num_objects);
+    ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel, objects, thread_nodes.data(),
+                                                            num_objects);

     {
         // Setup the scheduling lock and sleep.
-        KScopedSchedulerLockAndSleep slp(kernel_ctx, std::addressof(timer), thread, timeout);
+        KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), thread, timeout);

         // Check if the thread should terminate.
         if (thread->IsTerminationRequested()) {
             slp.CancelSleep();
-            return ResultTerminationRequested;
+            R_THROW(ResultTerminationRequested);
         }

         // Check if any of the objects are already signaled.
@@ -100,21 +100,21 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
             if (objects[i]->IsSignaled()) {
                 *out_index = i;
                 slp.CancelSleep();
-                return ResultSuccess;
+                R_THROW(ResultSuccess);
             }
         }

         // Check if the timeout is zero.
         if (timeout == 0) {
             slp.CancelSleep();
-            return ResultTimedOut;
+            R_THROW(ResultTimedOut);
         }

         // Check if waiting was canceled.
         if (thread->IsWaitCancelled()) {
             slp.CancelSleep();
             thread->ClearWaitCancelled();
-            return ResultCancelled;
+            R_THROW(ResultCancelled);
         }

         // Add the waiters.
@@ -141,7 +141,7 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
     *out_index = thread->GetSyncedIndex();

     // Get the wait result.
-    return thread->GetWaitResult();
+    R_RETURN(thread->GetWaitResult());
 }

 KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
@@ -158,7 +158,7 @@ void KSynchronizationObject::NotifyAvailable(Result result) {
     }

     // Iterate over each thread.
-    for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+    for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
         cur_node->thread->NotifyAvailable(this, result);
     }
 }
@@ -169,7 +169,7 @@ std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() co
     // If debugging, dump the list of waiters.
     {
         KScopedSchedulerLock lock(kernel);
-        for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+        for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
             threads.emplace_back(cur_node->thread);
         }
     }

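The converted early-outs use yuzu's R_* result macros instead of bare return statements. As a rough orientation, here is a minimal sketch of what such macros can expand to. It is illustrative only (the project's real macros are more involved), and DoWaitStep together with the literal error code are hypothetical.

// Minimal stand-ins for the result macros used in the hunk above.
// Illustrative only, not yuzu's actual definitions.
#include <cstdint>

struct Result {
    uint32_t raw;
    constexpr bool IsSuccess() const { return raw == 0; }
};
inline constexpr Result ResultSuccess{0};
inline constexpr Result ResultTimedOut{0xEA01}; // hypothetical value, for illustration

#define R_SUCCEED() return ResultSuccess     // finish successfully
#define R_THROW(res) return (res)            // early-out with a specific result
#define R_RETURN(expr) return (expr)         // propagate a computed result

// Hypothetical caller showing the early-out style the commit converts to.
inline Result DoWaitStep(int64_t timeout) {
    if (timeout == 0) {
        R_THROW(ResultTimedOut);
    }
    R_SUCCEED();
}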
View File

@@ -24,31 +24,30 @@ public:
         KThread* thread{};
     };

-    [[nodiscard]] static Result Wait(KernelCore& kernel, s32* out_index,
-                                     KSynchronizationObject** objects, const s32 num_objects,
-                                     s64 timeout);
+    static Result Wait(KernelCore& kernel, s32* out_index, KSynchronizationObject** objects,
+                       const s32 num_objects, s64 timeout);

     void Finalize() override;

-    [[nodiscard]] virtual bool IsSignaled() const = 0;
+    virtual bool IsSignaled() const = 0;

-    [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
+    std::vector<KThread*> GetWaitingThreadsForDebugging() const;

     void LinkNode(ThreadListNode* node_) {
         // Link the node to the list.
-        if (thread_list_tail == nullptr) {
-            thread_list_head = node_;
+        if (m_thread_list_tail == nullptr) {
+            m_thread_list_head = node_;
         } else {
-            thread_list_tail->next = node_;
+            m_thread_list_tail->next = node_;
         }

-        thread_list_tail = node_;
+        m_thread_list_tail = node_;
     }

     void UnlinkNode(ThreadListNode* node_) {
         // Unlink the node from the list.
         ThreadListNode* prev_ptr =
-            reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head));
+            reinterpret_cast<ThreadListNode*>(std::addressof(m_thread_list_head));
         ThreadListNode* prev_val = nullptr;
         ThreadListNode *prev, *tail_prev;

@@ -59,8 +58,8 @@ public:
             prev_val = prev_ptr;
         } while (prev_ptr != node_);

-        if (thread_list_tail == node_) {
-            thread_list_tail = tail_prev;
+        if (m_thread_list_tail == node_) {
+            m_thread_list_tail = tail_prev;
         }

         prev->next = node_->next;
@@ -78,8 +77,8 @@ protected:
     }

 private:
-    ThreadListNode* thread_list_head{};
-    ThreadListNode* thread_list_tail{};
+    ThreadListNode* m_thread_list_head{};
+    ThreadListNode* m_thread_list_tail{};
 };

 } // namespace Kernel

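LinkNode() and UnlinkNode() above maintain an intrusive singly linked list of waiting threads through head and tail pointers, now renamed with the m_ prefix. Below is a self-contained sketch of the same structure; Node and WaitList are stand-ins (the kernel nodes carry KThread pointers), and the unlink here uses a plain predecessor walk rather than the reinterpret_cast-of-head trick in the real code.

// Minimal sketch of a tail-appended intrusive singly linked list, assuming
// placeholder Node/WaitList types rather than the kernel's.
#include <cassert>

struct Node {
    Node* next{};
    int payload{}; // stand-in for the KThread pointer
};

class WaitList {
public:
    void Link(Node* node) {
        // Append at the tail; an empty list also updates the head.
        if (m_tail == nullptr) {
            m_head = node;
        } else {
            m_tail->next = node;
        }
        m_tail = node;
    }

    void Unlink(Node* node) {
        // Walk from the head to find the predecessor, then splice the node out.
        Node* prev = nullptr;
        for (Node* cur = m_head; cur != nullptr; prev = cur, cur = cur->next) {
            if (cur == node) {
                if (prev != nullptr) {
                    prev->next = node->next;
                } else {
                    m_head = node->next;
                }
                if (m_tail == node) {
                    m_tail = prev;
                }
                node->next = nullptr;
                return;
            }
        }
        assert(false && "node not linked");
    }

private:
    Node* m_head{};
    Node* m_tail{};
};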
View File

@@ -16,18 +16,18 @@ KTransferMemory::~KTransferMemory() = default;
 Result KTransferMemory::Initialize(VAddr address_, std::size_t size_,
                                    Svc::MemoryPermission owner_perm_) {
     // Set members.
-    owner = GetCurrentProcessPointer(kernel);
+    m_owner = GetCurrentProcessPointer(kernel);

     // TODO(bunnei): Lock for transfer memory

     // Set remaining tracking members.
-    owner->Open();
-    owner_perm = owner_perm_;
-    address = address_;
-    size = size_;
-    is_initialized = true;
+    m_owner->Open();
+    m_owner_perm = owner_perm_;
+    m_address = address_;
+    m_size = size_;
+    m_is_initialized = true;

-    return ResultSuccess;
+    R_SUCCEED();
 }

 void KTransferMemory::Finalize() {

View File

@@ -31,33 +31,33 @@ public:
     void Finalize() override;

     bool IsInitialized() const override {
-        return is_initialized;
+        return m_is_initialized;
     }

     uintptr_t GetPostDestroyArgument() const override {
-        return reinterpret_cast<uintptr_t>(owner);
+        return reinterpret_cast<uintptr_t>(m_owner);
     }

     static void PostDestroy(uintptr_t arg);

     KProcess* GetOwner() const override {
-        return owner;
+        return m_owner;
     }

     VAddr GetSourceAddress() const {
-        return address;
+        return m_address;
     }

     size_t GetSize() const {
-        return is_initialized ? size : 0;
+        return m_is_initialized ? m_size : 0;
     }

 private:
-    KProcess* owner{};
-    VAddr address{};
-    Svc::MemoryPermission owner_perm{};
-    size_t size{};
-    bool is_initialized{};
+    KProcess* m_owner{};
+    VAddr m_address{};
+    Svc::MemoryPermission m_owner_perm{};
+    size_t m_size{};
+    bool m_is_initialized{};
 };

 } // namespace Kernel

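The owner member renamed above is part of a reference-holding pattern: Initialize() opens a reference on the owning process, GetPostDestroyArgument() exposes that owner as a uintptr_t, and a plausible reading of the static PostDestroy() hook is that it releases the reference after the object itself has been freed. A rough sketch of that shape, with Process and TransferMemoryLike as stand-ins rather than the kernel's types:

// Sketch of the owner-reference pattern; types, counts, and the PostDestroy
// behavior shown here are illustrative assumptions, not kernel code.
#include <cstdint>

struct Process {
    int ref_count{1};
    void Open() { ++ref_count; }   // keep the process alive
    void Close() { --ref_count; }  // release the reference
};

class TransferMemoryLike {
public:
    void Initialize(Process* owner) {
        m_owner = owner;
        m_owner->Open(); // hold the owner for the lifetime of this object
    }

    uintptr_t GetPostDestroyArgument() const {
        return reinterpret_cast<uintptr_t>(m_owner);
    }

    static void PostDestroy(uintptr_t arg) {
        // Runs after the object memory has been released; only 'arg' survives.
        if (arg != 0) {
            reinterpret_cast<Process*>(arg)->Close();
        }
    }

private:
    Process* m_owner{};
};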
View File

@@ -10,14 +10,14 @@

 namespace Kernel {

-PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_)
-    : core_index{core_index_}, system{system_}, scheduler{scheduler_} {
+PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, KScheduler& scheduler)
+    : m_core_index{core_index}, m_system{system}, m_scheduler{scheduler} {
 #if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
     // TODO(bunnei): Initialization relies on a core being available. We may later replace this with
     // a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
     auto& kernel = system.Kernel();
-    arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
-        system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+    m_arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
+        system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), m_core_index);
 #else
 #error Platform not supported yet.
 #endif
@@ -25,13 +25,13 @@ PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KSche

 PhysicalCore::~PhysicalCore() = default;

-void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
+void PhysicalCore::Initialize(bool is_64_bit) {
 #if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
-    auto& kernel = system.Kernel();
+    auto& kernel = m_system.Kernel();
     if (!is_64_bit) {
         // We already initialized a 64-bit core, replace with a 32-bit one.
-        arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
-            system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+        m_arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
+            m_system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), m_core_index);
     }
 #else
 #error Platform not supported yet.
@@ -39,31 +39,30 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
 }

 void PhysicalCore::Run() {
-    arm_interface->Run();
-    arm_interface->ClearExclusiveState();
+    m_arm_interface->Run();
+    m_arm_interface->ClearExclusiveState();
 }

 void PhysicalCore::Idle() {
-    std::unique_lock lk{guard};
-    on_interrupt.wait(lk, [this] { return is_interrupted; });
+    std::unique_lock lk{m_guard};
+    m_on_interrupt.wait(lk, [this] { return m_is_interrupted; });
 }

 bool PhysicalCore::IsInterrupted() const {
-    return is_interrupted;
+    return m_is_interrupted;
 }

 void PhysicalCore::Interrupt() {
-    std::unique_lock lk{guard};
-    is_interrupted = true;
-    arm_interface->SignalInterrupt();
-    on_interrupt.notify_all();
+    std::unique_lock lk{m_guard};
+    m_is_interrupted = true;
+    m_arm_interface->SignalInterrupt();
+    m_on_interrupt.notify_all();
 }

 void PhysicalCore::ClearInterrupt() {
-    std::unique_lock lk{guard};
-    is_interrupted = false;
-    arm_interface->ClearInterrupt();
-    on_interrupt.notify_all();
+    std::unique_lock lk{m_guard};
+    m_is_interrupted = false;
+    m_arm_interface->ClearInterrupt();
 }

 } // namespace Kernel

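The renamed guard, on_interrupt, and is_interrupted members above form a small condition-variable handshake: Idle() blocks until the core is marked interrupted, Interrupt() sets the flag and wakes the waiter, and ClearInterrupt() only resets the flag (which is presumably why the notify_all() there could be dropped: nothing ever waits for the flag to become false). A minimal, self-contained sketch under those assumptions, with InterruptGate as a stand-in class:

// Minimal sketch of the idle/interrupt handshake; InterruptGate is a stand-in,
// not the PhysicalCore class itself.
#include <condition_variable>
#include <mutex>

class InterruptGate {
public:
    void Idle() {
        // Block the calling thread until an interrupt is pending.
        std::unique_lock lk{m_guard};
        m_on_interrupt.wait(lk, [this] { return m_is_interrupted; });
    }

    void Interrupt() {
        // Mark the interrupt pending and wake any idle waiter.
        std::unique_lock lk{m_guard};
        m_is_interrupted = true;
        m_on_interrupt.notify_all();
    }

    void ClearInterrupt() {
        // Reset the flag; no notification needed, waiters only wait for 'true'.
        std::unique_lock lk{m_guard};
        m_is_interrupted = false;
    }

private:
    std::mutex m_guard;
    std::condition_variable m_on_interrupt;
    bool m_is_interrupted{};
};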
View File

@@ -47,46 +47,38 @@ public:

     bool IsInterrupted() const;

     bool IsInitialized() const {
-        return arm_interface != nullptr;
+        return m_arm_interface != nullptr;
     }

     Core::ARM_Interface& ArmInterface() {
-        return *arm_interface;
+        return *m_arm_interface;
     }

     const Core::ARM_Interface& ArmInterface() const {
-        return *arm_interface;
+        return *m_arm_interface;
     }

-    bool IsMainCore() const {
-        return core_index == 0;
-    }
-
-    bool IsSystemCore() const {
-        return core_index == 3;
-    }
-
     std::size_t CoreIndex() const {
-        return core_index;
+        return m_core_index;
     }

     Kernel::KScheduler& Scheduler() {
-        return scheduler;
+        return m_scheduler;
     }

     const Kernel::KScheduler& Scheduler() const {
-        return scheduler;
+        return m_scheduler;
     }

 private:
-    const std::size_t core_index;
-    Core::System& system;
-    Kernel::KScheduler& scheduler;
-    std::mutex guard;
-    std::condition_variable on_interrupt;
-    std::unique_ptr<Core::ARM_Interface> arm_interface;
-    bool is_interrupted{};
+    const std::size_t m_core_index;
+    Core::System& m_system;
+    Kernel::KScheduler& m_scheduler;
+    std::mutex m_guard;
+    std::condition_variable m_on_interrupt;
+    std::unique_ptr<Core::ARM_Interface> m_arm_interface;
+    bool m_is_interrupted{};
 };

 } // namespace Kernel

View File

@@ -132,7 +132,7 @@ protected:

 template <typename Derived, typename Base>
 class KAutoObjectWithSlabHeapAndContainer : public Base {
-    static_assert(std::is_base_of<KAutoObjectWithList, Base>::value);
+    static_assert(std::is_base_of_v<KAutoObjectWithList, Base>);

 private:
     static Derived* Allocate(KernelCore& kernel) {
@@ -144,18 +144,18 @@
     }

 public:
-    KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {}
+    KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_) {}
     virtual ~KAutoObjectWithSlabHeapAndContainer() {}

     virtual void Destroy() override {
         const bool is_initialized = this->IsInitialized();
         uintptr_t arg = 0;
         if (is_initialized) {
-            kernel.ObjectListContainer().Unregister(this);
+            Base::kernel.ObjectListContainer().Unregister(this);
             arg = this->GetPostDestroyArgument();
             this->Finalize();
         }
-        Free(kernel, static_cast<Derived*>(this));
+        Free(Base::kernel, static_cast<Derived*>(this));
         if (is_initialized) {
             Derived::PostDestroy(arg);
         }
@@ -169,7 +169,7 @@ public:
     }

     size_t GetSlabIndex() const {
-        return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this));
+        return SlabHeap<Derived>(Base::kernel).GetObjectIndex(static_cast<const Derived*>(this));
     }

 public:
@@ -209,9 +209,6 @@ public:
     static size_t GetNumRemaining(KernelCore& kernel) {
         return kernel.SlabHeap<Derived>().GetNumRemaining();
     }
-
-protected:
-    KernelCore& kernel;
 };

 } // namespace Kernel
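The last hunk drops the shadowing KernelCore& kernel member and instead reaches the base class's member as Base::kernel. In a class template whose base depends on a template parameter, unqualified names are not looked up in that base, so the Base:: qualification (or this->) is what makes the reference compile. A minimal illustration with stand-in types (KernelCore here is an empty placeholder, not the real class):

// Why the template writes Base::kernel: members of a dependent base are not
// found by unqualified lookup. Stand-in types only.
struct KernelCore {};

struct BaseObject {
    explicit BaseObject(KernelCore& k) : kernel(k) {}
    KernelCore& kernel;
};

template <typename Derived, typename Base>
class WithContainer : public Base {
public:
    explicit WithContainer(KernelCore& k) : Base(k) {}

    KernelCore& Get() {
        // return kernel;        // error: unqualified lookup does not search the dependent base
        return Base::kernel;     // OK: qualified name, resolved in the base at instantiation time
        // return this->kernel;  // also OK: 'this->' makes the name dependent
    }
};

struct MyObject : WithContainer<MyObject, BaseObject> {
    explicit MyObject(KernelCore& k) : WithContainer<MyObject, BaseObject>(k) {}
};

int main() {
    KernelCore core;
    MyObject obj{core};
    (void)obj.Get();
    return 0;
}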