General: Cleanup legacy code.

Removes the deprecated CoreManager (core_manager.cpp/.h), the legacy thread
wakeup path in the kernel (ThreadWakeupCallback, WakeupAllWaitingThreads,
WakeAfterDelay, CancelWakeupTimer and the associated wakeup-callback plumbing),
and the old Core::HostTiming implementation (host_timing.cpp/.h), updating the
remaining call sites accordingly.
commit 48fa3b7a0f
parent c8bf47dcfb
@@ -17,8 +17,6 @@ add_library(core STATIC
     constants.h
     core.cpp
     core.h
-    core_manager.cpp
-    core_manager.h
     core_timing.cpp
     core_timing.h
     core_timing_util.cpp
@@ -12,7 +12,6 @@
 #include "core/arm/dynarmic/arm_dynarmic_64.h"
 #include "core/arm/dynarmic/arm_dynarmic_cp15.h"
 #include "core/core.h"
-#include "core/core_manager.h"
 #include "core/core_timing.h"
 #include "core/hle/kernel/svc.h"
 #include "core/memory.h"
@@ -11,7 +11,6 @@
 #include "core/arm/cpu_interrupt_handler.h"
 #include "core/arm/dynarmic/arm_dynarmic_64.h"
 #include "core/core.h"
-#include "core/core_manager.h"
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
 #include "core/gdbstub/gdbstub.h"
@@ -1,51 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <condition_variable>
-#include <mutex>
-
-#include "common/logging/log.h"
-#include "core/arm/exclusive_monitor.h"
-#include "core/arm/unicorn/arm_unicorn.h"
-#include "core/core.h"
-#include "core/core_manager.h"
-#include "core/core_timing.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/lock.h"
-#include "core/settings.h"
-
-namespace Core {
-
-CoreManager::CoreManager(System& system, std::size_t core_index)
-    : global_scheduler{system.GlobalScheduler()}, physical_core{system.Kernel().PhysicalCore(
-                                                      core_index)},
-      core_timing{system.CoreTiming()}, core_index{core_index} {}
-
-CoreManager::~CoreManager() = default;
-
-void CoreManager::RunLoop(bool tight_loop) {
-    /// Deprecated
-}
-
-void CoreManager::SingleStep() {
-    return RunLoop(false);
-}
-
-void CoreManager::PrepareReschedule() {
-    //physical_core.Stop();
-}
-
-void CoreManager::Reschedule() {
-    // Lock the global kernel mutex when we manipulate the HLE state
-    std::lock_guard lock(HLE::g_hle_lock);
-
-    // global_scheduler.SelectThread(core_index);
-
-    physical_core.Scheduler().TryDoContextSwitch();
-}
-
-} // namespace Core
@@ -1,63 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <atomic>
-#include <cstddef>
-#include <memory>
-#include "common/common_types.h"
-
-namespace Kernel {
-class GlobalScheduler;
-class PhysicalCore;
-} // namespace Kernel
-
-namespace Core {
-class System;
-}
-
-namespace Core::Timing {
-class CoreTiming;
-}
-
-namespace Core::Memory {
-class Memory;
-}
-
-namespace Core {
-
-constexpr unsigned NUM_CPU_CORES{4};
-
-class CoreManager {
-public:
-    CoreManager(System& system, std::size_t core_index);
-    ~CoreManager();
-
-    void RunLoop(bool tight_loop = true);
-
-    void SingleStep();
-
-    void PrepareReschedule();
-
-    bool IsMainCore() const {
-        return core_index == 0;
-    }
-
-    std::size_t CoreIndex() const {
-        return core_index;
-    }
-
-private:
-    void Reschedule();
-
-    Kernel::GlobalScheduler& global_scheduler;
-    Kernel::PhysicalCore& physical_core;
-    Timing::CoreTiming& core_timing;
-
-    std::atomic<bool> reschedule_pending = false;
-    std::size_t core_index;
-};
-
-} // namespace Core
@@ -35,7 +35,6 @@
 #include "common/swap.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
-#include "core/core_manager.h"
 #include "core/gdbstub/gdbstub.h"
 #include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
@@ -34,7 +34,7 @@ ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
     }

     // Wake the threads waiting on the ServerPort
-    server_port->WakeupAllWaitingThreads();
+    server_port->Signal();

     return MakeResult(std::move(client));
 }
@@ -48,72 +48,6 @@ MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));

 namespace Kernel {

-/**
- * Callback that will wake up the thread it was scheduled for
- * @param thread_handle The handle of the thread that's been awoken
- * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
- */
-static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
-    UNREACHABLE();
-    const auto proper_handle = static_cast<Handle>(thread_handle);
-    const auto& system = Core::System::GetInstance();
-
-    // Lock the global kernel mutex when we enter the kernel HLE.
-    std::lock_guard lock{HLE::g_hle_lock};
-
-    std::shared_ptr<Thread> thread =
-        system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
-    if (thread == nullptr) {
-        LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
-        return;
-    }
-
-    bool resume = true;
-
-    if (thread->GetStatus() == ThreadStatus::WaitSynch ||
-        thread->GetStatus() == ThreadStatus::WaitHLEEvent) {
-        // Remove the thread from each of its waiting objects' waitlists
-        for (const auto& object : thread->GetSynchronizationObjects()) {
-            object->RemoveWaitingThread(thread);
-        }
-        thread->ClearSynchronizationObjects();
-
-        // Invoke the wakeup callback before clearing the wait objects
-        if (thread->HasWakeupCallback()) {
-            resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
-        }
-    } else if (thread->GetStatus() == ThreadStatus::WaitMutex ||
-               thread->GetStatus() == ThreadStatus::WaitCondVar) {
-        thread->SetMutexWaitAddress(0);
-        thread->SetWaitHandle(0);
-        if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
-            thread->GetOwnerProcess()->RemoveConditionVariableThread(thread);
-            thread->SetCondVarWaitAddress(0);
-        }
-
-        auto* const lock_owner = thread->GetLockOwner();
-        // Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
-        // and don't have a lock owner unless SignalProcessWideKey was called first and the thread
-        // wasn't awakened due to the mutex already being acquired.
-        if (lock_owner != nullptr) {
-            lock_owner->RemoveMutexWaiter(thread);
-        }
-    }
-
-    if (thread->GetStatus() == ThreadStatus::WaitArb) {
-        auto& address_arbiter = thread->GetOwnerProcess()->GetAddressArbiter();
-        address_arbiter.HandleWakeupThread(thread);
-    }
-
-    if (resume) {
-        if (thread->GetStatus() == ThreadStatus::WaitCondVar ||
-            thread->GetStatus() == ThreadStatus::WaitArb) {
-            thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
-        }
-        thread->ResumeFromWait();
-    }
-}
-
 struct KernelCore::Impl {
     explicit Impl(Core::System& system, KernelCore& kernel)
         : global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {}
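Note that the callback deleted in this hunk was already dead code: it asserts UNREACHABLE() on entry, and the event type that referenced it (thread_wakeup_event_type) is removed in the hunks below. For readers unfamiliar with the retired mechanism, the following is a minimal, self-contained sketch of the underlying pattern only: a timed callback receives the thread's global handle as userdata and resolves it through a handle table before resuming the thread. All names and types here are illustrative stand-ins, not yuzu's.

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <unordered_map>

using Handle = std::uint32_t;

struct FakeThread {
    Handle handle{};
    bool waiting = true;
    void Resume() {
        waiting = false;
    }
};

// Stand-in for the kernel's global handle table.
std::unordered_map<Handle, std::shared_ptr<FakeThread>> g_handle_table;

// Stand-in for the scheduled core-timing callback: userdata carries the handle.
void ThreadWakeupCallback(std::uint64_t userdata, std::int64_t /*cycles_late*/) {
    const auto handle = static_cast<Handle>(userdata);
    const auto it = g_handle_table.find(handle);
    if (it == g_handle_table.end()) {
        std::cerr << "callback fired for invalid thread " << handle << '\n';
        return;
    }
    it->second->Resume();
}

int main() {
    auto thread = std::make_shared<FakeThread>();
    thread->handle = 0x42;
    g_handle_table.emplace(thread->handle, thread);

    // The timer would invoke this after the requested delay has elapsed.
    ThreadWakeupCallback(thread->handle, 0);
    std::cout << std::boolalpha << "still waiting: " << thread->waiting << '\n';
}
```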
@@ -129,7 +63,6 @@ struct KernelCore::Impl {
         InitializePhysicalCores();
         InitializeSystemResourceLimit(kernel);
         InitializeMemoryLayout();
-        InitializeThreads();
         InitializePreemption(kernel);
         InitializeSchedulers();
         InitializeSuspendThreads();
@@ -161,7 +94,6 @@ struct KernelCore::Impl {
         system_resource_limit = nullptr;

         global_handle_table.Clear();
-        thread_wakeup_event_type = nullptr;
         preemption_event = nullptr;

         global_scheduler.Shutdown();
@@ -210,11 +142,6 @@ struct KernelCore::Impl {
         }
     }

-    void InitializeThreads() {
-        thread_wakeup_event_type =
-            Core::Timing::CreateEvent("ThreadWakeupCallback", ThreadWakeupCallback);
-    }
-
     void InitializePreemption(KernelCore& kernel) {
         preemption_event = Core::Timing::CreateEvent(
             "PreemptionCallback", [this, &kernel](u64 userdata, s64 cycles_late) {
@@ -376,7 +303,6 @@ struct KernelCore::Impl {

     std::shared_ptr<ResourceLimit> system_resource_limit;

-    std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type;
     std::shared_ptr<Core::Timing::EventType> preemption_event;

     // This is the kernel's handle table or supervisor handle table which
@@ -516,7 +442,8 @@ std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore
     return impl->interrupts;
 }

-const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() const {
+const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
+    const {
     return impl->interrupts;
 }

@@ -595,10 +522,6 @@ u64 KernelCore::CreateNewUserProcessID() {
     return impl->next_user_process_id++;
 }

-const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallbackEventType() const {
-    return impl->thread_wakeup_event_type;
-}
-
 Kernel::HandleTable& KernelCore::GlobalHandleTable() {
     return impl->global_handle_table;
 }
@@ -241,9 +241,6 @@ private:
     /// Creates a new thread ID, incrementing the internal thread ID counter.
     u64 CreateNewThreadID();

-    /// Retrieves the event type used for thread wakeup callbacks.
-    const std::shared_ptr<Core::Timing::EventType>& ThreadWakeupCallbackEventType() const;
-
     /// Provides a reference to the global handle table.
     Kernel::HandleTable& GlobalHandleTable();

@@ -16,7 +16,6 @@
 #include "common/string_util.h"
 #include "core/arm/exclusive_monitor.h"
 #include "core/core.h"
-#include "core/core_manager.h"
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
 #include "core/cpu_manager.h"
@@ -1909,7 +1908,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
         return ERR_INVALID_COMBINATION;
     }

-    if (core < Core::NUM_CPU_CORES) {
+    if (core < Core::Hardware::NUM_CPU_CORES) {
         if ((affinity_mask & (1ULL << core)) == 0) {
             LOG_ERROR(Kernel_SVC,
                       "Core is not enabled for the current mask, core={}, mask={:016X}", core,
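Core::NUM_CPU_CORES was declared in the deleted core_manager.h with the value 4; the call site now reads Core::Hardware::NUM_CPU_CORES, which is assumed here to carry the same value. The following is a small self-contained sketch of the core-index and affinity-mask check this SVC performs; the helper name is hypothetical.

```cpp
#include <cstdint>
#include <iostream>

// Value taken from the deleted core_manager.h; assumed to match
// Core::Hardware::NUM_CPU_CORES in core/hardware_properties.h.
constexpr unsigned NUM_CPU_CORES = 4;

// Returns true when `core` is a valid core index that is enabled in `affinity_mask`.
bool IsCoreEnabled(std::uint32_t core, std::uint64_t affinity_mask) {
    if (core >= NUM_CPU_CORES) {
        return false;
    }
    return (affinity_mask & (1ULL << core)) != 0;
}

int main() {
    std::cout << std::boolalpha
              << IsCoreEnabled(1, 0b0011) << '\n'  // true: core 1 is in the mask
              << IsCoreEnabled(2, 0b0011) << '\n'  // false: core 2 is not in the mask
              << IsCoreEnabled(5, 0b1111) << '\n'; // false: core index out of range
}
```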
@@ -38,70 +38,6 @@ void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread)
     waiting_threads.erase(itr);
 }

-std::shared_ptr<Thread> SynchronizationObject::GetHighestPriorityReadyThread() const {
-    Thread* candidate = nullptr;
-    u32 candidate_priority = THREADPRIO_LOWEST + 1;
-
-    for (const auto& thread : waiting_threads) {
-        const ThreadStatus thread_status = thread->GetStatus();
-
-        // The list of waiting threads must not contain threads that are not waiting to be awakened.
-        ASSERT_MSG(thread_status == ThreadStatus::WaitSynch ||
-                       thread_status == ThreadStatus::WaitHLEEvent,
-                   "Inconsistent thread statuses in waiting_threads");
-
-        if (thread->GetPriority() >= candidate_priority)
-            continue;
-
-        if (ShouldWait(thread.get()))
-            continue;
-
-        candidate = thread.get();
-        candidate_priority = thread->GetPriority();
-    }
-
-    return SharedFrom(candidate);
-}
-
-void SynchronizationObject::WakeupWaitingThread(std::shared_ptr<Thread> thread) {
-    ASSERT(!ShouldWait(thread.get()));
-
-    if (!thread) {
-        return;
-    }
-
-    if (thread->IsSleepingOnWait()) {
-        for (const auto& object : thread->GetSynchronizationObjects()) {
-            ASSERT(!object->ShouldWait(thread.get()));
-            object->Acquire(thread.get());
-        }
-    } else {
-        Acquire(thread.get());
-    }
-
-    const std::size_t index = thread->GetSynchronizationObjectIndex(SharedFrom(this));
-
-    thread->ClearSynchronizationObjects();
-
-    thread->CancelWakeupTimer();
-
-    bool resume = true;
-    if (thread->HasWakeupCallback()) {
-        resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Signal, thread, SharedFrom(this),
-                                              index);
-    }
-    if (resume) {
-        thread->ResumeFromWait();
-        kernel.PrepareReschedule(thread->GetProcessorID());
-    }
-}
-
-void SynchronizationObject::WakeupAllWaitingThreads() {
-    while (auto thread = GetHighestPriorityReadyThread()) {
-        WakeupWaitingThread(thread);
-    }
-}
-
 void SynchronizationObject::ClearWaitingThreads() {
     waiting_threads.clear();
 }
@@ -50,21 +50,6 @@ public:
      */
     void RemoveWaitingThread(std::shared_ptr<Thread> thread);

-    /**
-     * Wake up all threads waiting on this object that can be awoken, in priority order,
-     * and set the synchronization result and output of the thread.
-     */
-    void /* deprecated */ WakeupAllWaitingThreads();
-
-    /**
-     * Wakes up a single thread waiting on this object.
-     * @param thread Thread that is waiting on this object to wakeup.
-     */
-    void WakeupWaitingThread(std::shared_ptr<Thread> thread);
-
-    /// Obtains the highest priority thread that is ready to run from this object's waiting list.
-    std::shared_ptr<Thread> /* deprecated */ GetHighestPriorityReadyThread() const;
-
     /// Get a const reference to the waiting threads list for debug use
     const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;

@@ -56,9 +56,6 @@ Thread::~Thread() = default;
 void Thread::Stop() {
     {
         SchedulerLock lock(kernel);
-        // Cancel any outstanding wakeup events for this thread
-        Core::System::GetInstance().CoreTiming().UnscheduleEvent(
-            kernel.ThreadWakeupCallbackEventType(), global_handle);
         SetStatus(ThreadStatus::Dead);
         Signal();
         kernel.GlobalHandleTable().Close(global_handle);
@@ -75,22 +72,6 @@ void Thread::Stop() {
     global_handle = 0;
 }

-void Thread::WakeAfterDelay(s64 nanoseconds) {
-    // Don't schedule a wakeup if the thread wants to wait forever
-    if (nanoseconds == -1)
-        return;
-
-    // This function might be called from any thread so we have to be cautious and use the
-    // thread-safe version of ScheduleEvent.
-    Core::System::GetInstance().CoreTiming().ScheduleEvent(
-        nanoseconds, kernel.ThreadWakeupCallbackEventType(), global_handle);
-}
-
-void Thread::CancelWakeupTimer() {
-    Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(),
-                                                             global_handle);
-}
-
 void Thread::ResumeFromWait() {
     SchedulerLock lock(kernel);
     switch (status) {
@@ -284,14 +265,6 @@ void Thread::SetPriority(u32 priority) {
     UpdatePriority();
 }

-void Thread::SetWaitSynchronizationResult(ResultCode result) {
-    UNREACHABLE();
-}
-
-void Thread::SetWaitSynchronizationOutput(s32 output) {
-    UNREACHABLE();
-}
-
 void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) {
     signaling_object = object;
     signaling_result = result;
@@ -425,13 +398,6 @@ bool Thread::AllSynchronizationObjectsReady() const {
     });
 }

-bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
-                                  std::shared_ptr<SynchronizationObject> object,
-                                  std::size_t index) {
-    ASSERT(wakeup_callback);
-    return wakeup_callback(reason, std::move(thread), std::move(object), index);
-}
-
 bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
     ASSERT(hle_callback);
     return hle_callback(std::move(thread));
@@ -128,9 +128,6 @@ public:

     using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>;

-    using WakeupCallback =
-        std::function<bool(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
-                           std::shared_ptr<SynchronizationObject> object, std::size_t index)>;
     using HLECallback = std::function<bool(std::shared_ptr<Thread> thread)>;

     /**
@@ -235,7 +232,7 @@ public:
     }

     /// Resumes a thread from waiting
-    void /* deprecated */ ResumeFromWait();
+    void ResumeFromWait();

     void OnWakeUp();

@@ -249,27 +246,6 @@ public:
     ///
     void CancelWait();

-    /**
-     * Schedules an event to wake up the specified thread after the specified delay
-     * @param nanoseconds The time this thread will be allowed to sleep for
-     */
-    void /* deprecated */ WakeAfterDelay(s64 nanoseconds);
-
-    /// Cancel any outstanding wakeup events for this thread
-    void /* deprecated */ CancelWakeupTimer();
-
-    /**
-     * Sets the result after the thread awakens (from svcWaitSynchronization)
-     * @param result Value to set to the returned result
-     */
-    void /*deprecated*/ SetWaitSynchronizationResult(ResultCode result);
-
-    /**
-     * Sets the output parameter value after the thread awakens (from svcWaitSynchronization)
-     * @param output Value to set to the output parameter
-     */
-    void /*deprecated*/ SetWaitSynchronizationOutput(s32 output);
-
     void SetSynchronizationResults(SynchronizationObject* object, ResultCode result);

     Core::ARM_Interface& ArmInterface();
@@ -330,11 +306,6 @@ public:
      */
     VAddr GetCommandBufferAddress() const;

-    /// Returns whether this thread is waiting on objects from a WaitSynchronization call.
-    bool IsSleepingOnWait() const {
-        return status == ThreadStatus::WaitSynch;
-    }
-
     ThreadContext32& GetContext32() {
         return context_32;
     }
|
|||
arb_wait_address = address;
|
||||
}
|
||||
|
||||
bool HasWakeupCallback() const {
|
||||
return wakeup_callback != nullptr;
|
||||
}
|
||||
|
||||
bool HasHLECallback() const {
|
||||
return hle_callback != nullptr;
|
||||
}
|
||||
|
||||
void SetWakeupCallback(WakeupCallback callback) {
|
||||
wakeup_callback = std::move(callback);
|
||||
}
|
||||
|
||||
void SetHLECallback(HLECallback callback) {
|
||||
hle_callback = std::move(callback);
|
||||
}
|
||||
|
@@ -501,22 +464,10 @@ public:
         return hle_object;
     }

-    void InvalidateWakeupCallback() {
-        SetWakeupCallback(nullptr);
-    }
-
     void InvalidateHLECallback() {
         SetHLECallback(nullptr);
     }

-    /**
-     * Invokes the thread's wakeup callback.
-     *
-     * @pre A valid wakeup callback has been set. Violating this precondition
-     * will cause an assertion to trigger.
-     */
-    bool InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
-                              std::shared_ptr<SynchronizationObject> object, std::size_t index);
     bool InvokeHLECallback(std::shared_ptr<Thread> thread);

     u32 GetIdealCore() const {
@@ -698,11 +649,6 @@ private:
     /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
     Handle global_handle = 0;

-    /// Callback that will be invoked when the thread is resumed from a waiting state. If the thread
-    /// was waiting via WaitSynchronization then the object will be the last object that became
-    /// available. In case of a timeout, the object will be nullptr. DEPRECATED
-    WakeupCallback wakeup_callback;
-
     /// Callback for HLE Events
     HLECallback hle_callback;
     Handle hle_time_event;
@@ -142,7 +142,7 @@ void SM::GetService(Kernel::HLERequestContext& ctx) {
     }

     // Wake the threads waiting on the ServerPort
-    server_port->WakeupAllWaitingThreads();
+    server_port->Signal();

     LOG_DEBUG(Service_SM, "called service={} -> session={}", name, client->GetObjectId());
     IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
@@ -1,206 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include "core/host_timing.h"
-
-#include <algorithm>
-#include <mutex>
-#include <string>
-#include <tuple>
-
-#include "common/assert.h"
-#include "core/core_timing_util.h"
-
-namespace Core::HostTiming {
-
-std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) {
-    return std::make_shared<EventType>(std::move(callback), std::move(name));
-}
-
-struct CoreTiming::Event {
-    u64 time;
-    u64 fifo_order;
-    u64 userdata;
-    std::weak_ptr<EventType> type;
-
-    // Sort by time, unless the times are the same, in which case sort by
-    // the order added to the queue
-    friend bool operator>(const Event& left, const Event& right) {
-        return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
-    }
-
-    friend bool operator<(const Event& left, const Event& right) {
-        return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
-    }
-};
-
-CoreTiming::CoreTiming() {
-    clock =
-        Common::CreateBestMatchingClock(Core::Hardware::BASE_CLOCK_RATE, Core::Hardware::CNTFREQ);
-}
-
-CoreTiming::~CoreTiming() = default;
-
-void CoreTiming::ThreadEntry(CoreTiming& instance) {
-    instance.ThreadLoop();
-}
-
-void CoreTiming::Initialize() {
-    event_fifo_id = 0;
-    const auto empty_timed_callback = [](u64, s64) {};
-    ev_lost = CreateEvent("_lost_event", empty_timed_callback);
-    timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
-}
-
-void CoreTiming::Shutdown() {
-    paused = true;
-    shutting_down = true;
-    event.Set();
-    timer_thread->join();
-    ClearPendingEvents();
-    timer_thread.reset();
-    has_started = false;
-}
-
-void CoreTiming::Pause(bool is_paused) {
-    paused = is_paused;
-}
-
-void CoreTiming::SyncPause(bool is_paused) {
-    if (is_paused == paused && paused_set == paused) {
-        return;
-    }
-    Pause(is_paused);
-    event.Set();
-    while (paused_set != is_paused)
-        ;
-}
-
-bool CoreTiming::IsRunning() const {
-    return !paused_set;
-}
-
-bool CoreTiming::HasPendingEvents() const {
-    return !(wait_set && event_queue.empty());
-}
-
-void CoreTiming::ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
-                               u64 userdata) {
-    basic_lock.lock();
-    const u64 timeout = static_cast<u64>(GetGlobalTimeNs().count() + ns_into_future);
-
-    event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
-
-    std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
-    basic_lock.unlock();
-    event.Set();
-}
-
-void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) {
-    basic_lock.lock();
-    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
-        return e.type.lock().get() == event_type.get() && e.userdata == userdata;
-    });
-
-    // Removing random items breaks the invariant so we have to re-establish it.
-    if (itr != event_queue.end()) {
-        event_queue.erase(itr, event_queue.end());
-        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
-    }
-    basic_lock.unlock();
-}
-
-void CoreTiming::AddTicks(std::size_t core_index, u64 ticks) {
-    ticks_count[core_index] += ticks;
-}
-
-void CoreTiming::ResetTicks(std::size_t core_index) {
-    ticks_count[core_index] = 0;
-}
-
-u64 CoreTiming::GetCPUTicks() const {
-    return clock->GetCPUCycles();
-}
-
-u64 CoreTiming::GetClockTicks() const {
-    return clock->GetClockCycles();
-}
-
-void CoreTiming::ClearPendingEvents() {
-    event_queue.clear();
-}
-
-void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
-    basic_lock.lock();
-
-    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
-        return e.type.lock().get() == event_type.get();
-    });
-
-    // Removing random items breaks the invariant so we have to re-establish it.
-    if (itr != event_queue.end()) {
-        event_queue.erase(itr, event_queue.end());
-        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
-    }
-    basic_lock.unlock();
-}
-
-std::optional<u64> CoreTiming::Advance() {
-    advance_lock.lock();
-    basic_lock.lock();
-    global_timer = GetGlobalTimeNs().count();
-
-    while (!event_queue.empty() && event_queue.front().time <= global_timer) {
-        Event evt = std::move(event_queue.front());
-        std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
-        event_queue.pop_back();
-        basic_lock.unlock();
-
-        if (auto event_type{evt.type.lock()}) {
-            event_type->callback(evt.userdata, global_timer - evt.time);
-        }
-
-        basic_lock.lock();
-    }
-
-    if (!event_queue.empty()) {
-        const u64 next_time = event_queue.front().time - global_timer;
-        basic_lock.unlock();
-        advance_lock.unlock();
-        return next_time;
-    } else {
-        basic_lock.unlock();
-        advance_lock.unlock();
-        return std::nullopt;
-    }
-}
-
-void CoreTiming::ThreadLoop() {
-    has_started = true;
-    while (!shutting_down) {
-        while (!paused) {
-            paused_set = false;
-            const auto next_time = Advance();
-            if (next_time) {
-                std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
-                event.WaitFor(next_time_ns);
-            } else {
-                wait_set = true;
-                event.Wait();
-            }
-            wait_set = false;
-        }
-        paused_set = true;
-    }
-}
-
-std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
-    return clock->GetTimeNS();
-}
-
-std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
-    return clock->GetTimeUS();
-}
-
-} // namespace Core::HostTiming
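The deleted implementation keeps pending events in a std::vector maintained as a min-heap via std::push_heap/std::pop_heap/std::make_heap with std::greater<>, rather than in a std::priority_queue, because UnscheduleEvent()/RemoveEvent() must erase arbitrary entries and then rebuild the heap (see also the comment in the header removed below). Below is a self-contained sketch of that pattern with simplified, illustrative types; it is not the deleted CoreTiming itself.

```cpp
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iostream>
#include <tuple>
#include <vector>

struct Event {
    std::uint64_t time;       // absolute deadline
    std::uint64_t fifo_order; // tie-breaker: insertion order
    int id;

    // Order by time, then by insertion order, mirroring the removed CoreTiming::Event.
    friend bool operator>(const Event& left, const Event& right) {
        return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
    }
};

int main() {
    std::vector<Event> queue;
    std::uint64_t fifo_id = 0;

    const auto schedule = [&](std::uint64_t time, int id) {
        queue.push_back(Event{time, fifo_id++, id});
        // Keep the vector ordered as a min-heap on (time, fifo_order).
        std::push_heap(queue.begin(), queue.end(), std::greater<>());
    };

    schedule(30, 1);
    schedule(10, 2);
    schedule(20, 3);

    // Erase an arbitrary event (id == 3), then re-establish the heap invariant,
    // as UnscheduleEvent()/RemoveEvent() do in the deleted file.
    queue.erase(std::remove_if(queue.begin(), queue.end(),
                               [](const Event& e) { return e.id == 3; }),
                queue.end());
    std::make_heap(queue.begin(), queue.end(), std::greater<>());

    // Pop events in deadline order, as Advance() does.
    while (!queue.empty()) {
        std::pop_heap(queue.begin(), queue.end(), std::greater<>());
        const Event next = queue.back();
        queue.pop_back();
        std::cout << "event " << next.id << " due at t=" << next.time << '\n';
    }
}
```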
@@ -1,160 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <atomic>
-#include <chrono>
-#include <functional>
-#include <memory>
-#include <mutex>
-#include <optional>
-#include <string>
-#include <thread>
-#include <vector>
-
-#include "common/common_types.h"
-#include "common/spin_lock.h"
-#include "common/thread.h"
-#include "common/threadsafe_queue.h"
-#include "common/wall_clock.h"
-#include "core/hardware_properties.h"
-
-namespace Core::HostTiming {
-
-/// A callback that may be scheduled for a particular core timing event.
-using TimedCallback = std::function<void(u64 userdata, s64 cycles_late)>;
-
-/// Contains the characteristics of a particular event.
-struct EventType {
-    EventType(TimedCallback&& callback, std::string&& name)
-        : callback{std::move(callback)}, name{std::move(name)} {}
-
-    /// The event's callback function.
-    TimedCallback callback;
-    /// A pointer to the name of the event.
-    const std::string name;
-};
-
-/**
- * This is a system to schedule events into the emulated machine's future. Time is measured
- * in main CPU clock cycles.
- *
- * To schedule an event, you first have to register its type. This is where you pass in the
- * callback. You then schedule events using the type id you get back.
- *
- * The int cyclesLate that the callbacks get is how many cycles late it was.
- * So to schedule a new event on a regular basis:
- * inside callback:
- *   ScheduleEvent(periodInCycles - cyclesLate, callback, "whatever")
- */
-class CoreTiming {
-public:
-    CoreTiming();
-    ~CoreTiming();
-
-    CoreTiming(const CoreTiming&) = delete;
-    CoreTiming(CoreTiming&&) = delete;
-
-    CoreTiming& operator=(const CoreTiming&) = delete;
-    CoreTiming& operator=(CoreTiming&&) = delete;
-
-    /// CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is
-    /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
-    void Initialize();
-
-    /// Tears down all timing related functionality.
-    void Shutdown();
-
-    /// Pauses/Unpauses the execution of the timer thread.
-    void Pause(bool is_paused);
-
-    /// Pauses/Unpauses the execution of the timer thread and waits until paused.
-    void SyncPause(bool is_paused);
-
-    /// Checks if core timing is running.
-    bool IsRunning() const;
-
-    /// Checks if the timer thread has started.
-    bool HasStarted() const {
-        return has_started;
-    }
-
-    /// Checks if there are any pending time events.
-    bool HasPendingEvents() const;
-
-    /// Schedules an event in core timing
-    void ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
-                       u64 userdata = 0);
-
-    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata);
-
-    /// We only permit one event of each type in the queue at a time.
-    void RemoveEvent(const std::shared_ptr<EventType>& event_type);
-
-    void AddTicks(std::size_t core_index, u64 ticks);
-
-    void ResetTicks(std::size_t core_index);
-
-    /// Returns current time in emulated CPU cycles
-    u64 GetCPUTicks() const;
-
-    /// Returns current time in emulated in Clock cycles
-    u64 GetClockTicks() const;
-
-    /// Returns current time in microseconds.
-    std::chrono::microseconds GetGlobalTimeUs() const;
-
-    /// Returns current time in nanoseconds.
-    std::chrono::nanoseconds GetGlobalTimeNs() const;
-
-    /// Checks for events manually and returns time in nanoseconds for next event, threadsafe.
-    std::optional<u64> Advance();
-
-private:
-    struct Event;
-
-    /// Clear all pending events. This should ONLY be done on exit.
-    void ClearPendingEvents();
-
-    static void ThreadEntry(CoreTiming& instance);
-    void ThreadLoop();
-
-    std::unique_ptr<Common::WallClock> clock;
-
-    u64 global_timer = 0;
-
-    std::chrono::nanoseconds start_point;
-
-    // The queue is a min-heap using std::make_heap/push_heap/pop_heap.
-    // We don't use std::priority_queue because we need to be able to serialize, unserialize and
-    // erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't
-    // accomodated by the standard adaptor class.
-    std::vector<Event> event_queue;
-    u64 event_fifo_id = 0;
-
-    std::shared_ptr<EventType> ev_lost;
-    Common::Event event{};
-    Common::SpinLock basic_lock{};
-    Common::SpinLock advance_lock{};
-    std::unique_ptr<std::thread> timer_thread;
-    std::atomic<bool> paused{};
-    std::atomic<bool> paused_set{};
-    std::atomic<bool> wait_set{};
-    std::atomic<bool> shutting_down{};
-    std::atomic<bool> has_started{};
-
-    std::array<std::atomic<u64>, Core::Hardware::NUM_CPU_CORES> ticks_count{};
-};
-
-/// Creates a core timing event with the given name and callback.
-///
-/// @param name The name of the core timing event to create.
-/// @param callback The callback to execute for the event.
-///
-/// @returns An EventType instance representing the created event.
-///
-std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback);
-
-} // namespace Core::HostTiming
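The class comment in the deleted header describes the intended usage: create an event type once with CreateEvent(), schedule it with ScheduleEvent(), and have a periodic event reschedule itself from inside its own callback, subtracting cycles_late so the period does not drift. The sketch below demonstrates that rescheduling idiom against a toy scheduler; none of these types are yuzu's, and the surviving Core::Timing interface is only assumed to follow the same convention (its CreateEvent/ScheduleEvent calls appear in the thread.cpp hunks above).

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <queue>
#include <vector>

using TimedCallback = std::function<void(std::uint64_t userdata, std::int64_t cycles_late)>;

struct Pending {
    std::uint64_t due;
    TimedCallback callback;
    std::uint64_t userdata;

    friend bool operator>(const Pending& left, const Pending& right) {
        return left.due > right.due;
    }
};

struct ToyTiming {
    std::uint64_t now = 0;
    std::priority_queue<Pending, std::vector<Pending>, std::greater<>> queue;

    void ScheduleEvent(std::int64_t cycles_into_future, TimedCallback callback,
                       std::uint64_t userdata) {
        queue.push(Pending{now + static_cast<std::uint64_t>(cycles_into_future),
                           std::move(callback), userdata});
    }

    // Advance host time to `end`, firing due events and reporting how late each fired.
    void AdvanceTo(std::uint64_t end) {
        while (!queue.empty() && queue.top().due <= end) {
            const Pending event = queue.top();
            queue.pop();
            now = end; // pretend the timer thread woke up late, at `end`
            event.callback(event.userdata, static_cast<std::int64_t>(now - event.due));
        }
        now = end;
    }
};

int main() {
    ToyTiming timing;
    constexpr std::int64_t period = 100;

    // The periodic idiom from the deleted header's comment: reschedule with
    // (period - cycles_late) so the next deadline stays on the 100-cycle grid.
    TimedCallback tick = [&](std::uint64_t userdata, std::int64_t cycles_late) {
        std::cout << "tick " << userdata << ", late by " << cycles_late << '\n';
        timing.ScheduleEvent(period - cycles_late, tick, userdata + 1);
    };

    timing.ScheduleEvent(period, tick, 0);
    timing.AdvanceTo(130); // first tick was due at 100, fires 30 late
    timing.AdvanceTo(210); // rescheduled for 200, fires 10 late
}
```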
@@ -39,6 +39,7 @@ u64 callbacks_done = 0;

 struct ScopeInit final {
     ScopeInit() {
+        core_timing.SetMulticore(true);
         core_timing.Initialize([]() {});
     }
     ~ScopeInit() {
@@ -340,7 +340,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {

     if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynch) {
         list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetSynchronizationObjects(),
-                                                            thread.IsSleepingOnWait()));
+                                                            thread.IsWaitingSync()));
     }

     list.push_back(std::make_unique<WaitTreeCallstack>(thread));