Merge pull request #4348 from lioncash/nano
core_timing: Make usage of nanoseconds more consistent in the interface
Commit 4a8cb9a706
@@ -38,7 +38,7 @@ Stream::Stream(Core::Timing::CoreTiming& core_timing, u32 sample_rate, Format fo
       sink_stream{sink_stream}, core_timing{core_timing}, name{std::move(name_)} {

     release_event = Core::Timing::CreateEvent(
-        name, [this](u64 userdata, s64 cycles_late) { ReleaseActiveBuffer(cycles_late); });
+        name, [this](u64, std::chrono::nanoseconds ns_late) { ReleaseActiveBuffer(ns_late); });
 }

 void Stream::Play() {
@@ -59,11 +59,9 @@ Stream::State Stream::GetState() const {
     return state;
 }

-s64 Stream::GetBufferReleaseNS(const Buffer& buffer) const {
+std::chrono::nanoseconds Stream::GetBufferReleaseNS(const Buffer& buffer) const {
     const std::size_t num_samples{buffer.GetSamples().size() / GetNumChannels()};
-    const auto ns =
-        std::chrono::nanoseconds((static_cast<u64>(num_samples) * 1000000000ULL) / sample_rate);
-    return ns.count();
+    return std::chrono::nanoseconds((static_cast<u64>(num_samples) * 1000000000ULL) / sample_rate);
 }

 static void VolumeAdjustSamples(std::vector<s16>& samples, float game_volume) {
@@ -80,7 +78,7 @@ static void VolumeAdjustSamples(std::vector<s16>& samples, float game_volume) {
     }
 }

-void Stream::PlayNextBuffer(s64 cycles_late) {
+void Stream::PlayNextBuffer(std::chrono::nanoseconds ns_late) {
     if (!IsPlaying()) {
         // Ensure we are in playing state before playing the next buffer
         sink_stream.Flush();
@@ -105,17 +103,18 @@ void Stream::PlayNextBuffer(s64 cycles_late) {

     sink_stream.EnqueueSamples(GetNumChannels(), active_buffer->GetSamples());

-    core_timing.ScheduleEvent(
-        GetBufferReleaseNS(*active_buffer) -
-            (Settings::values.enable_audio_stretching.GetValue() ? 0 : cycles_late),
-        release_event, {});
+    const auto time_stretch_delta = Settings::values.enable_audio_stretching.GetValue()
+                                        ? std::chrono::nanoseconds::zero()
+                                        : ns_late;
+    const auto future_time = GetBufferReleaseNS(*active_buffer) - time_stretch_delta;
+    core_timing.ScheduleEvent(future_time, release_event, {});
 }

-void Stream::ReleaseActiveBuffer(s64 cycles_late) {
+void Stream::ReleaseActiveBuffer(std::chrono::nanoseconds ns_late) {
     ASSERT(active_buffer);
     released_buffers.push(std::move(active_buffer));
     release_callback();
-    PlayNextBuffer(cycles_late);
+    PlayNextBuffer(ns_late);
 }

 bool Stream::QueueBuffer(BufferPtr&& buffer) {
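Note on the GetBufferReleaseNS() hunk above: the function now returns std::chrono::nanoseconds directly instead of an s64 tick count. As a hedged, standalone sketch of the same arithmetic (the helper name and the sample values in the comment are invented for illustration, not taken from the commit):

    // Sketch: nanoseconds until a buffer of num_samples frames at sample_rate Hz is consumed.
    #include <chrono>
    #include <cstdint>

    std::chrono::nanoseconds BufferReleaseTime(std::uint64_t num_samples, std::uint32_t sample_rate) {
        // num_samples / sample_rate seconds, expressed as integer nanoseconds.
        return std::chrono::nanoseconds{(num_samples * 1'000'000'000ULL) / sample_rate};
    }

    // Example: 480 samples at 48000 Hz -> std::chrono::nanoseconds{10'000'000}, i.e. 10 ms.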
@@ -4,6 +4,7 @@

 #pragma once

+#include <chrono>
 #include <functional>
 #include <memory>
 #include <string>
@@ -90,16 +91,13 @@ public:

 private:
     /// Plays the next queued buffer in the audio stream, starting playback if necessary
-    void PlayNextBuffer(s64 cycles_late = 0);
+    void PlayNextBuffer(std::chrono::nanoseconds ns_late = {});

     /// Releases the actively playing buffer, signalling that it has been completed
-    void ReleaseActiveBuffer(s64 cycles_late = 0);
+    void ReleaseActiveBuffer(std::chrono::nanoseconds ns_late = {});

     /// Gets the number of core cycles when the specified buffer will be released
-    s64 GetBufferReleaseNS(const Buffer& buffer) const;
-
-    /// Gets the number of core cycles when the specified buffer will be released
-    s64 GetBufferReleaseNSHostTiming(const Buffer& buffer) const;
+    std::chrono::nanoseconds GetBufferReleaseNS(const Buffer& buffer) const;

     u32 sample_rate; ///< Sample rate of the stream
     Format format;   ///< Format of the stream
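Note on the defaults above, a hedged aside rather than code from the commit: the "= {}" default value-initializes the duration, which is the chrono counterpart of the old "= 0" default:

    #include <chrono>

    static_assert(std::chrono::nanoseconds{}.count() == 0,
                  "a {}-initialized duration holds zero ticks");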
@@ -53,12 +53,12 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
     instance.ThreadLoop();
 }

-void CoreTiming::Initialize(std::function<void(void)>&& on_thread_init_) {
+void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
     on_thread_init = std::move(on_thread_init_);
     event_fifo_id = 0;
     shutting_down = false;
     ticks = 0;
-    const auto empty_timed_callback = [](u64, s64) {};
+    const auto empty_timed_callback = [](u64, std::chrono::nanoseconds) {};
     ev_lost = CreateEvent("_lost_event", empty_timed_callback);
     if (is_multicore) {
         timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
@@ -106,11 +106,11 @@ bool CoreTiming::HasPendingEvents() const {
     return !(wait_set && event_queue.empty());
 }

-void CoreTiming::ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
-                               u64 userdata) {
+void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
+                               const std::shared_ptr<EventType>& event_type, u64 userdata) {
     {
         std::scoped_lock scope{basic_lock};
-        const u64 timeout = static_cast<u64>(GetGlobalTimeNs().count() + ns_into_future);
+        const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count());

         event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});

@@ -195,8 +195,9 @@ std::optional<s64> CoreTiming::Advance() {
         event_queue.pop_back();
         basic_lock.unlock();

-        if (auto event_type{evt.type.lock()}) {
-            event_type->callback(evt.userdata, global_timer - evt.time);
+        if (const auto event_type{evt.type.lock()}) {
+            event_type->callback(
+                evt.userdata, std::chrono::nanoseconds{static_cast<s64>(global_timer - evt.time)});
         }

         basic_lock.lock();
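Note on the ScheduleEvent() signature change above: taking std::chrono::nanoseconds instead of a raw s64 lets callers pass any coarser std::chrono duration and rely on the implicit, lossless conversion. A hedged, self-contained sketch (ScheduleIn() is an invented stand-in, not the real API):

    #include <chrono>
    #include <iostream>

    // Stand-in with the same parameter type as the new CoreTiming::ScheduleEvent.
    void ScheduleIn(std::chrono::nanoseconds ns_into_future) {
        std::cout << ns_into_future.count() << " ns\n";
    }

    int main() {
        using namespace std::chrono_literals;
        ScheduleIn(10ms);                         // prints 10000000 ns
        ScheduleIn(500us);                        // prints 500000 ns
        ScheduleIn(std::chrono::nanoseconds{10}); // prints 10 ns
        return 0;
    }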
@@ -17,14 +17,12 @@
 #include "common/common_types.h"
 #include "common/spin_lock.h"
 #include "common/thread.h"
-#include "common/threadsafe_queue.h"
 #include "common/wall_clock.h"
-#include "core/hardware_properties.h"

 namespace Core::Timing {

 /// A callback that may be scheduled for a particular core timing event.
-using TimedCallback = std::function<void(u64 userdata, s64 cycles_late)>;
+using TimedCallback = std::function<void(u64 userdata, std::chrono::nanoseconds ns_late)>;

 /// Contains the characteristics of a particular event.
 struct EventType {
@@ -42,12 +40,12 @@ struct EventType {
  * in main CPU clock cycles.
  *
  * To schedule an event, you first have to register its type. This is where you pass in the
- * callback. You then schedule events using the type id you get back.
+ * callback. You then schedule events using the type ID you get back.
  *
- * The int cyclesLate that the callbacks get is how many cycles late it was.
+ * The s64 ns_late that the callbacks get is how many ns late it was.
 * So to schedule a new event on a regular basis:
 * inside callback:
-*   ScheduleEvent(periodInCycles - cyclesLate, callback, "whatever")
+*   ScheduleEvent(period_in_ns - ns_late, callback, "whatever")
 */
 class CoreTiming {
 public:
@@ -62,7 +60,7 @@ public:

     /// CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is
     /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
-    void Initialize(std::function<void(void)>&& on_thread_init_);
+    void Initialize(std::function<void()>&& on_thread_init_);

     /// Tears down all timing related functionality.
     void Shutdown();
@@ -95,8 +93,8 @@ public:
     bool HasPendingEvents() const;

     /// Schedules an event in core timing
-    void ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
-                       u64 userdata = 0);
+    void ScheduleEvent(std::chrono::nanoseconds ns_into_future,
+                       const std::shared_ptr<EventType>& event_type, u64 userdata = 0);

     void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata);

@@ -141,8 +139,6 @@ private:

     u64 global_timer = 0;

-    std::chrono::nanoseconds start_point;
-
     // The queue is a min-heap using std::make_heap/push_heap/pop_heap.
     // We don't use std::priority_queue because we need to be able to serialize, unserialize and
     // erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't
@@ -161,7 +157,7 @@ private:
     std::atomic<bool> wait_set{};
     std::atomic<bool> shutting_down{};
     std::atomic<bool> has_started{};
-    std::function<void(void)> on_thread_init{};
+    std::function<void()> on_thread_init{};

     bool is_multicore{};

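Note on the updated class comment above: the "ScheduleEvent(period_in_ns - ns_late, ...)" advice is the usual self-rescheduling pattern. A hedged sketch of that pattern against the CreateEvent/ScheduleEvent signatures in this commit; the event name, the 10 ms period, and RegisterExampleTick() are invented for illustration, and core_timing is assumed to outlive the event:

    #include <chrono>
    #include <memory>
    // Assumes the Core::Timing declarations from core_timing.h above are available.

    constexpr std::chrono::nanoseconds example_period = std::chrono::milliseconds{10};

    std::shared_ptr<Core::Timing::EventType> example_event;

    void RegisterExampleTick(Core::Timing::CoreTiming& core_timing) {
        example_event = Core::Timing::CreateEvent(
            "ExampleTick", [&core_timing](u64, std::chrono::nanoseconds ns_late) {
                // Subtract the lateness so the long-run average period stays at 10 ms.
                core_timing.ScheduleEvent(example_period - ns_late, example_event);
            });
        core_timing.ScheduleEvent(example_period, example_event);
    }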
@@ -11,19 +11,20 @@
 namespace Core::Hardware {

 InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) {
-    gpu_interrupt_event = Core::Timing::CreateEvent("GPUInterrupt", [this](u64 message, s64) {
-        auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv");
-        const u32 syncpt = static_cast<u32>(message >> 32);
-        const u32 value = static_cast<u32>(message);
-        nvdrv->SignalGPUInterruptSyncpt(syncpt, value);
-    });
+    gpu_interrupt_event =
+        Core::Timing::CreateEvent("GPUInterrupt", [this](u64 message, std::chrono::nanoseconds) {
+            auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv");
+            const u32 syncpt = static_cast<u32>(message >> 32);
+            const u32 value = static_cast<u32>(message);
+            nvdrv->SignalGPUInterruptSyncpt(syncpt, value);
+        });
 }

 InterruptManager::~InterruptManager() = default;

 void InterruptManager::GPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
     const u64 msg = (static_cast<u64>(syncpoint_id) << 32ULL) | value;
-    system.CoreTiming().ScheduleEvent(10, gpu_interrupt_event, msg);
+    system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{10}, gpu_interrupt_event, msg);
 }

 } // namespace Core::Hardware
@@ -145,16 +145,18 @@ struct KernelCore::Impl {

     void InitializePreemption(KernelCore& kernel) {
         preemption_event = Core::Timing::CreateEvent(
-            "PreemptionCallback", [this, &kernel](u64 userdata, s64 cycles_late) {
+            "PreemptionCallback", [this, &kernel](u64, std::chrono::nanoseconds) {
                 {
                     SchedulerLock lock(kernel);
                     global_scheduler.PreemptThreads();
                 }
-                s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+                const auto time_interval = std::chrono::nanoseconds{
+                    Core::Timing::msToCycles(std::chrono::milliseconds(10))};
                 system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
             });

-        s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+        const auto time_interval =
+            std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
         system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
     }

@@ -34,7 +34,7 @@ ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kern
     std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)};

     session->request_event = Core::Timing::CreateEvent(
-        name, [session](u64 userdata, s64 cycles_late) { session->CompleteSyncRequest(); });
+        name, [session](u64, std::chrono::nanoseconds) { session->CompleteSyncRequest(); });
     session->name = std::move(name);
     session->parent = std::move(parent);

@@ -184,8 +184,8 @@ ResultCode ServerSession::CompleteSyncRequest() {

 ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
                                             Core::Memory::Memory& memory) {
-    ResultCode result = QueueSyncRequest(std::move(thread), memory);
-    const u64 delay = kernel.IsMulticore() ? 0U : 20000U;
+    const ResultCode result = QueueSyncRequest(std::move(thread), memory);
+    const auto delay = std::chrono::nanoseconds{kernel.IsMulticore() ? 0 : 20000};
     Core::System::GetInstance().CoreTiming().ScheduleEvent(delay, request_event, {});
     return result;
 }

@@ -16,7 +16,7 @@ namespace Kernel {

 TimeManager::TimeManager(Core::System& system_) : system{system_} {
     time_manager_event_type = Core::Timing::CreateEvent(
-        "Kernel::TimeManagerCallback", [this](u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
+        "Kernel::TimeManagerCallback", [this](u64 thread_handle, std::chrono::nanoseconds) {
             SchedulerLock lock(system.Kernel());
             Handle proper_handle = static_cast<Handle>(thread_handle);
             if (cancelled_events[proper_handle]) {
@@ -34,7 +34,8 @@ void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64
         ASSERT(timetask);
         ASSERT(timetask->GetStatus() != ThreadStatus::Ready);
         ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
-        system.CoreTiming().ScheduleEvent(nanoseconds, time_manager_event_type, event_handle);
+        system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
+                                          time_manager_event_type, event_handle);
     } else {
         event_handle = InvalidHandle;
     }
@@ -39,9 +39,10 @@ namespace Service::HID {

 // Updating period for each HID device.
 // TODO(ogniK): Find actual polling rate of hid
-constexpr s64 pad_update_ticks = static_cast<s64>(1000000000 / 66);
-[[maybe_unused]] constexpr s64 accelerometer_update_ticks = static_cast<s64>(1000000000 / 100);
-[[maybe_unused]] constexpr s64 gyroscope_update_ticks = static_cast<s64>(1000000000 / 100);
+constexpr auto pad_update_ns = std::chrono::nanoseconds{1000000000 / 66};
+[[maybe_unused]] constexpr auto accelerometer_update_ns =
+    std::chrono::nanoseconds{1000000000 / 100};
+[[maybe_unused]] constexpr auto gyroscope_update_ticks = std::chrono::nanoseconds{1000000000 / 100};
 constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000;

 IAppletResource::IAppletResource(Core::System& system)
@@ -75,14 +76,14 @@ IAppletResource::IAppletResource(Core::System& system)
     GetController<Controller_Stubbed>(HidController::Unknown3).SetCommonHeaderOffset(0x5000);

     // Register update callbacks
-    pad_update_event =
-        Core::Timing::CreateEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 ns_late) {
+    pad_update_event = Core::Timing::CreateEvent(
+        "HID::UpdatePadCallback", [this](u64 userdata, std::chrono::nanoseconds ns_late) {
             UpdateControllers(userdata, ns_late);
         });

     // TODO(shinyquagsire23): Other update callbacks? (accel, gyro?)

-    system.CoreTiming().ScheduleEvent(pad_update_ticks, pad_update_event);
+    system.CoreTiming().ScheduleEvent(pad_update_ns, pad_update_event);

     ReloadInputDevices();
 }
@@ -107,7 +108,7 @@ void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) {
     rb.PushCopyObjects(shared_mem);
 }

-void IAppletResource::UpdateControllers(u64 userdata, s64 ns_late) {
+void IAppletResource::UpdateControllers(u64 userdata, std::chrono::nanoseconds ns_late) {
     auto& core_timing = system.CoreTiming();

     const bool should_reload = Settings::values.is_device_reload_pending.exchange(false);
@@ -118,7 +119,7 @@ void IAppletResource::UpdateControllers(u64 userdata, s64 ns_late) {
         controller->OnUpdate(core_timing, shared_mem->GetPointer(), SHARED_MEMORY_SIZE);
     }

-    core_timing.ScheduleEvent(pad_update_ticks - ns_late, pad_update_event);
+    core_timing.ScheduleEvent(pad_update_ns - ns_late, pad_update_event);
 }

 class IActiveVibrationDeviceList final : public ServiceFramework<IActiveVibrationDeviceList> {
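Note on the polling constants above, a hedged aside rather than code from the commit: the literal 1000000000 / 66 is just one second divided by the polling rate, written out in nanoseconds. The name pad_update_period below is invented purely for the comparison:

    #include <chrono>

    constexpr auto pad_update_period =
        std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::seconds{1}) / 66;

    static_assert(pad_update_period == std::chrono::nanoseconds{1000000000 / 66},
                  "both spellings give the same 15151515 ns period");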
@@ -4,10 +4,9 @@

 #pragma once

-#include "core/hle/service/hid/controllers/controller_base.h"
-#include "core/hle/service/service.h"
+#include <chrono>

-#include "controllers/controller_base.h"
+#include "core/hle/service/hid/controllers/controller_base.h"
 #include "core/hle/service/service.h"

 namespace Core::Timing {
@@ -65,7 +64,7 @@ private:
     }

     void GetSharedMemoryHandle(Kernel::HLERequestContext& ctx);
-    void UpdateControllers(u64 userdata, s64 cycles_late);
+    void UpdateControllers(u64 userdata, std::chrono::nanoseconds ns_late);

     std::shared_ptr<Kernel::SharedMemory> shared_mem;

@@ -28,8 +28,7 @@

 namespace Service::NVFlinger {

-constexpr s64 frame_ticks = static_cast<s64>(1000000000 / 60);
-constexpr s64 frame_ticks_30fps = static_cast<s64>(1000000000 / 30);
+constexpr auto frame_ns = std::chrono::nanoseconds{1000000000 / 60};

 void NVFlinger::VSyncThread(NVFlinger& nv_flinger) {
     nv_flinger.SplitVSync();
@@ -67,20 +66,24 @@ NVFlinger::NVFlinger(Core::System& system) : system(system) {
     guard = std::make_shared<std::mutex>();

     // Schedule the screen composition events
-    composition_event =
-        Core::Timing::CreateEvent("ScreenComposition", [this](u64 userdata, s64 ns_late) {
+    composition_event = Core::Timing::CreateEvent(
+        "ScreenComposition", [this](u64, std::chrono::nanoseconds ns_late) {
             Lock();
             Compose();
-            const auto ticks = GetNextTicks();
-            this->system.CoreTiming().ScheduleEvent(std::max<s64>(0LL, ticks - ns_late),
-                                                    composition_event);
+
+            const auto ticks = std::chrono::nanoseconds{GetNextTicks()};
+            const auto ticks_delta = ticks - ns_late;
+            const auto future_ns = std::max(std::chrono::nanoseconds::zero(), ticks_delta);
+
+            this->system.CoreTiming().ScheduleEvent(future_ns, composition_event);
         });

     if (system.IsMulticore()) {
         is_running = true;
         wait_event = std::make_unique<Common::Event>();
         vsync_thread = std::make_unique<std::thread>(VSyncThread, std::ref(*this));
     } else {
-        system.CoreTiming().ScheduleEvent(frame_ticks, composition_event);
+        system.CoreTiming().ScheduleEvent(frame_ns, composition_event);
     }
 }

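Note on the clamp in the composition callback above: std::chrono::nanoseconds is signed, so "period - ns_late" can go negative when a frame fires very late, and the std::max against zero() prevents scheduling a negative delay. A hedged, self-contained illustration of that behaviour (NextDelay() and the lateness values are invented for the example; the 16666666 ns period matches frame_ns above):

    #include <algorithm>
    #include <cassert>
    #include <chrono>

    std::chrono::nanoseconds NextDelay(std::chrono::nanoseconds period,
                                       std::chrono::nanoseconds ns_late) {
        return std::max(std::chrono::nanoseconds::zero(), period - ns_late);
    }

    int main() {
        using namespace std::chrono_literals;
        assert(NextDelay(16666666ns, 1ms) == 15666666ns); // normal case: shorten the next delay
        assert(NextDelay(16666666ns, 20ms) == 0ns);       // very late: clamp instead of going negative
        return 0;
    }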
@@ -20,7 +20,7 @@

 namespace Core::Memory {

-constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(1000000000 / 12);
+constexpr auto CHEAT_ENGINE_NS = std::chrono::nanoseconds{1000000000 / 12};
 constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF;

 StandardVmCallbacks::StandardVmCallbacks(Core::System& system, const CheatProcessMetadata& metadata)
@@ -188,10 +188,12 @@ CheatEngine::~CheatEngine() {
 }

 void CheatEngine::Initialize() {
-    event = Core::Timing::CreateEvent(
-        "CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id),
-        [this](u64 userdata, s64 ns_late) { FrameCallback(userdata, ns_late); });
-    core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS, event);
+    event = Core::Timing::CreateEvent("CheatEngine::FrameCallback::" +
+                                          Common::HexToString(metadata.main_nso_build_id),
+                                      [this](u64 userdata, std::chrono::nanoseconds ns_late) {
+                                          FrameCallback(userdata, ns_late);
+                                      });
+    core_timing.ScheduleEvent(CHEAT_ENGINE_NS, event);

     metadata.process_id = system.CurrentProcess()->GetProcessID();
     metadata.title_id = system.CurrentProcess()->GetTitleID();
@@ -217,7 +219,7 @@ void CheatEngine::Reload(std::vector<CheatEntry> cheats) {

 MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70));

-void CheatEngine::FrameCallback(u64 userdata, s64 ns_late) {
+void CheatEngine::FrameCallback(u64, std::chrono::nanoseconds ns_late) {
     if (is_pending_reload.exchange(false)) {
         vm.LoadProgram(cheats);
     }
@@ -230,7 +232,7 @@ void CheatEngine::FrameCallback(u64 userdata, s64 ns_late) {

     vm.Execute(metadata);

-    core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - ns_late, event);
+    core_timing.ScheduleEvent(CHEAT_ENGINE_NS - ns_late, event);
 }

 } // namespace Core::Memory
@@ -5,6 +5,7 @@
 #pragma once

 #include <atomic>
+#include <chrono>
 #include <memory>
 #include <vector>
 #include "common/common_types.h"
@@ -71,7 +72,7 @@ public:
     void Reload(std::vector<CheatEntry> cheats);

 private:
-    void FrameCallback(u64 userdata, s64 cycles_late);
+    void FrameCallback(u64 userdata, std::chrono::nanoseconds ns_late);

     DmntCheatVm vm;
     CheatProcessMetadata metadata;
@@ -14,7 +14,7 @@
 namespace Tools {
 namespace {

-constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(1000000000 / 60);
+constexpr auto memory_freezer_ns = std::chrono::nanoseconds{1000000000 / 60};

 u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) {
     switch (width) {
@@ -55,10 +55,11 @@ void MemoryWriteWidth(Core::Memory::Memory& memory, u32 width, VAddr addr, u64 v

 Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_)
     : core_timing{core_timing_}, memory{memory_} {
-    event = Core::Timing::CreateEvent(
-        "MemoryFreezer::FrameCallback",
-        [this](u64 userdata, s64 ns_late) { FrameCallback(userdata, ns_late); });
-    core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event);
+    event = Core::Timing::CreateEvent("MemoryFreezer::FrameCallback",
+                                      [this](u64 userdata, std::chrono::nanoseconds ns_late) {
+                                          FrameCallback(userdata, ns_late);
+                                      });
+    core_timing.ScheduleEvent(memory_freezer_ns, event);
 }

 Freezer::~Freezer() {
@@ -68,7 +69,7 @@ Freezer::~Freezer() {
 void Freezer::SetActive(bool active) {
     if (!this->active.exchange(active)) {
         FillEntryReads();
-        core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event);
+        core_timing.ScheduleEvent(memory_freezer_ns, event);
         LOG_DEBUG(Common_Memory, "Memory freezer activated!");
     } else {
         LOG_DEBUG(Common_Memory, "Memory freezer deactivated!");
@@ -158,7 +159,7 @@ std::vector<Freezer::Entry> Freezer::GetEntries() const {
     return entries;
 }

-void Freezer::FrameCallback(u64 userdata, s64 ns_late) {
+void Freezer::FrameCallback(u64, std::chrono::nanoseconds ns_late) {
     if (!IsActive()) {
         LOG_DEBUG(Common_Memory, "Memory freezer has been deactivated, ending callback events.");
         return;
@@ -173,7 +174,7 @@ void Freezer::FrameCallback(u64 userdata, s64 ns_late) {
         MemoryWriteWidth(memory, entry.width, entry.address, entry.value);
     }

-    core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS - ns_late, event);
+    core_timing.ScheduleEvent(memory_freezer_ns - ns_late, event);
 }

 void Freezer::FillEntryReads() {
@@ -5,6 +5,7 @@
 #pragma once

 #include <atomic>
+#include <chrono>
 #include <memory>
 #include <mutex>
 #include <optional>
@@ -72,7 +73,7 @@ public:
     std::vector<Entry> GetEntries() const;

 private:
-    void FrameCallback(u64 userdata, s64 cycles_late);
+    void FrameCallback(u64 userdata, std::chrono::nanoseconds ns_late);
     void FillEntryReads();

     std::atomic_bool active{false};
@@ -6,6 +6,7 @@

 #include <array>
 #include <bitset>
+#include <chrono>
 #include <cstdlib>
 #include <memory>
 #include <string>
@@ -17,7 +18,6 @@
 namespace {
 // Numbers are chosen randomly to make sure the correct one is given.
 constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}};
-constexpr int MAX_SLICE_LENGTH = 10000; // Copied from CoreTiming internals
 constexpr std::array<u64, 5> calls_order{{2, 0, 1, 4, 3}};
 std::array<s64, 5> delays{};

@@ -25,12 +25,12 @@ std::bitset<CB_IDS.size()> callbacks_ran_flags;
 u64 expected_callback = 0;

 template <unsigned int IDX>
-void HostCallbackTemplate(u64 userdata, s64 nanoseconds_late) {
+void HostCallbackTemplate(u64 userdata, std::chrono::nanoseconds ns_late) {
     static_assert(IDX < CB_IDS.size(), "IDX out of range");
     callbacks_ran_flags.set(IDX);
     REQUIRE(CB_IDS[IDX] == userdata);
     REQUIRE(CB_IDS[IDX] == CB_IDS[calls_order[expected_callback]]);
-    delays[IDX] = nanoseconds_late;
+    delays[IDX] = ns_late.count();
     ++expected_callback;
 }

@@ -77,10 +77,12 @@ TEST_CASE("CoreTiming[BasicOrder]", "[core]") {

     core_timing.SyncPause(true);

-    u64 one_micro = 1000U;
+    const u64 one_micro = 1000U;
     for (std::size_t i = 0; i < events.size(); i++) {
-        u64 order = calls_order[i];
-        core_timing.ScheduleEvent(i * one_micro + 100U, events[order], CB_IDS[order]);
+        const u64 order = calls_order[i];
+        const auto future_ns = std::chrono::nanoseconds{static_cast<s64>(i * one_micro + 100)};
+
+        core_timing.ScheduleEvent(future_ns, events[order], CB_IDS[order]);
     }
     /// test pause
     REQUIRE(callbacks_ran_flags.none());
@@ -116,13 +118,16 @@ TEST_CASE("CoreTiming[BasicOrderNoPausing]", "[core]") {

     expected_callback = 0;

-    u64 start = core_timing.GetGlobalTimeNs().count();
-    u64 one_micro = 1000U;
+    const u64 start = core_timing.GetGlobalTimeNs().count();
+    const u64 one_micro = 1000U;
+
     for (std::size_t i = 0; i < events.size(); i++) {
-        u64 order = calls_order[i];
-        core_timing.ScheduleEvent(i * one_micro + 100U, events[order], CB_IDS[order]);
+        const u64 order = calls_order[i];
+        const auto future_ns = std::chrono::nanoseconds{static_cast<s64>(i * one_micro + 100)};
+        core_timing.ScheduleEvent(future_ns, events[order], CB_IDS[order]);
     }
-    u64 end = core_timing.GetGlobalTimeNs().count();
+
+    const u64 end = core_timing.GetGlobalTimeNs().count();
     const double scheduling_time = static_cast<double>(end - start);
     const double timer_time = static_cast<double>(TestTimerSpeed(core_timing));
