kernel: convert KScopedLock, KScopedResourceReservation, KSessionRequest, KSharedMemory, KSpinLock
parent 7322c99e5f
commit d1b53c8d82
k_scoped_lock.h

@@ -18,15 +18,15 @@ std::is_reference_v<T>&& requires(T& t) {

 template <typename T>
 requires KLockable<T>
-class [[nodiscard]] KScopedLock {
+class KScopedLock {
 public:
-    explicit KScopedLock(T* l) : lock_ptr(l) {
-        this->lock_ptr->Lock();
+    explicit KScopedLock(T* l) : KScopedLock(*l) {}
+    explicit KScopedLock(T& l) : m_lock(l) {
+        m_lock.Lock();
     }
-    explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) {}

     ~KScopedLock() {
-        this->lock_ptr->Unlock();
+        m_lock.Unlock();
     }

     KScopedLock(const KScopedLock&) = delete;
@@ -36,7 +36,7 @@ public:
     KScopedLock& operator=(KScopedLock&&) = delete;

 private:
-    T* lock_ptr;
+    T& m_lock;
 };

 } // namespace Kernel
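The converted guard stores a T& instead of a T*, so the held lock can never be null and the pointer overload simply delegates to the reference overload. The following standalone sketch mirrors that RAII shape; DemoLock and ScopedLock are illustrative names for this example, not yuzu types.

// Standalone sketch of the RAII pattern above; DemoLock and ScopedLock are
// illustrative names, not yuzu types.
#include <cstdio>
#include <mutex>

class DemoLock {
public:
    void Lock() { m_mutex.lock(); }
    void Unlock() { m_mutex.unlock(); }

private:
    std::mutex m_mutex;
};

template <typename T>
class ScopedLock {
public:
    // The pointer overload delegates to the reference overload, as in the new KScopedLock.
    explicit ScopedLock(T* l) : ScopedLock(*l) {}
    explicit ScopedLock(T& l) : m_lock(l) {
        m_lock.Lock();
    }
    ~ScopedLock() {
        m_lock.Unlock();
    }

    ScopedLock(const ScopedLock&) = delete;
    ScopedLock& operator=(const ScopedLock&) = delete;

private:
    T& m_lock; // a reference member cannot be null and cannot be reseated
};

int main() {
    DemoLock lock;
    {
        ScopedLock guard(lock); // Lock() runs here
        std::puts("inside the critical section");
    } // Unlock() runs here
    return 0;
}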
k_scoped_resource_reservation.h

@@ -12,20 +12,20 @@ namespace Kernel {
 class KScopedResourceReservation {
 public:
     explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v, s64 timeout)
-        : resource_limit(std::move(l)), value(v), resource(r) {
-        if (resource_limit && value) {
-            success = resource_limit->Reserve(resource, value, timeout);
+        : m_limit(l), m_value(v), m_resource(r) {
+        if (m_limit && m_value) {
+            m_succeeded = m_limit->Reserve(m_resource, m_value, timeout);
         } else {
-            success = true;
+            m_succeeded = true;
         }
     }

     explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v = 1)
-        : resource_limit(std::move(l)), value(v), resource(r) {
-        if (resource_limit && value) {
-            success = resource_limit->Reserve(resource, value);
+        : m_limit(l), m_value(v), m_resource(r) {
+        if (m_limit && m_value) {
+            m_succeeded = m_limit->Reserve(m_resource, m_value);
         } else {
-            success = true;
+            m_succeeded = true;
         }
     }

@@ -36,26 +36,26 @@ public:
         : KScopedResourceReservation(p->GetResourceLimit(), r, v) {}

     ~KScopedResourceReservation() noexcept {
-        if (resource_limit && value && success) {
-            // resource was not committed, release the reservation.
-            resource_limit->Release(resource, value);
+        if (m_limit && m_value && m_succeeded) {
+            // Resource was not committed, release the reservation.
+            m_limit->Release(m_resource, m_value);
         }
     }

     /// Commit the resource reservation, destruction of this object does not release the resource
     void Commit() {
-        resource_limit = nullptr;
+        m_limit = nullptr;
     }

-    [[nodiscard]] bool Succeeded() const {
-        return success;
+    bool Succeeded() const {
+        return m_succeeded;
     }

 private:
-    KResourceLimit* resource_limit{};
-    s64 value;
-    LimitableResource resource;
-    bool success;
+    KResourceLimit* m_limit{};
+    s64 m_value{};
+    LimitableResource m_resource{};
+    bool m_succeeded{};
 };

 } // namespace Kernel
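KScopedResourceReservation implements a reserve-then-commit idiom: the destructor releases the reservation unless Commit() was called, so an early error return automatically rolls the reservation back. A minimal standalone sketch of the same idiom, with DemoLimit standing in for KResourceLimit, looks like this:

// Standalone sketch of the reserve/commit idiom; DemoLimit stands in for
// KResourceLimit and tracks a single counter.
#include <cstdio>

class DemoLimit {
public:
    bool Reserve(long v) {
        if (m_used + v > m_max) {
            return false; // over the limit, reservation fails
        }
        m_used += v;
        return true;
    }
    void Release(long v) { m_used -= v; }

private:
    long m_used = 0;
    long m_max = 100;
};

class ScopedReservation {
public:
    explicit ScopedReservation(DemoLimit* l, long v) : m_limit(l), m_value(v) {
        m_succeeded = (m_limit && m_value) ? m_limit->Reserve(m_value) : true;
    }
    ~ScopedReservation() {
        // Roll back only if the reservation succeeded and was never committed.
        if (m_limit && m_value && m_succeeded) {
            m_limit->Release(m_value);
        }
    }
    void Commit() { m_limit = nullptr; } // forget the limit so the destructor keeps the reservation
    bool Succeeded() const { return m_succeeded; }

private:
    DemoLimit* m_limit{};
    long m_value{};
    bool m_succeeded{};
};

int main() {
    DemoLimit limit;
    ScopedReservation reservation(&limit, 64);
    if (!reservation.Succeeded()) {
        std::puts("limit reached");
        return 1;
    }
    // ... create the actual object that consumes the resource here ...
    reservation.Commit(); // the 64 units stay reserved past this scope
    return 0;
}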
k_session_request.cpp

@@ -14,46 +14,46 @@ Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, size_t size,
     // Get the mapping.
     Mapping* mapping;
     if (index < NumStaticMappings) {
-        mapping = &m_static_mappings[index];
+        mapping = std::addressof(m_static_mappings[index]);
     } else {
         // Allocate a page for the extra mappings.
         if (m_mappings == nullptr) {
-            KPageBuffer* page_buffer = KPageBuffer::Allocate(kernel);
+            KPageBuffer* page_buffer = KPageBuffer::Allocate(m_kernel);
             R_UNLESS(page_buffer != nullptr, ResultOutOfMemory);

             m_mappings = reinterpret_cast<Mapping*>(page_buffer);
         }

-        mapping = &m_mappings[index - NumStaticMappings];
+        mapping = std::addressof(m_mappings[index - NumStaticMappings]);
     }

     // Set the mapping.
     mapping->Set(client, server, size, state);

-    return ResultSuccess;
+    R_SUCCEED();
 }

 Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size,
                                                   KMemoryState state) {
     ASSERT(m_num_recv == 0);
     ASSERT(m_num_exch == 0);
-    return this->PushMap(client, server, size, state, m_num_send++);
+    R_RETURN(this->PushMap(client, server, size, state, m_num_send++));
 }

 Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size,
                                                      KMemoryState state) {
     ASSERT(m_num_exch == 0);
-    return this->PushMap(client, server, size, state, m_num_send + m_num_recv++);
+    R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv++));
 }

 Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size,
                                                       KMemoryState state) {
-    return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++);
+    R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++));
 }

 void KSessionRequest::SessionMappings::Finalize() {
     if (m_mappings) {
-        KPageBuffer::Free(kernel, reinterpret_cast<KPageBuffer*>(m_mappings));
+        KPageBuffer::Free(m_kernel, reinterpret_cast<KPageBuffer*>(m_mappings));
         m_mappings = nullptr;
     }
 }
k_session_request.h

@@ -47,14 +47,14 @@ public:
        }

    private:
-       VAddr m_client_address;
-       VAddr m_server_address;
-       size_t m_size;
-       KMemoryState m_state;
+       VAddr m_client_address{};
+       VAddr m_server_address{};
+       size_t m_size{};
+       KMemoryState m_state{};
    };

 public:
-    explicit SessionMappings(KernelCore& kernel_) : kernel(kernel_) {}
+    explicit SessionMappings(KernelCore& kernel) : m_kernel(kernel) {}

     void Initialize() {}
     void Finalize();

@@ -149,8 +149,8 @@ public:
     }

 private:
-    KernelCore& kernel;
-    std::array<Mapping, NumStaticMappings> m_static_mappings;
+    KernelCore& m_kernel;
+    std::array<Mapping, NumStaticMappings> m_static_mappings{};
     Mapping* m_mappings{};
     u8 m_num_send{};
     u8 m_num_recv{};
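PushMap above uses a small-buffer strategy: the first NumStaticMappings entries live in an inline array, and only larger requests lazily allocate a KPageBuffer page for the overflow mappings. A simplified, self-contained sketch of that strategy, with a std::vector in place of the kernel page buffer, is:

// Simplified sketch of the static-plus-overflow mapping storage; MappingStore
// and the vector-backed overflow are illustrative, not the kernel implementation.
#include <array>
#include <cstddef>
#include <cstdio>
#include <vector>

struct Mapping {
    unsigned long client{};
    unsigned long server{};
    std::size_t size{};
};

class MappingStore {
public:
    static constexpr std::size_t NumStaticMappings = 8;

    Mapping* At(std::size_t index) {
        if (index < NumStaticMappings) {
            // Small requests stay in the inline array and never allocate.
            return &m_static_mappings[index];
        }
        // Larger requests lazily allocate one overflow buffer on first use.
        if (m_overflow.empty()) {
            m_overflow.resize(64);
        }
        return &m_overflow[index - NumStaticMappings];
    }

private:
    std::array<Mapping, NumStaticMappings> m_static_mappings{};
    std::vector<Mapping> m_overflow;
};

int main() {
    MappingStore store;
    store.At(3)->size = 0x1000;  // lands in the static array
    store.At(10)->size = 0x2000; // triggers the overflow allocation
    std::printf("0x%zx 0x%zx\n", store.At(3)->size, store.At(10)->size);
    return 0;
}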
k_shared_memory.cpp

@@ -15,15 +15,15 @@ namespace Kernel {
 KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
 KSharedMemory::~KSharedMemory() = default;

-Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
-                                 Svc::MemoryPermission owner_permission_,
-                                 Svc::MemoryPermission user_permission_, std::size_t size_) {
+Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* owner_process,
+                                 Svc::MemoryPermission owner_permission,
+                                 Svc::MemoryPermission user_permission, std::size_t size) {
     // Set members.
-    owner_process = owner_process_;
-    device_memory = &device_memory_;
-    owner_permission = owner_permission_;
-    user_permission = user_permission_;
-    size = Common::AlignUp(size_, PageSize);
+    m_owner_process = owner_process;
+    m_device_memory = std::addressof(device_memory);
+    m_owner_permission = owner_permission;
+    m_user_permission = user_permission;
+    m_size = Common::AlignUp(size, PageSize);

     const size_t num_pages = Common::DivideUp(size, PageSize);

@@ -32,7 +32,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,

     // Reserve memory for ourselves.
     KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemoryMax,
-                                                  size_);
+                                                  size);
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

     // Allocate the memory.
@@ -40,26 +40,26 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
     //! HACK: Open continuous mapping from sysmodule pool.
     auto option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Secure,
                                                KMemoryManager::Direction::FromBack);
-    physical_address = kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option);
-    R_UNLESS(physical_address != 0, ResultOutOfMemory);
+    m_physical_address = kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option);
+    R_UNLESS(m_physical_address != 0, ResultOutOfMemory);

     //! Insert the result into our page group.
-    page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
-    page_group->AddBlock(physical_address, num_pages);
+    m_page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
+    m_page_group->AddBlock(m_physical_address, num_pages);

     // Commit our reservation.
     memory_reservation.Commit();

     // Set our resource limit.
-    resource_limit = reslimit;
-    resource_limit->Open();
+    m_resource_limit = reslimit;
+    m_resource_limit->Open();

     // Mark initialized.
-    is_initialized = true;
+    m_is_initialized = true;

     // Clear all pages in the memory.
-    for (const auto& block : *page_group) {
-        std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
+    for (const auto& block : *m_page_group) {
+        std::memset(m_device_memory->GetPointer<void>(block.GetAddress()), 0, block.GetSize());
     }

     R_SUCCEED();
@@ -67,12 +67,12 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,

 void KSharedMemory::Finalize() {
     // Close and finalize the page group.
-    page_group->Close();
-    page_group->Finalize();
+    m_page_group->Close();
+    m_page_group->Finalize();

     // Release the memory reservation.
-    resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);
-    resource_limit->Close();
+    m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, m_size);
+    m_resource_limit->Close();

     // Perform inherited finalization.
     KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize();
@@ -81,26 +81,27 @@ void KSharedMemory::Finalize() {
 Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
                           Svc::MemoryPermission map_perm) {
     // Validate the size.
-    R_UNLESS(size == map_size, ResultInvalidSize);
+    R_UNLESS(m_size == map_size, ResultInvalidSize);

     // Validate the permission.
     const Svc::MemoryPermission test_perm =
-        &target_process == owner_process ? owner_permission : user_permission;
+        std::addressof(target_process) == m_owner_process ? m_owner_permission : m_user_permission;
     if (test_perm == Svc::MemoryPermission::DontCare) {
         ASSERT(map_perm == Svc::MemoryPermission::Read || map_perm == Svc::MemoryPermission::Write);
     } else {
         R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission);
     }

-    R_RETURN(target_process.PageTable().MapPageGroup(address, *page_group, KMemoryState::Shared,
+    R_RETURN(target_process.PageTable().MapPageGroup(address, *m_page_group, KMemoryState::Shared,
                                                      ConvertToKMemoryPermission(map_perm)));
 }

 Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
     // Validate the size.
-    R_UNLESS(size == unmap_size, ResultInvalidSize);
+    R_UNLESS(m_size == unmap_size, ResultInvalidSize);

-    R_RETURN(target_process.PageTable().UnmapPageGroup(address, *page_group, KMemoryState::Shared));
+    R_RETURN(
+        target_process.PageTable().UnmapPageGroup(address, *m_page_group, KMemoryState::Shared));
 }

 } // namespace Kernel
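Initialize rounds the requested size up to a whole number of pages and derives the page count from it. The helpers below reproduce the usual AlignUp/DivideUp arithmetic, assuming the kernel's 4 KiB PageSize; they mirror the Common:: helpers but are stand-ins for this example.

// Sketch of the size arithmetic performed by Initialize.
#include <cstddef>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000;

constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
    return (value + align - 1) / align * align; // round up to a multiple of align
}

constexpr std::size_t DivideUp(std::size_t value, std::size_t align) {
    return (value + align - 1) / align; // ceiling division
}

int main() {
    const std::size_t requested = 0x1234;                       // caller-supplied size
    const std::size_t aligned = AlignUp(requested, PageSize);   // 0x2000
    const std::size_t num_pages = DivideUp(aligned, PageSize);  // 2
    std::printf("aligned=0x%zx pages=%zu\n", aligned, num_pages);
    return 0;
}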
k_shared_memory.h

@@ -54,7 +54,7 @@ public:
      * @return A pointer to the shared memory block from the specified offset
      */
     u8* GetPointer(std::size_t offset = 0) {
-        return device_memory->GetPointer<u8>(physical_address + offset);
+        return m_device_memory->GetPointer<u8>(m_physical_address + offset);
     }

     /**
@@ -63,26 +63,26 @@ public:
      * @return A pointer to the shared memory block from the specified offset
      */
     const u8* GetPointer(std::size_t offset = 0) const {
-        return device_memory->GetPointer<u8>(physical_address + offset);
+        return m_device_memory->GetPointer<u8>(m_physical_address + offset);
     }

     void Finalize() override;

     bool IsInitialized() const override {
-        return is_initialized;
+        return m_is_initialized;
     }
-    static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+    static void PostDestroy(uintptr_t arg) {}

 private:
-    Core::DeviceMemory* device_memory{};
-    KProcess* owner_process{};
-    std::optional<KPageGroup> page_group{};
-    Svc::MemoryPermission owner_permission{};
-    Svc::MemoryPermission user_permission{};
-    PAddr physical_address{};
-    std::size_t size{};
-    KResourceLimit* resource_limit{};
-    bool is_initialized{};
+    Core::DeviceMemory* m_device_memory{};
+    KProcess* m_owner_process{};
+    std::optional<KPageGroup> m_page_group{};
+    Svc::MemoryPermission m_owner_permission{};
+    Svc::MemoryPermission m_user_permission{};
+    PAddr m_physical_address{};
+    std::size_t m_size{};
+    KResourceLimit* m_resource_limit{};
+    bool m_is_initialized{};
 };

 } // namespace Kernel
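The owner/user permission pair declared here drives the check in KSharedMemory::Map shown earlier: the owning process is validated against m_owner_permission and every other process against m_user_permission, with DontCare leaving the concrete choice to the caller. A standalone sketch of that decision, using simplified stand-ins for Svc::MemoryPermission and KProcess, is:

// Sketch of the Map permission check; Perm and Process are illustrative stand-ins.
#include <cassert>
#include <cstdio>

enum class Perm { None, Read, Write, DontCare };

struct Process {
    int id;
};

bool CanMap(const Process& target, const Process* owner, Perm owner_perm, Perm user_perm,
            Perm requested) {
    // The owner is checked against the owner permission, everyone else against the user permission.
    const Perm test_perm = (&target == owner) ? owner_perm : user_perm;
    if (test_perm == Perm::DontCare) {
        // DontCare leaves the choice to the caller, but it must still be a concrete permission.
        return requested == Perm::Read || requested == Perm::Write;
    }
    return requested == test_perm;
}

int main() {
    Process owner{1};
    Process other{2};
    assert(CanMap(owner, &owner, Perm::Write, Perm::Read, Perm::Write));  // owner may write
    assert(!CanMap(other, &owner, Perm::Write, Perm::Read, Perm::Write)); // others may not
    assert(CanMap(other, &owner, Perm::Write, Perm::Read, Perm::Read));   // others may read
    std::puts("permission checks behave as expected");
    return 0;
}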
k_shared_memory_info.h

@@ -18,25 +18,28 @@ public:
     explicit KSharedMemoryInfo(KernelCore&) {}
     KSharedMemoryInfo() = default;

-    constexpr void Initialize(KSharedMemory* shmem) {
-        shared_memory = shmem;
+    constexpr void Initialize(KSharedMemory* m) {
+        m_shared_memory = m;
+        m_reference_count = 0;
     }

     constexpr KSharedMemory* GetSharedMemory() const {
-        return shared_memory;
+        return m_shared_memory;
     }

     constexpr void Open() {
-        ++reference_count;
+        ++m_reference_count;
+        ASSERT(m_reference_count > 0);
     }

     constexpr bool Close() {
-        return (--reference_count) == 0;
+        ASSERT(m_reference_count > 0);
+        return (--m_reference_count) == 0;
     }

 private:
-    KSharedMemory* shared_memory{};
-    size_t reference_count{};
+    KSharedMemory* m_shared_memory{};
+    size_t m_reference_count{};
 };

 } // namespace Kernel
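KSharedMemoryInfo keeps a plain reference count with the usual contract: Open() increments, Close() decrements and reports when the last reference went away so the caller can free the entry. A minimal sketch of that contract, with RefTracker as an illustrative stand-in:

// Minimal sketch of the Open/Close reference-count contract tracked above.
#include <cassert>
#include <cstddef>

class RefTracker {
public:
    void Open() {
        ++m_count;
        assert(m_count > 0);
    }
    bool Close() {
        assert(m_count > 0);
        return (--m_count) == 0; // true when the last reference is gone
    }

private:
    std::size_t m_count{};
};

int main() {
    RefTracker info;
    info.Open();
    info.Open();
    assert(!info.Close()); // one reference still outstanding
    assert(info.Close());  // last reference released; the caller would free the entry
    return 0;
}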
k_spin_lock.cpp

@@ -6,15 +6,15 @@
 namespace Kernel {

 void KSpinLock::Lock() {
-    lck.lock();
+    m_lock.lock();
 }

 void KSpinLock::Unlock() {
-    lck.unlock();
+    m_lock.unlock();
 }

 bool KSpinLock::TryLock() {
-    return lck.try_lock();
+    return m_lock.try_lock();
 }

 } // namespace Kernel
k_spin_lock.h

@@ -5,26 +5,24 @@

 #include <mutex>

+#include "common/common_funcs.h"
 #include "core/hle/kernel/k_scoped_lock.h"

 namespace Kernel {

 class KSpinLock {
 public:
-    KSpinLock() = default;
+    explicit KSpinLock() = default;

-    KSpinLock(const KSpinLock&) = delete;
-    KSpinLock& operator=(const KSpinLock&) = delete;
-
-    KSpinLock(KSpinLock&&) = delete;
-    KSpinLock& operator=(KSpinLock&&) = delete;
+    YUZU_NON_COPYABLE(KSpinLock);
+    YUZU_NON_MOVEABLE(KSpinLock);

     void Lock();
     void Unlock();
-    [[nodiscard]] bool TryLock();
+    bool TryLock();

 private:
-    std::mutex lck;
+    std::mutex m_lock;
 };

 // TODO(bunnei): Alias for now, in case we want to implement these accurately in the future.
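YUZU_NON_COPYABLE and YUZU_NON_MOVEABLE come from common/common_funcs.h, which the hunk above starts including; they presumably expand to deleted copy and move special members, so the two macro lines replace the four explicit "= delete" declarations. A sketch with equivalent stand-in macros (DEMO_* and DemoSpinLock are illustrative, not the yuzu definitions):

// Stand-in macros for what YUZU_NON_COPYABLE / YUZU_NON_MOVEABLE presumably expand to.
#include <mutex>

#define DEMO_NON_COPYABLE(cls)                                                                     \
    cls(const cls&) = delete;                                                                      \
    cls& operator=(const cls&) = delete

#define DEMO_NON_MOVEABLE(cls)                                                                     \
    cls(cls&&) = delete;                                                                           \
    cls& operator=(cls&&) = delete

class DemoSpinLock {
public:
    explicit DemoSpinLock() = default;

    DEMO_NON_COPYABLE(DemoSpinLock);
    DEMO_NON_MOVEABLE(DemoSpinLock);

    void Lock() { m_lock.lock(); }
    void Unlock() { m_lock.unlock(); }
    bool TryLock() { return m_lock.try_lock(); }

private:
    std::mutex m_lock;
};

int main() {
    DemoSpinLock lock;
    lock.Lock();
    lock.Unlock();
    // DemoSpinLock copy{lock};             // would not compile: copy constructor deleted
    // DemoSpinLock moved{std::move(lock)}; // would not compile: move constructor deleted
    return 0;
}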