Merge pull request #6347 from bunnei/ipc-improvements-next-2
Various improvements to IPC and session management (Part 2)
This commit is contained in: commit 3ead4a3494
@@ -43,6 +43,8 @@
 * The maximum height of a red-black tree is 2lg (n+1).
 */

#include "common/assert.h"

namespace Common {
template <typename T>
class RBHead {

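For context on the comment above (this is the standard textbook bound, not something introduced by this commit): every root-to-leaf path in a red-black tree carries the same number of black nodes, and a red node never has a red child, so the black-height is at least half the total height h. A subtree of black-height bh contains at least 2^bh - 1 internal nodes, giving

    n >= 2^(h/2) - 1,   hence   h <= 2 lg(n + 1).
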
@@ -325,6 +327,10 @@ void RB_REMOVE_COLOR(RBHead<Node>* head, Node* parent, Node* elm) {
while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head->Root() && parent != nullptr) {
if (RB_LEFT(parent) == elm) {
tmp = RB_RIGHT(parent);
if (!tmp) {
ASSERT_MSG(false, "tmp is invalid!");
break;
}
if (RB_IS_RED(tmp)) {
RB_SET_BLACKRED(tmp, parent);
RB_ROTATE_LEFT(head, parent, tmp);

@@ -366,6 +372,11 @@ void RB_REMOVE_COLOR(RBHead<Node>* head, Node* parent, Node* elm) {
tmp = RB_LEFT(parent);
}

if (!tmp) {
ASSERT_MSG(false, "tmp is invalid!");
break;
}

if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
(RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
RB_SET_COLOR(tmp, EntryColor::Red);

@@ -80,16 +80,12 @@ public:

memset(cmdbuf, 0, sizeof(u32) * IPC::COMMAND_BUFFER_LENGTH);

ctx.ClearIncomingObjects();

IPC::CommandHeader header{};

// The entire size of the raw data section in u32 units, including the 16 bytes of mandatory
// padding.
u32 raw_data_size = ctx.IsTipc()
? normal_params_size - 1
: sizeof(IPC::DataPayloadHeader) / 4 + 4 + normal_params_size;
u32 raw_data_size = ctx.write_size =
ctx.IsTipc() ? normal_params_size - 1 : normal_params_size;
u32 num_handles_to_move{};
u32 num_domain_objects{};
const bool always_move_handles{

@@ -101,16 +97,20 @@ public:
}

if (ctx.Session()->IsDomain()) {
raw_data_size += static_cast<u32>(sizeof(DomainMessageHeader) / 4 + num_domain_objects);
raw_data_size +=
static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects);
ctx.write_size += num_domain_objects;
}

if (ctx.IsTipc()) {
header.type.Assign(ctx.GetCommandType());
} else {
raw_data_size += static_cast<u32>(sizeof(IPC::DataPayloadHeader) / sizeof(u32) + 4 +
normal_params_size);
}

ctx.data_size = static_cast<u32>(raw_data_size);
header.data_size.Assign(static_cast<u32>(raw_data_size));
if (num_handles_to_copy != 0 || num_handles_to_move != 0) {
header.data_size.Assign(raw_data_size);
if (num_handles_to_copy || num_handles_to_move) {
header.enable_handle_descriptor.Assign(1);
}
PushRaw(header);

@@ -143,7 +143,8 @@ public:
data_payload_index = index;

ctx.data_payload_offset = index;
ctx.domain_offset = index + raw_data_size / 4;
ctx.write_size += index;
ctx.domain_offset = static_cast<u32>(index + raw_data_size / sizeof(u32));
}

template <class T>

@@ -151,8 +152,8 @@ public:
if (context->Session()->IsDomain()) {
context->AddDomainObject(std::move(iface));
} else {
// kernel.CurrentProcess()->GetResourceLimit()->Reserve(
// Kernel::LimitableResource::Sessions, 1);
kernel.CurrentProcess()->GetResourceLimit()->Reserve(
Kernel::LimitableResource::Sessions, 1);

auto* session = Kernel::KSession::Create(kernel);
session->Initialize(nullptr, iface->GetServiceName());

@@ -167,24 +168,6 @@ public:
PushIpcInterface<T>(std::make_shared<T>(std::forward<Args>(args)...));
}

void ValidateHeader() {
const std::size_t num_domain_objects = context->NumDomainObjects();
const std::size_t num_move_objects = context->NumMoveObjects();
ASSERT_MSG(!num_domain_objects || !num_move_objects,
"cannot move normal handles and domain objects");
ASSERT_MSG((index - data_payload_index) == normal_params_size,
"normal_params_size value is incorrect");
ASSERT_MSG((num_domain_objects + num_move_objects) == num_objects_to_move,
"num_objects_to_move value is incorrect");
ASSERT_MSG(context->NumCopyObjects() == num_handles_to_copy,
"num_handles_to_copy value is incorrect");
}

// Validate on destruction, as there shouldn't be any case where we don't want it
~ResponseBuilder() {
ValidateHeader();
}

void PushImpl(s8 value);
void PushImpl(s16 value);
void PushImpl(s32 value);

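A note on the resource accounting enabled in the PushIpcInterface hunk above: the session reserved against the current process is paid back when the session object is destroyed (see the KSession::PostDestroy hunk further down). A simplified sketch of the two halves side by side; the function names here are made up purely to show the pairing:

    // Sketch only - in the real change these live in the IPC helpers and in KSession::PostDestroy.
    void OnStandaloneInterfaceCreated(Kernel::KernelCore& kernel) {
        // Charge one session against the current process' limit.
        kernel.CurrentProcess()->GetResourceLimit()->Reserve(Kernel::LimitableResource::Sessions, 1);
    }

    void OnSessionDestroyed(Kernel::KProcess* owner) {
        // Give the slot back once the session is torn down.
        owner->GetResourceLimit()->Release(Kernel::LimitableResource::Sessions, 1);
    }
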
@@ -404,7 +387,7 @@ public:
std::shared_ptr<T> PopIpcInterface() {
ASSERT(context->Session()->IsDomain());
ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
return context->GetDomainRequestHandler<T>(Pop<u32>() - 1);
return context->GetDomainHandler<T>(Pop<u32>() - 1);
}
};

@@ -35,11 +35,11 @@ SessionRequestHandler::SessionRequestHandler() = default;
SessionRequestHandler::~SessionRequestHandler() = default;

void SessionRequestHandler::ClientConnected(KServerSession* session) {
session->SetHleHandler(shared_from_this());
session->SetSessionHandler(shared_from_this());
}

void SessionRequestHandler::ClientDisconnected(KServerSession* session) {
session->SetHleHandler(nullptr);
session->SetSessionHandler(nullptr);
}

HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,

@@ -69,14 +69,10 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
if (incoming) {
// Populate the object lists with the data in the IPC request.
for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_copy; ++handle) {
const u32 copy_handle{rp.Pop<Handle>()};
copy_handles.push_back(copy_handle);
copy_objects.push_back(handle_table.GetObject(copy_handle).GetPointerUnsafe());
incoming_copy_handles.push_back(rp.Pop<Handle>());
}
for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_move; ++handle) {
const u32 move_handle{rp.Pop<Handle>()};
move_handles.push_back(move_handle);
move_objects.push_back(handle_table.GetObject(move_handle).GetPointerUnsafe());
incoming_move_handles.push_back(rp.Pop<Handle>());
}
} else {
// For responses we just ignore the handles, they're empty and will be populated when

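With this change the incoming-handle loops above no longer resolve kernel objects up front; they just record the raw handles, and services read them back through the request context. A hypothetical handler body, only to show the renamed accessors in use (the service, handle indices, and log line are made up):

    // Sketch: a service that receives one copied and one moved handle.
    void HandleSomeRequest(Kernel::HLERequestContext& ctx) {
        const Handle copied = ctx.GetCopyHandle(0);
        const Handle moved = ctx.GetMoveHandle(0);
        LOG_DEBUG(IPC, "copy=0x{:08X} move=0x{:08X}", copied, moved);
        // Objects pushed back out go through AddCopyObject/AddMoveObject and are
        // turned into handles later in WriteToOutgoingCommandBuffer.
    }
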
@@ -186,26 +182,14 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_t
auto& owner_process = *requesting_thread.GetOwnerProcess();
auto& handle_table = owner_process.GetHandleTable();

// The data_size already includes the payload header, the padding and the domain header.
std::size_t size{};

if (IsTipc()) {
size = cmd_buf.size();
} else {
size = data_payload_offset + data_size - sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
if (Session()->IsDomain()) {
size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
}
}

for (auto& object : copy_objects) {
for (auto& object : outgoing_copy_objects) {
Handle handle{};
if (object) {
R_TRY(handle_table.Add(&handle, object));
}
cmd_buf[current_offset++] = handle;
}
for (auto& object : move_objects) {
for (auto& object : outgoing_move_objects) {
Handle handle{};
if (object) {
R_TRY(handle_table.Add(&handle, object));

@@ -220,9 +204,9 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_t
// TODO(Subv): This completely ignores C buffers.

if (Session()->IsDomain()) {
current_offset = domain_offset - static_cast<u32>(domain_objects.size());
for (const auto& object : domain_objects) {
server_session->AppendDomainRequestHandler(object);
current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size());
for (const auto& object : outgoing_domain_objects) {
server_session->AppendDomainHandler(object);
cmd_buf[current_offset++] =
static_cast<u32_le>(server_session->NumDomainRequestHandlers());
}

@@ -230,7 +214,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_t

// Copy the translated command buffer back into the thread's command buffer area.
memory.WriteBlock(owner_process, requesting_thread.GetTLSAddress(), cmd_buf.data(),
size * sizeof(u32));
write_size * sizeof(u32));

return RESULT_SUCCESS;
}

@@ -11,7 +11,8 @@
#include <string>
#include <type_traits>
#include <vector>
#include <boost/container/small_vector.hpp>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/concepts.h"
#include "common/swap.h"

@@ -84,6 +85,69 @@ public:
void ClientDisconnected(KServerSession* session);
};

using SessionRequestHandlerPtr = std::shared_ptr<SessionRequestHandler>;

/**
 * Manages the underlying HLE requests for a session, and whether (or not) the session should be
 * treated as a domain. This is managed separately from server sessions, as this state is shared
 * when objects are cloned.
 */
class SessionRequestManager final {
public:
SessionRequestManager() = default;

bool IsDomain() const {
return is_domain;
}

void ConvertToDomain() {
domain_handlers = {session_handler};
is_domain = true;
}

std::size_t DomainHandlerCount() const {
return domain_handlers.size();
}

bool HasSessionHandler() const {
return session_handler != nullptr;
}

SessionRequestHandler& SessionHandler() {
return *session_handler;
}

const SessionRequestHandler& SessionHandler() const {
return *session_handler;
}

void CloseDomainHandler(std::size_t index) {
if (index < DomainHandlerCount()) {
domain_handlers[index] = nullptr;
} else {
UNREACHABLE_MSG("Unexpected handler index {}", index);
}
}

SessionRequestHandlerPtr DomainHandler(std::size_t index) const {
ASSERT_MSG(index < DomainHandlerCount(), "Unexpected handler index {}", index);
return domain_handlers.at(index);
}

void AppendDomainHandler(SessionRequestHandlerPtr&& handler) {
domain_handlers.emplace_back(std::move(handler));
}

void SetSessionHandler(SessionRequestHandlerPtr&& handler) {
session_handler = std::move(handler);
}

private:
bool is_domain{};
SessionRequestHandlerPtr session_handler;
std::vector<SessionRequestHandlerPtr> domain_handlers;
};

/**
 * Class containing information about an in-flight IPC request being handled by an HLE service
 * implementation. Services should avoid using old global APIs (e.g. Kernel::GetCommandBuffer()) and

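A short usage sketch of the new manager (hypothetical driver code, not part of the commit; "MyService" stands in for any SessionRequestHandler subclass). It shows why this state lives in its own object: a session starts with one handler, and converting to a domain seeds the handler list with it, so cloned sessions sharing the manager also share the domain state.

    auto manager = std::make_shared<Kernel::SessionRequestManager>();
    manager->SetSessionHandler(std::make_shared<MyService>());

    // Before conversion, requests go to the single session handler.
    ASSERT(!manager->IsDomain());
    ASSERT(manager->HasSessionHandler());

    // Converting to a domain starts the handler list with the existing session
    // handler as the first entry (domain object id 1); further interfaces are
    // appended as they are pushed.
    manager->ConvertToDomain();
    ASSERT(manager->IsDomain());
    ASSERT(manager->DomainHandlerCount() == 1);
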
@@ -224,53 +288,32 @@ public:
bool CanWriteBuffer(std::size_t buffer_index = 0) const;

Handle GetCopyHandle(std::size_t index) const {
return copy_handles.at(index);
return incoming_copy_handles.at(index);
}

Handle GetMoveHandle(std::size_t index) const {
return move_handles.at(index);
return incoming_move_handles.at(index);
}

void AddMoveObject(KAutoObject* object) {
move_objects.emplace_back(object);
outgoing_move_objects.emplace_back(object);
}

void AddCopyObject(KAutoObject* object) {
copy_objects.emplace_back(object);
outgoing_copy_objects.emplace_back(object);
}

void AddDomainObject(std::shared_ptr<SessionRequestHandler> object) {
domain_objects.emplace_back(std::move(object));
void AddDomainObject(SessionRequestHandlerPtr object) {
outgoing_domain_objects.emplace_back(std::move(object));
}

template <typename T>
std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const {
return std::static_pointer_cast<T>(domain_request_handlers.at(index));
std::shared_ptr<T> GetDomainHandler(std::size_t index) const {
return std::static_pointer_cast<T>(manager->DomainHandler(index));
}

void SetDomainRequestHandlers(
const std::vector<std::shared_ptr<SessionRequestHandler>>& handlers) {
domain_request_handlers = handlers;
}

/// Clears the list of objects so that no lingering objects are written accidentally to the
/// response buffer.
void ClearIncomingObjects() {
move_objects.clear();
copy_objects.clear();
domain_objects.clear();
}

std::size_t NumMoveObjects() const {
return move_objects.size();
}

std::size_t NumCopyObjects() const {
return copy_objects.size();
}

std::size_t NumDomainObjects() const {
return domain_objects.size();
void SetSessionRequestManager(std::shared_ptr<SessionRequestManager> manager_) {
manager = std::move(manager_);
}

std::string Description() const;

@@ -292,12 +335,12 @@ private:
Kernel::KServerSession* server_session{};
KThread* thread;

// TODO(yuriks): Check common usage of this and optimize size accordingly
boost::container::small_vector<Handle, 8> move_handles;
boost::container::small_vector<Handle, 8> copy_handles;
boost::container::small_vector<KAutoObject*, 8> move_objects;
boost::container::small_vector<KAutoObject*, 8> copy_objects;
boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects;
std::vector<Handle> incoming_move_handles;
std::vector<Handle> incoming_copy_handles;

std::vector<KAutoObject*> outgoing_move_objects;
std::vector<KAutoObject*> outgoing_copy_objects;
std::vector<SessionRequestHandlerPtr> outgoing_domain_objects;

std::optional<IPC::CommandHeader> command_header;
std::optional<IPC::HandleDescriptorHeader> handle_descriptor_header;

@@ -311,12 +354,12 @@ private:

u32_le command{};
u64 pid{};
u32 write_size{};
u32 data_payload_offset{};
u32 handles_offset{};
u32 domain_offset{};
u32 data_size{};

std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;
std::shared_ptr<SessionRequestManager> manager;
bool is_thread_waiting{};

KernelCore& kernel;

@@ -70,14 +70,22 @@ constexpr size_t SlabCountExtraKThread = 160;
template <typename T>
VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
size_t num_objects) {
// TODO(bunnei): This is just a placeholder. We should initialize the appropriate KSlabHeap for
// kernel object type T with the backing kernel memory pointer once we emulate kernel memory.

const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
VAddr start = Common::AlignUp(address, alignof(T));

// This is intentionally empty. Once KSlabHeap is fully implemented, we can replace this with
// the pointer to emulated memory to pass along. Until then, KSlabHeap will just allocate/free
// host memory.
void* backing_kernel_memory{};

if (size > 0) {
const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
ASSERT(region != nullptr);
ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab));
T::InitializeSlabHeap(system.Kernel(), system.Memory().GetKernelBuffer(start, size), size);
T::InitializeSlabHeap(system.Kernel(), backing_kernel_memory, size);
}

return start + size;

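For readers unfamiliar with the helper used above: Common::AlignUp is assumed here to be the usual power-of-two round-up (which holds for alignof values). A minimal sketch plus a worked example:

    // Round value up to the next multiple of alignment (alignment must be a power of two).
    constexpr std::size_t AlignUp(std::size_t value, std::size_t alignment) {
        return (value + alignment - 1) & ~(alignment - 1);
    }

    // Example: 3 objects of 56 bytes -> AlignUp(3 * 56, alignof(void*)) == AlignUp(168, 8) == 168;
    // an unaligned base address is bumped up to the type's alignment before the region starts.
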
@@ -58,9 +58,9 @@ bool KClientPort::IsSignaled() const {

ResultCode KClientPort::CreateSession(KClientSession** out) {
// Reserve a new session from the resource limit.
// KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
// LimitableResource::Sessions);
// R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);
KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
LimitableResource::Sessions);
R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);

// Update the session counts.
{

@@ -104,7 +104,7 @@ ResultCode KClientPort::CreateSession(KClientSession** out) {
session->Initialize(this, parent->GetName());

// Commit the session reservation.
// session_reservation.Commit();
session_reservation.Commit();

// Register the session.
KSession::Register(kernel, session);

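The now-enabled reservation follows a reserve/commit idiom (R_UNLESS bails out early with ResultLimitReached if the reservation fails). The sketch below is not the actual KScopedResourceReservation implementation, only the shape of the idiom as it is used in these hunks, with stub types so it stands alone:

    #include <cassert>

    // Minimal stand-ins for the kernel types, just to make the pattern concrete.
    struct ResourceLimitStub {
        int used{};
        int max{2};
        bool Reserve(int count) { if (used + count > max) return false; used += count; return true; }
        void Release(int count) { used -= count; }
    };

    // Reserve up front; release automatically on scope exit unless Commit() is called,
    // so early error returns cannot leak a session slot.
    class ScopedReservationSketch {
    public:
        explicit ScopedReservationSketch(ResourceLimitStub& limit_) : limit{limit_} {
            succeeded = limit.Reserve(1);
        }
        ~ScopedReservationSketch() {
            if (succeeded && !committed) {
                limit.Release(1);
            }
        }
        bool Succeeded() const { return succeeded; }
        void Commit() { committed = true; }

    private:
        ResourceLimitStub& limit;
        bool succeeded{};
        bool committed{};
    };

    int main() {
        ResourceLimitStub sessions;
        {
            ScopedReservationSketch reservation{sessions};
            assert(reservation.Succeeded());
            // ... create and initialize the session; any early return releases the slot ...
            reservation.Commit();
        }
        assert(sessions.used == 1); // committed: the slot stays charged until the session is destroyed
    }
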
@@ -31,6 +31,9 @@ public:
const KPort* GetParent() const {
return parent;
}
KPort* GetParent() {
return parent;
}

s32 GetNumSessions() const {
return num_sessions;

@@ -56,11 +56,8 @@ ResultCode KPort::EnqueueSession(KServerSession* session) {

R_UNLESS(state == State::Normal, ResultPortClosed);

if (server.HasHLEHandler()) {
server.GetHLEHandler()->ClientConnected(session);
} else {
server.EnqueueSession(session);
}
server.EnqueueSession(session);
server.GetSessionRequestHandler()->ClientConnected(server.AcceptSession());

return RESULT_SUCCESS;
}

@@ -32,26 +32,24 @@ public:
explicit KServerPort(KernelCore& kernel_);
virtual ~KServerPort() override;

using HLEHandler = std::shared_ptr<SessionRequestHandler>;

void Initialize(KPort* parent_, std::string&& name_);

/// Whether or not this server port has an HLE handler available.
bool HasHLEHandler() const {
return hle_handler != nullptr;
bool HasSessionRequestHandler() const {
return session_handler != nullptr;
}

/// Gets the HLE handler for this port.
HLEHandler GetHLEHandler() const {
return hle_handler;
SessionRequestHandlerPtr GetSessionRequestHandler() const {
return session_handler;
}

/**
 * Sets the HLE handler template for the port. ServerSessions created by connecting to this port
 * will inherit a reference to this handler.
 */
void SetHleHandler(HLEHandler hle_handler_) {
hle_handler = std::move(hle_handler_);
void SetSessionHandler(SessionRequestHandlerPtr&& handler) {
session_handler = std::move(handler);
}

void EnqueueSession(KServerSession* pending_session);

@@ -73,7 +71,7 @@ private:

private:
SessionList session_list;
HLEHandler hle_handler;
SessionRequestHandlerPtr session_handler;
KPort* parent{};
};

@@ -23,7 +23,8 @@

namespace Kernel {

KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
KServerSession::KServerSession(KernelCore& kernel_)
: KSynchronizationObject{kernel_}, manager{std::make_shared<SessionRequestManager>()} {}

KServerSession::~KServerSession() {
kernel.ReleaseServiceThread(service_thread);

@@ -43,14 +44,8 @@ void KServerSession::Destroy() {
}

void KServerSession::OnClientClosed() {
// We keep a shared pointer to the hle handler to keep it alive throughout
// the call to ClientDisconnected, as ClientDisconnected invalidates the
// hle_handler member itself during the course of the function executing.
std::shared_ptr<SessionRequestHandler> handler = hle_handler;
if (handler) {
// Note that after this returns, this server session's hle_handler is
// invalidated (set to null).
handler->ClientDisconnected(this);
if (manager->HasSessionHandler()) {
manager->SessionHandler().ClientDisconnected(this);
}
}

@@ -66,12 +61,12 @@ bool KServerSession::IsSignaled() const {
return false;
}

void KServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) {
domain_request_handlers.push_back(std::move(handler));
void KServerSession::AppendDomainHandler(SessionRequestHandlerPtr handler) {
manager->AppendDomainHandler(std::move(handler));
}

std::size_t KServerSession::NumDomainRequestHandlers() const {
return domain_request_handlers.size();
return manager->DomainHandlerCount();
}

ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {

@@ -80,14 +75,14 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
}

// Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
context.SetDomainRequestHandlers(domain_request_handlers);
context.SetSessionRequestManager(manager);

// If there is a DomainMessageHeader, then this is CommandType "Request"
const auto& domain_message_header = context.GetDomainMessageHeader();
const u32 object_id{domain_message_header.object_id};
switch (domain_message_header.command) {
case IPC::DomainMessageHeader::CommandType::SendMessage:
if (object_id > domain_request_handlers.size()) {
if (object_id > manager->DomainHandlerCount()) {
LOG_CRITICAL(IPC,
"object_id {} is too big! This probably means a recent service call "
"to {} needed to return a new interface!",

@@ -95,12 +90,12 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
UNREACHABLE();
return RESULT_SUCCESS; // Ignore error if asserts are off
}
return domain_request_handlers[object_id - 1]->HandleSyncRequest(*this, context);
return manager->DomainHandler(object_id - 1)->HandleSyncRequest(*this, context);

case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);

domain_request_handlers[object_id - 1] = nullptr;
manager->CloseDomainHandler(object_id - 1);

IPC::ResponseBuilder rb{context, 2};
rb.Push(RESULT_SUCCESS);

@@ -133,14 +128,14 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
if (IsDomain() && context.HasDomainMessageHeader()) {
result = HandleDomainSyncRequest(context);
// If there is no domain header, the regular session handler is used
} else if (hle_handler != nullptr) {
} else if (manager->HasSessionHandler()) {
// If this ServerSession has an associated HLE handler, forward the request to it.
result = hle_handler->HandleSyncRequest(*this, context);
result = manager->SessionHandler().HandleSyncRequest(*this, context);
}

if (convert_to_domain) {
ASSERT_MSG(IsSession(), "ServerSession is already a domain instance.");
domain_request_handlers = {hle_handler};
ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
manager->ConvertToDomain();
convert_to_domain = false;
}

@@ -12,6 +12,7 @@
#include <boost/intrusive/list.hpp>

#include "common/threadsafe_queue.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/service_thread.h"
#include "core/hle/result.h"

@@ -64,8 +65,8 @@ public:
 * instead of the regular IPC machinery. (The regular IPC machinery is currently not
 * implemented.)
 */
void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) {
hle_handler = std::move(hle_handler_);
void SetSessionHandler(SessionRequestHandlerPtr handler) {
manager->SetSessionHandler(std::move(handler));
}

/**

@@ -82,7 +83,7 @@ public:

/// Adds a new domain request handler to the collection of request handlers within
/// this ServerSession instance.
void AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler);
void AppendDomainHandler(SessionRequestHandlerPtr handler);

/// Retrieves the total number of domain request handlers that have been
/// appended to this ServerSession instance.

@@ -90,12 +91,7 @@ public:

/// Returns true if the session has been converted to a domain, otherwise False
bool IsDomain() const {
return !IsSession();
}

/// Returns true if this session has not been converted to a domain, otherwise false.
bool IsSession() const {
return domain_request_handlers.empty();
return manager->IsDomain();
}

/// Converts the session to a domain at the end of the current command

@@ -103,6 +99,21 @@ public:
convert_to_domain = true;
}

/// Gets the session request manager, which forwards requests to the underlying service
std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() {
return manager;
}

/// Gets the session request manager, which forwards requests to the underlying service
const std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() const {
return manager;
}

/// Sets the session request manager, which forwards requests to the underlying service
void SetSessionRequestManager(std::shared_ptr<SessionRequestManager> manager_) {
manager = std::move(manager_);
}

private:
/// Queues a sync request from the emulated application.
ResultCode QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);

@@ -114,11 +125,8 @@ private:
/// object handle.
ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context);

/// This session's HLE request handler (applicable when not a domain)
std::shared_ptr<SessionRequestHandler> hle_handler;

/// This is the list of domain request handlers (after conversion to a domain)
std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;
/// This session's HLE request handlers
std::shared_ptr<SessionRequestManager> manager;

/// When set to True, converts the session to a domain at the end of the command
bool convert_to_domain{};

@@ -78,7 +78,7 @@ void KSession::OnClientClosed() {
void KSession::PostDestroy(uintptr_t arg) {
// Release the session count resource the owner process holds.
KProcess* owner = reinterpret_cast<KProcess*>(arg);
// owner->GetResourceLimit()->Release(LimitableResource::Sessions, 1);
owner->GetResourceLimit()->Release(LimitableResource::Sessions, 1);
owner->Close();
}

@@ -66,6 +66,10 @@ public:
return port;
}

KClientPort* GetParent() {
return port;
}

private:
enum class State : u8 {
Invalid = 0,

@@ -4,165 +4,33 @@

#pragma once

#include <atomic>

#include "common/assert.h"
#include "common/common_types.h"

namespace Kernel {

namespace impl {
class KernelCore;

class KSlabHeapImpl final : NonCopyable {
public:
struct Node {
Node* next{};
};

constexpr KSlabHeapImpl() = default;

void Initialize(std::size_t size) {
ASSERT(head == nullptr);
obj_size = size;
}

constexpr std::size_t GetObjectSize() const {
return obj_size;
}

Node* GetHead() const {
return head;
}

void* Allocate() {
Node* ret = head.load();

do {
if (ret == nullptr) {
break;
}
} while (!head.compare_exchange_weak(ret, ret->next));

return ret;
}

void Free(void* obj) {
Node* node = static_cast<Node*>(obj);

Node* cur_head = head.load();
do {
node->next = cur_head;
} while (!head.compare_exchange_weak(cur_head, node));
}

private:
std::atomic<Node*> head{};
std::size_t obj_size{};
};

} // namespace impl

class KSlabHeapBase : NonCopyable {
public:
constexpr KSlabHeapBase() = default;

constexpr bool Contains(uintptr_t addr) const {
return start <= addr && addr < end;
}

constexpr std::size_t GetSlabHeapSize() const {
return (end - start) / GetObjectSize();
}

constexpr std::size_t GetObjectSize() const {
return impl.GetObjectSize();
}

constexpr uintptr_t GetSlabHeapAddress() const {
return start;
}

std::size_t GetObjectIndexImpl(const void* obj) const {
return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
}

std::size_t GetPeakIndex() const {
return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
}

void* AllocateImpl() {
return impl.Allocate();
}

void FreeImpl(void* obj) {
// Don't allow freeing an object that wasn't allocated from this heap
ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));

impl.Free(obj);
}

void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
// Ensure we don't initialize a slab using null memory
ASSERT(memory != nullptr);

// Initialize the base allocator
impl.Initialize(obj_size);

// Set our tracking variables
const std::size_t num_obj = (memory_size / obj_size);
start = reinterpret_cast<uintptr_t>(memory);
end = start + num_obj * obj_size;
peak = start;

// Free the objects
u8* cur = reinterpret_cast<u8*>(end);

for (std::size_t i{}; i < num_obj; i++) {
cur -= obj_size;
impl.Free(cur);
}
}

private:
using Impl = impl::KSlabHeapImpl;

Impl impl;
uintptr_t peak{};
uintptr_t start{};
uintptr_t end{};
};
/// This is a placeholder class to manage slab heaps for kernel objects. For now, we just allocate
/// these with new/delete, but this can be re-implemented later to allocate these in emulated
/// memory.

template <typename T>
class KSlabHeap final : public KSlabHeapBase {
class KSlabHeap final : NonCopyable {
public:
constexpr KSlabHeap() : KSlabHeapBase() {}
KSlabHeap() = default;

void Initialize(void* memory, std::size_t memory_size) {
InitializeImpl(sizeof(T), memory, memory_size);
void Initialize([[maybe_unused]] void* memory, [[maybe_unused]] std::size_t memory_size) {
// Placeholder that should initialize the backing slab heap implementation.
}

T* Allocate() {
T* obj = static_cast<T*>(AllocateImpl());
if (obj != nullptr) {
new (obj) T();
}
return obj;
return new T();
}

T* AllocateWithKernel(KernelCore& kernel) {
T* obj = static_cast<T*>(AllocateImpl());
if (obj != nullptr) {
new (obj) T(kernel);
}
return obj;
return new T(kernel);
}

void Free(T* obj) {
FreeImpl(obj);
}

constexpr std::size_t GetObjectIndex(const T* obj) const {
return GetObjectIndexImpl(obj);
delete obj;
}
};

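A small usage sketch of the placeholder heap above (hypothetical call site, not code from the commit): Allocate/AllocateWithKernel/Free are new/delete underneath for now, but callers already go through the slab interface, so the backing store can later move into emulated kernel memory without touching them.

    // Illustrative only.
    Kernel::KSlabHeap<Kernel::KSession> session_heap;
    session_heap.Initialize(nullptr, 0);                       // backing memory is ignored for now
    Kernel::KSession* session = session_heap.AllocateWithKernel(kernel);
    // ... use the session ...
    session_heap.Free(session);
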
@@ -74,21 +74,17 @@ void ServiceThread::Impl::QueueSyncRequest(KSession& session,
{
std::unique_lock lock{queue_mutex};

auto* server_session{&session.GetServerSession()};

// Open a reference to the session to ensure it is not closed while the service request
// completes asynchronously.
session.Open();
server_session->Open();

requests.emplace([session_ptr{&session}, context{std::move(context)}]() {
requests.emplace([server_session, context{std::move(context)}]() {
// Close the reference.
SCOPE_EXIT({ session_ptr->Close(); });

// If the session has been closed, we are done.
if (session_ptr->IsServerClosed()) {
return;
}
SCOPE_EXIT({ server_session->Close(); });

// Complete the service request.
KScopedAutoObject server_session{&session_ptr->GetServerSession()};
server_session->CompleteSyncRequest(*context);
});
}

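The pattern above — take a reference before queueing the deferred task, drop it when the task finishes — is worth spelling out. A self-contained sketch of the same idea with a hypothetical ref-counted type (this is not the kernel's KAutoObject machinery):

    #include <functional>
    #include <vector>

    struct RefCountedStub {
        int refs{1};
        void Open() { ++refs; }   // caller keeps the object alive
        void Close() { --refs; }  // balanced when the deferred task runs
    };

    // Open before queueing so the object outlives the caller's scope; the task
    // closes the reference when it completes, mirroring Open()/SCOPE_EXIT above.
    void QueueWork(RefCountedStub& obj, std::vector<std::function<void()>>& queue) {
        obj.Open();
        queue.emplace_back([&obj] {
            // ... complete the request ...
            obj.Close();
        });
    }
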
@@ -67,11 +67,11 @@ class KAutoObjectWithSlabHeapAndContainer : public Base {

private:
static Derived* Allocate(KernelCore& kernel) {
return new Derived(kernel);
return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel);
}

static void Free(KernelCore& kernel, Derived* obj) {
delete obj;
kernel.SlabHeap<Derived>().Free(obj);
}

public:

@@ -107,7 +107,7 @@ void ServiceFrameworkBase::InstallAsService(SM::ServiceManager& service_manager)
ASSERT(!port_installed);

auto port = service_manager.RegisterService(service_name, max_sessions).Unwrap();
port->SetHleHandler(shared_from_this());
port->SetSessionHandler(shared_from_this());
port_installed = true;
}

@@ -118,7 +118,7 @@ Kernel::KClientPort& ServiceFrameworkBase::CreatePort(Kernel::KernelCore& kernel

auto* port = Kernel::KPort::Create(kernel);
port->Initialize(max_sessions, false, service_name);
port->GetServerPort().SetHleHandler(shared_from_this());
port->GetServerPort().SetSessionHandler(shared_from_this());

port_installed = true;

@@ -4,8 +4,13 @@

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/service/sm/controller.h"

@@ -13,7 +18,7 @@
namespace Service::SM {

void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) {
ASSERT_MSG(ctx.Session()->IsSession(), "Session is already a domain");
ASSERT_MSG(!ctx.Session()->IsDomain(), "Session is already a domain");
LOG_DEBUG(Service, "called, server_session={}", ctx.Session()->GetId());
ctx.Session()->ConvertToDomain();

@@ -29,16 +34,36 @@ void Controller::CloneCurrentObject(Kernel::HLERequestContext& ctx) {

LOG_DEBUG(Service, "called");

auto session = ctx.Session()->GetParent();
auto& kernel = system.Kernel();
auto* session = ctx.Session()->GetParent();
auto* port = session->GetParent()->GetParent();

// Open a reference to the session to simulate a new one being created.
session->Open();
session->GetClientSession().Open();
session->GetServerSession().Open();
// Reserve a new session from the process resource limit.
Kernel::KScopedResourceReservation session_reservation(
kernel.CurrentProcess()->GetResourceLimit(), Kernel::LimitableResource::Sessions);
if (!session_reservation.Succeeded()) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(Kernel::ResultLimitReached);
}

// Create a new session.
auto* clone = Kernel::KSession::Create(kernel);
clone->Initialize(&port->GetClientPort(), session->GetName());

// Commit the session reservation.
session_reservation.Commit();

// Enqueue the session with the named port.
port->EnqueueSession(&clone->GetServerSession());

// Set the session request manager.
clone->GetServerSession().SetSessionRequestManager(
session->GetServerSession().GetSessionRequestManager());

// We succeeded.
IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
rb.Push(RESULT_SUCCESS);
rb.PushMoveObjects(session->GetClientSession());
rb.PushMoveObjects(clone->GetClientSession());
}

void Controller::CloneCurrentObjectEx(Kernel::HLERequestContext& ctx) {

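One consequence of the cloning flow above (hypothetical check, not code from the commit): because the clone receives the original server session's SessionRequestManager, converting either session to a domain is visible through both — exactly the "state is shared when objects are cloned" behaviour the manager's doc comment describes.

    // Illustrative assertion only.
    ASSERT(clone->GetServerSession().GetSessionRequestManager() ==
           session->GetServerSession().GetSessionRequestManager());
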
@@ -150,31 +150,31 @@ ResultVal<Kernel::KClientSession*> SM::GetServiceImpl(Kernel::HLERequestContext&
IPC::RequestParser rp{ctx};
std::string name(PopServiceName(rp));

// Find the named port.
auto result = service_manager.GetServicePort(name);
if (result.Failed()) {
LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, result.Code().raw);
return result.Code();
}

auto* port = result.Unwrap();

// Kernel::KScopedResourceReservation session_reservation(
// kernel.CurrentProcess()->GetResourceLimit(), Kernel::LimitableResource::Sessions);
// R_UNLESS(session_reservation.Succeeded(), Kernel::ResultLimitReached);
// Reserve a new session from the process resource limit.
Kernel::KScopedResourceReservation session_reservation(
kernel.CurrentProcess()->GetResourceLimit(), Kernel::LimitableResource::Sessions);
R_UNLESS(session_reservation.Succeeded(), Kernel::ResultLimitReached);

// Create a new session.
auto* session = Kernel::KSession::Create(kernel);
session->Initialize(&port->GetClientPort(), std::move(name));

// Commit the session reservation.
// session_reservation.Commit();
session_reservation.Commit();

if (port->GetServerPort().GetHLEHandler()) {
port->GetServerPort().GetHLEHandler()->ClientConnected(&session->GetServerSession());
} else {
port->EnqueueSession(&session->GetServerSession());
}
// Enqueue the session with the named port.
port->EnqueueSession(&session->GetServerSession());

LOG_DEBUG(Service_SM, "called service={} -> session={}", name, session->GetId());

return MakeResult(&session->GetClientSession());
}

@@ -73,7 +73,7 @@ public:
if (port == nullptr) {
return nullptr;
}
return std::static_pointer_cast<T>(port->GetServerPort().GetHLEHandler());
return std::static_pointer_cast<T>(port->GetServerPort().GetSessionRequestHandler());
}

void InvokeControlRequest(Kernel::HLERequestContext& context);

@@ -82,22 +82,6 @@ struct Memory::Impl {
return nullptr;
}

u8* GetKernelBuffer(VAddr start_vaddr, size_t size) {
// TODO(bunnei): This is just a workaround until we have kernel memory layout mapped &
// managed. Until then, we use this to allocate and access kernel memory regions.

auto search = kernel_memory_regions.find(start_vaddr);
if (search != kernel_memory_regions.end()) {
return search->second.get();
}

std::unique_ptr<u8[]> new_memory_region{new u8[size]};
u8* raw_ptr = new_memory_region.get();
kernel_memory_regions[start_vaddr] = std::move(new_memory_region);

return raw_ptr;
}

u8 Read8(const VAddr addr) {
return Read<u8>(addr);
}

@@ -727,7 +711,6 @@
}

Common::PageTable* current_page_table = nullptr;
std::unordered_map<VAddr, std::unique_ptr<u8[]>> kernel_memory_regions;
Core::System& system;
};

@@ -765,10 +748,6 @@ u8* Memory::GetPointer(VAddr vaddr) {
return impl->GetPointer(vaddr);
}

u8* Memory::GetKernelBuffer(VAddr start_vaddr, size_t size) {
return impl->GetKernelBuffer(start_vaddr, size);
}

const u8* Memory::GetPointer(VAddr vaddr) const {
return impl->GetPointer(vaddr);
}

@@ -121,15 +121,6 @@ public:
 */
u8* GetPointer(VAddr vaddr);

/**
 * Gets a pointer to the start of a kernel heap allocated memory region. Will allocate one if it
 * does not already exist.
 *
 * @param start_vaddr Start virtual address for the memory region.
 * @param size Size of the memory region.
 */
u8* GetKernelBuffer(VAddr start_vaddr, size_t size);

template <typename T>
T* GetPointer(VAddr vaddr) {
return reinterpret_cast<T*>(GetPointer(vaddr));