citra-emu/citra-canary

Merge pull request #2793 from Subv/replyandreceive

Kernel/SVC: Partially implemented svcReplyAndReceive
Sebastian Valle 2017-06-29 17:05:22 -05:00 committed by GitHub
commit 56d718b2a1
6 changed files with 161 additions and 23 deletions
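For orientation, this is the userland-facing shape of the SVC being implemented, in the style of the declaration shipped by homebrew toolchains such as libctru; the parameter comments paraphrase the behaviour added by this commit and are not part of the PR itself:

// libctru-style declaration (reference only, not part of this PR).
// index       : receives the position of the handle that was signaled, or -1.
// handles     : array of waitable handles; must be a valid pointer even when handleCount is 0.
// handleCount : number of entries in handles; negative values are rejected.
// replyTarget : ServerSession handle to send the pending reply to, or 0 to skip the reply step.
Result svcReplyAndReceive(s32* index, const Handle* handles, s32 handleCount, Handle replyTarget);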

@@ -16,9 +16,6 @@ namespace HLE {

#define PARAM(n) Core::CPU().GetReg(n)

-/// An invalid result code that is meant to be overwritten when a thread resumes from waiting
-static const ResultCode RESULT_INVALID(0xDEADC0DE);

/**
 * HLE a function return from the current ARM11 userland process
 * @param res Result to return

@@ -68,10 +65,18 @@ void Wrap() {
             (PARAM(3) != 0), (((s64)PARAM(4) << 32) | PARAM(0)))
            .raw;
-    if (retval != RESULT_INVALID.raw) {
    Core::CPU().SetReg(1, (u32)param_1);
    FuncReturn(retval);
-    }
}

+template <ResultCode func(s32*, u32*, s32, u32)>
+void Wrap() {
+    s32 param_1 = 0;
+    u32 retval =
+        func(&param_1, (Kernel::Handle*)Memory::GetPointer(PARAM(1)), (s32)PARAM(2), PARAM(3)).raw;
+    Core::CPU().SetReg(1, (u32)param_1);
+    FuncReturn(retval);
+}

template <ResultCode func(u32, u32, u32, u32, s64)>

@@ -92,9 +97,7 @@ template <ResultCode func(u32, s64)>
void Wrap() {
    s32 retval = func(PARAM(0), (((s64)PARAM(3) << 32) | PARAM(2))).raw;
-    if (retval != RESULT_INVALID.raw) {
    FuncReturn(retval);
-    }
}

template <ResultCode func(MemoryInfo*, PageInfo*, u32)>
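The new wrapper follows the same register-marshalling convention as the existing ones: guest registers r0-r3 carry the SVC arguments, the result code goes back in r0, and the output index is written back to r1. Below is a minimal, self-contained model of that convention; everything in it (the regs array, guest_memory, the fake SVC body) is hypothetical scaffolding, and only the calling convention mirrors the PR.

#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;
using s32 = std::int32_t;

static u32 regs[4];          // stand-in for the emulated r0-r3
static u32 guest_memory[16]; // stand-in for the guest-side handle array

static u32 FakeReplyAndReceive(s32* index, u32* handles, s32 handle_count, u32 reply_target) {
    (void)handles;
    (void)reply_target;
    *index = (handle_count > 0) ? 0 : -1; // pretend the first handle was signaled
    return 0;                             // RESULT_SUCCESS
}

template <u32 func(s32*, u32*, s32, u32)>
void WrapModel() {
    s32 index = 0;
    // In Citra, PARAM(1) holds a guest address that is resolved through Memory::GetPointer();
    // this model simply points at the fake guest memory instead.
    u32 result = func(&index, guest_memory, static_cast<s32>(regs[2]), regs[3]);
    regs[1] = static_cast<u32>(index); // out index written back to r1
    regs[0] = result;                  // result code returned in r0
}

int main() {
    regs[2] = 2; // handle_count
    regs[3] = 0; // reply_target = 0, i.e. no reply
    WrapModel<FakeReplyAndReceive>();
    std::printf("r0=0x%08X r1=%d\n", static_cast<unsigned>(regs[0]), static_cast<int>(regs[1]));
}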

@@ -9,6 +9,7 @@
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
+#include "core/hle/kernel/thread.h"

namespace Kernel {

@@ -27,19 +28,24 @@ ClientSession::~ClientSession() {
        // TODO(Subv): Force a wake up of all the ServerSession's waiting threads and set
        // their WaitSynchronization result to 0xC920181A.
+
+        // Clean up the list of client threads with pending requests, they are unneeded now that the
+        // client endpoint is closed.
+        server->pending_requesting_threads.clear();
+        server->currently_handling = nullptr;
    }

    parent->client = nullptr;
}

-ResultCode ClientSession::SendSyncRequest() {
+ResultCode ClientSession::SendSyncRequest(SharedPtr<Thread> thread) {
    // Keep ServerSession alive until we're done working with it.
    SharedPtr<ServerSession> server = parent->server;

    if (server == nullptr)
        return ERR_SESSION_CLOSED_BY_REMOTE;

    // Signal the server session that new data is available
-    return server->HandleSyncRequest();
+    return server->HandleSyncRequest(std::move(thread));
}

} // namespace

@@ -14,6 +14,7 @@ namespace Kernel {

class ServerSession;
class Session;
+class Thread;

class ClientSession final : public Object {
public:

@@ -34,9 +35,10 @@ public:
    /**
     * Sends an SyncRequest from the current emulated thread.
+     * @param thread Thread that initiated the request.
     * @return ResultCode of the operation.
     */
-    ResultCode SendSyncRequest();
+    ResultCode SendSyncRequest(SharedPtr<Thread> thread);

    std::string name; ///< Name of client port (optional)

@@ -32,22 +32,29 @@ ResultVal<SharedPtr<ServerSession>> ServerSession::Create(std::string name) {
    SharedPtr<ServerSession> server_session(new ServerSession);

    server_session->name = std::move(name);
-    server_session->signaled = false;
    server_session->parent = nullptr;

    return MakeResult(std::move(server_session));
}

bool ServerSession::ShouldWait(Thread* thread) const {
-    return !signaled;
+    // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
+    if (parent->client == nullptr)
+        return false;
+    // Wait if we have no pending requests, or if we're currently handling a request.
+    return pending_requesting_threads.empty() || currently_handling != nullptr;
}

void ServerSession::Acquire(Thread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
-    signaled = false;
+
+    // We are now handling a request, pop it from the stack.
+    // TODO(Subv): What happens if the client endpoint is closed before any requests are made?
+    ASSERT(!pending_requesting_threads.empty());
+    currently_handling = pending_requesting_threads.back();
+    pending_requesting_threads.pop_back();
}

-ResultCode ServerSession::HandleSyncRequest() {
+ResultCode ServerSession::HandleSyncRequest(SharedPtr<Thread> thread) {
    // The ServerSession received a sync request, this means that there's new data available
    // from its ClientSession, so wake up any threads that may be waiting on a svcReplyAndReceive or
    // similar.

@@ -60,11 +67,14 @@ ResultCode ServerSession::HandleSyncRequest() {
            return result;

        hle_handler->HandleSyncRequest(SharedPtr<ServerSession>(this));
        // TODO(Subv): Translate the response command buffer.
-    }
+    } else {
+        // Add the thread to the list of threads that have issued a sync request with this
+        // server.
+        pending_requesting_threads.push_back(std::move(thread));
    }

    // If this ServerSession does not have an HLE implementation, just wake up the threads waiting
    // on it.
-    signaled = true;
    WakeupAllWaitingThreads();
    return RESULT_SUCCESS;
}

@@ -90,4 +100,4 @@ ResultCode TranslateHLERequest(ServerSession* server_session) {
    // TODO(Subv): Implement this function once multiple concurrent processes are supported.
    return RESULT_SUCCESS;
}
-}
+} // namespace Kernel

@@ -67,20 +67,30 @@ public:
    /**
     * Handle a sync request from the emulated application.
+     * @param thread Thread that initiated the request.
     * @returns ResultCode from the operation.
     */
-    ResultCode HandleSyncRequest();
+    ResultCode HandleSyncRequest(SharedPtr<Thread> thread);

    bool ShouldWait(Thread* thread) const override;

    void Acquire(Thread* thread) override;

    std::string name; ///< The name of this session (optional)
-    bool signaled;    ///< Whether there's new data available to this ServerSession
    std::shared_ptr<Session> parent; ///< The parent session, which links to the client endpoint.
    std::shared_ptr<SessionRequestHandler>
        hle_handler; ///< This session's HLE request handler (optional)

+    /// List of threads that are pending a response after a sync request. This list is processed in
+    /// a LIFO manner, thus, the last request will be dispatched first.
+    /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
+    std::vector<SharedPtr<Thread>> pending_requesting_threads;
+
+    /// Thread whose request is currently being handled. A request is considered "handled" when a
+    /// response is sent via svcReplyAndReceive.
+    /// TODO(Subv): Find a better name for this.
+    SharedPtr<Thread> currently_handling;
+
private:
    ServerSession();
    ~ServerSession() override;
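The interplay between pending_requesting_threads and currently_handling can be summarized with a small stand-alone model: requests are pushed as they arrive, ShouldWait() keeps the server blocked while nothing is queued or while it is still busy with a previous request, and Acquire() pops the most recent request (LIFO) into currently_handling. This is a hypothetical, simplified sketch; plain ints stand in for SharedPtr<Thread>, and only the ShouldWait()/Acquire() logic mirrors the PR.

#include <cassert>
#include <cstdio>
#include <optional>
#include <vector>

struct SessionModel {
    std::vector<int> pending_requesting_threads; // queued sync requests, processed LIFO
    std::optional<int> currently_handling;       // request currently being serviced, if any

    // Mirrors ServerSession::ShouldWait(): block in svcReplyAndReceive while nothing is
    // queued or while a previous request is still being handled.
    bool ShouldWait() const {
        return pending_requesting_threads.empty() || currently_handling.has_value();
    }

    // Mirrors ServerSession::Acquire(): pop the most recent request off the stack.
    void Acquire() {
        assert(!ShouldWait());
        currently_handling = pending_requesting_threads.back();
        pending_requesting_threads.pop_back();
    }

    // Mirrors the reply step of svcReplyAndReceive: replying marks the request as handled.
    void Reply() { currently_handling.reset(); }
};

int main() {
    SessionModel session;
    session.pending_requesting_threads.push_back(1); // thread 1 sends a sync request
    session.pending_requesting_threads.push_back(2); // thread 2 sends a sync request

    session.Acquire();
    std::printf("handling thread %d\n", *session.currently_handling); // prints 2 (LIFO)
    session.Reply();

    session.Acquire();
    std::printf("handling thread %d\n", *session.currently_handling); // prints 1
}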

@@ -25,6 +25,7 @@
#include "core/hle/kernel/semaphore.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
+#include "core/hle/kernel/session.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"

@@ -237,7 +238,7 @@ static ResultCode SendSyncRequest(Kernel::Handle handle) {
    // TODO(Subv): svcSendSyncRequest should put the caller thread to sleep while the server
    // responds and cause a reschedule.
-    return session->SendSyncRequest();
+    return session->SendSyncRequest(Kernel::GetCurrentThread());
}

/// Close a handle
@@ -398,6 +399,112 @@ static ResultCode WaitSynchronizationN(s32* out, Kernel::Handle* handles, s32 ha
    }
}

+/// In a single operation, sends a IPC reply and waits for a new request.
+static ResultCode ReplyAndReceive(s32* index, Kernel::Handle* handles, s32 handle_count,
+                                  Kernel::Handle reply_target) {
+    // 'handles' has to be a valid pointer even if 'handle_count' is 0.
+    if (handles == nullptr)
+        return Kernel::ERR_INVALID_POINTER;
+
+    // Check if 'handle_count' is invalid
+    if (handle_count < 0)
+        return Kernel::ERR_OUT_OF_RANGE;
+
+    using ObjectPtr = SharedPtr<Kernel::WaitObject>;
+    std::vector<ObjectPtr> objects(handle_count);
+
+    for (int i = 0; i < handle_count; ++i) {
+        auto object = Kernel::g_handle_table.Get<Kernel::WaitObject>(handles[i]);
+        if (object == nullptr)
+            return ERR_INVALID_HANDLE;
+        objects[i] = object;
+    }
+
+    // We are also sending a command reply.
+    // Do not send a reply if the command id in the command buffer is 0xFFFF.
+    u32* cmd_buff = Kernel::GetCommandBuffer();
+    IPC::Header header{cmd_buff[0]};
+    if (reply_target != 0 && header.command_id != 0xFFFF) {
+        auto session = Kernel::g_handle_table.Get<Kernel::ServerSession>(reply_target);
+        if (session == nullptr)
+            return ERR_INVALID_HANDLE;
+
+        auto request_thread = std::move(session->currently_handling);
+
+        // Mark the request as "handled".
+        session->currently_handling = nullptr;
+
+        // Error out if there's no request thread or the session was closed.
+        // TODO(Subv): Is the same error code (ClosedByRemote) returned for both of these cases?
+        if (request_thread == nullptr || session->parent->client == nullptr) {
+            *index = -1;
+            return Kernel::ERR_SESSION_CLOSED_BY_REMOTE;
+        }
+
+        // TODO(Subv): Perform IPC translation from the current thread to request_thread.
+
+        // Note: The scheduler is not invoked here.
+        request_thread->ResumeFromWait();
+    }
+
+    if (handle_count == 0) {
+        *index = 0;
+        // The kernel uses this value as a placeholder for the real error, and returns it when we
+        // pass no handles and do not perform any reply.
+        if (reply_target == 0 || header.command_id == 0xFFFF)
+            return ResultCode(0xE7E3FFFF);
+
+        return RESULT_SUCCESS;
+    }
+
+    auto thread = Kernel::GetCurrentThread();
+
+    // Find the first object that is acquirable in the provided list of objects
+    auto itr = std::find_if(objects.begin(), objects.end(), [thread](const ObjectPtr& object) {
+        return !object->ShouldWait(thread);
+    });
+
+    if (itr != objects.end()) {
+        // We found a ready object, acquire it and set the result value
+        Kernel::WaitObject* object = itr->get();
+        object->Acquire(thread);
+        *index = std::distance(objects.begin(), itr);
+
+        if (object->GetHandleType() == Kernel::HandleType::ServerSession) {
+            auto server_session = static_cast<Kernel::ServerSession*>(object);
+            if (server_session->parent->client == nullptr)
+                return Kernel::ERR_SESSION_CLOSED_BY_REMOTE;
+
+            // TODO(Subv): Perform IPC translation from the ServerSession to the current thread.
+        }
+
+        return RESULT_SUCCESS;
+    }
+
+    // No objects were ready to be acquired, prepare to suspend the thread.
+
+    // TODO(Subv): Perform IPC translation upon wakeup.
+
+    // Put the thread to sleep
+    thread->status = THREADSTATUS_WAIT_SYNCH_ANY;
+
+    // Add the thread to each of the objects' waiting threads.
+    for (size_t i = 0; i < objects.size(); ++i) {
+        Kernel::WaitObject* object = objects[i].get();
+        object->AddWaitingThread(thread);
+    }
+
+    thread->wait_objects = std::move(objects);
+
+    Core::System::GetInstance().PrepareReschedule();
+
+    // Note: The output of this SVC will be set to RESULT_SUCCESS if the thread resumes due to a
+    // signal in one of its wait objects, or to 0xC8A01836 if there was a translation error.
+    // By default the index is set to -1.
+    thread->wait_set_output = true;
+    *index = -1;
+
+    return RESULT_SUCCESS;
+}

/// Create an address arbiter (to allocate access to shared resources)
static ResultCode CreateAddressArbiter(Kernel::Handle* out_handle) {
    using Kernel::AddressArbiter;
@@ -1163,7 +1270,7 @@ static const FunctionDef SVC_Table[] = {
    {0x4C, nullptr, "ReplyAndReceive2"},
    {0x4D, nullptr, "ReplyAndReceive3"},
    {0x4E, nullptr, "ReplyAndReceive4"},
-    {0x4F, nullptr, "ReplyAndReceive"},
+    {0x4F, HLE::Wrap<ReplyAndReceive>, "ReplyAndReceive"},
    {0x50, nullptr, "BindInterrupt"},
    {0x51, nullptr, "UnbindInterrupt"},
    {0x52, nullptr, "InvalidateProcessDataCache"},
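For reference, a guest-side service loop built on this SVC typically looks like the sketch below. It is not part of the PR: session_handles, session_handle_count and HandleRequest() are illustrative placeholders, while svcReplyAndReceive, getThreadCommandBuffer, R_FAILED and IPC_MakeHeader come from libctru. The 0xFFFF "no reply" command id and the reply_target == 0 convention match the checks in the ReplyAndReceive implementation above.

#include <3ds.h>

static Handle session_handles[4];    // sessions this service is currently serving
static s32 session_handle_count = 4;

static void HandleRequest(Handle session, u32* cmd_buf) {
    (void)session;
    cmd_buf[0] = IPC_MakeHeader(0, 1, 0); // hypothetical response header
    cmd_buf[1] = 0;                       // result code of the request
}

static void ServiceThreadMain(void) {
    s32 index = -1;
    Handle reply_target = 0;              // nothing to reply to on the first iteration
    u32* cmd_buf = getThreadCommandBuffer();
    cmd_buf[0] = 0xFFFF0000;              // command id 0xFFFF: receive only, send no reply

    while (true) {
        Result rc = svcReplyAndReceive(&index, session_handles, session_handle_count, reply_target);
        if (R_FAILED(rc)) {
            // Typically 0xC920181A (session closed by remote); skip the reply next time around.
            reply_target = 0;
            cmd_buf[0] = 0xFFFF0000;
            continue;
        }
        // session_handles[index] has a pending request: service it and reply on the next call.
        HandleRequest(session_handles[index], cmd_buf);
        reply_target = session_handles[index];
    }
}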