yuzu-emu/yuzu-mainline

Merge pull request #1899 from lioncash/state

vm_manager/svc: Modify MemoryState enum, and correct error handling for svcQueryMemory
bunnei committed 2018-12-14 15:30:02 -05:00 (committed via GitHub)
commit 1a23970d17
7 changed files with 187 additions and 83 deletions

src/core/hle/kernel/process.cpp

@@ -150,7 +150,7 @@ void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) {
     vm_manager
         .MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size,
                         std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size,
-                        MemoryState::Mapped)
+                        MemoryState::Stack)
         .Unwrap();
     vm_manager.LogLayout();

src/core/hle/kernel/process.h

@@ -262,8 +262,7 @@ public:
     ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
     ResultCode HeapFree(VAddr target, u32 size);
-    ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size,
-                            MemoryState state = MemoryState::Mapped);
+    ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
     ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size);

src/core/hle/kernel/svc.cpp

@@ -35,6 +35,7 @@
 #include "core/hle/lock.h"
 #include "core/hle/result.h"
 #include "core/hle/service/service.h"
+#include "core/memory.h"
 
 namespace Kernel {
 namespace {
@@ -273,7 +274,7 @@ static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
         return result;
     }
 
-    return current_process->MirrorMemory(dst_addr, src_addr, size);
+    return current_process->MirrorMemory(dst_addr, src_addr, size, MemoryState::Stack);
 }
 
 /// Unmaps a region that was previously mapped with svcMapMemory
@@ -1066,10 +1067,9 @@ static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64
     return shared_memory->Unmap(*current_process, addr);
 }
 
 /// Query process memory
-static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_info*/,
-                                     Handle process_handle, u64 addr) {
-    LOG_TRACE(Kernel_SVC, "called process=0x{:08X} addr={:X}", process_handle, addr);
+static ResultCode QueryProcessMemory(VAddr memory_info_address, VAddr page_info_address,
+                                     Handle process_handle, VAddr address) {
+    LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
     const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
     SharedPtr<Process> process = handle_table.Get<Process>(process_handle);
     if (!process) {
@@ -1079,28 +1079,32 @@ static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_i
     }
 
     const auto& vm_manager = process->VMManager();
-    const auto vma = vm_manager.FindVMA(addr);
+    const MemoryInfo memory_info = vm_manager.QueryMemory(address);
 
-    memory_info->attributes = 0;
-    if (vm_manager.IsValidHandle(vma)) {
-        memory_info->base_address = vma->second.base;
-        memory_info->permission = static_cast<u32>(vma->second.permissions);
-        memory_info->size = vma->second.size;
-        memory_info->type = static_cast<u32>(vma->second.meminfo_state);
-    } else {
-        memory_info->base_address = 0;
-        memory_info->permission = static_cast<u32>(VMAPermission::None);
-        memory_info->size = 0;
-        memory_info->type = static_cast<u32>(MemoryState::Unmapped);
-    }
+    Memory::Write64(memory_info_address, memory_info.base_address);
+    Memory::Write64(memory_info_address + 8, memory_info.size);
+    Memory::Write32(memory_info_address + 16, memory_info.state);
+    Memory::Write32(memory_info_address + 20, memory_info.attributes);
+    Memory::Write32(memory_info_address + 24, memory_info.permission);
+    Memory::Write32(memory_info_address + 32, memory_info.ipc_ref_count);
+    Memory::Write32(memory_info_address + 28, memory_info.device_ref_count);
+    Memory::Write32(memory_info_address + 36, 0);
+
+    // Page info appears to be currently unused by the kernel and is always set to zero.
+    Memory::Write32(page_info_address, 0);
 
     return RESULT_SUCCESS;
 }
 
 /// Query memory
-static ResultCode QueryMemory(MemoryInfo* memory_info, PageInfo* page_info, VAddr addr) {
-    LOG_TRACE(Kernel_SVC, "called, addr={:X}", addr);
-    return QueryProcessMemory(memory_info, page_info, CurrentProcess, addr);
+static ResultCode QueryMemory(VAddr memory_info_address, VAddr page_info_address,
+                              VAddr query_address) {
+    LOG_TRACE(Kernel_SVC,
+              "called, memory_info_address=0x{:016X}, page_info_address=0x{:016X}, "
+              "query_address=0x{:016X}",
+              memory_info_address, page_info_address, query_address);
+    return QueryProcessMemory(memory_info_address, page_info_address, CurrentProcess,
+                              query_address);
 }
 
 /// Exits the current process
@@ -1907,7 +1911,7 @@ static const FunctionDef SVC_Table[] = {
     {0x73, nullptr, "SetProcessMemoryPermission"},
     {0x74, nullptr, "MapProcessMemory"},
     {0x75, nullptr, "UnmapProcessMemory"},
-    {0x76, nullptr, "QueryProcessMemory"},
+    {0x76, SvcWrap<QueryProcessMemory>, "QueryProcessMemory"},
     {0x77, nullptr, "MapProcessCodeMemory"},
     {0x78, nullptr, "UnmapProcessCodeMemory"},
     {0x79, nullptr, "CreateProcess"},
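For reference, the Memory::Write calls the rewritten QueryProcessMemory performs line up with the MemoryInfo struct this commit moves into vm_manager.h. Note that the serialized positions keep device_ref_count at +28 and ipc_ref_count at +32, the same guest layout as the struct previously declared in svc.h (device_refcount before ipc_refcount), even though the new struct declares ipc_ref_count first. The sketch below is illustrative only; the type aliases and offsetof checks are assumptions used to visualise the layout, not code from the commit.

// Illustrative layout check, not part of this commit.
#include <cstddef>
#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

struct MemoryInfo {
    u64 base_address;     // written at memory_info_address + 0
    u64 size;             // + 8
    u32 state;            // + 16
    u32 attributes;       // + 20
    u32 permission;       // + 24
    u32 ipc_ref_count;    // written at + 32
    u32 device_ref_count; // written at + 28
};

static_assert(offsetof(MemoryInfo, permission) == 24, "natural alignment assumed");
static_assert(sizeof(MemoryInfo) == 0x28,
              "0x24 bytes of fields plus 4 bytes of tail padding; the explicit "
              "Memory::Write32(memory_info_address + 36, 0) zeroes that padding for the guest");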

src/core/hle/kernel/svc.h

@@ -8,22 +8,6 @@
 namespace Kernel {
 
-struct MemoryInfo {
-    u64 base_address;
-    u64 size;
-    u32 type;
-    u32 attributes;
-    u32 permission;
-    u32 device_refcount;
-    u32 ipc_refcount;
-    INSERT_PADDING_WORDS(1);
-};
-static_assert(sizeof(MemoryInfo) == 0x28, "MemoryInfo has incorrect size.");
-
-struct PageInfo {
-    u64 flags;
-};
-
 void CallSVC(u32 immediate);
 
 } // namespace Kernel

src/core/hle/kernel/svc_wrap.h

@@ -7,9 +7,7 @@
 #include "common/common_types.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
-#include "core/hle/kernel/svc.h"
 #include "core/hle/result.h"
-#include "core/memory.h"
 
 namespace Kernel {
@@ -132,6 +130,11 @@ void SvcWrap() {
         func(Param(0), Param(1), static_cast<u32>(Param(2)), static_cast<u32>(Param(3))).raw);
 }
 
+template <ResultCode func(u64, u64, u32, u64)>
+void SvcWrap() {
+    FuncReturn(func(Param(0), Param(1), static_cast<u32>(Param(2)), Param(3)).raw);
+}
+
 template <ResultCode func(u32, u64, u32)>
 void SvcWrap() {
     FuncReturn(func(static_cast<u32>(Param(0)), Param(1), static_cast<u32>(Param(2))).raw);
@@ -191,21 +194,6 @@ void SvcWrap() {
     FuncReturn(retval);
 }
 
-template <ResultCode func(MemoryInfo*, PageInfo*, u64)>
-void SvcWrap() {
-    MemoryInfo memory_info = {};
-    PageInfo page_info = {};
-    u32 retval = func(&memory_info, &page_info, Param(2)).raw;
-
-    Memory::Write64(Param(0), memory_info.base_address);
-    Memory::Write64(Param(0) + 8, memory_info.size);
-    Memory::Write32(Param(0) + 16, memory_info.type);
-    Memory::Write32(Param(0) + 20, memory_info.attributes);
-    Memory::Write32(Param(0) + 24, memory_info.permission);
-
-    FuncReturn(retval);
-}
-
 template <ResultCode func(u32*, u64, u64, u32)>
 void SvcWrap() {
     u32 param_1 = 0;
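Removing the MemoryInfo*/PageInfo* wrapper works because the rewritten QueryProcessMemory now takes plain addresses: VAddr is an alias for u64 and Handle for u32 in yuzu's common types, so the new (u64, u64, u32, u64) overload added above is the one SvcWrap<QueryProcessMemory> instantiates, forwarding guest registers X0-X3 via Param(0) through Param(3). A standalone sketch of that signature match; the aliases and the declaration below are illustrative assumptions, not code from the commit.

#include <cstdint>
#include <type_traits>

using u32 = std::uint32_t;
using u64 = std::uint64_t;
using VAddr = u64;   // assumption: mirrors common_types.h
using Handle = u32;  // assumption: mirrors the kernel's Handle alias
struct ResultCode { u32 raw; };

// Same parameter shape as the rewritten SVC.
ResultCode QueryProcessMemory(VAddr memory_info_address, VAddr page_info_address,
                              Handle process_handle, VAddr address);

static_assert(std::is_same_v<decltype(QueryProcessMemory), ResultCode(u64, u64, u32, u64)>,
              "matches the SvcWrap overload introduced in this commit");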

src/core/hle/kernel/vm_manager.cpp

@@ -25,14 +25,14 @@ static const char* GetMemoryStateName(MemoryState state) {
         "CodeMutable", "Heap",
         "Shared", "Unknown1",
         "ModuleCodeStatic", "ModuleCodeMutable",
-        "IpcBuffer0", "Mapped",
+        "IpcBuffer0", "Stack",
         "ThreadLocal", "TransferMemoryIsolated",
        "TransferMemory", "ProcessMemory",
-        "Unknown2", "IpcBuffer1",
+        "Inaccessible", "IpcBuffer1",
         "IpcBuffer3", "KernelStack",
     };
 
-    return names[static_cast<int>(state)];
+    return names[ToSvcMemoryState(state)];
 }
 
 bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
@@ -302,6 +302,25 @@ ResultCode VMManager::HeapFree(VAddr target, u64 size) {
     return RESULT_SUCCESS;
 }
 
+MemoryInfo VMManager::QueryMemory(VAddr address) const {
+    const auto vma = FindVMA(address);
+    MemoryInfo memory_info{};
+
+    if (IsValidHandle(vma)) {
+        memory_info.base_address = vma->second.base;
+        memory_info.permission = static_cast<u32>(vma->second.permissions);
+        memory_info.size = vma->second.size;
+        memory_info.state = ToSvcMemoryState(vma->second.meminfo_state);
+    } else {
+        memory_info.base_address = address_space_end;
+        memory_info.permission = static_cast<u32>(VMAPermission::None);
+        memory_info.size = 0 - address_space_end;
+        memory_info.state = static_cast<u32>(MemoryState::Inaccessible);
+    }
+
+    return memory_info;
+}
+
 ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state) {
     const auto vma = FindVMA(src_addr);
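One detail of the new error path worth spelling out: for an address the manager does not track, QueryMemory reports a pseudo-region with base_address = address_space_end and size = 0 - address_space_end, i.e. the unsigned wrap-around that spans from the end of the address space to the top of the 64-bit range, tagged MemoryState::Inaccessible. A standalone illustration of that arithmetic follows; the example address_space_end value is an assumption, not taken from the commit.

#include <cstdint>

int main() {
    // Example value for a 39-bit address space; the real member lives in VMManager.
    const std::uint64_t address_space_end = 0x80'0000'0000ULL;
    const std::uint64_t inaccessible_size = 0 - address_space_end; // == 0xFFFF'FF80'0000'0000
    // Unsigned arithmetic wraps, so base + size reaches the end of the 64-bit space,
    // covering every address at or above address_space_end.
    return inaccessible_size == ~address_space_end + 1 ? 0 : 1;
}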

src/core/hle/kernel/vm_manager.h

@@ -43,26 +43,129 @@ enum class VMAPermission : u8 {
     ReadWriteExecute = Read | Write | Execute,
 };
 
-/// Set of values returned in MemoryInfo.state by svcQueryMemory.
+// clang-format off
+/// Represents memory states and any relevant flags, as used by the kernel.
+/// svcQueryMemory interprets these by masking away all but the first eight
+/// bits when storing memory state into a MemoryInfo instance.
 enum class MemoryState : u32 {
-    Unmapped = 0x0,
-    Io = 0x1,
-    Normal = 0x2,
-    CodeStatic = 0x3,
-    CodeMutable = 0x4,
-    Heap = 0x5,
-    Shared = 0x6,
-    ModuleCodeStatic = 0x8,
-    ModuleCodeMutable = 0x9,
-    IpcBuffer0 = 0xA,
-    Mapped = 0xB,
-    ThreadLocal = 0xC,
-    TransferMemoryIsolated = 0xD,
-    TransferMemory = 0xE,
-    ProcessMemory = 0xF,
-    IpcBuffer1 = 0x11,
-    IpcBuffer3 = 0x12,
-    KernelStack = 0x13,
+    Mask = 0xFF,
+
+    FlagProtect = 1U << 8,
+    FlagDebug = 1U << 9,
+    FlagIPC0 = 1U << 10,
+    FlagIPC3 = 1U << 11,
+    FlagIPC1 = 1U << 12,
+    FlagMapped = 1U << 13,
+    FlagCode = 1U << 14,
+    FlagAlias = 1U << 15,
+    FlagModule = 1U << 16,
+    FlagTransfer = 1U << 17,
+    FlagQueryPhysicalAddressAllowed = 1U << 18,
+    FlagSharedDevice = 1U << 19,
+    FlagSharedDeviceAligned = 1U << 20,
+    FlagIPCBuffer = 1U << 21,
+    FlagMemoryPoolAllocated = 1U << 22,
+    FlagMapProcess = 1U << 23,
+    FlagUncached = 1U << 24,
+    FlagCodeMemory = 1U << 25,
+
+    // Convenience flag sets to reduce repetition
+    IPCFlags = FlagIPC0 | FlagIPC3 | FlagIPC1,
+
+    CodeFlags = FlagDebug | IPCFlags | FlagMapped | FlagCode | FlagQueryPhysicalAddressAllowed |
+                FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
+
+    DataFlags = FlagProtect | IPCFlags | FlagMapped | FlagAlias | FlagTransfer |
+                FlagQueryPhysicalAddressAllowed | FlagSharedDevice | FlagSharedDeviceAligned |
+                FlagMemoryPoolAllocated | FlagIPCBuffer | FlagUncached,
+
+    Unmapped = 0x00,
+    Io = 0x01 | FlagMapped,
+    Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed,
+    CodeStatic = 0x03 | CodeFlags | FlagMapProcess,
+    CodeMutable = 0x04 | CodeFlags | FlagMapProcess | FlagCodeMemory,
+    Heap = 0x05 | DataFlags | FlagCodeMemory,
+    Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated,
+    ModuleCodeStatic = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
+    ModuleCodeMutable = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,
+
+    IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated |
+                 IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned,
+
+    Stack = 0x0B | FlagMapped | IPCFlags | FlagQueryPhysicalAddressAllowed |
+            FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
+
+    ThreadLocal = 0x0C | FlagMapped | FlagMemoryPoolAllocated,
+
+    TransferMemoryIsolated = 0x0D | IPCFlags | FlagMapped | FlagQueryPhysicalAddressAllowed |
+                             FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated |
+                             FlagUncached,
+
+    TransferMemory = 0x0E | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed |
+                     FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
+
+    ProcessMemory = 0x0F | FlagIPC3 | FlagIPC1 | FlagMapped | FlagMemoryPoolAllocated,
+
+    // Used to signify an inaccessible or invalid memory region with memory queries
+    Inaccessible = 0x10,
+
+    IpcBuffer1 = 0x11 | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed |
+                 FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
+
+    IpcBuffer3 = 0x12 | FlagIPC3 | FlagMapped | FlagQueryPhysicalAddressAllowed |
+                 FlagSharedDeviceAligned | FlagMemoryPoolAllocated,
+
+    KernelStack = 0x13 | FlagMapped,
 };
+// clang-format on
+
+constexpr MemoryState operator|(MemoryState lhs, MemoryState rhs) {
+    return static_cast<MemoryState>(u32(lhs) | u32(rhs));
+}
+
+constexpr MemoryState operator&(MemoryState lhs, MemoryState rhs) {
+    return static_cast<MemoryState>(u32(lhs) & u32(rhs));
+}
+
+constexpr MemoryState operator^(MemoryState lhs, MemoryState rhs) {
+    return static_cast<MemoryState>(u32(lhs) ^ u32(rhs));
+}
+
+constexpr MemoryState operator~(MemoryState lhs) {
+    return static_cast<MemoryState>(~u32(lhs));
+}
+
+constexpr MemoryState& operator|=(MemoryState& lhs, MemoryState rhs) {
+    lhs = lhs | rhs;
+    return lhs;
+}
+
+constexpr MemoryState& operator&=(MemoryState& lhs, MemoryState rhs) {
+    lhs = lhs & rhs;
+    return lhs;
+}
+
+constexpr MemoryState& operator^=(MemoryState& lhs, MemoryState rhs) {
+    lhs = lhs ^ rhs;
+    return lhs;
+}
+
+constexpr u32 ToSvcMemoryState(MemoryState state) {
+    return static_cast<u32>(state & MemoryState::Mask);
+}
+
+struct MemoryInfo {
+    u64 base_address;
+    u64 size;
+    u32 state;
+    u32 attributes;
+    u32 permission;
+    u32 ipc_ref_count;
+    u32 device_ref_count;
+};
+static_assert(sizeof(MemoryInfo) == 0x28, "MemoryInfo has incorrect size.");
+
+struct PageInfo {
+    u32 flags;
+};
 
 /**
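The new flag bits change how states are represented internally, but ToSvcMemoryState() strips everything above the low byte before a state is reported to the guest, so the values seen through svcQueryMemory stay in line with the old enum. A few illustrative checks, not part of the commit but following directly from the definitions above:

static_assert(ToSvcMemoryState(MemoryState::Stack) == 0x0B,
              "Stack reports the same SVC value the old Mapped entry used");
static_assert(ToSvcMemoryState(MemoryState::Heap) == 0x05,
              "flag bits such as FlagCodeMemory are masked away");
static_assert(ToSvcMemoryState(MemoryState::Inaccessible) == 0x10,
              "Inaccessible is a query-only value with no flags set");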
@@ -186,8 +289,15 @@ public:
     ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
     ResultCode HeapFree(VAddr target, u64 size);
-    ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size,
-                            MemoryState state = MemoryState::Mapped);
+    ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
+
+    /// Queries the memory manager for information about the given address.
+    ///
+    /// @param address The address to query the memory manager about for information.
+    ///
+    /// @return A MemoryInfo instance containing information about the given address.
+    ///
+    MemoryInfo QueryMemory(VAddr address) const;
 
     /**
      * Scans all VMAs and updates the page table range of any that use the given vector as backing