yuzu-emu/yuzu-android (archived)

Merge pull request #2252 from bunnei/move-page-table

core: Move PageTable struct into Common.
bunnei 2019-03-17 14:42:57 -04:00 committed by GitHub
commit 57ca1e3e69
16 changed files with 215 additions and 171 deletions
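In short: PageTable, PageType, SpecialRegion and the MemoryHook types move out of the Memory namespace (core/memory.h, core/memory_hook.h) into Common (common/page_table.h, common/memory_hook.h), and PageTable's constructor now takes the page size in bits (callers pass Memory::PAGE_BITS) rather than an address-space width. A minimal call-site sketch of the renames, assuming only the headers this diff introduces; the wrapper function is illustrative, not from the commit:

// Sketch only, not part of the diff.
#include "common/memory_hook.h"  // was core/memory_hook.h
#include "common/page_table.h"   // PageTable/PageType/SpecialRegion used to live in core/memory.h
#include "core/memory.h"         // still provides Memory::PAGE_BITS

void RenamedTypesExample() {
    Common::PageTable table{Memory::PAGE_BITS};          // was Memory::PageTable table;
    table.Resize(36);                                    // address-space width in bits, as before
    Common::PageType attr = Common::PageType::Unmapped;  // was Memory::PageType::Unmapped
    Common::MemoryHookPointer hook = nullptr;            // was Memory::MemoryHookPointer
}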

@ -92,10 +92,14 @@ add_library(common STATIC
logging/text_formatter.cpp
logging/text_formatter.h
math_util.h
memory_hook.cpp
memory_hook.h
microprofile.cpp
microprofile.h
microprofileui.h
misc.cpp
page_table.cpp
page_table.h
param_package.cpp
param_package.h
quaternion.h

@ -2,10 +2,10 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/memory_hook.h"
#include "common/memory_hook.h"
namespace Memory {
namespace Common {
MemoryHook::~MemoryHook() = default;
} // namespace Memory
} // namespace Common

@ -9,7 +9,7 @@
#include "common/common_types.h"
namespace Memory {
namespace Common {
/**
* Memory hooks have two purposes:
@ -44,4 +44,4 @@ public:
};
using MemoryHookPointer = std::shared_ptr<MemoryHook>;
} // namespace Memory
} // namespace Common

src/common/page_table.cpp (new file, 29 lines)

@ -0,0 +1,29 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/page_table.h"
namespace Common {
PageTable::PageTable(std::size_t page_size_in_bits) : page_size_in_bits{page_size_in_bits} {}
PageTable::~PageTable() = default;
void PageTable::Resize(std::size_t address_space_width_in_bits) {
const std::size_t num_page_table_entries = 1ULL
<< (address_space_width_in_bits - page_size_in_bits);
pointers.resize(num_page_table_entries);
attributes.resize(num_page_table_entries);
// The default is a 39-bit address space, which causes an initial 1GB allocation size. If the
// vector size is subsequently decreased (via resize), the vector will not necessarily
// shrink its underlying allocation, which wastes up to ~800 MB for
// 36-bit titles. Call shrink_to_fit to reduce capacity to what's actually in use.
pointers.shrink_to_fit();
attributes.shrink_to_fit();
}
} // namespace Common
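For reference, a standalone back-of-the-envelope check of the sizes that comment describes, assuming 8-byte host pointers and the PAGE_BITS value of 12 that core/memory.h defines (sketch only, not part of the commit):

#include <cstddef>
#include <cstdio>

int main() {
    constexpr std::size_t page_bits = 12;  // Memory::PAGE_BITS
    for (const std::size_t width : {std::size_t{39}, std::size_t{36}}) {
        const std::size_t entries = std::size_t{1} << (width - page_bits);
        // pointers is a std::vector<u8*> (8 bytes/entry on a 64-bit host),
        // attributes is a std::vector<PageType> (1 byte/entry).
        const double pointers_mib = entries * sizeof(void*) / (1024.0 * 1024.0);
        const double attributes_mib = entries / (1024.0 * 1024.0);
        std::printf("%zu-bit space: %zu entries, pointers ~%.0f MiB, attributes ~%.0f MiB\n",
                    width, entries, pointers_mib, attributes_mib);
    }
    // 39-bit -> 2^27 entries (1024 MiB of pointers); 36-bit -> 2^24 (128 MiB).
    // Resizing down to a 36-bit space without shrink_to_fit would leave that extra
    // capacity allocated, which is the waste the comment above is guarding against.
    return 0;
}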

src/common/page_table.h (new file, 80 lines)

@ -0,0 +1,80 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include <boost/icl/interval_map.hpp>
#include "common/common_types.h"
#include "common/memory_hook.h"
namespace Common {
enum class PageType : u8 {
/// Page is unmapped and should cause an access error.
Unmapped,
/// Page is mapped to regular memory. This is the only type you can get pointers to.
Memory,
/// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
/// invalidation
RasterizerCachedMemory,
/// Page is mapped to an I/O region. Reads from and writes to this page are handled by functions.
Special,
};
struct SpecialRegion {
enum class Type {
DebugHook,
IODevice,
} type;
MemoryHookPointer handler;
bool operator<(const SpecialRegion& other) const {
return std::tie(type, handler) < std::tie(other.type, other.handler);
}
bool operator==(const SpecialRegion& other) const {
return std::tie(type, handler) == std::tie(other.type, other.handler);
}
};
/**
* A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
* mimics the way a real CPU page table works.
*/
struct PageTable {
explicit PageTable(std::size_t page_size_in_bits);
~PageTable();
/**
* Resizes the page table to be able to accommodate enough pages within
* a given address space.
*
* @param address_space_width_in_bits The width of the address space, in bits.
*/
void Resize(std::size_t address_space_width_in_bits);
/**
* Vector of memory pointers backing each page. An entry can only be non-null if the
* corresponding entry in the `attributes` vector is of type `Memory`.
*/
std::vector<u8*> pointers;
/**
* Contains MMIO handlers that back memory regions whose entries in the `attributes` vector are
* of type `Special`.
*/
boost::icl::interval_map<VAddr, std::set<SpecialRegion>> special_regions;
/**
* Vector of fine grained page attributes. If it is set to any value other than `Memory`, then
* the corresponding entry in `pointers` MUST be set to null.
*/
std::vector<PageType> attributes;
const std::size_t page_size_in_bits{};
};
} // namespace Common
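To make the indexing scheme concrete: the memory.cpp hunks further down address both vectors by page number, i.e. vaddr >> PAGE_BITS. Here is a hypothetical helper in that style (TryGetHostPointer is an illustrative name, not something this commit adds), mirroring the fast path of Memory::GetPointer:

#include <cstddef>
#include "common/common_types.h"
#include "common/page_table.h"

// Hypothetical sketch: `attributes` gates the lookup, `pointers` supplies the host base.
u8* TryGetHostPointer(const Common::PageTable& table, VAddr vaddr, std::size_t page_bits) {
    const std::size_t page_index = vaddr >> page_bits;
    if (table.attributes[page_index] != Common::PageType::Memory) {
        return nullptr;  // Unmapped/Special/RasterizerCachedMemory take other code paths
    }
    const VAddr page_mask = (VAddr{1} << page_bits) - 1;
    return table.pointers[page_index] + (vaddr & page_mask);
}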

@ -437,8 +437,6 @@ add_library(core STATIC
loader/xci.h
memory.cpp
memory.h
memory_hook.cpp
memory_hook.h
memory_setup.h
perf_stats.cpp
perf_stats.h

@ -12,7 +12,7 @@
#include "core/arm/exclusive_monitor.h"
#include "core/arm/unicorn/arm_unicorn.h"
namespace Memory {
namespace Common {
struct PageTable;
}
@ -70,7 +70,7 @@ private:
Timing::CoreTiming& core_timing;
DynarmicExclusiveMonitor& exclusive_monitor;
Memory::PageTable* current_page_table = nullptr;
Common::PageTable* current_page_table = nullptr;
};
class DynarmicExclusiveMonitor final : public ExclusiveMonitor {

@ -31,7 +31,7 @@ namespace {
*/
void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_point, u32 priority) {
// Setup page table so we can write to memory
SetCurrentPageTable(&owner_process.VMManager().page_table);
Memory::SetCurrentPageTable(&owner_process.VMManager().page_table);
// Initialize new "main" thread
const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress();

@ -96,7 +96,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
auto* const thread_owner_process = current_thread->GetOwnerProcess();
if (previous_process != thread_owner_process) {
system.Kernel().MakeCurrentProcess(thread_owner_process);
SetCurrentPageTable(&thread_owner_process->VMManager().page_table);
Memory::SetCurrentPageTable(&thread_owner_process->VMManager().page_table);
}
cpu_core.LoadContext(new_thread->GetContext());

@ -7,13 +7,13 @@
#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/memory_hook.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
#include "core/memory_hook.h"
#include "core/memory_setup.h"
namespace Kernel {
@ -177,7 +177,7 @@ ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
MemoryState state,
Memory::MemoryHookPointer mmio_handler) {
Common::MemoryHookPointer mmio_handler) {
// This is the appropriately sized VMA that will turn into our allocation.
CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
VirtualMemoryArea& final_vma = vma_handle->second;
@ -624,7 +624,7 @@ void VMManager::ClearPageTable() {
std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
page_table.special_regions.clear();
std::fill(page_table.attributes.begin(), page_table.attributes.end(),
Memory::PageType::Unmapped);
Common::PageType::Unmapped);
}
VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask,

@ -9,9 +9,10 @@
#include <tuple>
#include <vector>
#include "common/common_types.h"
#include "common/memory_hook.h"
#include "common/page_table.h"
#include "core/hle/result.h"
#include "core/memory.h"
#include "core/memory_hook.h"
namespace FileSys {
enum class ProgramAddressSpaceType : u8;
@ -290,7 +291,7 @@ struct VirtualMemoryArea {
// Settings for type = MMIO
/// Physical address of the register area this VMA maps to.
PAddr paddr = 0;
Memory::MemoryHookPointer mmio_handler = nullptr;
Common::MemoryHookPointer mmio_handler = nullptr;
/// Tests if this area can be merged to the right with `next`.
bool CanBeMergedWith(const VirtualMemoryArea& next) const;
@ -368,7 +369,7 @@ public:
* @param mmio_handler The handler that will implement read and write for this MMIO region.
*/
ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state,
Memory::MemoryHookPointer mmio_handler);
Common::MemoryHookPointer mmio_handler);
/// Unmaps a range of addresses, splitting VMAs as necessary.
ResultCode UnmapRange(VAddr target, u64 size);
@ -509,7 +510,7 @@ public:
/// Each VMManager has its own page table, which is set as the main one when the owning process
/// is scheduled.
Memory::PageTable page_table;
Common::PageTable page_table{Memory::PAGE_BITS};
private:
using VMAIter = VMAMap::iterator;

@ -10,6 +10,7 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/page_table.h"
#include "common/swap.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
@ -23,9 +24,9 @@
namespace Memory {
static PageTable* current_page_table = nullptr;
static Common::PageTable* current_page_table = nullptr;
void SetCurrentPageTable(PageTable* page_table) {
void SetCurrentPageTable(Common::PageTable* page_table) {
current_page_table = page_table;
auto& system = Core::System::GetInstance();
@ -37,34 +38,12 @@ void SetCurrentPageTable(PageTable* page_table) {
}
}
PageTable* GetCurrentPageTable() {
Common::PageTable* GetCurrentPageTable() {
return current_page_table;
}
PageTable::PageTable() = default;
PageTable::PageTable(std::size_t address_space_width_in_bits) {
Resize(address_space_width_in_bits);
}
PageTable::~PageTable() = default;
void PageTable::Resize(std::size_t address_space_width_in_bits) {
const std::size_t num_page_table_entries = 1ULL << (address_space_width_in_bits - PAGE_BITS);
pointers.resize(num_page_table_entries);
attributes.resize(num_page_table_entries);
// The default is a 39-bit address space, which causes an initial 1GB allocation size. If the
// vector size is subsequently decreased (via resize), the vector will not necessarily
// shrink its underlying allocation, which wastes up to ~800 MB for
// 36-bit titles. Call shrink_to_fit to reduce capacity to what's actually in use.
pointers.shrink_to_fit();
attributes.shrink_to_fit();
}
static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, PageType type) {
static void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory,
Common::PageType type) {
LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
(base + size) * PAGE_SIZE);
@ -92,41 +71,47 @@ static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, Pa
}
}
void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target) {
void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
}
void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler) {
void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
Common::MemoryHookPointer mmio_handler) {
ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Special);
auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
SpecialRegion region{SpecialRegion::Type::IODevice, std::move(mmio_handler)};
page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region}));
Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, std::move(mmio_handler)};
page_table.special_regions.add(
std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
}
void UnmapRegion(PageTable& page_table, VAddr base, u64 size) {
void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Unmapped);
auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
page_table.special_regions.erase(interval);
}
void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) {
void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
Common::MemoryHookPointer hook) {
auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
SpecialRegion region{SpecialRegion::Type::DebugHook, std::move(hook)};
page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region}));
Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
page_table.special_regions.add(
std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
}
void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) {
void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
Common::MemoryHookPointer hook) {
auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
SpecialRegion region{SpecialRegion::Type::DebugHook, std::move(hook)};
page_table.special_regions.subtract(std::make_pair(interval, std::set<SpecialRegion>{region}));
Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
page_table.special_regions.subtract(
std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
}
/**
@ -175,15 +160,15 @@ T Read(const VAddr vaddr) {
return value;
}
PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
switch (type) {
case PageType::Unmapped:
case Common::PageType::Unmapped:
LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
return 0;
case PageType::Memory:
case Common::PageType::Memory:
ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
break;
case PageType::RasterizerCachedMemory: {
case Common::PageType::RasterizerCachedMemory: {
auto host_ptr{GetPointerFromVMA(vaddr)};
Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T));
T value;
@ -205,16 +190,16 @@ void Write(const VAddr vaddr, const T data) {
return;
}
PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
switch (type) {
case PageType::Unmapped:
case Common::PageType::Unmapped:
LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
static_cast<u32>(data), vaddr);
return;
case PageType::Memory:
case Common::PageType::Memory:
ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
break;
case PageType::RasterizerCachedMemory: {
case Common::PageType::RasterizerCachedMemory: {
auto host_ptr{GetPointerFromVMA(vaddr)};
Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T));
std::memcpy(host_ptr, &data, sizeof(T));
@ -232,10 +217,10 @@ bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
if (page_pointer)
return true;
if (page_table.attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory)
if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory)
return true;
if (page_table.attributes[vaddr >> PAGE_BITS] != PageType::Special)
if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special)
return false;
return false;
@ -255,7 +240,8 @@ u8* GetPointer(const VAddr vaddr) {
return page_pointer + (vaddr & PAGE_MASK);
}
if (current_page_table->attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) {
if (current_page_table->attributes[vaddr >> PAGE_BITS] ==
Common::PageType::RasterizerCachedMemory) {
return GetPointerFromVMA(vaddr);
}
@ -289,20 +275,20 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
if (cached) {
// Switch page type to cached if now cached
switch (page_type) {
case PageType::Unmapped:
case Common::PageType::Unmapped:
// It is not necessary for a process to have this region mapped into its address
// space, for example, a system module need not have a VRAM mapping.
break;
case PageType::Memory:
page_type = PageType::RasterizerCachedMemory;
case Common::PageType::Memory:
page_type = Common::PageType::RasterizerCachedMemory;
current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
break;
case PageType::RasterizerCachedMemory:
case Common::PageType::RasterizerCachedMemory:
// There can be more than one GPU region mapped per CPU region, so it's common that
// this area is already marked as cached.
break;
@ -312,23 +298,23 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
} else {
// Switch page type to uncached if now uncached
switch (page_type) {
case PageType::Unmapped:
case Common::PageType::Unmapped:
// It is not necessary for a process to have this region mapped into its address
// space, for example, a system module need not have a VRAM mapping.
break;
case PageType::Memory:
case Common::PageType::Memory:
// There can be more than one GPU region mapped per CPU region, so it's common that
// this area is already unmarked as cached.
break;
case PageType::RasterizerCachedMemory: {
case Common::PageType::RasterizerCachedMemory: {
u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
if (pointer == nullptr) {
// It's possible that this function has been called while updating the pagetable
// after unmapping a VMA. In that case the underlying VMA will no longer exist,
// and we should just leave the pagetable entry blank.
page_type = PageType::Unmapped;
page_type = Common::PageType::Unmapped;
} else {
page_type = PageType::Memory;
page_type = Common::PageType::Memory;
current_page_table->pointers[vaddr >> PAGE_BITS] = pointer;
}
break;
@ -370,21 +356,21 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_
const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
switch (page_table.attributes[page_index]) {
case PageType::Unmapped: {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
current_vaddr, src_addr, size);
std::memset(dest_buffer, 0, copy_amount);
break;
}
case PageType::Memory: {
case Common::PageType::Memory: {
DEBUG_ASSERT(page_table.pointers[page_index]);
const u8* src_ptr = page_table.pointers[page_index] + page_offset;
std::memcpy(dest_buffer, src_ptr, copy_amount);
break;
}
case PageType::RasterizerCachedMemory: {
case Common::PageType::RasterizerCachedMemory: {
const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount);
std::memcpy(dest_buffer, host_ptr, copy_amount);
@ -434,20 +420,20 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi
const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
switch (page_table.attributes[page_index]) {
case PageType::Unmapped: {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
current_vaddr, dest_addr, size);
break;
}
case PageType::Memory: {
case Common::PageType::Memory: {
DEBUG_ASSERT(page_table.pointers[page_index]);
u8* dest_ptr = page_table.pointers[page_index] + page_offset;
std::memcpy(dest_ptr, src_buffer, copy_amount);
break;
}
case PageType::RasterizerCachedMemory: {
case Common::PageType::RasterizerCachedMemory: {
const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount);
std::memcpy(host_ptr, src_buffer, copy_amount);
@ -480,20 +466,20 @@ void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std:
const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
switch (page_table.attributes[page_index]) {
case PageType::Unmapped: {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
current_vaddr, dest_addr, size);
break;
}
case PageType::Memory: {
case Common::PageType::Memory: {
DEBUG_ASSERT(page_table.pointers[page_index]);
u8* dest_ptr = page_table.pointers[page_index] + page_offset;
std::memset(dest_ptr, 0, copy_amount);
break;
}
case PageType::RasterizerCachedMemory: {
case Common::PageType::RasterizerCachedMemory: {
const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount);
std::memset(host_ptr, 0, copy_amount);
@ -522,20 +508,20 @@ void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr,
const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
switch (page_table.attributes[page_index]) {
case PageType::Unmapped: {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
current_vaddr, src_addr, size);
ZeroBlock(process, dest_addr, copy_amount);
break;
}
case PageType::Memory: {
case Common::PageType::Memory: {
DEBUG_ASSERT(page_table.pointers[page_index]);
const u8* src_ptr = page_table.pointers[page_index] + page_offset;
WriteBlock(process, dest_addr, src_ptr, copy_amount);
break;
}
case PageType::RasterizerCachedMemory: {
case Common::PageType::RasterizerCachedMemory: {
const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount);
WriteBlock(process, dest_addr, host_ptr, copy_amount);

@ -10,7 +10,10 @@
#include <vector>
#include <boost/icl/interval_map.hpp>
#include "common/common_types.h"
#include "core/memory_hook.h"
namespace Common {
struct PageTable;
}
namespace Kernel {
class Process;
@ -26,71 +29,6 @@ constexpr std::size_t PAGE_BITS = 12;
constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS;
constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
enum class PageType : u8 {
/// Page is unmapped and should cause an access error.
Unmapped,
/// Page is mapped to regular memory. This is the only type you can get pointers to.
Memory,
/// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
/// invalidation
RasterizerCachedMemory,
/// Page is mapped to an I/O region. Reads from and writes to this page are handled by functions.
Special,
};
struct SpecialRegion {
enum class Type {
DebugHook,
IODevice,
} type;
MemoryHookPointer handler;
bool operator<(const SpecialRegion& other) const {
return std::tie(type, handler) < std::tie(other.type, other.handler);
}
bool operator==(const SpecialRegion& other) const {
return std::tie(type, handler) == std::tie(other.type, other.handler);
}
};
/**
* A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
* mimics the way a real CPU page table works.
*/
struct PageTable {
explicit PageTable();
explicit PageTable(std::size_t address_space_width_in_bits);
~PageTable();
/**
* Resizes the page table to be able to accommodate enough pages within
* a given address space.
*
* @param address_space_width_in_bits The width of the address space, in bits.
*/
void Resize(std::size_t address_space_width_in_bits);
/**
* Vector of memory pointers backing each page. An entry can only be non-null if the
* corresponding entry in the `attributes` vector is of type `Memory`.
*/
std::vector<u8*> pointers;
/**
* Contains MMIO handlers that back memory regions whose entries in the `attributes` vector are
* of type `Special`.
*/
boost::icl::interval_map<VAddr, std::set<SpecialRegion>> special_regions;
/**
* Vector of fine grained page attributes. If it is set to any value other than `Memory`, then
* the corresponding entry in `pointers` MUST be set to null.
*/
std::vector<PageType> attributes;
};
/// Virtual user-space memory regions
enum : VAddr {
/// Read-only page containing kernel and system configuration values.
@ -116,8 +54,8 @@ enum : VAddr {
};
/// Currently active page table
void SetCurrentPageTable(PageTable* page_table);
PageTable* GetCurrentPageTable();
void SetCurrentPageTable(Common::PageTable* page_table);
Common::PageTable* GetCurrentPageTable();
/// Determines if the given VAddr is valid for the specified process.
bool IsValidVirtualAddress(const Kernel::Process& process, VAddr vaddr);
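The process and scheduler hunks earlier in this diff are the intended users of this pair; a minimal sketch of that pattern (SwitchAddressSpace is an illustrative name, not from the commit):

#include "common/page_table.h"
#include "core/memory.h"

// Illustrative only: make `next` the active page table, the way SetupMainThread and
// Scheduler::SwitchContext now do via Memory::SetCurrentPageTable.
void SwitchAddressSpace(Common::PageTable& next) {
    if (Memory::GetCurrentPageTable() != &next) {
        Memory::SetCurrentPageTable(&next);
    }
}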

@ -5,7 +5,11 @@
#pragma once
#include "common/common_types.h"
#include "core/memory_hook.h"
#include "common/memory_hook.h"
namespace Common {
struct PageTable;
}
namespace Memory {
@ -17,7 +21,7 @@ namespace Memory {
* @param size The amount of bytes to map. Must be page-aligned.
* @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
*/
void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target);
void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target);
/**
* Maps a region of the emulated process address space as an IO region.
@ -26,11 +30,14 @@ void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target);
* @param size The amount of bytes to map. Must be page-aligned.
* @param mmio_handler The handler that backs the mapping.
*/
void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler);
void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
Common::MemoryHookPointer mmio_handler);
void UnmapRegion(PageTable& page_table, VAddr base, u64 size);
void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size);
void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook);
void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook);
void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
Common::MemoryHookPointer hook);
void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
Common::MemoryHookPointer hook);
} // namespace Memory
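A hedged usage sketch of the declarations above, with illustrative addresses and a hypothetical caller name; nothing here beyond the signatures themselves comes from the commit:

#include <vector>
#include "common/page_table.h"
#include "core/memory.h"
#include "core/memory_setup.h"

// Illustrative only: build a table for a 36-bit guest address space, map 2 MiB of
// host memory at a page-aligned guest address, then unmap it again.
void MapAndUnmapExample() {
    Common::PageTable page_table{Memory::PAGE_BITS};
    page_table.Resize(36);

    std::vector<u8> backing(0x200000);  // 2 MiB, a page-aligned size
    Memory::MapMemoryRegion(page_table, 0x10000000, backing.size(), backing.data());
    // ... accesses resolved through this table now land in `backing` ...
    Memory::UnmapRegion(page_table, 0x10000000, backing.size());
}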

@ -4,6 +4,7 @@
#include <algorithm>
#include "common/page_table.h"
#include "core/core.h"
#include "core/hle/kernel/process.h"
#include "core/memory.h"
@ -22,7 +23,7 @@ TestEnvironment::TestEnvironment(bool mutable_memory_)
std::fill(page_table->pointers.begin(), page_table->pointers.end(), nullptr);
page_table->special_regions.clear();
std::fill(page_table->attributes.begin(), page_table->attributes.end(),
Memory::PageType::Unmapped);
Common::PageType::Unmapped);
Memory::MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory);
Memory::MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory);

@ -9,10 +9,10 @@
#include <vector>
#include "common/common_types.h"
#include "common/memory_hook.h"
#include "core/hle/kernel/kernel.h"
#include "core/memory_hook.h"
namespace Memory {
namespace Common {
struct PageTable;
}
@ -58,7 +58,7 @@ public:
private:
friend struct TestMemory;
struct TestMemory final : Memory::MemoryHook {
struct TestMemory final : Common::MemoryHook {
explicit TestMemory(TestEnvironment* env_) : env(env_) {}
TestEnvironment* env;
@ -86,7 +86,7 @@ private:
bool mutable_memory;
std::shared_ptr<TestMemory> test_memory;
std::vector<WriteRecord> write_records;
Memory::PageTable* page_table = nullptr;
Common::PageTable* page_table = nullptr;
Kernel::KernelCore kernel;
};