yuzu-emu / yuzu

core/memory: Migrate over memory mapping functions to the new Memory class

Migrates all of the direct mapping facilities over to the new memory
class. In the process, this also obsoletes the need for memory_setup.h,
so we can remove it entirely from the project.
Lioncash 2019-11-26 13:09:12 -05:00
parent 4c2ed2706e
commit 323680e5ad
6 changed files with 187 additions and 135 deletions
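In practical terms, call sites move from the free functions in the Memory namespace to instance methods on the Memory object owned by Core::System. A minimal before/after sketch of a caller, mirroring the vm_manager.cpp change below (the surrounding code is illustrative, not part of the commit):

```cpp
// Before: free functions that reach for global state internally.
Memory::MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
Memory::UnmapRegion(page_table, vma.base, vma.size);

// After: the Memory instance comes from the Core::System reference the caller already holds.
auto& memory = system.Memory();
memory.MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
memory.UnmapRegion(page_table, vma.base, vma.size);
```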

core/CMakeLists.txt

@@ -509,7 +509,6 @@ add_library(core STATIC
     memory/dmnt_cheat_vm.h
     memory.cpp
     memory.h
-    memory_setup.h
     perf_stats.cpp
     perf_stats.h
     reporter.cpp

core/hle/kernel/vm_manager.cpp

@@ -16,7 +16,6 @@
 #include "core/hle/kernel/resource_limit.h"
 #include "core/hle/kernel/vm_manager.h"
 #include "core/memory.h"
-#include "core/memory_setup.h"

 namespace Kernel {
 namespace {
@@ -786,19 +785,21 @@ void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryAre
 }

 void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
+    auto& memory = system.Memory();
+
     switch (vma.type) {
     case VMAType::Free:
-        Memory::UnmapRegion(page_table, vma.base, vma.size);
+        memory.UnmapRegion(page_table, vma.base, vma.size);
         break;
     case VMAType::AllocatedMemoryBlock:
-        Memory::MapMemoryRegion(page_table, vma.base, vma.size,
-                                vma.backing_block->data() + vma.offset);
+        memory.MapMemoryRegion(page_table, vma.base, vma.size,
+                               vma.backing_block->data() + vma.offset);
         break;
     case VMAType::BackingMemory:
-        Memory::MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
+        memory.MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
         break;
     case VMAType::MMIO:
-        Memory::MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler);
+        memory.MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler);
         break;
     }
 }
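The shape of this change is the interesting part of the migration: instead of calling into a global Memory namespace, kernel-side code asks the Core::System it already references for its Memory instance. A rough sketch of that dependency flow, with a hypothetical class standing in for VMManager (only system.Memory() and the mapping calls come from the diff):

```cpp
// Hypothetical owner class; illustrates the dependency flow, not actual yuzu code.
class PageTableOwner {
public:
    explicit PageTableOwner(Core::System& system_) : system{system_} {}

    void Remap(Common::PageTable& page_table, VAddr base, u64 size, u8* backing) {
        // Fetch the Memory instance from the injected Core::System instead of a global.
        auto& memory = system.Memory();
        memory.UnmapRegion(page_table, base, size);
        memory.MapMemoryRegion(page_table, base, size, backing);
    }

private:
    Core::System& system;
};
```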

core/memory.cpp

@@ -17,7 +17,6 @@
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/vm_manager.h"
 #include "core/memory.h"
-#include "core/memory_setup.h"
 #include "video_core/gpu.h"

 namespace Memory {
@@ -30,12 +29,125 @@ static Common::PageTable* current_page_table = nullptr;
 struct Memory::Impl {
     explicit Impl(Core::System& system_) : system{system_} {}

+    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
+        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
+    }

+    void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
+                     Common::MemoryHookPointer mmio_handler) {
+        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr,
+                 Common::PageType::Special);

+        const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+        const Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice,
+                                           std::move(mmio_handler)};
+        page_table.special_regions.add(
+            std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
+    }

+    void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
+        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr,
+                 Common::PageType::Unmapped);

+        const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+        page_table.special_regions.erase(interval);
+    }

+    void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                      Common::MemoryHookPointer hook) {
+        const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+        const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
+        page_table.special_regions.add(
+            std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
+    }

+    void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                         Common::MemoryHookPointer hook) {
+        const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+        const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
+        page_table.special_regions.subtract(
+            std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
+    }

+    /**
+     * Maps a region of pages as a specific type.
+     *
+     * @param page_table The page table to use to perform the mapping.
+     * @param base The base address to begin mapping at.
+     * @param size The total size of the range in bytes.
+     * @param memory The memory to map.
+     * @param type The page type to map the memory as.
+     */
+    void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory,
+                  Common::PageType type) {
+        LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
+                  (base + size) * PAGE_SIZE);

+        // During boot, current_page_table might not be set yet, in which case we need not flush
+        if (system.IsPoweredOn()) {
+            auto& gpu = system.GPU();
+            for (u64 i = 0; i < size; i++) {
+                const auto page = base + i;
+                if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) {
+                    gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
+                }
+            }
+        }

+        const VAddr end = base + size;
+        ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
+                   base + page_table.pointers.size());

+        std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type);

+        if (memory == nullptr) {
+            std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end,
+                      memory);
+        } else {
+            while (base != end) {
+                page_table.pointers[base] = memory;

+                base += 1;
+                memory += PAGE_SIZE;
+            }
+        }
+    }

     Core::System& system;
 };
 Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {}

 Memory::~Memory() = default;

+void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
+    impl->MapMemoryRegion(page_table, base, size, target);
+}

+void Memory::MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
+                         Common::MemoryHookPointer mmio_handler) {
+    impl->MapIoRegion(page_table, base, size, std::move(mmio_handler));
+}

+void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
+    impl->UnmapRegion(page_table, base, size);
+}

+void Memory::AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                          Common::MemoryHookPointer hook) {
+    impl->AddDebugHook(page_table, base, size, std::move(hook));
+}

+void Memory::RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                             Common::MemoryHookPointer hook) {
+    impl->RemoveDebugHook(page_table, base, size, std::move(hook));
+}

 void SetCurrentPageTable(Kernel::Process& process) {
     current_page_table = &process.VMManager().page_table;
@@ -48,83 +160,6 @@ void SetCurrentPageTable(Kernel::Process& process) {
     system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
 }

-static void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory,
-                     Common::PageType type) {
-    LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
-              (base + size) * PAGE_SIZE);

-    // During boot, current_page_table might not be set yet, in which case we need not flush
-    if (Core::System::GetInstance().IsPoweredOn()) {
-        auto& gpu = Core::System::GetInstance().GPU();
-        for (u64 i = 0; i < size; i++) {
-            const auto page = base + i;
-            if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) {
-                gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
-            }
-        }
-    }

-    VAddr end = base + size;
-    ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
-               base + page_table.pointers.size());

-    std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type);

-    if (memory == nullptr) {
-        std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory);
-    } else {
-        while (base != end) {
-            page_table.pointers[base] = memory;

-            base += 1;
-            memory += PAGE_SIZE;
-        }
-    }
-}

-void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
-}

-void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
-                 Common::MemoryHookPointer mmio_handler) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Special);

-    auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
-    Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, std::move(mmio_handler)};
-    page_table.special_regions.add(
-        std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
-}

-void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Unmapped);

-    auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
-    page_table.special_regions.erase(interval);
-}

-void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
-                  Common::MemoryHookPointer hook) {
-    auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
-    Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
-    page_table.special_regions.add(
-        std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
-}

-void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
-                     Common::MemoryHookPointer hook) {
-    auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
-    Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
-    page_table.special_regions.subtract(
-        std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
-}

 /**
  * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
  * using a VMA from the current process
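As a quick reference for the arithmetic above: the public entry points take byte addresses and byte sizes, assert page alignment via PAGE_MASK, and hand MapPages page indices and page counts. A worked sketch of those conversions, assuming the conventional 4 KiB page (PAGE_BITS = 12); the constants and addresses here are illustrative, not taken from the diff:

```cpp
#include <cstdint>

// Illustrative constants; yuzu defines PAGE_BITS/PAGE_SIZE/PAGE_MASK in its own headers.
constexpr std::uint64_t PAGE_BITS = 12;
constexpr std::uint64_t PAGE_SIZE = std::uint64_t{1} << PAGE_BITS; // 0x1000
constexpr std::uint64_t PAGE_MASK = PAGE_SIZE - 1;

int main() {
    const std::uint64_t base = 0x10000000; // byte address; (base & PAGE_MASK) == 0, so the ASSERT_MSG holds
    const std::uint64_t size = 0x200000;   // 2 MiB, also page-aligned

    const std::uint64_t first_page = base / PAGE_SIZE;        // 0x10000: index into pointers/attributes
    const std::uint64_t page_count = size / PAGE_SIZE;        // 0x200 pages to fill
    const std::uint64_t flush_addr = first_page << PAGE_BITS; // back to a byte address, as in the GPU flush loop
    return (first_page == 0x10000 && page_count == 0x200 && flush_addr == base) ? 0 : 1;
}
```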

core/memory.h

@@ -5,8 +5,14 @@
 #pragma once

 #include <cstddef>
+#include <memory>
 #include <string>

 #include "common/common_types.h"
+#include "common/memory_hook.h"

+namespace Common {
+struct PageTable;
+}

 namespace Core {
 class System;
@@ -52,6 +58,59 @@
     Memory(Memory&&) = default;
     Memory& operator=(Memory&&) = default;

+    /**
+     * Maps an allocated buffer onto a region of the emulated process address space.
+     *
+     * @param page_table The page table of the emulated process.
+     * @param base The address to start mapping at. Must be page-aligned.
+     * @param size The amount of bytes to map. Must be page-aligned.
+     * @param target Buffer with the memory backing the mapping. Must be of length at least
+     *               `size`.
+     */
+    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target);

+    /**
+     * Maps a region of the emulated process address space as a IO region.
+     *
+     * @param page_table The page table of the emulated process.
+     * @param base The address to start mapping at. Must be page-aligned.
+     * @param size The amount of bytes to map. Must be page-aligned.
+     * @param mmio_handler The handler that backs the mapping.
+     */
+    void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
+                     Common::MemoryHookPointer mmio_handler);

+    /**
+     * Unmaps a region of the emulated process address space.
+     *
+     * @param page_table The page table of the emulated process.
+     * @param base The address to begin unmapping at.
+     * @param size The amount of bytes to unmap.
+     */
+    void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size);

+    /**
+     * Adds a memory hook to intercept reads and writes to given region of memory.
+     *
+     * @param page_table The page table of the emulated process
+     * @param base The starting address to apply the hook to.
+     * @param size The size of the memory region to apply the hook to, in bytes.
+     * @param hook The hook to apply to the region of memory.
+     */
+    void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                      Common::MemoryHookPointer hook);

+    /**
+     * Removes a memory hook from a given range of memory.
+     *
+     * @param page_table The page table of the emulated process.
+     * @param base The starting address to remove the hook from.
+     * @param size The size of the memory region to remove the hook from, in bytes.
+     * @param hook The hook to remove from the specified region of memory.
+     */
+    void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                         Common::MemoryHookPointer hook);

 private:
     struct Impl;
     std::unique_ptr<Impl> impl;
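Putting the declarations together, a rough usage sketch from the point of view of code that already holds a Core::System& (the buffer, addresses, process reference, and hook variable below are placeholders; only the member functions themselves come from this header):

```cpp
std::vector<u8> backing(0x10000);                                // placeholder backing store, page-aligned size
Common::PageTable& page_table = process.VMManager().page_table;  // as in the kernel/test code in this commit
auto& memory = system.Memory();

memory.MapMemoryRegion(page_table, 0x10000000, backing.size(), backing.data());
memory.AddDebugHook(page_table, 0x10000000, 0x1000, hook);       // hook: some Common::MemoryHookPointer
memory.RemoveDebugHook(page_table, 0x10000000, 0x1000, hook);
memory.UnmapRegion(page_table, 0x10000000, backing.size());
```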

core/memory_setup.h (deleted)

@@ -1,43 +0,0 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.

-#pragma once

-#include "common/common_types.h"
-#include "common/memory_hook.h"

-namespace Common {
-struct PageTable;
-}

-namespace Memory {

-/**
- * Maps an allocated buffer onto a region of the emulated process address space.
- *
- * @param page_table The page table of the emulated process.
- * @param base The address to start mapping at. Must be page-aligned.
- * @param size The amount of bytes to map. Must be page-aligned.
- * @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
- */
-void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target);

-/**
- * Maps a region of the emulated process address space as a IO region.
- * @param page_table The page table of the emulated process.
- * @param base The address to start mapping at. Must be page-aligned.
- * @param size The amount of bytes to map. Must be page-aligned.
- * @param mmio_handler The handler that backs the mapping.
- */
-void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
-                 Common::MemoryHookPointer mmio_handler);

-void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size);

-void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
-                  Common::MemoryHookPointer hook);
-void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
-                     Common::MemoryHookPointer hook);

-} // namespace Memory

tests/core/arm/arm_test_common.cpp

@@ -8,7 +8,6 @@
 #include "core/core.h"
 #include "core/hle/kernel/process.h"
 #include "core/memory.h"
-#include "core/memory_setup.h"
 #include "tests/core/arm/arm_test_common.h"

 namespace ArmTests {
@@ -16,8 +15,9 @@ namespace ArmTests {
 TestEnvironment::TestEnvironment(bool mutable_memory_)
     : mutable_memory(mutable_memory_),
       test_memory(std::make_shared<TestMemory>(this)), kernel{Core::System::GetInstance()} {
-    auto process = Kernel::Process::Create(Core::System::GetInstance(), "",
-                                           Kernel::Process::ProcessType::Userland);
+    auto& system = Core::System::GetInstance();

+    auto process = Kernel::Process::Create(system, "", Kernel::Process::ProcessType::Userland);
     page_table = &process->VMManager().page_table;

     std::fill(page_table->pointers.begin(), page_table->pointers.end(), nullptr);
@@ -25,15 +25,16 @@ TestEnvironment::TestEnvironment(bool mutable_memory_)
     std::fill(page_table->attributes.begin(), page_table->attributes.end(),
               Common::PageType::Unmapped);

-    Memory::MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory);
-    Memory::MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory);
+    system.Memory().MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory);
+    system.Memory().MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory);

     kernel.MakeCurrentProcess(process.get());
 }

 TestEnvironment::~TestEnvironment() {
-    Memory::UnmapRegion(*page_table, 0x80000000, 0x80000000);
-    Memory::UnmapRegion(*page_table, 0x00000000, 0x80000000);
+    auto& system = Core::System::GetInstance();
+    system.Memory().UnmapRegion(*page_table, 0x80000000, 0x80000000);
+    system.Memory().UnmapRegion(*page_table, 0x00000000, 0x80000000);
 }

 void TestEnvironment::SetMemory64(VAddr vaddr, u64 value) {