yuzu-emu / yuzu-android (Archived) — 1 Star · 0 Forks

core: Make use of fastmem

This commit is contained in:
Markus Wick 2020-01-19 01:49:30 +01:00
parent 740edacc8d
commit 621f3f5f47
7 changed files with 31 additions and 9 deletions

externals/dynarmic (vendored) — 2 changes

@@ -1 +1 @@
Subproject commit 828959caedfac2d456a0c877fda4612e35fffc03 Subproject commit 0c12614d1a7a72d778609920dde96a4c63074ece

View File

@@ -111,6 +111,8 @@ struct PageTable {
VirtualBuffer<u64> backing_addr; VirtualBuffer<u64> backing_addr;
size_t current_address_space_width_in_bits; size_t current_address_space_width_in_bits;
u8* fastmem_arena;
}; };
} // namespace Common } // namespace Common

View File

@@ -128,6 +128,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
if (page_table) { if (page_table) {
config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>( config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
page_table->pointers.data()); page_table->pointers.data());
config.fastmem_pointer = page_table->fastmem_arena;
} }
config.absolute_offset_page_table = true; config.absolute_offset_page_table = true;
config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS; config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;

View File

@@ -160,6 +160,10 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
config.absolute_offset_page_table = true; config.absolute_offset_page_table = true;
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128; config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
config.only_detect_misalignment_via_page_table_on_page_boundary = true; config.only_detect_misalignment_via_page_table_on_page_boundary = true;
config.fastmem_pointer = page_table->fastmem_arena;
config.fastmem_address_space_bits = address_space_bits;
config.silently_mirror_fastmem = false;
} }
// Multi-process state // Multi-process state

View File

@@ -6,7 +6,7 @@
namespace Core { namespace Core {
DeviceMemory::DeviceMemory() : buffer{DramMemoryMap::Size} {} DeviceMemory::DeviceMemory() : buffer{DramMemoryMap::Size, 1ULL << 39} {}
DeviceMemory::~DeviceMemory() = default; DeviceMemory::~DeviceMemory() = default;
} // namespace Core } // namespace Core

View File

@@ -5,7 +5,7 @@
#pragma once #pragma once
#include "common/common_types.h" #include "common/common_types.h"
#include "common/virtual_buffer.h" #include "common/host_memory.h"
namespace Core { namespace Core {
@@ -21,27 +21,30 @@ enum : u64 {
}; };
}; // namespace DramMemoryMap }; // namespace DramMemoryMap
class DeviceMemory : NonCopyable { class DeviceMemory {
public: public:
explicit DeviceMemory(); explicit DeviceMemory();
~DeviceMemory(); ~DeviceMemory();
DeviceMemory& operator=(const DeviceMemory&) = delete;
DeviceMemory(const DeviceMemory&) = delete;
template <typename T> template <typename T>
PAddr GetPhysicalAddr(const T* ptr) const { PAddr GetPhysicalAddr(const T* ptr) const {
return (reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(buffer.data())) + return (reinterpret_cast<uintptr_t>(ptr) -
reinterpret_cast<uintptr_t>(buffer.BackingBasePointer())) +
DramMemoryMap::Base; DramMemoryMap::Base;
} }
u8* GetPointer(PAddr addr) { u8* GetPointer(PAddr addr) {
return buffer.data() + (addr - DramMemoryMap::Base); return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
} }
const u8* GetPointer(PAddr addr) const { const u8* GetPointer(PAddr addr) const {
return buffer.data() + (addr - DramMemoryMap::Base); return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
} }
private: Common::HostMemory buffer;
Common::VirtualBuffer<u8> buffer;
}; };
} // namespace Core } // namespace Core

View File

@@ -12,6 +12,7 @@
#include "common/common_types.h" #include "common/common_types.h"
#include "common/logging/log.h" #include "common/logging/log.h"
#include "common/page_table.h" #include "common/page_table.h"
#include "common/settings.h"
#include "common/swap.h" #include "common/swap.h"
#include "core/arm/arm_interface.h" #include "core/arm/arm_interface.h"
#include "core/core.h" #include "core/core.h"
@@ -32,6 +33,7 @@ struct Memory::Impl {
void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) { void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
current_page_table = &process.PageTable().PageTableImpl(); current_page_table = &process.PageTable().PageTableImpl();
current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth(); const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth();
@@ -41,13 +43,19 @@ struct Memory::Impl {
void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
ASSERT_MSG(target >= DramMemoryMap::Base && target < DramMemoryMap::End,
"Out of bounds target: {:016X}", target);
MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
} }
void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped); MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
system.DeviceMemory().buffer.Unmap(base, size);
} }
bool IsValidVirtualAddress(const Kernel::KProcess& process, const VAddr vaddr) const { bool IsValidVirtualAddress(const Kernel::KProcess& process, const VAddr vaddr) const {
@@ -466,6 +474,10 @@ struct Memory::Impl {
if (vaddr == 0) { if (vaddr == 0) {
return; return;
} }
const bool is_read_enable = Settings::IsGPULevelHigh() || !cached;
system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
// Iterate over a contiguous CPU address space, which corresponds to the specified GPU // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
// address space, marking the region as un/cached. The region is marked un/cached at a // address space, marking the region as un/cached. The region is marked un/cached at a
// granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size