yuzu-emu/yuzu-android

video_core: memory_manager: Updates for Common::PageTable changes.

bunnei 2020-04-08 22:51:31 -04:00
parent f7c03610e1
commit 32fc2aae3c
2 changed files with 34 additions and 67 deletions

src/video_core/memory_manager.cpp

@@ -6,8 +6,8 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/vm_manager.h"
 #include "core/memory.h"
 #include "video_core/gpu.h"
 #include "video_core/memory_manager.h"
@@ -17,10 +17,7 @@ namespace Tegra {

 MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
     : rasterizer{rasterizer}, system{system} {
-    std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
-    std::fill(page_table.attributes.begin(), page_table.attributes.end(),
-              Common::PageType::Unmapped);
-    page_table.Resize(address_space_width);
+    page_table.Resize(address_space_width, page_bits, false);

     // Initialize the map with a single free region covering the entire managed space.
     VirtualMemoryArea initial_vma;
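The constructor no longer needs to std::fill fixed-size pointer/attribute arrays: the reworked Common::PageTable is sized at runtime, and the extra Resize() arguments pass the page size and (presumably) whether per-page attributes are tracked. A minimal sketch of the idea, with names and the Resize() signature assumed for illustration rather than taken from yuzu:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Minimal stand-in for a resizable software page table. The real
    // Common::PageTable differs; this only illustrates why the explicit
    // std::fill calls in the constructor become unnecessary.
    struct PageTableSketch {
        std::vector<std::uint8_t*> pointers;     // host pointer per page, nullptr = unmapped
        std::vector<std::uint64_t> backing_addr; // backing CPU address per page

        void Resize(std::size_t address_space_bits, std::size_t page_bits) {
            const std::size_t num_pages = std::size_t{1} << (address_space_bits - page_bits);
            pointers.assign(num_pages, nullptr); // assign() leaves every page unmapped
            backing_addr.assign(num_pages, 0);
        }
    };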
@@ -55,9 +52,9 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {

     MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
     ASSERT(system.CurrentProcess()
-               ->VMManager()
-               .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped,
-                                   Kernel::MemoryAttribute::DeviceMapped)
+               ->PageTable()
+               .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared,
+                                   Kernel::Memory::MemoryAttribute::DeviceShared)
               .IsSuccess());

     return gpu_addr;
@@ -70,9 +67,9 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size)

     MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
     ASSERT(system.CurrentProcess()
-               ->VMManager()
-               .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped,
-                                   Kernel::MemoryAttribute::DeviceMapped)
+               ->PageTable()
+               .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared,
+                                   Kernel::Memory::MemoryAttribute::DeviceShared)
               .IsSuccess());

     return gpu_addr;
 }
@@ -89,9 +86,10 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {

     UnmapRange(gpu_addr, aligned_size);
     ASSERT(system.CurrentProcess()
-               ->VMManager()
-               .SetMemoryAttribute(cpu_addr.value(), size, Kernel::MemoryAttribute::DeviceMapped,
-                                   Kernel::MemoryAttribute::None)
+               ->PageTable()
+               .SetMemoryAttribute(cpu_addr.value(), size,
+                                   Kernel::Memory::MemoryAttribute::DeviceShared,
+                                   Kernel::Memory::MemoryAttribute::None)
               .IsSuccess());

     return gpu_addr;
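Across these three hunks the CPU-side bookkeeping moves from VMManager and MemoryAttribute::DeviceMapped to the kernel Memory::PageTable and MemoryAttribute::DeviceShared: mapping a buffer marks the CPU range as device-shared, and unmapping clears it again through the same mask. A hedged sketch of the general mask/value attribute-update pattern such calls follow (a simplification for illustration, not the kernel's implementation):

    #include <cstdint>

    // Hypothetical illustration of SetMemoryAttribute(addr, size, mask, value)-style
    // updates: the bits selected by `mask` are replaced with the matching bits of
    // `value`, leaving all other attribute bits untouched.
    enum class MemoryAttribute : std::uint32_t {
        None = 0,
        DeviceShared = 1U << 0,
        Locked = 1U << 1,
    };

    constexpr std::uint32_t Bits(MemoryAttribute a) {
        return static_cast<std::uint32_t>(a);
    }

    constexpr std::uint32_t UpdateAttribute(std::uint32_t current, MemoryAttribute mask,
                                            MemoryAttribute value) {
        return (current & ~Bits(mask)) | (Bits(value) & Bits(mask));
    }

    // Map:   UpdateAttribute(attrs, MemoryAttribute::DeviceShared, MemoryAttribute::DeviceShared)
    // Unmap: UpdateAttribute(attrs, MemoryAttribute::DeviceShared, MemoryAttribute::None)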
@@ -147,16 +145,8 @@ T MemoryManager::Read(GPUVAddr addr) const {
         return value;
     }

-    switch (page_table.attributes[addr >> page_bits]) {
-    case Common::PageType::Unmapped:
-        LOG_ERROR(HW_GPU, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, addr);
-        return 0;
-    case Common::PageType::Memory:
-        ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", addr);
-        break;
-    default:
-        UNREACHABLE();
-    }
+    UNREACHABLE();
+
     return {};
 }
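With the attribute array gone, Read() keeps only the fast path: look up the host pointer for the page and memcpy the value out; anything else is now treated as unreachable. A self-contained sketch of that fast path, with the page table layout and the page_bits/page_mask parameters assumed to mirror the surrounding (unshown) code:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Sketch of the pointer-table fast path the remaining code relies on.
    template <typename T>
    T ReadSketch(const std::vector<std::uint8_t*>& pointers, std::uint64_t addr,
                 std::uint64_t page_bits, std::uint64_t page_mask) {
        if (const std::uint8_t* page = pointers[addr >> page_bits]) {
            T value;
            std::memcpy(&value, page + (addr & page_mask), sizeof(T)); // typed load from host memory
            return value;
        }
        // No per-page attributes any more, so there is no dedicated
        // "unmapped" error path here; the diff marks this case unreachable.
        return T{};
    }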
@@ -173,17 +163,7 @@ void MemoryManager::Write(GPUVAddr addr, T data) {
         return;
     }

-    switch (page_table.attributes[addr >> page_bits]) {
-    case Common::PageType::Unmapped:
-        LOG_ERROR(HW_GPU, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
-                  static_cast<u32>(data), addr);
-        return;
-    case Common::PageType::Memory:
-        ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", addr);
-        break;
-    default:
-        UNREACHABLE();
-    }
+    UNREACHABLE();
 }

 template u8 MemoryManager::Read<u8>(GPUVAddr addr) const;
@@ -249,18 +229,11 @@ void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::s
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};

-        switch (page_table.attributes[page_index]) {
-        case Common::PageType::Memory: {
-            const VAddr src_addr{page_table.backing_addr[page_index] + page_offset};
-            // Flush must happen on the rasterizer interface, such that memory is always synchronous
-            // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
-            rasterizer.FlushRegion(src_addr, copy_amount);
-            memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
-            break;
-        }
-        default:
-            UNREACHABLE();
-        }
+        const VAddr src_addr{page_table.backing_addr[page_index] + page_offset};
+        // Flush must happen on the rasterizer interface, such that memory is always synchronous
+        // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
+        rasterizer.FlushRegion(src_addr, copy_amount);
+        memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);

         page_index++;
         page_offset = 0;
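ReadBlock still walks the range page by page, but every page is now assumed to be backed: compute the backing CPU address, flush the rasterizer range so GPU-side data is coherent, then copy. A generic sketch of that per-page walk, with the flush and copy steps abstracted into callbacks (rasterizer.FlushRegion and memory.ReadBlockUnsafe in the real code; WriteBlock mirrors this with InvalidateRegion and WriteBlockUnsafe):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Sketch of the per-page copy walk used by ReadBlock/WriteBlock.
    // The table layout and callbacks are stand-ins, not yuzu's types.
    template <typename FlushFn, typename CopyFn>
    void ForEachPage(std::uint64_t gpu_addr, std::size_t size, std::size_t page_size,
                     const std::vector<std::uint64_t>& backing_addr, FlushFn&& flush,
                     CopyFn&& copy) {
        std::size_t remaining = size;
        std::size_t page_index = gpu_addr / page_size;
        std::size_t page_offset = gpu_addr % page_size;
        std::size_t dest_offset = 0;
        while (remaining > 0) {
            const std::size_t copy_amount = std::min(page_size - page_offset, remaining);
            const std::uint64_t src_addr = backing_addr[page_index] + page_offset;
            flush(src_addr, copy_amount);             // keep GPU-written data coherent before the copy
            copy(src_addr, dest_offset, copy_amount); // copy to/from the backing CPU memory
            ++page_index;
            page_offset = 0;
            dest_offset += copy_amount;
            remaining -= copy_amount;
        }
    }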
@@ -305,18 +278,11 @@ void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};

-        switch (page_table.attributes[page_index]) {
-        case Common::PageType::Memory: {
-            const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset};
-            // Invalidate must happen on the rasterizer interface, such that memory is always
-            // synchronous when it is written (even when in asynchronous GPU mode).
-            rasterizer.InvalidateRegion(dest_addr, copy_amount);
-            memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
-            break;
-        }
-        default:
-            UNREACHABLE();
-        }
+        const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset};
+        // Invalidate must happen on the rasterizer interface, such that memory is always
+        // synchronous when it is written (even when in asynchronous GPU mode).
+        rasterizer.InvalidateRegion(dest_addr, copy_amount);
+        memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);

         page_index++;
         page_offset = 0;
@@ -362,8 +328,8 @@ void MemoryManager::CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const

 bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) {
     const VAddr addr = page_table.backing_addr[gpu_addr >> page_bits];
-    const std::size_t page = (addr & Memory::PAGE_MASK) + size;
-    return page <= Memory::PAGE_SIZE;
+    const std::size_t page = (addr & Core::Memory::PAGE_MASK) + size;
+    return page <= Core::Memory::PAGE_SIZE;
 }

 void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
@@ -375,12 +341,13 @@ void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageTy
     ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
                base + page_table.pointers.size());

-    std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type);
-
     if (memory == nullptr) {
-        std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory);
-        std::fill(page_table.backing_addr.begin() + base, page_table.backing_addr.begin() + end,
-                  backing_addr);
+        while (base != end) {
+            page_table.pointers[base] = nullptr;
+            page_table.backing_addr[base] = 0;
+
+            base += 1;
+        }
     } else {
         while (base != end) {
             page_table.pointers[base] = memory;
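MapPages drops the attribute fill and replaces the bulk std::fill unmapping with the same per-page loop already used for mapping. A simplified sketch of both branches; the types and the way the mapped branch advances memory/backing_addr per page are assumptions for illustration, not copied from the diff:

    #include <cstdint>
    #include <vector>

    // Sketch: both branches walk page indices [base, end) and write the
    // pointer and backing-address tables directly; there is no attribute
    // array left to fill.
    void MapPagesSketch(std::vector<std::uint8_t*>& pointers, std::vector<std::uint64_t>& backing,
                        std::uint64_t base, std::uint64_t end, std::uint8_t* memory,
                        std::uint64_t backing_addr, std::uint64_t page_size) {
        if (memory == nullptr) {
            for (; base != end; ++base) {
                pointers[base] = nullptr; // unmapping: drop pointer and backing address
                backing[base] = 0;
            }
        } else {
            for (; base != end; ++base) {
                pointers[base] = memory;  // mapping: record host pointer and CPU address per page
                backing[base] = backing_addr;
                memory += page_size;      // assumed per-page advance
                backing_addr += page_size;
            }
        }
    }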

src/video_core/memory_manager.h

@@ -179,7 +179,7 @@ private:
     /// End of address space, based on address space in bits.
     static constexpr GPUVAddr address_space_end{1ULL << address_space_width};

-    Common::BackingPageTable page_table{page_bits};
+    Common::PageTable page_table;
     VMAMap vma_map;

     VideoCore::RasterizerInterface& rasterizer;