yuzu-emu / yuzu-mainline

SMMU: Add continuity tracking optimization.

Fernando Sahmkow 2023-12-30 09:37:16 +01:00 committed by Liam
parent 9b11b9dce5
commit d8f1ce2f76
4 changed files with 63 additions and 13 deletions
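The optimization adds a per-page continuity table to the SMMU's device memory manager: for each device page it records how many pages, starting at that one, are backed by contiguous host memory, so GetSpan and WalkBlock can service requests that cross page boundaries with a single pointer or copy instead of iterating page by page. The following is a minimal standalone sketch of that idea; the struct name, its members, and the assumed 4 KiB page size are illustrative stand-ins rather than the yuzu types changed below.

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative stand-in for the continuity tracking added by this commit.
// tracker[i] == N means that pages i .. i + N - 1 are backed by contiguous
// host memory, so a request starting in page i may span N pages at once.
struct ContinuitySketch {
    static constexpr std::size_t page_bits = 12; // assumed 4 KiB pages
    static constexpr std::size_t page_size = std::size_t{1} << page_bits;
    static constexpr std::size_t page_mask = page_size - 1;

    std::vector<const std::uint8_t*> host_page; // host pointer backing each device page
    std::vector<std::uint32_t> tracker;         // forward run length per device page

    // Walk the mapping backwards so each page can extend the run that follows
    // it, mirroring the reverse loop in TrackContinuityImpl below.
    void Track(std::size_t first_page, std::size_t num_pages) {
        std::uintptr_t last_ptr = 0;
        std::uint32_t run = 1;
        for (std::size_t i = num_pages; i > 0; i--) {
            const std::size_t index = first_page + i - 1;
            const auto ptr = reinterpret_cast<std::uintptr_t>(host_page[index]);
            run = (ptr + page_size == last_ptr) ? run + 1 : 1;
            last_ptr = ptr;
            tracker[index] = run;
        }
    }

    // A span is served directly when the contiguous run covers the request;
    // otherwise the caller falls back to a per-page copy path.
    const std::uint8_t* Span(std::size_t addr, std::size_t size) const {
        const std::size_t page = addr >> page_bits;
        const std::size_t offset = addr & page_mask;
        if ((static_cast<std::size_t>(tracker[page]) << page_bits) >= size + offset) {
            return host_page[page] + offset;
        }
        return nullptr;
    }
};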

@@ -13,7 +13,6 @@
#include "common/scratch_buffer.h"
#include "common/virtual_buffer.h"
namespace Core {
class DeviceMemory;
@@ -40,9 +39,17 @@ public:
void AllocateFixed(DAddr start, size_t size);
void Free(DAddr start, size_t size);
void Map(DAddr address, VAddr virtual_address, size_t size, size_t process_id);
void Map(DAddr address, VAddr virtual_address, size_t size, size_t process_id,
bool track = false);
void Unmap(DAddr address, size_t size);
void TrackContinuityImpl(DAddr address, VAddr virtual_address, size_t size, size_t process_id);
void TrackContinuity(DAddr address, VAddr virtual_address, size_t size, size_t process_id) {
std::scoped_lock lk(mapping_guard);
TrackContinuityImpl(address, virtual_address, size, process_id);
}
// Write / Read
template <typename T>
T* GetPointer(DAddr address);
@@ -86,13 +93,8 @@ public:
template <typename T>
T Read(DAddr address) const;
const u8* GetSpan(const DAddr src_addr, const std::size_t size) const {
return nullptr;
}
u8* GetSpan(const DAddr src_addr, const std::size_t size) {
return nullptr;
}
u8* GetSpan(const DAddr src_addr, const std::size_t size);
const u8* GetSpan(const DAddr src_addr, const std::size_t size) const;
void ReadBlock(DAddr address, void* dest_pointer, size_t size);
void ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size);
@@ -144,6 +146,7 @@ private:
DeviceInterface* interface;
Common::VirtualBuffer<u32> compressed_physical_ptr;
Common::VirtualBuffer<u32> compressed_device_addr;
Common::VirtualBuffer<u32> continuity_tracker;
// Process memory interfaces
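On the header side the new surface is additive: Map() grows an optional track flag that defaults to false, TrackContinuity() is a locked wrapper around TrackContinuityImpl() for regions that are already mapped, GetSpan() goes from stubs that always returned nullptr to real declarations, and continuity_tracker is a new per-page VirtualBuffer<u32>. A hedged caller-side sketch of how the pieces combine follows; it is not standalone (it leans on the declarations above), and the Core namespace plus the DAddr/VAddr/u8 aliases are taken from the surrounding yuzu code.

// Hypothetical caller; the function name, addresses and sizes are placeholders.
template <typename Traits>
void MapAndTrack(Core::DeviceMemoryManager<Traits>& smmu, DAddr daddr, VAddr vaddr,
                 size_t size, size_t process_id) {
    // Either build the continuity table while mapping...
    smmu.Map(daddr, vaddr, size, process_id, /*track=*/true);
    // ...or track an already-mapped region later; this wrapper takes
    // mapping_guard before delegating to TrackContinuityImpl.
    smmu.TrackContinuity(daddr, vaddr, size, process_id);

    // Once tracked, a request that fits inside one contiguous run is served
    // as a direct pointer instead of the unconditional nullptr of the old stub.
    if (u8* span = smmu.GetSpan(daddr, size)) {
        // fast path: operate on span directly
        (void)span;
    }
}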

@@ -164,6 +164,7 @@ DeviceMemoryManager<Traits>::DeviceMemoryManager(const DeviceMemory& device_memo
: physical_base{reinterpret_cast<const uintptr_t>(device_memory_.buffer.BackingBasePointer())},
interface{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS),
compressed_device_addr(1ULL << (physical_max_bits - Memory::YUZU_PAGEBITS)),
continuity_tracker(device_as_size >> Memory::YUZU_PAGEBITS),
cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) {
impl = std::make_unique<DeviceMemoryManagerAllocator<Traits>>();
cached_pages = std::make_unique<CachedPages>();
@@ -194,7 +195,7 @@ void DeviceMemoryManager<Traits>::Free(DAddr start, size_t size) {
template <typename Traits>
void DeviceMemoryManager<Traits>::Map(DAddr address, VAddr virtual_address, size_t size,
size_t process_id) {
size_t process_id, bool track) {
Core::Memory::Memory* process_memory = registered_processes[process_id];
size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
@@ -222,6 +223,9 @@ void DeviceMemoryManager<Traits>::Map(DAddr address, VAddr virtual_address, size
}
impl->multi_dev_address.Register(new_dev, start_id);
}
if (track) {
TrackContinuityImpl(address, virtual_address, size, process_id);
}
}
template <typename Traits>
@@ -251,6 +255,47 @@ void DeviceMemoryManager<Traits>::Unmap(DAddr address, size_t size) {
}
}
}
template <typename Traits>
void DeviceMemoryManager<Traits>::TrackContinuityImpl(DAddr address, VAddr virtual_address,
size_t size, size_t process_id) {
Core::Memory::Memory* process_memory = registered_processes[process_id];
size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
uintptr_t last_ptr = 0;
size_t page_count = 1;
for (size_t i = num_pages; i > 0; i--) {
size_t index = i - 1;
const VAddr new_vaddress = virtual_address + index * Memory::YUZU_PAGESIZE;
const uintptr_t new_ptr = reinterpret_cast<uintptr_t>(
process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress)));
if (new_ptr + page_size == last_ptr) {
page_count++;
} else {
page_count = 1;
}
last_ptr = new_ptr;
continuity_tracker[start_page_d + index] = static_cast<u32>(page_count);
}
}
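For illustration of what the reverse walk stores: given a four-page mapping whose host backing is contiguous across pages 0-1 and across pages 2-3 but breaks between pages 1 and 2, the loop writes continuity_tracker values of {2, 1, 2, 1} for those pages, i.e. each entry is the length of the forward run of host-contiguous pages starting there. The GetSpan() overloads below exploit exactly that: a request that fits inside pages 0-1 gets a direct pointer, while one starting at page 1 and spilling into page 2 returns nullptr and the caller falls back to the per-page path.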
template <typename Traits>
u8* DeviceMemoryManager<Traits>::GetSpan(const DAddr src_addr, const std::size_t size) {
size_t page_index = src_addr >> page_bits;
size_t subbits = src_addr & page_mask;
if ((continuity_tracker[page_index] << page_bits) >= size + subbits) {
return GetPointer<u8>(src_addr);
}
return nullptr;
}
template <typename Traits>
const u8* DeviceMemoryManager<Traits>::GetSpan(const DAddr src_addr, const std::size_t size) const {
size_t page_index = src_addr >> page_bits;
size_t subbits = src_addr & page_mask;
if ((continuity_tracker[page_index] << page_bits) >= size + subbits) {
return GetPointer<u8>(src_addr);
}
return nullptr;
}
template <typename Traits>
void DeviceMemoryManager<Traits>::InnerGatherDeviceAddresses(Common::ScratchBuffer<u32>& buffer,
@@ -322,12 +367,13 @@ void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto o
std::size_t page_offset = addr & Memory::YUZU_PAGEMASK;
while (remaining_size) {
const size_t next_pages = static_cast<std::size_t>(continuity_tracker[page_index]);
const std::size_t copy_amount =
std::min(static_cast<std::size_t>(Memory::YUZU_PAGESIZE) - page_offset, remaining_size);
std::min((next_pages << Memory::YUZU_PAGEBITS) - page_offset, remaining_size);
const auto current_vaddr =
static_cast<u64>((page_index << Memory::YUZU_PAGEBITS) + page_offset);
SCOPE_EXIT({
page_index++;
page_index += next_pages;
page_offset = 0;
increment(copy_amount);
remaining_size -= copy_amount;
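This is where the table pays off in WalkBlock: copy_amount can now cover an entire contiguous run and page_index advances by next_pages at once. For illustration, assuming 4 KiB pages (YUZU_PAGEBITS = 12), a 24 KiB block read starting 0x100 bytes into a run of eight contiguous pages used to take seven iterations (a 0xF00 head, five full pages, and a 0x100 tail); with next_pages = 8 it completes in one, since min((8 << 12) - 0x100, 0x6000) = 0x6000.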

@@ -95,6 +95,7 @@ size_t Container::OpenSession(Kernel::KProcess* process) {
if (start_region != 0) {
session.mapper = std::make_unique<HeapMapper>(region_start, start_region, region_size,
smmu_id, impl->host1x);
smmu.TrackContinuity(start_region, region_start, region_size, smmu_id);
session.has_preallocated_area = true;
LOG_CRITICAL(Debug, "Preallocation created!");
}

@@ -221,7 +221,7 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are
}
handle_description->d_address = address;
smmu.Map(address, vaddress, map_size, session->smmu_id);
smmu.Map(address, vaddress, map_size, session->smmu_id, true);
handle_description->in_heap = false;
}
}