yuzu-emu/yuzu-mainline

core/memory: Migrate over RasterizerMarkRegionCached() to the Memory class

This is only used within the accelerated rasterizer in two places, so
this is also a very trivial migration.
Lioncash 2019-11-26 15:56:13 -05:00
parent b2165c6b35
commit 849581075a
3 changed files with 79 additions and 70 deletions
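
At call sites, the change is mechanical: the free function in the Memory namespace becomes a member function on the Memory class. A minimal before/after sketch (the cpu_memory reference name is taken from the rasterizer_accelerated.cpp hunk at the end of this diff; how that reference gets wired up is not shown in this commit):

    // Before: free function bound to the global current_page_table.
    Memory::RasterizerMarkRegionCached(addr, size, true);

    // After: member function on a Memory instance, forwarding to Memory::Impl.
    cpu_memory.RasterizerMarkRegionCached(addr, size, true);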

src/core/memory.cpp

@@ -225,6 +225,69 @@ struct Memory::Impl {
        return string;
    }

    void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
        if (vaddr == 0) {
            return;
        }

        // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
        // address space, marking the region as un/cached. The region is marked un/cached at a
        // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
        // is different). This assumes the specified GPU address region is contiguous as well.
        u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
        for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
            Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];

            if (cached) {
                // Switch page type to cached if now cached
                switch (page_type) {
                case Common::PageType::Unmapped:
                    // It is not necessary for a process to have this region mapped into its
                    // address space, for example, a system module need not have a VRAM mapping.
                    break;
                case Common::PageType::Memory:
                    page_type = Common::PageType::RasterizerCachedMemory;
                    current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
                    break;
                case Common::PageType::RasterizerCachedMemory:
                    // There can be more than one GPU region mapped per CPU region, so it's common
                    // that this area is already marked as cached.
                    break;
                default:
                    UNREACHABLE();
                }
            } else {
                // Switch page type to uncached if now uncached
                switch (page_type) {
                case Common::PageType::Unmapped:
                    // It is not necessary for a process to have this region mapped into its
                    // address space, for example, a system module need not have a VRAM mapping.
                    break;
                case Common::PageType::Memory:
                    // There can be more than one GPU region mapped per CPU region, so it's common
                    // that this area is already unmarked as cached.
                    break;
                case Common::PageType::RasterizerCachedMemory: {
                    u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
                    if (pointer == nullptr) {
                        // It's possible that this function has been called while updating the
                        // pagetable after unmapping a VMA. In that case the underlying VMA will
                        // no longer exist, and we should just leave the pagetable entry blank.
                        page_type = Common::PageType::Unmapped;
                    } else {
                        page_type = Common::PageType::Memory;
                        current_page_table->pointers[vaddr >> PAGE_BITS] = pointer;
                    }
                    break;
                }
                default:
                    UNREACHABLE();
                }
            }
        }
    }

    /**
     * Maps a region of pages as a specific type.
     *
@@ -318,6 +381,10 @@ std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
    return impl->ReadCString(vaddr, max_length);
}

void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
    impl->RasterizerMarkRegionCached(vaddr, size, cached);
}

void SetCurrentPageTable(Kernel::Process& process) {
    current_page_table = &process.VMManager().page_table;
@@ -334,69 +401,6 @@ bool IsKernelVirtualAddress(const VAddr vaddr) {
    return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END;
}

void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
    if (vaddr == 0) {
        return;
    }

    // Iterate over a contiguous CPU address space, which corresponds to the specified GPU address
    // space, marking the region as un/cached. The region is marked un/cached at a granularity of
    // CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This
    // assumes the specified GPU address region is contiguous as well.
    u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
    for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
        Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];

        if (cached) {
            // Switch page type to cached if now cached
            switch (page_type) {
            case Common::PageType::Unmapped:
                // It is not necessary for a process to have this region mapped into its address
                // space, for example, a system module need not have a VRAM mapping.
                break;
            case Common::PageType::Memory:
                page_type = Common::PageType::RasterizerCachedMemory;
                current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
                break;
            case Common::PageType::RasterizerCachedMemory:
                // There can be more than one GPU region mapped per CPU region, so it's common that
                // this area is already marked as cached.
                break;
            default:
                UNREACHABLE();
            }
        } else {
            // Switch page type to uncached if now uncached
            switch (page_type) {
            case Common::PageType::Unmapped:
                // It is not necessary for a process to have this region mapped into its address
                // space, for example, a system module need not have a VRAM mapping.
                break;
            case Common::PageType::Memory:
                // There can be more than one GPU region mapped per CPU region, so it's common that
                // this area is already unmarked as cached.
                break;
            case Common::PageType::RasterizerCachedMemory: {
                u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
                if (pointer == nullptr) {
                    // It's possible that this function has been called while updating the pagetable
                    // after unmapping a VMA. In that case the underlying VMA will no longer exist,
                    // and we should just leave the pagetable entry blank.
                    page_type = Common::PageType::Unmapped;
                } else {
                    page_type = Common::PageType::Memory;
                    current_page_table->pointers[vaddr >> PAGE_BITS] = pointer;
                }
                break;
            }
            default:
                UNREACHABLE();
            }
        }
    }
}

u8 Read8(const VAddr addr) {
    return Read<u8>(addr);
}
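
The page-count expression above is worth sanity-checking in isolation, since it must count every CPU page the byte range touches, not just size / PAGE_SIZE. A self-contained sketch, assuming the 4 KiB CPU page granularity used here (PAGE_BITS == 12); NumTouchedPages is a name invented for this illustration:

    #include <cstdint>

    constexpr std::uint64_t PAGE_BITS = 12; // assumed 4 KiB CPU pages
    constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;

    // Same formula as in RasterizerMarkRegionCached() above: index of the
    // last touched page, minus index of the first, plus one.
    constexpr std::uint64_t NumTouchedPages(std::uint64_t vaddr, std::uint64_t size) {
        return ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
    }

    // A 16-byte region straddling a page boundary touches two pages...
    static_assert(NumTouchedPages(0x1FF8, 16) == 2, "straddles a page boundary");
    // ...while a page-aligned, page-sized region touches exactly one.
    static_assert(NumTouchedPages(0x2000, PAGE_SIZE) == 1, "exactly one page");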

src/core/memory.h

@@ -169,6 +169,16 @@ public:
     */
    std::string ReadCString(VAddr vaddr, std::size_t max_length);

    /**
     * Marks each page within the specified address range as cached or uncached.
     *
     * @param vaddr  The virtual address indicating the start of the address range.
     * @param size   The size of the address range in bytes.
     * @param cached Whether or not any pages within the address range should be
     *               marked as cached or uncached.
     */
    void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached);

private:
    struct Impl;
    std::unique_ptr<Impl> impl;
@@ -199,9 +209,4 @@ void WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size);
void ZeroBlock(const Kernel::Process& process, VAddr dest_addr, std::size_t size);
void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size);

/**
 * Mark each page touching the region as cached.
 */
void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached);

} // namespace Memory
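
With the member function declared above, callers operate on a concrete Memory instance rather than on hidden global state. A minimal usage sketch (how the Memory& is obtained is an assumption here; this commit does not show it):

    // `memory` is assumed to be a Memory& owned by the emulated system.
    // Mark a 64 KiB region as cached: affected pages flip to
    // PageType::RasterizerCachedMemory and their fast-path pointers are cleared.
    memory.RasterizerMarkRegionCached(base_addr, 0x10000, true);

    // Unmark it again: pages revert to PageType::Memory with pointers restored,
    // or to Unmapped if the backing VMA has since been torn down.
    memory.RasterizerMarkRegionCached(base_addr, 0x10000, false);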

src/video_core/rasterizer_accelerated.cpp

@@ -48,9 +48,9 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int delta)
        const u64 interval_size = interval_end_addr - interval_start_addr;

        if (delta > 0 && count == delta) {
-            Memory::RasterizerMarkRegionCached(interval_start_addr, interval_size, true);
+            cpu_memory.RasterizerMarkRegionCached(interval_start_addr, interval_size, true);
        } else if (delta < 0 && count == -delta) {
-            Memory::RasterizerMarkRegionCached(interval_start_addr, interval_size, false);
+            cpu_memory.RasterizerMarkRegionCached(interval_start_addr, interval_size, false);
        } else {
            ASSERT(count >= 0);
        }
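
The count == delta / count == -delta guards above mean the page table is only touched on a zero-to-nonzero or nonzero-to-zero transition of the per-page cache count, so overlapping cached surfaces never redundantly toggle page state. A simplified, self-contained illustration of that transition logic (names invented for this sketch; yuzu itself tracks counts in an interval map rather than a hash map):

    #include <cstdint>
    #include <iostream>
    #include <unordered_map>

    class PageCacheCounter {
    public:
        void Adjust(std::uint64_t page, int delta) {
            const int before = counts[page];
            const int after = before + delta;
            counts[page] = after;
            if (before == 0 && after > 0) {
                std::cout << "mark page " << page << " cached\n";
            } else if (before > 0 && after == 0) {
                std::cout << "mark page " << page << " uncached\n";
            }
            // Any other transition leaves the page's cached state unchanged.
        }

    private:
        std::unordered_map<std::uint64_t, int> counts;
    };

    int main() {
        PageCacheCounter counter;
        counter.Adjust(0x80, +1); // first surface: prints "mark page 128 cached"
        counter.Adjust(0x80, +1); // overlapping surface: no page-table change
        counter.Adjust(0x80, -1); // still referenced: no change
        counter.Adjust(0x80, -1); // last reference: prints "mark page 128 uncached"
    }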