yuzu-emu/yuzu-android (archived)

Revert "Memory GPU <-> CPU: reduce infighting in the texture cache by adding CPU Cached memory."

bunnei 2022-03-26 12:38:30 -07:00 committed by GitHub
parent 664d8c8732
commit af04f8b8e9
6 changed files with 4 additions and 65 deletions
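
For context, the change being reverted deferred texture-cache invalidation on CPU writes: CachedWriteMemory only flagged overlapping images as CachedCpuModified and queued them in cached_cpu_invalidate, and a later FlushCachedWrites pass (run from SyncGuestHost) promoted those flags to ordinary CpuModified marks. Below is a minimal standalone sketch of that two-phase pattern; the types, names, and bit values are simplified stand-ins for illustration, not yuzu's real ones.

    #include <cstddef>
    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    struct Image {
        uint32_t flags = 0;
    };

    class TwoPhaseInvalidation {
    public:
        static constexpr uint32_t CpuModified = 1u << 0;       // illustrative bit values
        static constexpr uint32_t CachedCpuModified = 1u << 1;

        // Hot path, run on every CPU write over texture memory: set a cheap
        // "deferred" flag and remember the image; no real invalidation yet.
        void CachedWriteMemory(std::size_t image_id) {
            Image& image = images[image_id];
            if ((image.flags & CachedCpuModified) != 0) {
                return; // already queued, so repeat writes are nearly free
            }
            image.flags |= CachedCpuModified;
            pending.insert(image_id);
        }

        // Cold path, run once per guest/host sync: promote the deferred flags
        // to real CpuModified marks so the next use re-uploads each image.
        void FlushCachedWrites() {
            for (const std::size_t image_id : pending) {
                Image& image = images[image_id];
                image.flags &= ~CachedCpuModified;
                image.flags |= CpuModified;
            }
            pending.clear();
        }

        std::vector<Image> images{32};

    private:
        std::unordered_set<std::size_t> pending;
    };

    int main() {
        TwoPhaseInvalidation cache;
        cache.CachedWriteMemory(3); // first write queues the image...
        cache.CachedWriteMemory(3); // ...and later writes hit the early-out
        cache.FlushCachedWrites();  // sync promotes everything in one pass
        return (cache.images[3].flags & TwoPhaseInvalidation::CpuModified) ? 0 : 1;
    }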

src/core/memory.cpp

@@ -322,7 +322,7 @@ struct Memory::Impl {
         }
         if (Settings::IsFastmemEnabled()) {
-            const bool is_read_enable = !Settings::IsGPULevelExtreme() || !cached;
+            const bool is_read_enable = Settings::IsGPULevelHigh() || !cached;
             system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
         }
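
One behavioral note on this hunk, hedged because it depends on how the accuracy predicates nest (in yuzu, IsGPULevelHigh() should also hold at Extreme): the two conditions differ in when a GPU-cached fastmem page keeps read access. A comment-only comparison:

    // Protect(vaddr, size, is_read_enable, !cached) removes write access to
    // GPU-cached pages under both schemes; the predicates differ only in reads.
    //
    //   reverted scheme:  is_read_enable = !Settings::IsGPULevelExtreme() || !cached;
    //                     -> cached pages stay readable at Normal/High accuracy;
    //                        read faults are taken only at Extreme.
    //
    //   restored scheme:  is_read_enable = Settings::IsGPULevelHigh() || !cached;
    //                     -> cached pages stay readable at High/Extreme accuracy
    //                        (assuming IsGPULevelHigh() is true at Extreme);
    //                        read faults are taken at Normal.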

src/video_core/renderer_opengl/gl_rasterizer.cpp

@@ -352,7 +352,7 @@ void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
     shader_cache.OnCPUWrite(addr, size);
     {
         std::scoped_lock lock{texture_cache.mutex};
-        texture_cache.CachedWriteMemory(addr, size);
+        texture_cache.WriteMemory(addr, size);
     }
     {
         std::scoped_lock lock{buffer_cache.mutex};
@@ -363,10 +363,6 @@ void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
 void RasterizerOpenGL::SyncGuestHost() {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     shader_cache.SyncGuestHost();
-    {
-        std::scoped_lock lock{texture_cache.mutex};
-        texture_cache.FlushCachedWrites();
-    }
     {
         std::scoped_lock lock{buffer_cache.mutex};
         buffer_cache.FlushCachedWrites();

src/video_core/renderer_vulkan/vk_rasterizer.cpp

@@ -408,7 +408,7 @@ void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
     pipeline_cache.OnCPUWrite(addr, size);
     {
         std::scoped_lock lock{texture_cache.mutex};
-        texture_cache.CachedWriteMemory(addr, size);
+        texture_cache.WriteMemory(addr, size);
     }
     {
         std::scoped_lock lock{buffer_cache.mutex};
@@ -418,10 +418,6 @@ void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
 void RasterizerVulkan::SyncGuestHost() {
     pipeline_cache.SyncGuestHost();
-    {
-        std::scoped_lock lock{texture_cache.mutex};
-        texture_cache.FlushCachedWrites();
-    }
     {
         std::scoped_lock lock{buffer_cache.mutex};
         buffer_cache.FlushCachedWrites();

src/video_core/texture_cache/image_base.h

@@ -39,9 +39,6 @@ enum class ImageFlagBits : u32 {
     Rescaled = 1 << 13,
     CheckingRescalable = 1 << 14,
     IsRescalable = 1 << 15,
-
-    // Cached CPU
-    CachedCpuModified = 1 << 16, ///< Contents have been modified from the CPU
 };
 DECLARE_ENUM_FLAG_OPERATORS(ImageFlagBits)
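
Since flag arithmetic and the True()/False() helpers appear throughout the hunks below, here is a self-contained approximation of the mechanics. The operators and bit positions are re-implemented stand-ins for what DECLARE_ENUM_FLAG_OPERATORS and yuzu's helpers provide, not the real headers.

    #include <cstdint>

    enum class ImageFlagBits : uint32_t {
        CpuModified = 1 << 0, // illustrative positions, not the real ones
        Tracked = 1 << 1,
    };

    // Stand-ins for what DECLARE_ENUM_FLAG_OPERATORS(ImageFlagBits) generates.
    constexpr ImageFlagBits operator|(ImageFlagBits a, ImageFlagBits b) {
        return static_cast<ImageFlagBits>(static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
    }
    constexpr ImageFlagBits operator&(ImageFlagBits a, ImageFlagBits b) {
        return static_cast<ImageFlagBits>(static_cast<uint32_t>(a) & static_cast<uint32_t>(b));
    }
    constexpr ImageFlagBits operator~(ImageFlagBits a) {
        return static_cast<ImageFlagBits>(~static_cast<uint32_t>(a));
    }
    constexpr ImageFlagBits& operator|=(ImageFlagBits& a, ImageFlagBits b) { return a = a | b; }
    constexpr ImageFlagBits& operator&=(ImageFlagBits& a, ImageFlagBits b) { return a = a & b; }

    // True()/False() test whether a masked combination has any bit set.
    constexpr bool True(ImageFlagBits f) { return static_cast<uint32_t>(f) != 0; }
    constexpr bool False(ImageFlagBits f) { return static_cast<uint32_t>(f) == 0; }

    int main() {
        ImageFlagBits flags{};
        flags |= ImageFlagBits::CpuModified | ImageFlagBits::Tracked; // set bits
        if (True(flags & ImageFlagBits::Tracked)) {
            flags &= ~ImageFlagBits::Tracked; // clear a single bit
        }
        return False(flags & ImageFlagBits::Tracked) ? 0 : 1;
    }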

src/video_core/texture_cache/texture_cache.h

@@ -437,23 +437,6 @@ void TextureCache<P>::WriteMemory(VAddr cpu_addr, size_t size) {
     });
 }
 
-template <class P>
-void TextureCache<P>::CachedWriteMemory(VAddr cpu_addr, size_t size) {
-    const VAddr new_cpu_addr = Common::AlignDown(cpu_addr, CPU_PAGE_SIZE);
-    const size_t new_size = Common::AlignUp(size + cpu_addr - new_cpu_addr, CPU_PAGE_SIZE);
-    ForEachImageInRegion(new_cpu_addr, new_size, [this](ImageId image_id, Image& image) {
-        if (True(image.flags & ImageFlagBits::CachedCpuModified)) {
-            return;
-        }
-        image.flags |= ImageFlagBits::CachedCpuModified;
-        cached_cpu_invalidate.insert(image_id);
-        if (True(image.flags & ImageFlagBits::Tracked)) {
-            UntrackImage(image, image_id);
-        }
-    });
-}
-
 template <class P>
 void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
     std::vector<ImageId> images;
@@ -511,18 +494,6 @@ void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) {
     }
 }
 
-template <class P>
-void TextureCache<P>::FlushCachedWrites() {
-    for (ImageId image_id : cached_cpu_invalidate) {
-        Image& image = slot_images[image_id];
-        if (True(image.flags & ImageFlagBits::CachedCpuModified)) {
-            image.flags &= ~ImageFlagBits::CachedCpuModified;
-            image.flags |= ImageFlagBits::CpuModified;
-        }
-    }
-    cached_cpu_invalidate.clear();
-}
-
 template <class P>
 void TextureCache<P>::BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
                                 const Tegra::Engines::Fermi2D::Surface& src,
@@ -1589,9 +1560,6 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
 template <class P>
 void TextureCache<P>::TrackImage(ImageBase& image, ImageId image_id) {
     ASSERT(False(image.flags & ImageFlagBits::Tracked));
-    if (True(image.flags & ImageFlagBits::CachedCpuModified)) {
-        return;
-    }
     image.flags |= ImageFlagBits::Tracked;
     if (False(image.flags & ImageFlagBits::Sparse)) {
         rasterizer.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, 1);
@@ -1648,9 +1616,6 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
         tentative_size = EstimatedDecompressedSize(tentative_size, image.info.format);
     }
     total_used_memory -= Common::AlignUp(tentative_size, 1024);
-    if (True(image.flags & ImageFlagBits::CachedCpuModified)) {
-        cached_cpu_invalidate.erase(image_id);
-    }
     const GPUVAddr gpu_addr = image.gpu_addr;
     const auto alloc_it = image_allocs_table.find(gpu_addr);
     if (alloc_it == image_allocs_table.end()) {
@@ -1817,11 +1782,7 @@ template <class P>
 void TextureCache<P>::PrepareImage(ImageId image_id, bool is_modification, bool invalidate) {
     Image& image = slot_images[image_id];
     if (invalidate) {
-        if (True(image.flags & ImageFlagBits::CachedCpuModified)) {
-            cached_cpu_invalidate.erase(image_id);
-        }
-        image.flags &= ~(ImageFlagBits::CpuModified | ImageFlagBits::GpuModified |
-                         ImageFlagBits::CachedCpuModified);
+        image.flags &= ~(ImageFlagBits::CpuModified | ImageFlagBits::GpuModified);
         if (False(image.flags & ImageFlagBits::Tracked)) {
             TrackImage(image, image_id);
         }

src/video_core/texture_cache/texture_cache_base.h

@@ -8,7 +8,6 @@
 #include <span>
 #include <type_traits>
 #include <unordered_map>
-#include <unordered_set>
 #include <vector>
 #include <queue>
@@ -51,9 +50,6 @@ class TextureCache {
     /// Address shift for caching images into a hash table
     static constexpr u64 PAGE_BITS = 20;
-    static constexpr u64 CPU_PAGE_BITS = 12;
-    static constexpr u64 CPU_PAGE_SIZE = 1ULL << CPU_PAGE_BITS;
-
     /// Enables debugging features to the texture cache
     static constexpr bool ENABLE_VALIDATION = P::ENABLE_VALIDATION;
     /// Implement blits as copies between framebuffers
@@ -140,9 +136,6 @@ public:
     /// Mark images in a range as modified from the CPU
     void WriteMemory(VAddr cpu_addr, size_t size);
 
-    /// Mark images in a range as modified from the CPU
-    void CachedWriteMemory(VAddr cpu_addr, size_t size);
-
     /// Download contents of host images to guest memory in a region
     void DownloadMemory(VAddr cpu_addr, size_t size);
@@ -152,8 +145,6 @@ public:
     /// Remove images in a region
     void UnmapGPUMemory(GPUVAddr gpu_addr, size_t size);
 
-    void FlushCachedWrites();
-
     /// Blit an image with the given parameters
     void BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
                    const Tegra::Engines::Fermi2D::Surface& src,
@@ -375,8 +366,6 @@ private:
     std::unordered_map<ImageId, std::vector<ImageViewId>> sparse_views;
 
-    std::unordered_set<ImageId> cached_cpu_invalidate;
-
     VAddr virtual_invalid_space{};
     bool has_deleted_images = false;