yuzu-emu/yuzu

Texture Cache: Implement OnCPUWrite and SyncGuestHost

Fernando Sahmkow 2020-02-16 09:33:03 -04:00
parent 084ceb925a
commit a60a22d9c2
2 changed files with 63 additions and 3 deletions
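In short, guest-CPU invalidation of cached textures is split into two phases: `OnCPUWrite` runs when guest memory is written and only unmarks the overlapped surfaces and queues them, while `SyncGuestHost` later performs the actual unregistration at a synchronization point where mutating the cache is safe. A minimal sketch of that pattern with reduced, hypothetical types (the real implementation is the templated texture cache in the diff below):

```cpp
// Hedged sketch of the two-phase invalidation this commit introduces; all
// names other than OnCPUWrite/SyncGuestHost are simplified stand-ins.
#include <cstddef>
#include <list>
#include <memory>
#include <mutex>
#include <vector>

struct SketchSurface {
    bool memory_marked = false; // its guest pages are counted as cached
    bool sync_pending = false;  // queued for unregistration at the next sync
};

class TwoPhaseCache {
public:
    // Phase 1: cheap bookkeeping when the guest CPU writes memory; nothing
    // is torn down here.
    void OnCPUWrite(std::size_t addr, std::size_t size) {
        std::lock_guard lock{mutex};
        for (const auto& surface : SurfacesInRegion(addr, size)) {
            if (surface->memory_marked) {
                surface->memory_marked = false; // stop page tracking (Unmark)
                surface->sync_pending = true;
                marked_for_unregister.push_back(surface);
            }
        }
    }

    // Phase 2: at a synchronization point, actually drop the queued surfaces.
    void SyncGuestHost() {
        std::lock_guard lock{mutex};
        for (const auto& surface : marked_for_unregister) {
            surface->sync_pending = false;
            // the real cache calls Unregister(surface) here
        }
        marked_for_unregister.clear();
    }

private:
    std::vector<std::shared_ptr<SketchSurface>> SurfacesInRegion(std::size_t, std::size_t) {
        return {}; // placeholder region lookup
    }

    std::list<std::shared_ptr<SketchSurface>> marked_for_unregister;
    std::recursive_mutex mutex;
};
```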

@@ -192,6 +192,22 @@ public:
         index = index_;
     }
 
+    void SetMemoryMarked(bool is_memory_marked_) {
+        is_memory_marked = is_memory_marked_;
+    }
+
+    bool IsMemoryMarked() const {
+        return is_memory_marked;
+    }
+
+    void SetSyncPending(bool is_sync_pending_) {
+        is_sync_pending = is_sync_pending_;
+    }
+
+    bool IsSyncPending() const {
+        return is_sync_pending;
+    }
+
     void MarkAsPicked(bool is_picked_) {
         is_picked = is_picked_;
     }
@@ -303,6 +319,8 @@ private:
     bool is_target{};
     bool is_registered{};
     bool is_picked{};
+    bool is_memory_marked{};
+    bool is_sync_pending{};
     u32 index{NO_RT};
     u64 modification_tick{};
 };

@@ -6,6 +6,7 @@
 
 #include <algorithm>
 #include <array>
+#include <list>
 #include <memory>
 #include <mutex>
 #include <set>
@@ -62,6 +63,30 @@ public:
         }
     }
 
+    void OnCPUWrite(CacheAddr addr, std::size_t size) {
+        std::lock_guard lock{mutex};
+
+        for (const auto& surface : GetSurfacesInRegion(addr, size)) {
+            if (surface->IsMemoryMarked()) {
+                Unmark(surface);
+                surface->SetSyncPending(true);
+                marked_for_unregister.emplace_back(surface);
+            }
+        }
+    }
+
+    void SyncGuestHost() {
+        std::lock_guard lock{mutex};
+
+        for (const auto& surface : marked_for_unregister) {
+            if (surface->IsRegistered()) {
+                surface->SetSyncPending(false);
+                Unregister(surface);
+            }
+        }
+        marked_for_unregister.clear();
+    }
+
     /**
      * Guarantees that rendertargets don't unregister themselves if the
      * collide. Protection is currently only done on 3D slices.
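A hypothetical caller-side view of the pairing (assumed wiring, not part of this diff): whatever reacts to guest memory writes only calls `OnCPUWrite`, and the deferred cleanup is triggered by a later `SyncGuestHost` call at a synchronization point, which keeps the write notification cheap and avoids tearing surfaces down at an arbitrary moment.

```cpp
// Assumed call sites, for illustration only; this commit adds the two cache
// methods but not the code that invokes them.
#include <cstddef>

template <class TextureCache>
void NotifyGuestWrite(TextureCache& texture_cache, std::size_t addr, std::size_t size) {
    // Cheap path on the writing side: unmark and queue the overlapping surfaces.
    texture_cache.OnCPUWrite(addr, size);
}

template <class TextureCache>
void OnSyncPoint(TextureCache& texture_cache) {
    // Deferred cleanup runs here, where the caller knows the cache may be mutated.
    texture_cache.SyncGuestHost();
}
```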
@@ -85,7 +110,9 @@ public:
             return a->GetModificationTick() < b->GetModificationTick();
         });
         for (const auto& surface : surfaces) {
+            mutex.unlock();
             FlushSurface(surface);
+            mutex.lock();
         }
     }
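The flush loop above now releases the cache mutex around each `FlushSurface` call, apparently so that other threads are not blocked for the duration of a long flush (for example when they only need the cheap `OnCPUWrite` path); the lock taken earlier in the function, outside the shown context, is reacquired before the next iteration and dropped normally at scope exit. A reduced sketch of that unlock-around-slow-work pattern, with hypothetical names:

```cpp
// Reduced sketch mirroring only the locking structure of the diff above;
// hypothetical types, not the real texture cache.
#include <mutex>
#include <vector>

class FlushSketch {
public:
    void FlushAll(const std::vector<int>& surfaces) {
        std::lock_guard lock{mutex};
        for (int surface : surfaces) {
            mutex.unlock();        // let other threads use the cache meanwhile
            FlushSurface(surface); // slow work done without holding the lock
            mutex.lock();          // reacquire before touching shared state again
        }
    } // lock_guard releases the last hold here

private:
    void FlushSurface(int /*surface*/) {}
    std::recursive_mutex mutex;
};
```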
@@ -345,9 +372,20 @@ protected:
         surface->SetCpuAddr(*cpu_addr);
         RegisterInnerCache(surface);
         surface->MarkAsRegistered(true);
+        surface->SetMemoryMarked(true);
         rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1);
     }
 
+    void Unmark(TSurface surface) {
+        if (!surface->IsMemoryMarked()) {
+            return;
+        }
+        const std::size_t size = surface->GetSizeInBytes();
+        const VAddr cpu_addr = surface->GetCpuAddr();
+        rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
+        surface->SetMemoryMarked(false);
+    }
+
     void Unregister(TSurface surface) {
         if (guard_render_targets && surface->IsProtected()) {
             return;
@@ -355,9 +393,11 @@ protected:
         if (!guard_render_targets && surface->IsRenderTarget()) {
            ManageRenderTargetUnregister(surface);
         }
-        const std::size_t size = surface->GetSizeInBytes();
-        const VAddr cpu_addr = surface->GetCpuAddr();
-        rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
+        Unmark(surface);
+        if (surface->IsSyncPending()) {
+            marked_for_unregister.remove(surface);
+            surface->SetSyncPending(false);
+        }
         UnregisterInnerCache(surface);
         surface->MarkAsRegistered(false);
         ReserveSurface(surface->GetSurfaceParams(), surface);
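`Register` now sets the memory-marked flag next to the existing `UpdatePagesCachedCount(+1)` call, and the matching `-1` is funneled through the new `Unmark` helper, which can be reached both from `OnCPUWrite` and from a later `Unregister` but releases the pages only once; `Unregister` also drops a pending `marked_for_unregister` entry so `SyncGuestHost` will not unregister the surface a second time. A reduced sketch of the flag-guarded page accounting, with hypothetical stand-ins for the rasterizer and surface:

```cpp
// Hypothetical stand-ins; only the idempotent page accounting is mirrored.
#include <cstddef>
#include <cstdint>

struct SketchRasterizer {
    // delta = +1 marks the pages as cached, -1 releases them
    // (mirrors the rasterizer.UpdatePagesCachedCount calls in the diff).
    void UpdatePagesCachedCount(std::uintptr_t /*addr*/, std::size_t /*size*/, int /*delta*/) {}
};

struct SketchSurface {
    std::uintptr_t cpu_addr = 0;
    std::size_t size_in_bytes = 0;
    bool is_memory_marked = false;
};

void Mark(SketchRasterizer& rasterizer, SketchSurface& surface) {
    rasterizer.UpdatePagesCachedCount(surface.cpu_addr, surface.size_in_bytes, 1);
    surface.is_memory_marked = true; // done at Register time
}

// May be reached twice for the same surface (once from OnCPUWrite, once from
// Unregister); the flag guarantees the -1 happens exactly once.
void Unmark(SketchRasterizer& rasterizer, SketchSurface& surface) {
    if (!surface.is_memory_marked) {
        return;
    }
    rasterizer.UpdatePagesCachedCount(surface.cpu_addr, surface.size_in_bytes, -1);
    surface.is_memory_marked = false;
}
```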
@@ -1150,6 +1190,8 @@ private:
     std::unordered_map<u32, TSurface> invalid_cache;
     std::vector<u8> invalid_memory;
 
+    std::list<TSurface> marked_for_unregister;
+
     StagingCache staging_cache;
     std::recursive_mutex mutex;
 };