yuzu-emu/yuzu-android

smmu: use new range mutex construction for protecting counters

Liam authored 2024-01-16 23:35:48 -05:00, committed by Fernando Sahmkow
parent a7c1306e2d, commit 8f848f43e9
4 changed files with 97 additions and 11 deletions

src/common/CMakeLists.txt

@@ -106,6 +106,7 @@ add_library(common STATIC
precompiled_headers.h
quaternion.h
range_map.h
+range_mutex.h
reader_writer_queue.h
ring_buffer.h
${CMAKE_CURRENT_BINARY_DIR}/scm_rev.cpp

src/common/range_mutex.h (new file, 93 lines)

@@ -0,0 +1,93 @@
// SPDX-FileCopyrightText: 2024 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <condition_variable>
#include <mutex>

#include "common/intrusive_list.h"

namespace Common {

class ScopedRangeLock;

// A mutex over byte ranges rather than a single resource: holders of
// disjoint [address, address + size) ranges proceed concurrently, and a
// new locker blocks only while some held range overlaps its own.
class RangeMutex {
public:
    explicit RangeMutex() = default;
    ~RangeMutex() = default;

private:
    friend class ScopedRangeLock;

    void Lock(ScopedRangeLock& l);
    void Unlock(ScopedRangeLock& l);
    bool HasIntersectionLocked(ScopedRangeLock& l);

private:
    std::mutex m_mutex;
    std::condition_variable m_cv;

    using LockList = Common::IntrusiveListBaseTraits<ScopedRangeLock>::ListType;
    LockList m_list;
};

// RAII holder for a locked range; doubles as the intrusive list node that
// RangeMutex uses to track currently held ranges.
class ScopedRangeLock : public Common::IntrusiveListBaseNode<ScopedRangeLock> {
public:
    explicit ScopedRangeLock(RangeMutex& mutex, u64 address, u64 size)
        : m_mutex(mutex), m_address(address), m_size(size) {
        // An empty range never needs the lock.
        if (m_size > 0) {
            m_mutex.Lock(*this);
        }
    }
    ~ScopedRangeLock() {
        if (m_size > 0) {
            m_mutex.Unlock(*this);
        }
    }

    u64 GetAddress() const {
        return m_address;
    }

    u64 GetSize() const {
        return m_size;
    }

private:
    RangeMutex& m_mutex;
    const u64 m_address{};
    const u64 m_size{};
};

inline void RangeMutex::Lock(ScopedRangeLock& l) {
    // Wait until no held range overlaps ours, then register our range.
    std::unique_lock lk{m_mutex};
    m_cv.wait(lk, [&] { return !HasIntersectionLocked(l); });
    m_list.push_back(l);
}

inline void RangeMutex::Unlock(ScopedRangeLock& l) {
    {
        std::scoped_lock lk{m_mutex};
        m_list.erase(m_list.iterator_to(l));
    }
    // Wake all waiters; those whose ranges no longer intersect will proceed.
    m_cv.notify_all();
}

inline bool RangeMutex::HasIntersectionLocked(ScopedRangeLock& l) {
    // Closed-interval overlap test: two ranges intersect iff each begins
    // at or before the other's last byte. Caller must hold m_mutex.
    const auto cur_begin = l.GetAddress();
    const auto cur_last = l.GetAddress() + l.GetSize() - 1;
    for (const auto& other : m_list) {
        const auto other_begin = other.GetAddress();
        const auto other_last = other.GetAddress() + other.GetSize() - 1;
        if (cur_begin <= other_last && other_begin <= cur_last) {
            return true;
        }
    }
    return false;
}

} // namespace Common
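A quick usage sketch, not part of the commit: the main() harness below is hypothetical and assumes the yuzu tree for the include, with the sleeps standing in for real work. Holders of disjoint ranges acquire immediately, while an overlapping locker blocks inside RangeMutex::Lock until the intersecting holder is destroyed.

```cpp
#include <chrono>
#include <cstdio>
#include <thread>

#include "common/range_mutex.h"

int main() {
    Common::RangeMutex mutex;

    std::thread a([&] {
        // Holds [0x1000, 0x2000) for ~200 ms.
        Common::ScopedRangeLock lock(mutex, 0x1000, 0x1000);
        std::this_thread::sleep_for(std::chrono::milliseconds(200));
    });

    // Give `a` time to acquire before the others try.
    std::this_thread::sleep_for(std::chrono::milliseconds(50));

    std::thread b([&] {
        // Disjoint from a's range: acquires immediately.
        Common::ScopedRangeLock lock(mutex, 0x3000, 0x1000);
        std::puts("b: disjoint range, no waiting");
    });

    std::thread c([&] {
        // Overlaps a's range: blocks until `a` releases.
        Common::ScopedRangeLock lock(mutex, 0x1800, 0x1000);
        std::puts("c: overlapping range, acquired after a released");
    });

    a.join();
    b.join();
    c.join();
}
```

Note that a zero-length range never takes the lock (the m_size > 0 guard), and the overlap test uses closed intervals, so merely touching ranges like [0, 0x1000) and [0x1000, 0x2000) do not conflict.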

src/core/device_memory_manager.h

@@ -10,6 +10,7 @@
#include <mutex>
#include "common/common_types.h"
#include "common/range_mutex.h"
#include "common/scratch_buffer.h"
#include "common/virtual_buffer.h"
@@ -204,7 +205,7 @@ private:
(1ULL << (device_virtual_bits - page_bits)) / subentries;
using CachedPages = std::array<CounterEntry, num_counter_entries>;
std::unique_ptr<CachedPages> cached_pages;
-std::mutex counter_guard;
+Common::RangeMutex counter_guard;
std::mutex mapping_guard;
};
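With counter_guard's type swapped, two UpdatePagesCachedCount calls contend only when their device-address windows actually overlap; updates to disjoint ranges, which previously serialized on the single std::mutex, can now run in parallel (see the sketch after the next file's diff).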

src/core/device_memory_manager.inc

@@ -508,12 +508,7 @@ void DeviceMemoryManager<Traits>::UnregisterProcess(Asid asid) {
template <typename Traits>
void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta) {
-std::unique_lock<std::mutex> lk(counter_guard, std::defer_lock);
-const auto Lock = [&] {
-    if (!lk) {
-        lk.lock();
-    }
-};
+Common::ScopedRangeLock lk(counter_guard, addr, size);
u64 uncache_begin = 0;
u64 cache_begin = 0;
u64 uncache_bytes = 0;
@@ -548,7 +543,6 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
}
uncache_bytes += Memory::YUZU_PAGESIZE;
} else if (uncache_bytes > 0) {
-Lock();
MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS,
uncache_bytes, false);
uncache_bytes = 0;
@@ -559,7 +553,6 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
}
cache_bytes += Memory::YUZU_PAGESIZE;
} else if (cache_bytes > 0) {
-Lock();
MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
true);
cache_bytes = 0;
@@ -567,12 +560,10 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
vpage++;
}
if (uncache_bytes > 0) {
-Lock();
MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes,
false);
}
if (cache_bytes > 0) {
-Lock();
MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
true);
}