yuzu-emu/yuzu-mainline

vk_scheduler: Use locks instead of an SPSC queue

This tries to fix a data race where we'd wait forever for the GPU.
ReinUsesLisp 2021-05-07 06:26:12 -03:00 committed by ameerj
parent 56c47951c5
commit 36f1586267
2 changed files with 42 additions and 32 deletions
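
For readers unfamiliar with the two approaches: the commit swaps a lock-free single-producer single-consumer (SPSC) queue for the classic mutex-plus-condition-variable work queue. Below is a minimal, self-contained sketch of that target pattern; the WorkQueue name and int payload are illustrative stand-ins, not yuzu types.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>

class WorkQueue {
public:
    void Push(int job) {
        {
            std::lock_guard lock{mutex};
            queue.push(job);
        }
        // Notify after releasing the lock so the woken thread does not
        // immediately block on a still-held mutex.
        cv.notify_one();
    }

    void Stop() {
        {
            std::lock_guard lock{mutex};
            quit = true;
        }
        cv.notify_all();
    }

    void Consume() {
        while (true) {
            int job;
            {
                std::unique_lock lock{mutex};
                cv.wait(lock, [this] { return !queue.empty() || quit; });
                if (queue.empty()) {
                    return; // woken for shutdown with nothing left to run
                }
                job = queue.front();
                queue.pop();
            }
            std::printf("ran job %d\n", job); // work happens unlocked
        }
    }

private:
    std::queue<int> queue;
    std::mutex mutex;
    std::condition_variable cv;
    bool quit = false;
};

int main() {
    WorkQueue work_queue;
    std::thread worker{&WorkQueue::Consume, &work_queue};
    for (int i = 0; i < 3; ++i) {
        work_queue.Push(i);
    }
    work_queue.Stop();
    worker.join();
}

The invariant doing the real work: every read and write of the predicate state (the queue and the quit flag) happens under the same mutex the condition variable waits on, which is exactly what the SPSC-queue version could not guarantee for its wait loop.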

src/video_core/renderer_vulkan/vk_scheduler.cpp

@@ -47,8 +47,11 @@ VKScheduler::VKScheduler(const Device& device_, StateTracker& state_tracker_)
 }
 
 VKScheduler::~VKScheduler() {
-    quit = true;
-    cv.notify_all();
+    {
+        std::lock_guard lock{work_mutex};
+        quit = true;
+    }
+    work_cv.notify_all();
     worker_thread.join();
 }
 
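A note on why the destructor hunk matters: the old code flipped quit and notified without any lock, so the notification could fire in the window between the worker checking its predicate and going to sleep, leaving the join above waiting forever. A standalone sketch of the two shutdown variants, not yuzu code:

#include <condition_variable>
#include <mutex>

std::mutex m;
std::condition_variable cv;
bool quit = false;

// Correct: the flag flips while holding the mutex that guards the wait
// predicate, so a waiter cannot slip between its check and its sleep.
void RequestQuit() {
    {
        std::lock_guard lock{m};
        quit = true;
    }
    cv.notify_all(); // notify after unlocking, as the commit does
}

// Racy: if this interleaves between a waiter's predicate check (quit still
// false) and its call to wait(), the notify is lost and the waiter sleeps
// forever -- the hang described in the commit message.
void RequestQuitRacy() {
    quit = true;
    cv.notify_all();
}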
@@ -69,20 +72,19 @@ void VKScheduler::WaitWorker() {
     MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
     DispatchWork();
 
-    bool finished = false;
-    do {
-        cv.notify_all();
-        std::unique_lock lock{mutex};
-        finished = chunk_queue.Empty();
-    } while (!finished);
+    std::unique_lock lock{work_mutex};
+    wait_cv.wait(lock, [this] { return work_queue.empty(); });
 }
 
 void VKScheduler::DispatchWork() {
     if (chunk->Empty()) {
         return;
     }
-    chunk_queue.Push(std::move(chunk));
-    cv.notify_all();
+    {
+        std::lock_guard lock{work_mutex};
+        work_queue.push(std::move(chunk));
+    }
+    work_cv.notify_one();
     AcquireNewChunk();
 }
 
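WaitWorker and DispatchWork now split signalling across two condition variables: work_cv wakes the worker when work arrives, wait_cv wakes callers once the worker drains the queue. A compact sketch of both sides under that reading, with an int payload standing in for CommandChunk:

#include <condition_variable>
#include <mutex>
#include <queue>

std::mutex work_mutex;
std::condition_variable work_cv; // the worker sleeps on this
std::condition_variable wait_cv; // drain-waiters sleep on this
std::queue<int> work_queue;

// Producer side, mirroring DispatchWork: push under the lock, then notify
// one worker after the lock is released.
void Dispatch(int job) {
    {
        std::lock_guard lock{work_mutex};
        work_queue.push(job);
    }
    work_cv.notify_one();
}

// Mirrors WaitWorker: block until the queue is observed empty. The
// predicate is re-evaluated under the lock on every wakeup, so spurious
// wakeups and stale notifications are harmless.
void WaitForDrain() {
    std::unique_lock lock{work_mutex};
    wait_cv.wait(lock, [] { return work_queue.empty(); });
}

notify_one suffices in Dispatch because there is a single worker thread; the drain side uses notify_all (in the worker loop, next hunk) since any number of callers may be waiting.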
@@ -135,22 +137,27 @@ bool VKScheduler::UpdateGraphicsPipeline(GraphicsPipeline* pipeline) {
 
 void VKScheduler::WorkerThread() {
     Common::SetCurrentThreadName("yuzu:VulkanWorker");
-    std::unique_lock lock{mutex};
     do {
-        cv.wait(lock, [this] { return !chunk_queue.Empty() || quit; });
-        if (quit) {
-            continue;
+        if (work_queue.empty()) {
+            wait_cv.notify_all();
         }
-        while (!chunk_queue.Empty()) {
-            auto extracted_chunk = std::move(chunk_queue.Front());
-            chunk_queue.Pop();
-            const bool has_submit = extracted_chunk->HasSubmit();
-            extracted_chunk->ExecuteAll(current_cmdbuf);
-            if (has_submit) {
-                AllocateWorkerCommandBuffer();
+        std::unique_ptr<CommandChunk> work;
+        {
+            std::unique_lock lock{work_mutex};
+            work_cv.wait(lock, [this] { return !work_queue.empty() || quit; });
+            if (quit) {
+                continue;
             }
-            chunk_reserve.Push(std::move(extracted_chunk));
+            work = std::move(work_queue.front());
+            work_queue.pop();
         }
+        const bool has_submit = work->HasSubmit();
+        work->ExecuteAll(current_cmdbuf);
+        if (has_submit) {
+            AllocateWorkerCommandBuffer();
+        }
+        std::lock_guard reserve_lock{reserve_mutex};
+        chunk_reserve.push_back(std::move(work));
     } while (!quit);
 }
 
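The rewritten worker loop keeps the critical section tight: only the wait and the queue pop run under work_mutex, while HasSubmit and ExecuteAll run unlocked so a long command-buffer recording never blocks DispatchWork. A sketch of one loop iteration under that reading, with a stand-in Chunk type:

#include <condition_variable>
#include <memory>
#include <mutex>
#include <queue>

struct Chunk {
    void ExecuteAll() {} // stand-in for recording into a command buffer
};

std::mutex work_mutex;
std::condition_variable work_cv;
std::queue<std::unique_ptr<Chunk>> work_queue;
bool quit = false;

void WorkerIteration() {
    std::unique_ptr<Chunk> work;
    {
        std::unique_lock lock{work_mutex};
        work_cv.wait(lock, [] { return !work_queue.empty() || quit; });
        if (quit) {
            return;
        }
        work = std::move(work_queue.front());
        work_queue.pop();
    } // work_mutex released here
    work->ExecuteAll(); // the slow part happens without the lock held
}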
@@ -269,12 +276,13 @@ void VKScheduler::EndRenderPass() {
 }
 
 void VKScheduler::AcquireNewChunk() {
-    if (chunk_reserve.Empty()) {
+    std::lock_guard lock{reserve_mutex};
+    if (chunk_reserve.empty()) {
         chunk = std::make_unique<CommandChunk>();
         return;
     }
-    chunk = std::move(chunk_reserve.Front());
-    chunk_reserve.Pop();
+    chunk = std::move(chunk_reserve.back());
+    chunk_reserve.pop_back();
 }
 
 } // namespace Vulkan
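AcquireNewChunk now treats chunk_reserve as a LIFO free list behind its own reserve_mutex, so recycling finished chunks (done at the end of the worker loop) never contends with the work queue's lock. A generic sketch of the recycle-or-allocate idea; the Pool type is hypothetical, not yuzu's API:

#include <memory>
#include <mutex>
#include <vector>

template <typename T>
class Pool {
public:
    // Reuse the most recently returned object, or allocate a fresh one.
    std::unique_ptr<T> Acquire() {
        std::lock_guard lock{mutex};
        if (reserve.empty()) {
            return std::make_unique<T>();
        }
        std::unique_ptr<T> object = std::move(reserve.back());
        reserve.pop_back();
        return object;
    }

    // Hand a finished object back for reuse.
    void Release(std::unique_ptr<T> object) {
        std::lock_guard lock{mutex};
        reserve.push_back(std::move(object));
    }

private:
    std::vector<std::unique_ptr<T>> reserve;
    std::mutex mutex;
};

Popping from the back also means the most recently used chunk, likely still warm in cache, is reused first, which the switch from the SPSC queue's FIFO order to a vector makes possible.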

src/video_core/renderer_vulkan/vk_scheduler.h

@@ -6,14 +6,14 @@
 
 #include <atomic>
 #include <condition_variable>
 #include <cstddef>
 #include <memory>
-#include <stack>
+#include <queue>
 #include <thread>
 #include <utility>
+#include <vector>
 
 #include "common/alignment.h"
 #include "common/common_types.h"
-#include "common/threadsafe_queue.h"
 #include "video_core/renderer_vulkan/vk_master_semaphore.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
@@ -220,11 +220,13 @@ private:
     std::array<VkImage, 9> renderpass_images{};
     std::array<VkImageSubresourceRange, 9> renderpass_image_ranges{};
 
-    Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_queue;
-    Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_reserve;
-    std::mutex mutex;
-    std::condition_variable cv;
-    bool quit = false;
+    std::queue<std::unique_ptr<CommandChunk>> work_queue;
+    std::vector<std::unique_ptr<CommandChunk>> chunk_reserve;
+    std::mutex reserve_mutex;
+    std::mutex work_mutex;
+    std::condition_variable work_cv;
+    std::condition_variable wait_cv;
+    std::atomic_bool quit{};
 };
 
 } // namespace Vulkan
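One detail in the new members: quit became std::atomic_bool even though it is only written under work_mutex, because the worker re-reads it in the loop condition "} while (!quit);" after its unique_lock has gone out of scope. A minimal illustration of that mixed access, assuming the same locking scheme:

#include <atomic>
#include <condition_variable>
#include <mutex>

std::mutex work_mutex;
std::condition_variable work_cv;
std::atomic_bool quit{};

void WorkerLoop() {
    do {
        std::unique_lock lock{work_mutex};
        // Locked read: the predicate runs with work_mutex held and pairs
        // with the locked write in the destructor.
        work_cv.wait(lock, [] { return quit.load(); });
        // ... pop and execute queued work here ...
    } while (!quit); // unlocked read after the scope ends: needs the atomic
}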