yuzu-emu / yuzu-mainline (archived)

vk_pipeline_cache,shader_notify: Add shader notifications

ReinUsesLisp 2021-06-06 00:11:36 -03:00 committed by ameerj
parent 48aad8dc05
commit cffd4716c5
10 changed files with 126 additions and 95 deletions

src/video_core/renderer_vulkan/vk_compute_pipeline.cpp

@@ -14,6 +14,7 @@
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/shader_notify.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
@@ -24,14 +25,18 @@ using Tegra::Texture::TexturePair;
ComputePipeline::ComputePipeline(const Device& device_, DescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue_,
Common::ThreadWorker* thread_worker, const Shader::Info& info_,
Common::ThreadWorker* thread_worker,
VideoCore::ShaderNotify* shader_notify, const Shader::Info& info_,
vk::ShaderModule spv_module_)
: device{device_}, update_descriptor_queue{update_descriptor_queue_}, info{info_},
spv_module(std::move(spv_module_)) {
if (shader_notify) {
shader_notify->MarkShaderBuilding();
}
std::copy_n(info.constant_buffer_used_sizes.begin(), uniform_buffer_sizes.size(),
uniform_buffer_sizes.begin());
auto func{[this, &descriptor_pool] {
auto func{[this, &descriptor_pool, shader_notify] {
DescriptorLayoutBuilder builder{device.GetLogical()};
builder.Add(info, VK_SHADER_STAGE_COMPUTE_BIT);
@@ -66,6 +71,9 @@ ComputePipeline::ComputePipeline(const Device& device_, DescriptorPool& descript
std::lock_guard lock{build_mutex};
is_built = true;
build_condvar.notify_one();
if (shader_notify) {
shader_notify->MarkShaderComplete();
}
}};
if (thread_worker) {
thread_worker->QueueWork(std::move(func));
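
Both pipeline constructors now take an optional VideoCore::ShaderNotify pointer and bracket the (possibly asynchronous) build with it: MarkShaderBuilding() as soon as the object is created, MarkShaderComplete() at the very end of the build function, whether that function runs inline or on the thread worker; passing nullptr disables reporting. A minimal sketch of that bracketing with stand-in types; apart from the two Mark* calls, none of the names here are yuzu API.

#include <atomic>
#include <cstdio>
#include <functional>
#include <thread>

struct Notify {                       // stand-in for VideoCore::ShaderNotify
    void MarkShaderBuilding() noexcept { ++num_building; }
    void MarkShaderComplete() noexcept { ++num_complete; }
    std::atomic_int num_building{};
    std::atomic_int num_complete{};
};

class Pipeline {                      // stand-in for ComputePipeline/GraphicsPipeline
public:
    // notify may be null when the caller does not want this build reported.
    Pipeline(Notify* notify, std::function<void()> compile, bool async) {
        if (notify) {
            notify->MarkShaderBuilding();          // counted as soon as the build is queued
        }
        auto func = [notify, compile = std::move(compile)] {
            compile();                             // the expensive pipeline creation work
            if (notify) {
                notify->MarkShaderComplete();      // counted once the pipeline is usable
            }
        };
        if (async) {
            worker = std::thread{std::move(func)}; // background build, like ThreadWorker::QueueWork
        } else {
            func();                                // synchronous build
        }
    }
    ~Pipeline() {
        if (worker.joinable()) {
            worker.join();
        }
    }

private:
    std::thread worker;
};

int main() {
    Notify notify;
    {
        Pipeline pipeline{&notify, [] { /* build SPIR-V, create the pipeline */ }, true};
    }                                              // destructor joins the worker
    std::printf("building=%d complete=%d\n", notify.num_building.load(),
                notify.num_complete.load());       // prints building=1 complete=1
}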

src/video_core/renderer_vulkan/vk_compute_pipeline.h

@@ -18,6 +18,10 @@
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace VideoCore {
class ShaderNotify;
}
namespace Vulkan {
class Device;
@@ -27,7 +31,8 @@ class ComputePipeline {
public:
explicit ComputePipeline(const Device& device, DescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue,
Common::ThreadWorker* thread_worker, const Shader::Info& info,
Common::ThreadWorker* thread_worker,
VideoCore::ShaderNotify* shader_notify, const Shader::Info& info,
vk::ShaderModule spv_module);
ComputePipeline& operator=(ComputePipeline&&) noexcept = delete;
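
Note that the header takes the new parameter through a forward declaration of VideoCore::ShaderNotify rather than including video_core/shader_notify.h; only the .cpp files that actually call MarkShaderBuilding()/MarkShaderComplete() pull in the full definition. A single-file illustration of that pattern, with a hypothetical Widget standing in for the pipeline classes:

namespace VideoCore {
class ShaderNotify;  // declaration only: enough to form pointers and references
}

class Widget {       // hypothetical consumer, standing in for ComputePipeline
public:
    explicit Widget(VideoCore::ShaderNotify* notify) : notify_{notify} {}

private:
    VideoCore::ShaderNotify* notify_;  // stored as a pointer, so no definition is needed here
};

int main() {
    Widget widget{nullptr};  // compiles without ever seeing the ShaderNotify definition
}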

src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp

@@ -17,6 +17,7 @@
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/shader_notify.h"
#include "video_core/vulkan_common/vulkan_device.h"
#if defined(_MSC_VER) && defined(NDEBUG)
@@ -203,30 +204,30 @@ ConfigureFuncPtr ConfigureFunc(const std::array<vk::ShaderModule, NUM_STAGES>& m
}
} // Anonymous namespace
GraphicsPipeline::GraphicsPipeline(Tegra::Engines::Maxwell3D& maxwell3d_,
Tegra::MemoryManager& gpu_memory_, VKScheduler& scheduler_,
BufferCache& buffer_cache_, TextureCache& texture_cache_,
const Device& device_, DescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue_,
Common::ThreadWorker* worker_thread,
RenderPassCache& render_pass_cache,
const GraphicsPipelineCacheKey& key_,
std::array<vk::ShaderModule, NUM_STAGES> stages,
const std::array<const Shader::Info*, NUM_STAGES>& infos)
GraphicsPipeline::GraphicsPipeline(
Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_,
VKScheduler& scheduler_, BufferCache& buffer_cache_, TextureCache& texture_cache_,
VideoCore::ShaderNotify* shader_notify, const Device& device_, DescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread,
RenderPassCache& render_pass_cache, const GraphicsPipelineCacheKey& key_,
std::array<vk::ShaderModule, NUM_STAGES> stages,
const std::array<const Shader::Info*, NUM_STAGES>& infos)
: key{key_}, maxwell3d{maxwell3d_}, gpu_memory{gpu_memory_}, device{device_},
texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, scheduler{scheduler_},
update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} {
std::ranges::transform(infos, stage_infos.begin(),
[](const Shader::Info* info) { return info ? *info : Shader::Info{}; });
if (shader_notify) {
shader_notify->MarkShaderBuilding();
}
for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
const Shader::Info* const info{infos[stage]};
if (!info) {
continue;
}
stage_infos[stage] = *info;
enabled_uniform_buffer_masks[stage] = info->constant_buffer_mask;
std::ranges::copy(info->constant_buffer_used_sizes, uniform_buffer_sizes[stage].begin());
}
auto func{[this, &render_pass_cache, &descriptor_pool] {
auto func{[this, shader_notify, &render_pass_cache, &descriptor_pool] {
DescriptorLayoutBuilder builder{MakeBuilder(device, stage_infos)};
descriptor_set_layout = builder.CreateDescriptorSetLayout();
descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, stage_infos);
@@ -242,6 +243,9 @@ GraphicsPipeline::GraphicsPipeline(Tegra::Engines::Maxwell3D& maxwell3d_,
std::lock_guard lock{build_mutex};
is_built = true;
build_condvar.notify_one();
if (shader_notify) {
shader_notify->MarkShaderComplete();
}
}};
if (worker_thread) {
worker_thread->QueueWork(std::move(func));
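
One detail worth calling out in this constructor: the build lambda captures shader_notify by value. It is a raw pointer parameter, so capturing it by reference would dangle once the constructor returns while the lambda is still queued on the worker thread; copying the pointer keeps the deferred call valid because the ShaderNotify object itself is owned elsewhere (the GPU hands it in via the rasterizer, see vk_rasterizer.cpp below) and outlives the pipeline builders. A small illustration under those assumptions; ShaderNotifyLike and QueueBuild exist only for this example.

#include <functional>
#include <iostream>
#include <vector>

struct ShaderNotifyLike {
    void MarkShaderComplete() { ++complete; }
    int complete = 0;
};

void QueueBuild(ShaderNotifyLike* notify, std::vector<std::function<void()>>& queue) {
    // Capturing [&notify] would reference this parameter, which dies when QueueBuild returns;
    // copying the pointer keeps the deferred call valid as long as the pointee outlives the queue.
    queue.emplace_back([notify] {
        if (notify) {
            notify->MarkShaderComplete();
        }
    });
}

int main() {
    ShaderNotifyLike notify;
    std::vector<std::function<void()>> queue;
    QueueBuild(&notify, queue);
    queue.front()();                        // runs later, e.g. on a worker thread
    std::cout << notify.complete << '\n';   // prints 1
}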

src/video_core/renderer_vulkan/vk_graphics_pipeline.h

@@ -20,6 +20,10 @@
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace VideoCore {
class ShaderNotify;
}
namespace Vulkan {
struct GraphicsPipelineCacheKey {
@@ -64,16 +68,14 @@ class GraphicsPipeline {
static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage;
public:
explicit GraphicsPipeline(Tegra::Engines::Maxwell3D& maxwell3d,
Tegra::MemoryManager& gpu_memory, VKScheduler& scheduler,
BufferCache& buffer_cache, TextureCache& texture_cache,
const Device& device, DescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue,
Common::ThreadWorker* worker_thread,
RenderPassCache& render_pass_cache,
const GraphicsPipelineCacheKey& key,
std::array<vk::ShaderModule, NUM_STAGES> stages,
const std::array<const Shader::Info*, NUM_STAGES>& infos);
explicit GraphicsPipeline(
Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
VKScheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache,
VideoCore::ShaderNotify* shader_notify, const Device& device,
DescriptorPool& descriptor_pool, VKUpdateDescriptorQueue& update_descriptor_queue,
Common::ThreadWorker* worker_thread, RenderPassCache& render_pass_cache,
const GraphicsPipelineCacheKey& key, std::array<vk::ShaderModule, NUM_STAGES> stages,
const std::array<const Shader::Info*, NUM_STAGES>& infos);
GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete;
GraphicsPipeline(GraphicsPipeline&&) noexcept = delete;

src/video_core/renderer_vulkan/vk_pipeline_cache.cpp

@@ -235,11 +235,11 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxw
VKScheduler& scheduler_, DescriptorPool& descriptor_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
TextureCache& texture_cache_)
TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_)
: VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_},
device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_},
update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_},
buffer_cache{buffer_cache_}, texture_cache{texture_cache_},
buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_},
use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()},
workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "yuzu:PipelineBuilder"),
serialization_thread(1, "yuzu:PipelineSerialization") {
@@ -307,19 +307,7 @@ GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() {
return BuiltPipeline(current_pipeline);
}
}
const auto [pair, is_new]{graphics_cache.try_emplace(graphics_key)};
auto& pipeline{pair->second};
if (is_new) {
pipeline = CreateGraphicsPipeline();
}
if (!pipeline) {
return nullptr;
}
if (current_pipeline) {
current_pipeline->AddTransition(pipeline.get());
}
current_pipeline = pipeline.get();
return BuiltPipeline(current_pipeline);
return CurrentGraphicsPipelineSlowPath();
}
ComputePipeline* PipelineCache::CurrentComputePipeline() {
@@ -416,6 +404,22 @@ void PipelineCache::LoadDiskResources(u64 title_id, std::stop_token stop_loading
workers.WaitForRequests();
}
GraphicsPipeline* PipelineCache::CurrentGraphicsPipelineSlowPath() {
const auto [pair, is_new]{graphics_cache.try_emplace(graphics_key)};
auto& pipeline{pair->second};
if (is_new) {
pipeline = CreateGraphicsPipeline();
}
if (!pipeline) {
return nullptr;
}
if (current_pipeline) {
current_pipeline->AddTransition(pipeline.get());
}
current_pipeline = pipeline.get();
return BuiltPipeline(current_pipeline);
}
GraphicsPipeline* PipelineCache::BuiltPipeline(GraphicsPipeline* pipeline) const noexcept {
if (pipeline->IsBuilt()) {
return pipeline;
@@ -484,14 +488,16 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
device.SaveShader(code);
modules[stage_index] = BuildShader(device, code);
if (device.HasDebuggingToolAttached()) {
const std::string name{fmt::format("{:016x}", key.unique_hashes[index])};
const std::string name{fmt::format("Shader {:016x}", key.unique_hashes[index])};
modules[stage_index].SetObjectNameEXT(name.c_str());
}
}
Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
return std::make_unique<GraphicsPipeline>(
maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, device, descriptor_pool,
update_descriptor_queue, thread_worker, render_pass_cache, key, std::move(modules), infos);
VideoCore::ShaderNotify* const notify{build_in_parallel ? &shader_notify : nullptr};
return std::make_unique<GraphicsPipeline>(maxwell3d, gpu_memory, scheduler, buffer_cache,
texture_cache, notify, device, descriptor_pool,
update_descriptor_queue, thread_worker,
render_pass_cache, key, std::move(modules), infos);
} catch (const Shader::Exception& exception) {
LOG_ERROR(Render_Vulkan, "{}", exception.what());
@@ -550,12 +556,14 @@ std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
device.SaveShader(code);
vk::ShaderModule spv_module{BuildShader(device, code)};
if (device.HasDebuggingToolAttached()) {
const auto name{fmt::format("{:016x}", key.unique_hash)};
const auto name{fmt::format("Shader {:016x}", key.unique_hash)};
spv_module.SetObjectNameEXT(name.c_str());
}
Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
VideoCore::ShaderNotify* const notify{build_in_parallel ? &shader_notify : nullptr};
return std::make_unique<ComputePipeline>(device, descriptor_pool, update_descriptor_queue,
thread_worker, program.info, std::move(spv_module));
thread_worker, notify, program.info,
std::move(spv_module));
} catch (const Shader::Exception& exception) {
LOG_ERROR(Render_Vulkan, "{}", exception.what());
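
In both creation paths the notify pointer is only handed over when the pipeline is built in parallel, mirroring the existing thread_worker selection, so a synchronous build never shows up in the on-screen counter. A trivial sketch of that selection; both stub types are placeholders, not yuzu classes.

#include <cstdio>

struct WorkerStub {};   // stands in for Common::ThreadWorker
struct NotifyStub {};   // stands in for VideoCore::ShaderNotify

struct PipelineCacheSketch {
    WorkerStub workers;
    NotifyStub shader_notify;

    void CreatePipeline(bool build_in_parallel) {
        // Mirrors the selection above: a synchronous build gets neither a worker
        // thread nor a notify pointer, so it is never reported as "building".
        WorkerStub* const thread_worker = build_in_parallel ? &workers : nullptr;
        NotifyStub* const notify = build_in_parallel ? &shader_notify : nullptr;
        std::printf("parallel=%d worker=%p notify=%p\n", build_in_parallel,
                    static_cast<void*>(thread_worker), static_cast<void*>(notify));
    }
};

int main() {
    PipelineCacheSketch cache;
    cache.CreatePipeline(true);   // asynchronous build: reported
    cache.CreatePipeline(false);  // synchronous build: not reported
}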

src/video_core/renderer_vulkan/vk_pipeline_cache.h

@@ -38,6 +38,10 @@ namespace Shader::IR {
struct Program;
}
namespace VideoCore {
class ShaderNotify;
}
namespace Vulkan {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
@@ -104,7 +108,7 @@ public:
VKScheduler& scheduler, DescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue,
RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
TextureCache& texture_cache);
TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_);
~PipelineCache();
[[nodiscard]] GraphicsPipeline* CurrentGraphicsPipeline();
@@ -115,6 +119,8 @@ public:
const VideoCore::DiskResourceLoadCallback& callback);
private:
[[nodiscard]] GraphicsPipeline* CurrentGraphicsPipelineSlowPath();
[[nodiscard]] GraphicsPipeline* BuiltPipeline(GraphicsPipeline* pipeline) const noexcept;
std::unique_ptr<GraphicsPipeline> CreateGraphicsPipeline();
@@ -138,6 +144,7 @@ private:
RenderPassCache& render_pass_cache;
BufferCache& buffer_cache;
TextureCache& texture_cache;
VideoCore::ShaderNotify& shader_notify;
GraphicsPipelineCacheKey graphics_key{};
GraphicsPipeline* current_pipeline{};

src/video_core/renderer_vulkan/vk_rasterizer.cpp

@@ -140,7 +140,7 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime),
pipeline_cache(*this, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache,
texture_cache),
texture_cache, gpu.ShaderNotify()),
query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{ buffer_cache },
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
wfi_event(device.GetLogical().CreateEvent()) {

src/video_core/shader_notify.cpp

@@ -2,42 +2,35 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <mutex>
#include <atomic>
#include <chrono>
#include <optional>
#include "video_core/shader_notify.h"
using namespace std::chrono_literals;
namespace VideoCore {
namespace {
constexpr auto UPDATE_TICK = 32ms;
}
ShaderNotify::ShaderNotify() = default;
ShaderNotify::~ShaderNotify() = default;
const auto TIME_TO_STOP_REPORTING = 2s;
std::size_t ShaderNotify::GetShadersBuilding() {
const auto now = std::chrono::high_resolution_clock::now();
const auto diff = now - last_update;
if (diff > UPDATE_TICK) {
std::shared_lock lock(mutex);
last_updated_count = accurate_count;
int ShaderNotify::ShadersBuilding() noexcept {
const int now_complete = num_complete.load(std::memory_order::relaxed);
const int now_building = num_building.load(std::memory_order::relaxed);
if (now_complete == now_building) {
const auto now = std::chrono::high_resolution_clock::now();
if (completed && num_complete == num_when_completed) {
if (now - complete_time > TIME_TO_STOP_REPORTING) {
report_base = now_complete;
completed = false;
}
} else {
completed = true;
num_when_completed = num_complete;
complete_time = now;
}
}
return last_updated_count;
}
std::size_t ShaderNotify::GetShadersBuildingAccurate() {
std::shared_lock lock{mutex};
return accurate_count;
}
void ShaderNotify::MarkShaderComplete() {
std::unique_lock lock{mutex};
accurate_count--;
}
void ShaderNotify::MarkSharderBuilding() {
std::unique_lock lock{mutex};
accurate_count++;
return now_building - report_base;
}
} // namespace VideoCore
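
The rewritten ShaderNotify drops the shared_mutex in favour of two monotonically increasing atomic counters: builders bump num_building when a build is queued and num_complete when it finishes, and the reader reports num_building - report_base. Once the two counters have been equal for TIME_TO_STOP_REPORTING (2 s), report_base is advanced to the current count, so the figure stays on screen briefly after the last shader finishes instead of disappearing immediately. A self-contained restatement of that logic, runnable outside yuzu; only the timing test in main() is illustrative.

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

using namespace std::chrono_literals;

const auto TIME_TO_STOP_REPORTING = 2s;

class ShaderNotify {
public:
    int ShadersBuilding() noexcept {
        const int now_complete = num_complete.load(std::memory_order::relaxed);
        const int now_building = num_building.load(std::memory_order::relaxed);
        if (now_complete == now_building) {
            const auto now = std::chrono::high_resolution_clock::now();
            if (completed && num_complete == num_when_completed) {
                if (now - complete_time > TIME_TO_STOP_REPORTING) {
                    report_base = now_complete;   // fold the finished shaders into the base
                    completed = false;
                }
            } else {
                completed = true;                 // start the 2 s grace period
                num_when_completed = num_complete;
                complete_time = now;
            }
        }
        return now_building - report_base;        // shaders still shown as "building"
    }

    void MarkShaderComplete() noexcept { ++num_complete; }
    void MarkShaderBuilding() noexcept { ++num_building; }

private:
    std::atomic_int num_building{};
    std::atomic_int num_complete{};
    int report_base{};
    bool completed{};
    int num_when_completed{};
    std::chrono::high_resolution_clock::time_point complete_time;
};

int main() {
    ShaderNotify notify;
    notify.MarkShaderBuilding();
    std::printf("%d\n", notify.ShadersBuilding()); // 1: one shader queued
    notify.MarkShaderComplete();
    std::printf("%d\n", notify.ShadersBuilding()); // still 1: held on screen after completion
    std::this_thread::sleep_for(2500ms);
    std::printf("%d\n", notify.ShadersBuilding()); // 0: reset 2 s after the last completion
}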

src/video_core/shader_notify.h

@@ -4,26 +4,30 @@
#pragma once
#include <atomic>
#include <chrono>
#include <shared_mutex>
#include "common/common_types.h"
#include <optional>
namespace VideoCore {
class ShaderNotify {
public:
ShaderNotify();
~ShaderNotify();
[[nodiscard]] int ShadersBuilding() noexcept;
std::size_t GetShadersBuilding();
std::size_t GetShadersBuildingAccurate();
void MarkShaderComplete() noexcept {
++num_complete;
}
void MarkShaderComplete();
void MarkSharderBuilding();
void MarkShaderBuilding() noexcept {
++num_building;
}
private:
std::size_t last_updated_count{};
std::size_t accurate_count{};
std::shared_mutex mutex;
std::chrono::high_resolution_clock::time_point last_update{};
std::atomic_int num_building{};
std::atomic_int num_complete{};
int report_base{};
bool completed{};
int num_when_completed{};
std::chrono::high_resolution_clock::time_point complete_time;
};
} // namespace VideoCore
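
The mark functions are now inline, noexcept, lock-free increments (the old versions each took the shared_mutex), so any number of builder threads can report progress without contention; the time-based bookkeeping sits entirely in ShadersBuilding(), which the Qt status bar polls (see the main.cpp hunk below). A quick multi-threaded smoke test of the producer side, using a counters-only stand-in; the test itself is illustrative.

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>
#include <vector>

struct ShaderNotifyCounters {   // producer-side subset of the header above
    void MarkShaderComplete() noexcept { ++num_complete; }
    void MarkShaderBuilding() noexcept { ++num_building; }
    std::atomic_int num_building{};
    std::atomic_int num_complete{};
};

int main() {
    ShaderNotifyCounters notify;
    std::vector<std::thread> builders;
    for (int i = 0; i < 4; ++i) {
        notify.MarkShaderBuilding();            // bumped when the pipeline is queued
        builders.emplace_back([&notify] {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            notify.MarkShaderComplete();        // bumped from the worker thread
        });
    }
    for (std::thread& builder : builders) {
        builder.join();
    }
    std::printf("building=%d complete=%d\n", notify.num_building.load(),
                notify.num_complete.load());    // prints building=4 complete=4
}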

src/yuzu/main.cpp

@@ -2900,13 +2900,13 @@ void GMainWindow::UpdateStatusBar() {
return;
}
auto results = Core::System::GetInstance().GetAndResetPerfStats();
auto& shader_notify = Core::System::GetInstance().GPU().ShaderNotify();
const auto shaders_building = shader_notify.GetShadersBuilding();
auto& system = Core::System::GetInstance();
auto results = system.GetAndResetPerfStats();
auto& shader_notify = system.GPU().ShaderNotify();
const int shaders_building = shader_notify.ShadersBuilding();
if (shaders_building != 0) {
shader_building_label->setText(
tr("Building: %n shader(s)", "", static_cast<int>(shaders_building)));
if (shaders_building > 0) {
shader_building_label->setText(tr("Building: %n shader(s)", "", shaders_building));
shader_building_label->setVisible(true);
} else {
shader_building_label->setVisible(false);