yuzu-emu/yuzu-android (archived)

Merge pull request #2122 from ReinUsesLisp/vulkan-resource-manager

vk_resource_manager: Implement fence and command buffer allocator
bunnei 2019-02-18 21:05:28 -05:00 committed by GitHub
commit 4bce08d497
3 changed files with 468 additions and 1 deletion

video_core/CMakeLists.txt

@@ -105,7 +105,9 @@ if (ENABLE_VULKAN)
     target_sources(video_core PRIVATE
         renderer_vulkan/declarations.h
         renderer_vulkan/vk_device.cpp
-        renderer_vulkan/vk_device.h)
+        renderer_vulkan/vk_device.h
+        renderer_vulkan/vk_resource_manager.cpp
+        renderer_vulkan/vk_resource_manager.h)
     target_include_directories(video_core PRIVATE ../../externals/Vulkan-Headers/include)
     target_compile_definitions(video_core PRIVATE HAS_VULKAN)

video_core/renderer_vulkan/vk_resource_manager.cpp

@@ -0,0 +1,285 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <limits>
#include <optional>
#include "common/assert.h"
#include "common/logging/log.h"
#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
namespace Vulkan {
// TODO(Rodrigo): Fine tune these numbers.
constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
constexpr std::size_t FENCES_GROW_STEP = 0x40;
class CommandBufferPool final : public VKFencedPool {
public:
CommandBufferPool(const VKDevice& device)
: VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
void Allocate(std::size_t begin, std::size_t end) {
const auto dev = device.GetLogical();
const auto& dld = device.GetDispatchLoader();
const u32 graphics_family = device.GetGraphicsFamily();
auto pool = std::make_unique<Pool>();
// Command buffers are going to be committed, recorded and executed every single usage cycle.
// They are also going to be reset when committed.
const auto pool_flags = vk::CommandPoolCreateFlagBits::eTransient |
vk::CommandPoolCreateFlagBits::eResetCommandBuffer;
const vk::CommandPoolCreateInfo cmdbuf_pool_ci(pool_flags, graphics_family);
pool->handle = dev.createCommandPoolUnique(cmdbuf_pool_ci, nullptr, dld);
const vk::CommandBufferAllocateInfo cmdbuf_ai(*pool->handle,
vk::CommandBufferLevel::ePrimary,
static_cast<u32>(COMMAND_BUFFER_POOL_SIZE));
pool->cmdbufs =
dev.allocateCommandBuffersUnique<std::allocator<UniqueCommandBuffer>>(cmdbuf_ai, dld);
pools.push_back(std::move(pool));
}
vk::CommandBuffer Commit(VKFence& fence) {
const std::size_t index = CommitResource(fence);
const auto pool_index = index / COMMAND_BUFFER_POOL_SIZE;
const auto sub_index = index % COMMAND_BUFFER_POOL_SIZE;
return *pools[pool_index]->cmdbufs[sub_index];
}
private:
struct Pool {
UniqueCommandPool handle;
std::vector<UniqueCommandBuffer> cmdbufs;
};
const VKDevice& device;
std::vector<std::unique_ptr<Pool>> pools;
};
VKResource::VKResource() = default;
VKResource::~VKResource() = default;
VKFence::VKFence(const VKDevice& device, UniqueFence handle)
: device{device}, handle{std::move(handle)} {}
VKFence::~VKFence() = default;
void VKFence::Wait() {
const auto dev = device.GetLogical();
const auto& dld = device.GetDispatchLoader();
dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
}
void VKFence::Release() {
is_owned = false;
}
void VKFence::Commit() {
is_owned = true;
is_used = true;
}
bool VKFence::Tick(bool gpu_wait, bool owner_wait) {
if (!is_used) {
// If a fence is not used it's always free.
return true;
}
if (is_owned && !owner_wait) {
// The fence is still owned (Release has not been called) and an ownership wait has not
// been requested.
return false;
}
const auto dev = device.GetLogical();
const auto& dld = device.GetDispatchLoader();
if (gpu_wait) {
// Wait for the fence if it has been requested.
dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
} else {
if (dev.getFenceStatus(*handle, dld) != vk::Result::eSuccess) {
// The Vulkan fence is not ready; there's not much we can do here.
return false;
}
}
// Broadcast to the protected resources that they are now free.
for (auto* resource : protected_resources) {
resource->OnFenceRemoval(this);
}
protected_resources.clear();
// Prepare the fence for reuse.
dev.resetFences({*handle}, dld);
is_used = false;
return true;
}
void VKFence::Protect(VKResource* resource) {
protected_resources.push_back(resource);
}
void VKFence::Unprotect(const VKResource* resource) {
const auto it = std::find(protected_resources.begin(), protected_resources.end(), resource);
if (it != protected_resources.end()) {
protected_resources.erase(it);
}
}
VKFenceWatch::VKFenceWatch() = default;
VKFenceWatch::~VKFenceWatch() {
if (fence) {
fence->Unprotect(this);
}
}
void VKFenceWatch::Wait() {
if (!fence) {
return;
}
fence->Wait();
fence->Unprotect(this);
fence = nullptr;
}
void VKFenceWatch::Watch(VKFence& new_fence) {
Wait();
fence = &new_fence;
fence->Protect(this);
}
bool VKFenceWatch::TryWatch(VKFence& new_fence) {
if (fence) {
return false;
}
fence = &new_fence;
fence->Protect(this);
return true;
}
void VKFenceWatch::OnFenceRemoval(VKFence* signaling_fence) {
ASSERT_MSG(signaling_fence == fence, "Removing the wrong fence");
fence = nullptr;
}
VKFencedPool::VKFencedPool(std::size_t grow_step) : grow_step{grow_step} {}
VKFencedPool::~VKFencedPool() = default;
std::size_t VKFencedPool::CommitResource(VKFence& fence) {
const auto Search = [&](std::size_t begin, std::size_t end) -> std::optional<std::size_t> {
for (std::size_t iterator = begin; iterator < end; ++iterator) {
if (watches[iterator]->TryWatch(fence)) {
// The resource is now being watched; a free resource was successfully found.
return iterator;
}
}
return {};
};
// Try to find a free resource from the hinted position to the end.
auto found = Search(free_iterator, watches.size());
if (!found) {
// Search from beginning to the hinted position.
found = Search(0, free_iterator);
if (!found) {
// Both searches failed, the pool is full; handle it.
const std::size_t free_resource = ManageOverflow();
// Watch will wait for the resource to be free.
watches[free_resource]->Watch(fence);
found = free_resource;
}
}
// Free iterator is hinted to the resource after the one that's been committed.
free_iterator = (*found + 1) % watches.size();
return *found;
}
std::size_t VKFencedPool::ManageOverflow() {
const std::size_t old_capacity = watches.size();
Grow();
// The last entry is guaranteed to be free, since it's the first element of the freshly
// allocated resources.
return old_capacity;
}
void VKFencedPool::Grow() {
const std::size_t old_capacity = watches.size();
watches.resize(old_capacity + grow_step);
std::generate(watches.begin() + old_capacity, watches.end(),
[]() { return std::make_unique<VKFenceWatch>(); });
Allocate(old_capacity, old_capacity + grow_step);
}
VKResourceManager::VKResourceManager(const VKDevice& device) : device{device} {
GrowFences(FENCES_GROW_STEP);
command_buffer_pool = std::make_unique<CommandBufferPool>(device);
}
VKResourceManager::~VKResourceManager() = default;
VKFence& VKResourceManager::CommitFence() {
const auto StepFences = [&](bool gpu_wait, bool owner_wait) -> VKFence* {
const auto Tick = [=](auto& fence) { return fence->Tick(gpu_wait, owner_wait); };
const auto hinted = fences.begin() + fences_iterator;
auto it = std::find_if(hinted, fences.end(), Tick);
if (it == fences.end()) {
it = std::find_if(fences.begin(), hinted, Tick);
if (it == hinted) {
return nullptr;
}
}
fences_iterator = std::distance(fences.begin(), it) + 1;
if (fences_iterator >= fences.size())
fences_iterator = 0;
auto& fence = *it;
fence->Commit();
return fence.get();
};
VKFence* found_fence = StepFences(false, false);
if (!found_fence) {
// Try again, this time waiting.
found_fence = StepFences(true, false);
if (!found_fence) {
// Allocate new fences and try again.
LOG_INFO(Render_Vulkan, "Allocating new fences {} -> {}", fences.size(),
fences.size() + FENCES_GROW_STEP);
GrowFences(FENCES_GROW_STEP);
found_fence = StepFences(true, false);
ASSERT(found_fence != nullptr);
}
}
return *found_fence;
}
vk::CommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) {
return command_buffer_pool->Commit(fence);
}
void VKResourceManager::GrowFences(std::size_t new_fences_count) {
const auto dev = device.GetLogical();
const auto& dld = device.GetDispatchLoader();
const vk::FenceCreateInfo fence_ci;
const std::size_t previous_size = fences.size();
fences.resize(previous_size + new_fences_count);
std::generate(fences.begin() + previous_size, fences.end(), [&]() {
return std::make_unique<VKFence>(device, dev.createFenceUnique(fence_ci, nullptr, dld));
});
}
} // namespace Vulkan
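
As a hedged illustration of how the watch type implemented above can be used, the sketch below wraps a hypothetical staging buffer in a VKFenceWatch so the buffer is only reused once the fence that last used it has been signaled. The StagingBuffer class and its methods are illustrative and not part of this commit.

// Hypothetical example, not part of this commit: protecting an externally owned resource with a
// VKFenceWatch instead of deriving its handler from VKResource.
#include "video_core/renderer_vulkan/vk_resource_manager.h"

namespace Vulkan {

class StagingBuffer {
public:
    /// Tries to reserve the buffer for the usage cycle protected by the fence.
    /// Returns false while the buffer is still in flight and cannot be reused yet.
    bool TryReserve(VKFence& fence) {
        return watch.TryWatch(fence);
    }

    /// Reserves the buffer, blocking until the previous usage cycle has finished.
    void Reserve(VKFence& fence) {
        watch.Watch(fence);
    }

private:
    VKFenceWatch watch; ///< Watches the fence of the cycle currently using this buffer.
    // Buffer memory and handles would live here in a real implementation.
};

} // namespace Vulkan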

video_core/renderer_vulkan/vk_resource_manager.h

@@ -0,0 +1,180 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <cstddef>
#include <memory>
#include <vector>
#include "video_core/renderer_vulkan/declarations.h"
namespace Vulkan {
class VKDevice;
class VKFence;
class VKResourceManager;
class CommandBufferPool;
/// Interface for a Vulkan resource
class VKResource {
public:
explicit VKResource();
virtual ~VKResource();
/**
* Signals the object that an owning fence has been signaled.
* @param signaling_fence Fence that signals its usage end.
*/
virtual void OnFenceRemoval(VKFence* signaling_fence) = 0;
};
/**
* Fences take ownership of objects, protecting them from GPU-side or driver-side concurrent access.
* They must be committed from the resource manager. Their usage flow is: commit the fence from
* the resource manager, protect resources with it and use them, send the fence to an execution
* queue, Wait for it if needed, and then call Release. Used resources will automatically be
* signaled when they are free to be reused.
* @brief Protects resources for concurrent usage and signals their release.
*/
class VKFence {
friend class VKResourceManager;
public:
explicit VKFence(const VKDevice& device, UniqueFence handle);
~VKFence();
/**
* Waits for the fence to be signaled.
* @warning You must have ownership of the fence and it has to have been previously sent to a
* queue to call this function.
*/
void Wait();
/**
* Releases ownership of the fence. Call this after it has been sent to an execution queue.
* Unmanaged usage of the fence after this call results in undefined behavior because it may
* already be in use for something else.
*/
void Release();
/// Protects a resource with this fence.
void Protect(VKResource* resource);
/// Removes protection for a resource.
void Unprotect(const VKResource* resource);
/// Retrieves the fence.
operator vk::Fence() const {
return *handle;
}
private:
/// Takes ownership of the fence.
void Commit();
/**
* Updates the fence status.
* @warning Waiting for the owner might soft lock the execution.
* @param gpu_wait Wait for the fence to be signaled by the driver.
* @param owner_wait Wait for the owner to signal its freedom.
* @returns True if the fence is free. Waiting for the GPU and the owner will always return true.
*/
bool Tick(bool gpu_wait, bool owner_wait);
const VKDevice& device; ///< Device handler
UniqueFence handle; ///< Vulkan fence
std::vector<VKResource*> protected_resources; ///< List of resources protected by this fence
bool is_owned = false; ///< The fence has been committed but not released yet.
bool is_used = false; ///< The fence has been committed but it has not been checked to be free.
};
/**
* A fence watch is used to keep track of the usage of a fence and protect a resource or set of
* resources without having their handlers inherit from VKResource.
*/
class VKFenceWatch final : public VKResource {
public:
explicit VKFenceWatch();
~VKFenceWatch();
/// Waits for the fence to be released.
void Wait();
/**
* Waits for a previous fence and watches a new one.
* @param new_fence New fence to watch.
*/
void Watch(VKFence& new_fence);
/**
* Starts watching a new fence if the resource is not being watched already.
* @returns True if the watch has started, false if the resource is already being watched.
*/
bool TryWatch(VKFence& new_fence);
void OnFenceRemoval(VKFence* signaling_fence) override;
private:
VKFence* fence{}; ///< Fence watching this resource. nullptr when the watch is free.
};
/**
* Handles a pool of resources protected by fences. Manages resource overflow by allocating more
* resources.
*/
class VKFencedPool {
public:
explicit VKFencedPool(std::size_t grow_step);
virtual ~VKFencedPool();
protected:
/**
* Commits a free resource and protects it with a fence. It may allocate new resources.
* @param fence Fence that protects the committed resource.
* @returns Index of the committed resource.
*/
std::size_t CommitResource(VKFence& fence);
/// Called when a chunk of resources has to be allocated.
virtual void Allocate(std::size_t begin, std::size_t end) = 0;
private:
/// Manages pool overflow by allocating new resources.
std::size_t ManageOverflow();
/// Allocates a new page of resources.
void Grow();
std::size_t grow_step = 0; ///< Number of new resources created after an overflow
std::size_t free_iterator = 0; ///< Hint to where the next free resource is likely to be found
std::vector<std::unique_ptr<VKFenceWatch>> watches; ///< Set of watched resources
};
/**
* The resource manager handles all resources that can be protected with a fence avoiding
* driver-side or GPU-side concurrent usage. Usage is documented in VKFence.
*/
class VKResourceManager final {
public:
explicit VKResourceManager(const VKDevice& device);
~VKResourceManager();
/// Commits a fence. It has to be sent to a queue and released.
VKFence& CommitFence();
/// Commits an unused command buffer and protects it with a fence.
vk::CommandBuffer CommitCommandBuffer(VKFence& fence);
private:
/// Allocates new fences.
void GrowFences(std::size_t new_fences_count);
const VKDevice& device; ///< Device handler.
std::size_t fences_iterator = 0; ///< Index where a free fence is likely to be found.
std::vector<std::unique_ptr<VKFence>> fences; ///< Pool of fences.
std::unique_ptr<CommandBufferPool> command_buffer_pool; ///< Pool of command buffers.
};
} // namespace Vulkan
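
To make the intended call sequence concrete, here is a minimal usage sketch following the commit/protect/submit/release flow described in the VKFence documentation above. The SubmitFrame function, the queue parameter and the texture_watch resource are hypothetical and not part of this commit; semaphores and error handling are omitted.

// Hypothetical usage sketch, not part of this commit.
#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"

namespace Vulkan {

void SubmitFrame(VKResourceManager& resource_manager, const VKDevice& device, vk::Queue queue,
                 VKFenceWatch& texture_watch) {
    const auto& dld = device.GetDispatchLoader();

    // Commit a fence; every resource protected with it belongs to this usage cycle.
    VKFence& fence = resource_manager.CommitFence();

    // Commit a command buffer protected by the same fence and record work into it.
    const vk::CommandBuffer cmdbuf = resource_manager.CommitCommandBuffer(fence);
    const vk::CommandBufferBeginInfo begin_info(vk::CommandBufferUsageFlagBits::eOneTimeSubmit);
    cmdbuf.begin(begin_info, dld);
    // ... record commands that touch the watched resource here ...
    cmdbuf.end(dld);

    // Protect an externally owned resource; Watch waits for any previous fence first.
    texture_watch.Watch(fence);

    // Send the fence to an execution queue and release ownership of it. Tick will signal the
    // protected resources once the driver reports the fence as signaled.
    const vk::SubmitInfo submit_info(0, nullptr, nullptr, 1, &cmdbuf);
    queue.submit({submit_info}, static_cast<vk::Fence>(fence), dld);
    fence.Release();
}

} // namespace Vulkan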