yuzu-emu/yuzu-mainline

Merge pull request #2686 from ReinUsesLisp/vk-scheduler

vk_scheduler: Drop execution context in favor of views
commit 7fb7054bc8
Authored by bunnei on 2019-07-10 16:35:48 -04:00; committed by GitHub.
6 changed files with 60 additions and 50 deletions
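
The refactor drops VKExecutionContext, which callers had to thread through every Send/Flush/Finish and re-capture whenever the scheduler moved to a new context. Fence and command buffer access now goes through lightweight views queried from the scheduler on demand. A minimal before/after sketch of a caller (hypothetical code, not part of this diff):

    // Before: the execution context is passed in and re-assigned by any
    // call that may flush.
    VKExecutionContext exctx = scheduler.GetExecutionContext();
    exctx = buffer_cache.Send(exctx);
    exctx.GetCommandBuffer().draw(3, 1, 0, 0);

    // After: the scheduler owns the state; callers query views when needed.
    buffer_cache.Send();
    scheduler.GetCommandBuffer()->draw(3, 1, 0, 0);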

src/video_core/renderer_vulkan/vk_buffer_cache.cpp

@@ -109,8 +109,8 @@ void VKBufferCache::Reserve(std::size_t max_size) {
     }
 }
 
-VKExecutionContext VKBufferCache::Send(VKExecutionContext exctx) {
-    return stream_buffer->Send(exctx, buffer_offset - buffer_offset_base);
+void VKBufferCache::Send() {
+    stream_buffer->Send(buffer_offset - buffer_offset_base);
 }
 
 void VKBufferCache::AlignBuffer(std::size_t alignment) {

src/video_core/renderer_vulkan/vk_buffer_cache.h

@@ -77,7 +77,7 @@ public:
     void Reserve(std::size_t max_size);
 
     /// Ensures that the set data is sent to the device.
-    [[nodiscard]] VKExecutionContext Send(VKExecutionContext exctx);
+    void Send();
 
     /// Returns the buffer cache handle.
     vk::Buffer GetBuffer() const {

src/video_core/renderer_vulkan/vk_scheduler.cpp

@@ -19,23 +19,19 @@ VKScheduler::VKScheduler(const VKDevice& device, VKResourceManager& resource_manager)
 
 VKScheduler::~VKScheduler() = default;
 
-VKExecutionContext VKScheduler::GetExecutionContext() const {
-    return VKExecutionContext(current_fence, current_cmdbuf);
-}
-
-VKExecutionContext VKScheduler::Flush(vk::Semaphore semaphore) {
+void VKScheduler::Flush(bool release_fence, vk::Semaphore semaphore) {
     SubmitExecution(semaphore);
-    current_fence->Release();
+    if (release_fence)
+        current_fence->Release();
     AllocateNewContext();
-    return GetExecutionContext();
 }
 
-VKExecutionContext VKScheduler::Finish(vk::Semaphore semaphore) {
+void VKScheduler::Finish(bool release_fence, vk::Semaphore semaphore) {
     SubmitExecution(semaphore);
     current_fence->Wait();
-    current_fence->Release();
+    if (release_fence)
+        current_fence->Release();
     AllocateNewContext();
-    return GetExecutionContext();
 }
 
 void VKScheduler::SubmitExecution(vk::Semaphore semaphore) {

src/video_core/renderer_vulkan/vk_scheduler.h

@@ -10,10 +10,43 @@
 namespace Vulkan {
 
 class VKDevice;
-class VKExecutionContext;
 class VKFence;
 class VKResourceManager;
 
+class VKFenceView {
+public:
+    VKFenceView() = default;
+    VKFenceView(VKFence* const& fence) : fence{fence} {}
+
+    VKFence* operator->() const noexcept {
+        return fence;
+    }
+
+    operator VKFence&() const noexcept {
+        return *fence;
+    }
+
+private:
+    VKFence* const& fence;
+};
+
+class VKCommandBufferView {
+public:
+    VKCommandBufferView() = default;
+    VKCommandBufferView(const vk::CommandBuffer& cmdbuf) : cmdbuf{cmdbuf} {}
+
+    const vk::CommandBuffer* operator->() const noexcept {
+        return &cmdbuf;
+    }
+
+    operator vk::CommandBuffer() const noexcept {
+        return cmdbuf;
+    }
+
+private:
+    const vk::CommandBuffer& cmdbuf;
+};
+
 /// The scheduler abstracts command buffer and fence management with an interface that's able to do
 /// OpenGL-like operations on Vulkan command buffers.
 class VKScheduler {
@@ -21,16 +54,21 @@ public:
     explicit VKScheduler(const VKDevice& device, VKResourceManager& resource_manager);
     ~VKScheduler();
 
-    /// Gets the current execution context.
-    [[nodiscard]] VKExecutionContext GetExecutionContext() const;
+    /// Gets a reference to the current fence.
+    VKFenceView GetFence() const {
+        return current_fence;
+    }
 
-    /// Sends the current execution context to the GPU. It invalidates the current execution context
-    /// and returns a new one.
-    VKExecutionContext Flush(vk::Semaphore semaphore = nullptr);
+    /// Gets a reference to the current command buffer.
+    VKCommandBufferView GetCommandBuffer() const {
+        return current_cmdbuf;
+    }
 
-    /// Sends the current execution context to the GPU and waits for it to complete. It invalidates
-    /// the current execution context and returns a new one.
-    VKExecutionContext Finish(vk::Semaphore semaphore = nullptr);
+    /// Sends the current execution context to the GPU.
+    void Flush(bool release_fence = true, vk::Semaphore semaphore = nullptr);
+
+    /// Sends the current execution context to the GPU and waits for it to complete.
+    void Finish(bool release_fence = true, vk::Semaphore semaphore = nullptr);
 
 private:
     void SubmitExecution(vk::Semaphore semaphore);
@@ -44,26 +82,4 @@ private:
     VKFence* next_fence = nullptr;
 };
 
-class VKExecutionContext {
-    friend class VKScheduler;
-
-public:
-    VKExecutionContext() = default;
-
-    VKFence& GetFence() const {
-        return *fence;
-    }
-
-    vk::CommandBuffer GetCommandBuffer() const {
-        return cmdbuf;
-    }
-
-private:
-    explicit VKExecutionContext(VKFence* fence, vk::CommandBuffer cmdbuf)
-        : fence{fence}, cmdbuf{cmdbuf} {}
-
-    VKFence* fence{};
-    vk::CommandBuffer cmdbuf;
-};
-
 } // namespace Vulkan
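
A note on the view types introduced above: both hold references rather than copies. VKFenceView stores a VKFence* const&, i.e. a reference to the scheduler's current_fence pointer, and VKCommandBufferView wraps a reference to current_cmdbuf, so a view queried once keeps resolving to whatever the scheduler currently holds, even across a Flush() that allocates a new context. The flip side is that a view must not outlive the scheduler it came from. A hypothetical illustration (not code from this commit):

    VKFenceView fence = scheduler.GetFence(); // refers to scheduler state, not a snapshot
    fence->Wait();      // waits on the fence that is current now
    scheduler.Flush();  // submits and allocates a new context...
    fence->Wait();      // ...so the same view now resolves to the new current fence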

src/video_core/renderer_vulkan/vk_stream_buffer.cpp

@@ -46,12 +46,12 @@ std::tuple<u8*, u64, bool> VKStreamBuffer::Reserve(u64 size) {
     return {mapped_pointer + offset, offset, invalidation_mark.has_value()};
 }
 
-VKExecutionContext VKStreamBuffer::Send(VKExecutionContext exctx, u64 size) {
+void VKStreamBuffer::Send(u64 size) {
     ASSERT_MSG(size <= mapped_size, "Reserved size is too small");
 
     if (invalidation_mark) {
         // TODO(Rodrigo): Find a better way to invalidate than waiting for all watches to finish.
-        exctx = scheduler.Flush();
+        scheduler.Flush();
         std::for_each(watches.begin(), watches.begin() + *invalidation_mark,
                       [&](auto& resource) { resource->Wait(); });
         invalidation_mark = std::nullopt;
@@ -62,11 +62,9 @@ VKExecutionContext VKStreamBuffer::Send(VKExecutionContext exctx, u64 size) {
         ReserveWatches(WATCHES_RESERVE_CHUNK);
     }
     // Add a watch for this allocation.
-    watches[used_watches++]->Watch(exctx.GetFence());
+    watches[used_watches++]->Watch(scheduler.GetFence());
 
     offset += size;
-
-    return exctx;
 }
 
 void VKStreamBuffer::CreateBuffers(VKMemoryManager& memory_manager, vk::BufferUsageFlags usage) {

src/video_core/renderer_vulkan/vk_stream_buffer.h

@@ -37,7 +37,7 @@ public:
     std::tuple<u8*, u64, bool> Reserve(u64 size);
 
     /// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy.
-    [[nodiscard]] VKExecutionContext Send(VKExecutionContext exctx, u64 size);
+    void Send(u64 size);
 
     vk::Buffer GetBuffer() const {
         return *buffer;