yuzu-emu/yuzu (archived)

Merge pull request #3636 from ReinUsesLisp/drop-vk-hpp

renderer_vulkan: Drop Vulkan-Hpp
Rodrigo Locatti 2020-04-13 17:08:04 -03:00, committed by GitHub
commit 7e4a132a77
52 changed files with 2830 additions and 2221 deletions
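
In short: this commit replaces the renderer's use of Vulkan-Hpp (vk:: scoped enums and types, the vk::UniqueHandle aliases, the dynamic dispatcher) with the plain C Vulkan API plus a project-local wrapper header, video_core/renderer_vulkan/wrapper.h. Most of the diff below is the mechanical renaming illustrated by this sketch (illustrative code, not part of the commit; only <vulkan/vulkan.h> is assumed):

    #include <vulkan/vulkan.h>

    VkFilter ToVkFilter(bool linear) {
        // Before this commit: return linear ? vk::Filter::eLinear : vk::Filter::eNearest;
        return linear ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;
    }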

@@ -1 +1 @@
-Subproject commit d42d0747ee1b7a6726fb8948444b4993f9dcd2e5
+Subproject commit 0e78ffd1dcfc3e9f14a966b9660dbc59bd967c5c

src/video_core/CMakeLists.txt

@@ -156,7 +156,6 @@ add_library(video_core STATIC
 if (ENABLE_VULKAN)
     target_sources(video_core PRIVATE
-        renderer_vulkan/declarations.h
         renderer_vulkan/fixed_pipeline_state.cpp
         renderer_vulkan/fixed_pipeline_state.h
         renderer_vulkan/maxwell_to_vk.cpp

src/video_core/renderer_vulkan/declarations.h (deleted)

@@ -1,60 +0,0 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-namespace vk {
-class DispatchLoaderDynamic;
-}
-
-namespace Vulkan {
-constexpr vk::DispatchLoaderDynamic* dont_use_me_dld = nullptr;
-}
-
-#define VULKAN_HPP_DEFAULT_DISPATCHER (*::Vulkan::dont_use_me_dld)
-#define VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL 0
-#define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 1
-#include <vulkan/vulkan.hpp>
-
-namespace Vulkan {
-
-// vulkan.hpp unique handlers use DispatchLoaderStatic
-template <typename T>
-using UniqueHandle = vk::UniqueHandle<T, vk::DispatchLoaderDynamic>;
-
-using UniqueAccelerationStructureNV = UniqueHandle<vk::AccelerationStructureNV>;
-using UniqueBuffer = UniqueHandle<vk::Buffer>;
-using UniqueBufferView = UniqueHandle<vk::BufferView>;
-using UniqueCommandBuffer = UniqueHandle<vk::CommandBuffer>;
-using UniqueCommandPool = UniqueHandle<vk::CommandPool>;
-using UniqueDescriptorPool = UniqueHandle<vk::DescriptorPool>;
-using UniqueDescriptorSet = UniqueHandle<vk::DescriptorSet>;
-using UniqueDescriptorSetLayout = UniqueHandle<vk::DescriptorSetLayout>;
-using UniqueDescriptorUpdateTemplate = UniqueHandle<vk::DescriptorUpdateTemplate>;
-using UniqueDevice = UniqueHandle<vk::Device>;
-using UniqueDeviceMemory = UniqueHandle<vk::DeviceMemory>;
-using UniqueEvent = UniqueHandle<vk::Event>;
-using UniqueFence = UniqueHandle<vk::Fence>;
-using UniqueFramebuffer = UniqueHandle<vk::Framebuffer>;
-using UniqueImage = UniqueHandle<vk::Image>;
-using UniqueImageView = UniqueHandle<vk::ImageView>;
-using UniqueInstance = UniqueHandle<vk::Instance>;
-using UniqueIndirectCommandsLayoutNVX = UniqueHandle<vk::IndirectCommandsLayoutNVX>;
-using UniqueObjectTableNVX = UniqueHandle<vk::ObjectTableNVX>;
-using UniquePipeline = UniqueHandle<vk::Pipeline>;
-using UniquePipelineCache = UniqueHandle<vk::PipelineCache>;
-using UniquePipelineLayout = UniqueHandle<vk::PipelineLayout>;
-using UniqueQueryPool = UniqueHandle<vk::QueryPool>;
-using UniqueRenderPass = UniqueHandle<vk::RenderPass>;
-using UniqueSampler = UniqueHandle<vk::Sampler>;
-using UniqueSamplerYcbcrConversion = UniqueHandle<vk::SamplerYcbcrConversion>;
-using UniqueSemaphore = UniqueHandle<vk::Semaphore>;
-using UniqueShaderModule = UniqueHandle<vk::ShaderModule>;
-using UniqueSurfaceKHR = UniqueHandle<vk::SurfaceKHR>;
-using UniqueSwapchainKHR = UniqueHandle<vk::SwapchainKHR>;
-using UniqueValidationCacheEXT = UniqueHandle<vk::ValidationCacheEXT>;
-using UniqueDebugReportCallbackEXT = UniqueHandle<vk::DebugReportCallbackEXT>;
-using UniqueDebugUtilsMessengerEXT = UniqueHandle<vk::DebugUtilsMessengerEXT>;
-
-} // namespace Vulkan
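
The header deleted above existed only to force Vulkan-Hpp into dynamic dispatch through the dont_use_me_dld placeholder. Judging from the CreateInstance hunk near the end of this commit (vk::InstanceDispatch, vk::Load, dld.vkGetInstanceProcAddr), the replacement wrapper.h loads function pointers into a plain dispatch struct instead. A minimal sketch of that pattern follows; apart from the standard PFN_* typedefs, every name here is an assumption, not the wrapper's real contents:

    #include <vulkan/vulkan.h>

    // Hypothetical dispatch table: one PFN_* member per entry point used.
    struct InstanceDispatch {
        PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = nullptr;
        PFN_vkCreateInstance vkCreateInstance = nullptr;
    };

    // Resolve global entry points through vkGetInstanceProcAddr, mirroring the
    // "if (!vk::Load(dld))" check visible in CreateInstance below.
    bool Load(InstanceDispatch& dld) {
        dld.vkCreateInstance = reinterpret_cast<PFN_vkCreateInstance>(
            dld.vkGetInstanceProcAddr(nullptr, "vkCreateInstance"));
        return dld.vkCreateInstance != nullptr;
    }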

src/video_core/renderer_vulkan/maxwell_to_vk.cpp

@@ -2,13 +2,15 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <iterator>
+
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/maxwell_to_vk.h"
 #include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 #include "video_core/surface.h"
 
 namespace Vulkan::MaxwellToVK {
@@ -17,88 +19,89 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs;
 namespace Sampler {
 
-vk::Filter Filter(Tegra::Texture::TextureFilter filter) {
+VkFilter Filter(Tegra::Texture::TextureFilter filter) {
     switch (filter) {
     case Tegra::Texture::TextureFilter::Linear:
-        return vk::Filter::eLinear;
+        return VK_FILTER_LINEAR;
     case Tegra::Texture::TextureFilter::Nearest:
-        return vk::Filter::eNearest;
+        return VK_FILTER_NEAREST;
     }
     UNIMPLEMENTED_MSG("Unimplemented sampler filter={}", static_cast<u32>(filter));
     return {};
 }
 
-vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter) {
+VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter) {
     switch (mipmap_filter) {
     case Tegra::Texture::TextureMipmapFilter::None:
         // TODO(Rodrigo): None seems to be mapped to OpenGL's mag and min filters without mipmapping
         // (e.g. GL_NEAREST and GL_LINEAR). Vulkan doesn't have such a thing, find out if we have to
         // use an image view with a single mipmap level to emulate this.
-        return vk::SamplerMipmapMode::eLinear;
+        return VK_SAMPLER_MIPMAP_MODE_LINEAR;
+        ;
     case Tegra::Texture::TextureMipmapFilter::Linear:
-        return vk::SamplerMipmapMode::eLinear;
+        return VK_SAMPLER_MIPMAP_MODE_LINEAR;
     case Tegra::Texture::TextureMipmapFilter::Nearest:
-        return vk::SamplerMipmapMode::eNearest;
+        return VK_SAMPLER_MIPMAP_MODE_NEAREST;
     }
     UNIMPLEMENTED_MSG("Unimplemented sampler mipmap mode={}", static_cast<u32>(mipmap_filter));
     return {};
 }
 
-vk::SamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
-                                Tegra::Texture::TextureFilter filter) {
+VkSamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
+                              Tegra::Texture::TextureFilter filter) {
     switch (wrap_mode) {
     case Tegra::Texture::WrapMode::Wrap:
-        return vk::SamplerAddressMode::eRepeat;
+        return VK_SAMPLER_ADDRESS_MODE_REPEAT;
     case Tegra::Texture::WrapMode::Mirror:
-        return vk::SamplerAddressMode::eMirroredRepeat;
+        return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
     case Tegra::Texture::WrapMode::ClampToEdge:
-        return vk::SamplerAddressMode::eClampToEdge;
+        return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
     case Tegra::Texture::WrapMode::Border:
-        return vk::SamplerAddressMode::eClampToBorder;
+        return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
     case Tegra::Texture::WrapMode::Clamp:
-        if (device.GetDriverID() == vk::DriverIdKHR::eNvidiaProprietary) {
+        if (device.GetDriverID() == VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR) {
             // Nvidia's Vulkan driver defaults to GL_CLAMP on invalid enumerations, we can hack this
             // by sending an invalid enumeration.
-            return static_cast<vk::SamplerAddressMode>(0xcafe);
+            return static_cast<VkSamplerAddressMode>(0xcafe);
         }
         // TODO(Rodrigo): Emulate GL_CLAMP properly on other vendors
         switch (filter) {
        case Tegra::Texture::TextureFilter::Nearest:
-            return vk::SamplerAddressMode::eClampToEdge;
+            return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
        case Tegra::Texture::TextureFilter::Linear:
-            return vk::SamplerAddressMode::eClampToBorder;
+            return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
         }
         UNREACHABLE();
-        return vk::SamplerAddressMode::eClampToEdge;
+        return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
     case Tegra::Texture::WrapMode::MirrorOnceClampToEdge:
-        return vk::SamplerAddressMode::eMirrorClampToEdge;
+        return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
     case Tegra::Texture::WrapMode::MirrorOnceBorder:
         UNIMPLEMENTED();
-        return vk::SamplerAddressMode::eMirrorClampToEdge;
+        return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
     default:
         UNIMPLEMENTED_MSG("Unimplemented wrap mode={}", static_cast<u32>(wrap_mode));
         return {};
     }
 }
 
-vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func) {
+VkCompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func) {
     switch (depth_compare_func) {
     case Tegra::Texture::DepthCompareFunc::Never:
-        return vk::CompareOp::eNever;
+        return VK_COMPARE_OP_NEVER;
     case Tegra::Texture::DepthCompareFunc::Less:
-        return vk::CompareOp::eLess;
+        return VK_COMPARE_OP_LESS;
     case Tegra::Texture::DepthCompareFunc::LessEqual:
-        return vk::CompareOp::eLessOrEqual;
+        return VK_COMPARE_OP_LESS_OR_EQUAL;
     case Tegra::Texture::DepthCompareFunc::Equal:
-        return vk::CompareOp::eEqual;
+        return VK_COMPARE_OP_EQUAL;
     case Tegra::Texture::DepthCompareFunc::NotEqual:
-        return vk::CompareOp::eNotEqual;
+        return VK_COMPARE_OP_NOT_EQUAL;
     case Tegra::Texture::DepthCompareFunc::Greater:
-        return vk::CompareOp::eGreater;
+        return VK_COMPARE_OP_GREATER;
     case Tegra::Texture::DepthCompareFunc::GreaterEqual:
-        return vk::CompareOp::eGreaterOrEqual;
+        return VK_COMPARE_OP_GREATER_OR_EQUAL;
     case Tegra::Texture::DepthCompareFunc::Always:
-        return vk::CompareOp::eAlways;
+        return VK_COMPARE_OP_ALWAYS;
     }
     UNIMPLEMENTED_MSG("Unimplemented sampler depth compare function={}",
                       static_cast<u32>(depth_compare_func));
@@ -112,92 +115,92 @@ namespace {
 enum : u32 { Attachable = 1, Storage = 2 };
 
 struct FormatTuple {
-    vk::Format format; ///< Vulkan format
-    int usage;         ///< Describes image format usage
+    VkFormat format; ///< Vulkan format
+    int usage = 0;   ///< Describes image format usage
 } constexpr tex_format_tuples[] = {
-    {vk::Format::eA8B8G8R8UnormPack32, Attachable | Storage}, // ABGR8U
+    {VK_FORMAT_A8B8G8R8_UNORM_PACK32, Attachable | Storage}, // ABGR8U
-    {vk::Format::eA8B8G8R8SnormPack32, Attachable | Storage}, // ABGR8S
+    {VK_FORMAT_A8B8G8R8_SNORM_PACK32, Attachable | Storage}, // ABGR8S
-    {vk::Format::eA8B8G8R8UintPack32, Attachable | Storage}, // ABGR8UI
+    {VK_FORMAT_A8B8G8R8_UINT_PACK32, Attachable | Storage}, // ABGR8UI
-    {vk::Format::eB5G6R5UnormPack16, {}}, // B5G6R5U
+    {VK_FORMAT_B5G6R5_UNORM_PACK16}, // B5G6R5U
-    {vk::Format::eA2B10G10R10UnormPack32, Attachable | Storage}, // A2B10G10R10U
+    {VK_FORMAT_A2B10G10R10_UNORM_PACK32, Attachable | Storage}, // A2B10G10R10U
-    {vk::Format::eA1R5G5B5UnormPack16, Attachable}, // A1B5G5R5U (flipped with swizzle)
+    {VK_FORMAT_A1R5G5B5_UNORM_PACK16, Attachable}, // A1B5G5R5U (flipped with swizzle)
-    {vk::Format::eR8Unorm, Attachable | Storage}, // R8U
+    {VK_FORMAT_R8_UNORM, Attachable | Storage}, // R8U
-    {vk::Format::eR8Uint, Attachable | Storage}, // R8UI
+    {VK_FORMAT_R8_UINT, Attachable | Storage}, // R8UI
-    {vk::Format::eR16G16B16A16Sfloat, Attachable | Storage}, // RGBA16F
+    {VK_FORMAT_R16G16B16A16_SFLOAT, Attachable | Storage}, // RGBA16F
-    {vk::Format::eR16G16B16A16Unorm, Attachable | Storage}, // RGBA16U
+    {VK_FORMAT_R16G16B16A16_UNORM, Attachable | Storage}, // RGBA16U
-    {vk::Format::eR16G16B16A16Snorm, Attachable | Storage}, // RGBA16S
+    {VK_FORMAT_R16G16B16A16_SNORM, Attachable | Storage}, // RGBA16S
-    {vk::Format::eR16G16B16A16Uint, Attachable | Storage}, // RGBA16UI
+    {VK_FORMAT_R16G16B16A16_UINT, Attachable | Storage}, // RGBA16UI
-    {vk::Format::eB10G11R11UfloatPack32, Attachable | Storage}, // R11FG11FB10F
+    {VK_FORMAT_B10G11R11_UFLOAT_PACK32, Attachable | Storage}, // R11FG11FB10F
-    {vk::Format::eR32G32B32A32Uint, Attachable | Storage}, // RGBA32UI
+    {VK_FORMAT_R32G32B32A32_UINT, Attachable | Storage}, // RGBA32UI
-    {vk::Format::eBc1RgbaUnormBlock, {}}, // DXT1
+    {VK_FORMAT_BC1_RGBA_UNORM_BLOCK}, // DXT1
-    {vk::Format::eBc2UnormBlock, {}}, // DXT23
+    {VK_FORMAT_BC2_UNORM_BLOCK}, // DXT23
-    {vk::Format::eBc3UnormBlock, {}}, // DXT45
+    {VK_FORMAT_BC3_UNORM_BLOCK}, // DXT45
-    {vk::Format::eBc4UnormBlock, {}}, // DXN1
+    {VK_FORMAT_BC4_UNORM_BLOCK}, // DXN1
-    {vk::Format::eBc5UnormBlock, {}}, // DXN2UNORM
+    {VK_FORMAT_BC5_UNORM_BLOCK}, // DXN2UNORM
-    {vk::Format::eBc5SnormBlock, {}}, // DXN2SNORM
+    {VK_FORMAT_BC5_SNORM_BLOCK}, // DXN2SNORM
-    {vk::Format::eBc7UnormBlock, {}}, // BC7U
+    {VK_FORMAT_BC7_UNORM_BLOCK}, // BC7U
-    {vk::Format::eBc6HUfloatBlock, {}}, // BC6H_UF16
+    {VK_FORMAT_BC6H_UFLOAT_BLOCK}, // BC6H_UF16
-    {vk::Format::eBc6HSfloatBlock, {}}, // BC6H_SF16
+    {VK_FORMAT_BC6H_SFLOAT_BLOCK}, // BC6H_SF16
-    {vk::Format::eAstc4x4UnormBlock, {}}, // ASTC_2D_4X4
+    {VK_FORMAT_ASTC_4x4_UNORM_BLOCK}, // ASTC_2D_4X4
-    {vk::Format::eB8G8R8A8Unorm, {}}, // BGRA8
+    {VK_FORMAT_B8G8R8A8_UNORM}, // BGRA8
-    {vk::Format::eR32G32B32A32Sfloat, Attachable | Storage}, // RGBA32F
+    {VK_FORMAT_R32G32B32A32_SFLOAT, Attachable | Storage}, // RGBA32F
-    {vk::Format::eR32G32Sfloat, Attachable | Storage}, // RG32F
+    {VK_FORMAT_R32G32_SFLOAT, Attachable | Storage}, // RG32F
-    {vk::Format::eR32Sfloat, Attachable | Storage}, // R32F
+    {VK_FORMAT_R32_SFLOAT, Attachable | Storage}, // R32F
-    {vk::Format::eR16Sfloat, Attachable | Storage}, // R16F
+    {VK_FORMAT_R16_SFLOAT, Attachable | Storage}, // R16F
-    {vk::Format::eR16Unorm, Attachable | Storage}, // R16U
+    {VK_FORMAT_R16_UNORM, Attachable | Storage}, // R16U
-    {vk::Format::eUndefined, {}}, // R16S
+    {VK_FORMAT_UNDEFINED}, // R16S
-    {vk::Format::eUndefined, {}}, // R16UI
+    {VK_FORMAT_UNDEFINED}, // R16UI
-    {vk::Format::eUndefined, {}}, // R16I
+    {VK_FORMAT_UNDEFINED}, // R16I
-    {vk::Format::eR16G16Unorm, Attachable | Storage}, // RG16
+    {VK_FORMAT_R16G16_UNORM, Attachable | Storage}, // RG16
-    {vk::Format::eR16G16Sfloat, Attachable | Storage}, // RG16F
+    {VK_FORMAT_R16G16_SFLOAT, Attachable | Storage}, // RG16F
-    {vk::Format::eUndefined, {}}, // RG16UI
+    {VK_FORMAT_UNDEFINED}, // RG16UI
-    {vk::Format::eUndefined, {}}, // RG16I
+    {VK_FORMAT_UNDEFINED}, // RG16I
-    {vk::Format::eR16G16Snorm, Attachable | Storage}, // RG16S
+    {VK_FORMAT_R16G16_SNORM, Attachable | Storage}, // RG16S
-    {vk::Format::eUndefined, {}}, // RGB32F
+    {VK_FORMAT_UNDEFINED}, // RGB32F
-    {vk::Format::eR8G8B8A8Srgb, Attachable}, // RGBA8_SRGB
+    {VK_FORMAT_R8G8B8A8_SRGB, Attachable}, // RGBA8_SRGB
-    {vk::Format::eR8G8Unorm, Attachable | Storage}, // RG8U
+    {VK_FORMAT_R8G8_UNORM, Attachable | Storage}, // RG8U
-    {vk::Format::eR8G8Snorm, Attachable | Storage}, // RG8S
+    {VK_FORMAT_R8G8_SNORM, Attachable | Storage}, // RG8S
-    {vk::Format::eR32G32Uint, Attachable | Storage}, // RG32UI
+    {VK_FORMAT_R32G32_UINT, Attachable | Storage}, // RG32UI
-    {vk::Format::eUndefined, {}}, // RGBX16F
+    {VK_FORMAT_UNDEFINED}, // RGBX16F
-    {vk::Format::eR32Uint, Attachable | Storage}, // R32UI
+    {VK_FORMAT_R32_UINT, Attachable | Storage}, // R32UI
-    {vk::Format::eR32Sint, Attachable | Storage}, // R32I
+    {VK_FORMAT_R32_SINT, Attachable | Storage}, // R32I
-    {vk::Format::eAstc8x8UnormBlock, {}}, // ASTC_2D_8X8
+    {VK_FORMAT_ASTC_8x8_UNORM_BLOCK}, // ASTC_2D_8X8
-    {vk::Format::eUndefined, {}}, // ASTC_2D_8X5
+    {VK_FORMAT_UNDEFINED}, // ASTC_2D_8X5
-    {vk::Format::eUndefined, {}}, // ASTC_2D_5X4
+    {VK_FORMAT_UNDEFINED}, // ASTC_2D_5X4
-    {vk::Format::eUndefined, {}}, // BGRA8_SRGB
+    {VK_FORMAT_UNDEFINED}, // BGRA8_SRGB
-    {vk::Format::eBc1RgbaSrgbBlock, {}}, // DXT1_SRGB
+    {VK_FORMAT_BC1_RGBA_SRGB_BLOCK}, // DXT1_SRGB
-    {vk::Format::eBc2SrgbBlock, {}}, // DXT23_SRGB
+    {VK_FORMAT_BC2_SRGB_BLOCK}, // DXT23_SRGB
-    {vk::Format::eBc3SrgbBlock, {}}, // DXT45_SRGB
+    {VK_FORMAT_BC3_SRGB_BLOCK}, // DXT45_SRGB
-    {vk::Format::eBc7SrgbBlock, {}}, // BC7U_SRGB
+    {VK_FORMAT_BC7_SRGB_BLOCK}, // BC7U_SRGB
-    {vk::Format::eR4G4B4A4UnormPack16, Attachable}, // R4G4B4A4U
+    {VK_FORMAT_R4G4B4A4_UNORM_PACK16, Attachable}, // R4G4B4A4U
-    {vk::Format::eAstc4x4SrgbBlock, {}}, // ASTC_2D_4X4_SRGB
+    {VK_FORMAT_ASTC_4x4_SRGB_BLOCK}, // ASTC_2D_4X4_SRGB
-    {vk::Format::eAstc8x8SrgbBlock, {}}, // ASTC_2D_8X8_SRGB
+    {VK_FORMAT_ASTC_8x8_SRGB_BLOCK}, // ASTC_2D_8X8_SRGB
-    {vk::Format::eAstc8x5SrgbBlock, {}}, // ASTC_2D_8X5_SRGB
+    {VK_FORMAT_ASTC_8x5_SRGB_BLOCK}, // ASTC_2D_8X5_SRGB
-    {vk::Format::eAstc5x4SrgbBlock, {}}, // ASTC_2D_5X4_SRGB
+    {VK_FORMAT_ASTC_5x4_SRGB_BLOCK}, // ASTC_2D_5X4_SRGB
-    {vk::Format::eAstc5x5UnormBlock, {}}, // ASTC_2D_5X5
+    {VK_FORMAT_ASTC_5x5_UNORM_BLOCK}, // ASTC_2D_5X5
-    {vk::Format::eAstc5x5SrgbBlock, {}}, // ASTC_2D_5X5_SRGB
+    {VK_FORMAT_ASTC_5x5_SRGB_BLOCK}, // ASTC_2D_5X5_SRGB
-    {vk::Format::eAstc10x8UnormBlock, {}}, // ASTC_2D_10X8
+    {VK_FORMAT_ASTC_10x8_UNORM_BLOCK}, // ASTC_2D_10X8
-    {vk::Format::eAstc10x8SrgbBlock, {}}, // ASTC_2D_10X8_SRGB
+    {VK_FORMAT_ASTC_10x8_SRGB_BLOCK}, // ASTC_2D_10X8_SRGB
-    {vk::Format::eAstc6x6UnormBlock, {}}, // ASTC_2D_6X6
+    {VK_FORMAT_ASTC_6x6_UNORM_BLOCK}, // ASTC_2D_6X6
-    {vk::Format::eAstc6x6SrgbBlock, {}}, // ASTC_2D_6X6_SRGB
+    {VK_FORMAT_ASTC_6x6_SRGB_BLOCK}, // ASTC_2D_6X6_SRGB
-    {vk::Format::eAstc10x10UnormBlock, {}}, // ASTC_2D_10X10
+    {VK_FORMAT_ASTC_10x10_UNORM_BLOCK}, // ASTC_2D_10X10
-    {vk::Format::eAstc10x10SrgbBlock, {}}, // ASTC_2D_10X10_SRGB
+    {VK_FORMAT_ASTC_10x10_SRGB_BLOCK}, // ASTC_2D_10X10_SRGB
-    {vk::Format::eAstc12x12UnormBlock, {}}, // ASTC_2D_12X12
+    {VK_FORMAT_ASTC_12x12_UNORM_BLOCK}, // ASTC_2D_12X12
-    {vk::Format::eAstc12x12SrgbBlock, {}}, // ASTC_2D_12X12_SRGB
+    {VK_FORMAT_ASTC_12x12_SRGB_BLOCK}, // ASTC_2D_12X12_SRGB
-    {vk::Format::eAstc8x6UnormBlock, {}}, // ASTC_2D_8X6
+    {VK_FORMAT_ASTC_8x6_UNORM_BLOCK}, // ASTC_2D_8X6
-    {vk::Format::eAstc8x6SrgbBlock, {}}, // ASTC_2D_8X6_SRGB
+    {VK_FORMAT_ASTC_8x6_SRGB_BLOCK}, // ASTC_2D_8X6_SRGB
-    {vk::Format::eAstc6x5UnormBlock, {}}, // ASTC_2D_6X5
+    {VK_FORMAT_ASTC_6x5_UNORM_BLOCK}, // ASTC_2D_6X5
-    {vk::Format::eAstc6x5SrgbBlock, {}}, // ASTC_2D_6X5_SRGB
+    {VK_FORMAT_ASTC_6x5_SRGB_BLOCK}, // ASTC_2D_6X5_SRGB
-    {vk::Format::eE5B9G9R9UfloatPack32, {}}, // E5B9G9R9F
+    {VK_FORMAT_E5B9G9R9_UFLOAT_PACK32}, // E5B9G9R9F
 
     // Depth formats
-    {vk::Format::eD32Sfloat, Attachable}, // Z32F
+    {VK_FORMAT_D32_SFLOAT, Attachable}, // Z32F
-    {vk::Format::eD16Unorm, Attachable}, // Z16
+    {VK_FORMAT_D16_UNORM, Attachable}, // Z16
 
     // DepthStencil formats
-    {vk::Format::eD24UnormS8Uint, Attachable}, // Z24S8
+    {VK_FORMAT_D24_UNORM_S8_UINT, Attachable}, // Z24S8
-    {vk::Format::eD24UnormS8Uint, Attachable}, // S8Z24 (emulated)
+    {VK_FORMAT_D24_UNORM_S8_UINT, Attachable}, // S8Z24 (emulated)
-    {vk::Format::eD32SfloatS8Uint, Attachable}, // Z32FS8
+    {VK_FORMAT_D32_SFLOAT_S8_UINT, Attachable}, // Z32FS8
 };
 
 static_assert(std::size(tex_format_tuples) == VideoCore::Surface::MaxPixelFormat);
@@ -212,106 +215,106 @@ FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFo
     ASSERT(static_cast<std::size_t>(pixel_format) < std::size(tex_format_tuples));
     auto tuple = tex_format_tuples[static_cast<std::size_t>(pixel_format)];
-    if (tuple.format == vk::Format::eUndefined) {
+    if (tuple.format == VK_FORMAT_UNDEFINED) {
         UNIMPLEMENTED_MSG("Unimplemented texture format with pixel format={}",
                           static_cast<u32>(pixel_format));
-        return {vk::Format::eA8B8G8R8UnormPack32, true, true};
+        return {VK_FORMAT_A8B8G8R8_UNORM_PACK32, true, true};
     }
 
     // Use ABGR8 on hardware that doesn't support ASTC natively
     if (!device.IsOptimalAstcSupported() && VideoCore::Surface::IsPixelFormatASTC(pixel_format)) {
         tuple.format = VideoCore::Surface::IsPixelFormatSRGB(pixel_format)
-                           ? vk::Format::eA8B8G8R8SrgbPack32
-                           : vk::Format::eA8B8G8R8UnormPack32;
+                           ? VK_FORMAT_A8B8G8R8_SRGB_PACK32
+                           : VK_FORMAT_A8B8G8R8_UNORM_PACK32;
     }
     const bool attachable = tuple.usage & Attachable;
     const bool storage = tuple.usage & Storage;
 
-    vk::FormatFeatureFlags usage;
+    VkFormatFeatureFlags usage;
     if (format_type == FormatType::Buffer) {
-        usage = vk::FormatFeatureFlagBits::eStorageTexelBuffer |
-                vk::FormatFeatureFlagBits::eUniformTexelBuffer;
+        usage =
+            VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT | VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
     } else {
-        usage = vk::FormatFeatureFlagBits::eSampledImage | vk::FormatFeatureFlagBits::eTransferDst |
-                vk::FormatFeatureFlagBits::eTransferSrc;
+        usage = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT |
+                VK_FORMAT_FEATURE_TRANSFER_SRC_BIT;
         if (attachable) {
-            usage |= IsZetaFormat(pixel_format) ? vk::FormatFeatureFlagBits::eDepthStencilAttachment
-                                                : vk::FormatFeatureFlagBits::eColorAttachment;
+            usage |= IsZetaFormat(pixel_format) ? VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT
+                                                : VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
         }
         if (storage) {
-            usage |= vk::FormatFeatureFlagBits::eStorageImage;
+            usage |= VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
         }
     }
     return {device.GetSupportedFormat(tuple.format, usage, format_type), attachable, storage};
 }
 
-vk::ShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) {
+VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) {
     switch (stage) {
     case Tegra::Engines::ShaderType::Vertex:
-        return vk::ShaderStageFlagBits::eVertex;
+        return VK_SHADER_STAGE_VERTEX_BIT;
     case Tegra::Engines::ShaderType::TesselationControl:
-        return vk::ShaderStageFlagBits::eTessellationControl;
+        return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
     case Tegra::Engines::ShaderType::TesselationEval:
-        return vk::ShaderStageFlagBits::eTessellationEvaluation;
+        return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
     case Tegra::Engines::ShaderType::Geometry:
-        return vk::ShaderStageFlagBits::eGeometry;
+        return VK_SHADER_STAGE_GEOMETRY_BIT;
     case Tegra::Engines::ShaderType::Fragment:
-        return vk::ShaderStageFlagBits::eFragment;
+        return VK_SHADER_STAGE_FRAGMENT_BIT;
     case Tegra::Engines::ShaderType::Compute:
-        return vk::ShaderStageFlagBits::eCompute;
+        return VK_SHADER_STAGE_COMPUTE_BIT;
     }
     UNIMPLEMENTED_MSG("Unimplemented shader stage={}", static_cast<u32>(stage));
     return {};
 }
 
-vk::PrimitiveTopology PrimitiveTopology([[maybe_unused]] const VKDevice& device,
-                                        Maxwell::PrimitiveTopology topology) {
+VkPrimitiveTopology PrimitiveTopology([[maybe_unused]] const VKDevice& device,
+                                      Maxwell::PrimitiveTopology topology) {
     switch (topology) {
     case Maxwell::PrimitiveTopology::Points:
-        return vk::PrimitiveTopology::ePointList;
+        return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
     case Maxwell::PrimitiveTopology::Lines:
-        return vk::PrimitiveTopology::eLineList;
+        return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
     case Maxwell::PrimitiveTopology::LineStrip:
-        return vk::PrimitiveTopology::eLineStrip;
+        return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
     case Maxwell::PrimitiveTopology::Triangles:
-        return vk::PrimitiveTopology::eTriangleList;
+        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
     case Maxwell::PrimitiveTopology::TriangleStrip:
-        return vk::PrimitiveTopology::eTriangleStrip;
+        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
     case Maxwell::PrimitiveTopology::TriangleFan:
-        return vk::PrimitiveTopology::eTriangleFan;
+        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
     case Maxwell::PrimitiveTopology::Quads:
         // TODO(Rodrigo): Use VK_PRIMITIVE_TOPOLOGY_QUAD_LIST_EXT whenever it releases
-        return vk::PrimitiveTopology::eTriangleList;
+        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
     case Maxwell::PrimitiveTopology::Patches:
-        return vk::PrimitiveTopology::ePatchList;
+        return VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;
     default:
         UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology));
         return {};
     }
 }
 
-vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) {
+VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) {
     switch (type) {
     case Maxwell::VertexAttribute::Type::SignedNorm:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_8:
-            return vk::Format::eR8Snorm;
+            return VK_FORMAT_R8_SNORM;
         case Maxwell::VertexAttribute::Size::Size_8_8:
-            return vk::Format::eR8G8Snorm;
+            return VK_FORMAT_R8G8_SNORM;
         case Maxwell::VertexAttribute::Size::Size_8_8_8:
-            return vk::Format::eR8G8B8Snorm;
+            return VK_FORMAT_R8G8B8_SNORM;
         case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
-            return vk::Format::eR8G8B8A8Snorm;
+            return VK_FORMAT_R8G8B8A8_SNORM;
         case Maxwell::VertexAttribute::Size::Size_16:
-            return vk::Format::eR16Snorm;
+            return VK_FORMAT_R16_SNORM;
         case Maxwell::VertexAttribute::Size::Size_16_16:
-            return vk::Format::eR16G16Snorm;
+            return VK_FORMAT_R16G16_SNORM;
         case Maxwell::VertexAttribute::Size::Size_16_16_16:
-            return vk::Format::eR16G16B16Snorm;
+            return VK_FORMAT_R16G16B16_SNORM;
         case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
-            return vk::Format::eR16G16B16A16Snorm;
+            return VK_FORMAT_R16G16B16A16_SNORM;
         case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
-            return vk::Format::eA2B10G10R10SnormPack32;
+            return VK_FORMAT_A2B10G10R10_SNORM_PACK32;
         default:
             break;
         }
@@ -319,23 +322,23 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
     case Maxwell::VertexAttribute::Type::UnsignedNorm:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_8:
-            return vk::Format::eR8Unorm;
+            return VK_FORMAT_R8_UNORM;
         case Maxwell::VertexAttribute::Size::Size_8_8:
-            return vk::Format::eR8G8Unorm;
+            return VK_FORMAT_R8G8_UNORM;
         case Maxwell::VertexAttribute::Size::Size_8_8_8:
-            return vk::Format::eR8G8B8Unorm;
+            return VK_FORMAT_R8G8B8_UNORM;
         case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
-            return vk::Format::eR8G8B8A8Unorm;
+            return VK_FORMAT_R8G8B8A8_UNORM;
         case Maxwell::VertexAttribute::Size::Size_16:
-            return vk::Format::eR16Unorm;
+            return VK_FORMAT_R16_UNORM;
         case Maxwell::VertexAttribute::Size::Size_16_16:
-            return vk::Format::eR16G16Unorm;
+            return VK_FORMAT_R16G16_UNORM;
         case Maxwell::VertexAttribute::Size::Size_16_16_16:
-            return vk::Format::eR16G16B16Unorm;
+            return VK_FORMAT_R16G16B16_UNORM;
         case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
-            return vk::Format::eR16G16B16A16Unorm;
+            return VK_FORMAT_R16G16B16A16_UNORM;
         case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
-            return vk::Format::eA2B10G10R10UnormPack32;
+            return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
         default:
             break;
         }
@@ -343,59 +346,59 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
     case Maxwell::VertexAttribute::Type::SignedInt:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
-            return vk::Format::eR16G16B16A16Sint;
+            return VK_FORMAT_R16G16B16A16_SINT;
         case Maxwell::VertexAttribute::Size::Size_8:
-            return vk::Format::eR8Sint;
+            return VK_FORMAT_R8_SINT;
         case Maxwell::VertexAttribute::Size::Size_8_8:
-            return vk::Format::eR8G8Sint;
+            return VK_FORMAT_R8G8_SINT;
         case Maxwell::VertexAttribute::Size::Size_8_8_8:
-            return vk::Format::eR8G8B8Sint;
+            return VK_FORMAT_R8G8B8_SINT;
         case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
-            return vk::Format::eR8G8B8A8Sint;
+            return VK_FORMAT_R8G8B8A8_SINT;
         case Maxwell::VertexAttribute::Size::Size_32:
-            return vk::Format::eR32Sint;
+            return VK_FORMAT_R32_SINT;
         default:
             break;
        }
     case Maxwell::VertexAttribute::Type::UnsignedInt:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_8:
-            return vk::Format::eR8Uint;
+            return VK_FORMAT_R8_UINT;
         case Maxwell::VertexAttribute::Size::Size_8_8:
-            return vk::Format::eR8G8Uint;
+            return VK_FORMAT_R8G8_UINT;
         case Maxwell::VertexAttribute::Size::Size_8_8_8:
-            return vk::Format::eR8G8B8Uint;
+            return VK_FORMAT_R8G8B8_UINT;
         case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
-            return vk::Format::eR8G8B8A8Uint;
+            return VK_FORMAT_R8G8B8A8_UINT;
         case Maxwell::VertexAttribute::Size::Size_32:
-            return vk::Format::eR32Uint;
+            return VK_FORMAT_R32_UINT;
         case Maxwell::VertexAttribute::Size::Size_32_32:
-            return vk::Format::eR32G32Uint;
+            return VK_FORMAT_R32G32_UINT;
         case Maxwell::VertexAttribute::Size::Size_32_32_32:
-            return vk::Format::eR32G32B32Uint;
+            return VK_FORMAT_R32G32B32_UINT;
         case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
-            return vk::Format::eR32G32B32A32Uint;
+            return VK_FORMAT_R32G32B32A32_UINT;
         default:
             break;
         }
     case Maxwell::VertexAttribute::Type::UnsignedScaled:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_8:
-            return vk::Format::eR8Uscaled;
+            return VK_FORMAT_R8_USCALED;
         case Maxwell::VertexAttribute::Size::Size_8_8:
-            return vk::Format::eR8G8Uscaled;
+            return VK_FORMAT_R8G8_USCALED;
         case Maxwell::VertexAttribute::Size::Size_8_8_8:
-            return vk::Format::eR8G8B8Uscaled;
+            return VK_FORMAT_R8G8B8_USCALED;
         case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
-            return vk::Format::eR8G8B8A8Uscaled;
+            return VK_FORMAT_R8G8B8A8_USCALED;
         case Maxwell::VertexAttribute::Size::Size_16:
-            return vk::Format::eR16Uscaled;
+            return VK_FORMAT_R16_USCALED;
         case Maxwell::VertexAttribute::Size::Size_16_16:
-            return vk::Format::eR16G16Uscaled;
+            return VK_FORMAT_R16G16_USCALED;
         case Maxwell::VertexAttribute::Size::Size_16_16_16:
-            return vk::Format::eR16G16B16Uscaled;
+            return VK_FORMAT_R16G16B16_USCALED;
         case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
-            return vk::Format::eR16G16B16A16Uscaled;
+            return VK_FORMAT_R16G16B16A16_USCALED;
         default:
             break;
         }
@@ -403,21 +406,21 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
     case Maxwell::VertexAttribute::Type::SignedScaled:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_8:
-            return vk::Format::eR8Sscaled;
+            return VK_FORMAT_R8_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_8_8:
-            return vk::Format::eR8G8Sscaled;
+            return VK_FORMAT_R8G8_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_8_8_8:
-            return vk::Format::eR8G8B8Sscaled;
+            return VK_FORMAT_R8G8B8_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
-            return vk::Format::eR8G8B8A8Sscaled;
+            return VK_FORMAT_R8G8B8A8_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_16:
-            return vk::Format::eR16Sscaled;
+            return VK_FORMAT_R16_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_16_16:
-            return vk::Format::eR16G16Sscaled;
+            return VK_FORMAT_R16G16_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_16_16_16:
-            return vk::Format::eR16G16B16Sscaled;
+            return VK_FORMAT_R16G16B16_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
-            return vk::Format::eR16G16B16A16Sscaled;
+            return VK_FORMAT_R16G16B16A16_SSCALED;
         default:
             break;
         }
@@ -425,21 +428,21 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
     case Maxwell::VertexAttribute::Type::Float:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_32:
-            return vk::Format::eR32Sfloat;
+            return VK_FORMAT_R32_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_32_32:
-            return vk::Format::eR32G32Sfloat;
+            return VK_FORMAT_R32G32_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_32_32_32:
-            return vk::Format::eR32G32B32Sfloat;
+            return VK_FORMAT_R32G32B32_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
-            return vk::Format::eR32G32B32A32Sfloat;
+            return VK_FORMAT_R32G32B32A32_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_16:
-            return vk::Format::eR16Sfloat;
+            return VK_FORMAT_R16_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_16_16:
-            return vk::Format::eR16G16Sfloat;
+            return VK_FORMAT_R16G16_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_16_16_16:
-            return vk::Format::eR16G16B16Sfloat;
+            return VK_FORMAT_R16G16B16_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
-            return vk::Format::eR16G16B16A16Sfloat;
+            return VK_FORMAT_R16G16B16A16_SFLOAT;
         default:
             break;
         }
@@ -450,210 +453,210 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
     return {};
 }
 
-vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison) {
+VkCompareOp ComparisonOp(Maxwell::ComparisonOp comparison) {
     switch (comparison) {
     case Maxwell::ComparisonOp::Never:
     case Maxwell::ComparisonOp::NeverOld:
-        return vk::CompareOp::eNever;
+        return VK_COMPARE_OP_NEVER;
     case Maxwell::ComparisonOp::Less:
     case Maxwell::ComparisonOp::LessOld:
-        return vk::CompareOp::eLess;
+        return VK_COMPARE_OP_LESS;
     case Maxwell::ComparisonOp::Equal:
     case Maxwell::ComparisonOp::EqualOld:
-        return vk::CompareOp::eEqual;
+        return VK_COMPARE_OP_EQUAL;
     case Maxwell::ComparisonOp::LessEqual:
     case Maxwell::ComparisonOp::LessEqualOld:
-        return vk::CompareOp::eLessOrEqual;
+        return VK_COMPARE_OP_LESS_OR_EQUAL;
     case Maxwell::ComparisonOp::Greater:
     case Maxwell::ComparisonOp::GreaterOld:
-        return vk::CompareOp::eGreater;
+        return VK_COMPARE_OP_GREATER;
     case Maxwell::ComparisonOp::NotEqual:
     case Maxwell::ComparisonOp::NotEqualOld:
-        return vk::CompareOp::eNotEqual;
+        return VK_COMPARE_OP_NOT_EQUAL;
     case Maxwell::ComparisonOp::GreaterEqual:
     case Maxwell::ComparisonOp::GreaterEqualOld:
-        return vk::CompareOp::eGreaterOrEqual;
+        return VK_COMPARE_OP_GREATER_OR_EQUAL;
     case Maxwell::ComparisonOp::Always:
     case Maxwell::ComparisonOp::AlwaysOld:
-        return vk::CompareOp::eAlways;
+        return VK_COMPARE_OP_ALWAYS;
     }
     UNIMPLEMENTED_MSG("Unimplemented comparison op={}", static_cast<u32>(comparison));
     return {};
 }
 
-vk::IndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format) {
+VkIndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format) {
     switch (index_format) {
     case Maxwell::IndexFormat::UnsignedByte:
         if (!device.IsExtIndexTypeUint8Supported()) {
             UNIMPLEMENTED_MSG("Native uint8 indices are not supported on this device");
-            return vk::IndexType::eUint16;
+            return VK_INDEX_TYPE_UINT16;
         }
-        return vk::IndexType::eUint8EXT;
+        return VK_INDEX_TYPE_UINT8_EXT;
     case Maxwell::IndexFormat::UnsignedShort:
-        return vk::IndexType::eUint16;
+        return VK_INDEX_TYPE_UINT16;
     case Maxwell::IndexFormat::UnsignedInt:
-        return vk::IndexType::eUint32;
+        return VK_INDEX_TYPE_UINT32;
     }
     UNIMPLEMENTED_MSG("Unimplemented index_format={}", static_cast<u32>(index_format));
     return {};
 }
 
-vk::StencilOp StencilOp(Maxwell::StencilOp stencil_op) {
+VkStencilOp StencilOp(Maxwell::StencilOp stencil_op) {
     switch (stencil_op) {
     case Maxwell::StencilOp::Keep:
     case Maxwell::StencilOp::KeepOGL:
-        return vk::StencilOp::eKeep;
+        return VK_STENCIL_OP_KEEP;
     case Maxwell::StencilOp::Zero:
     case Maxwell::StencilOp::ZeroOGL:
-        return vk::StencilOp::eZero;
+        return VK_STENCIL_OP_ZERO;
     case Maxwell::StencilOp::Replace:
     case Maxwell::StencilOp::ReplaceOGL:
-        return vk::StencilOp::eReplace;
+        return VK_STENCIL_OP_REPLACE;
     case Maxwell::StencilOp::Incr:
     case Maxwell::StencilOp::IncrOGL:
-        return vk::StencilOp::eIncrementAndClamp;
+        return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
     case Maxwell::StencilOp::Decr:
     case Maxwell::StencilOp::DecrOGL:
-        return vk::StencilOp::eDecrementAndClamp;
+        return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
     case Maxwell::StencilOp::Invert:
     case Maxwell::StencilOp::InvertOGL:
-        return vk::StencilOp::eInvert;
+        return VK_STENCIL_OP_INVERT;
     case Maxwell::StencilOp::IncrWrap:
     case Maxwell::StencilOp::IncrWrapOGL:
-        return vk::StencilOp::eIncrementAndWrap;
+        return VK_STENCIL_OP_INCREMENT_AND_WRAP;
     case Maxwell::StencilOp::DecrWrap:
     case Maxwell::StencilOp::DecrWrapOGL:
-        return vk::StencilOp::eDecrementAndWrap;
+        return VK_STENCIL_OP_DECREMENT_AND_WRAP;
     }
     UNIMPLEMENTED_MSG("Unimplemented stencil op={}", static_cast<u32>(stencil_op));
     return {};
 }
 
-vk::BlendOp BlendEquation(Maxwell::Blend::Equation equation) {
+VkBlendOp BlendEquation(Maxwell::Blend::Equation equation) {
     switch (equation) {
     case Maxwell::Blend::Equation::Add:
     case Maxwell::Blend::Equation::AddGL:
-        return vk::BlendOp::eAdd;
+        return VK_BLEND_OP_ADD;
     case Maxwell::Blend::Equation::Subtract:
     case Maxwell::Blend::Equation::SubtractGL:
-        return vk::BlendOp::eSubtract;
+        return VK_BLEND_OP_SUBTRACT;
     case Maxwell::Blend::Equation::ReverseSubtract:
     case Maxwell::Blend::Equation::ReverseSubtractGL:
-        return vk::BlendOp::eReverseSubtract;
+        return VK_BLEND_OP_REVERSE_SUBTRACT;
     case Maxwell::Blend::Equation::Min:
     case Maxwell::Blend::Equation::MinGL:
-        return vk::BlendOp::eMin;
+        return VK_BLEND_OP_MIN;
     case Maxwell::Blend::Equation::Max:
     case Maxwell::Blend::Equation::MaxGL:
-        return vk::BlendOp::eMax;
+        return VK_BLEND_OP_MAX;
     }
     UNIMPLEMENTED_MSG("Unimplemented blend equation={}", static_cast<u32>(equation));
     return {};
 }
 
-vk::BlendFactor BlendFactor(Maxwell::Blend::Factor factor) {
+VkBlendFactor BlendFactor(Maxwell::Blend::Factor factor) {
     switch (factor) {
     case Maxwell::Blend::Factor::Zero:
     case Maxwell::Blend::Factor::ZeroGL:
-        return vk::BlendFactor::eZero;
+        return VK_BLEND_FACTOR_ZERO;
     case Maxwell::Blend::Factor::One:
     case Maxwell::Blend::Factor::OneGL:
-        return vk::BlendFactor::eOne;
+        return VK_BLEND_FACTOR_ONE;
     case Maxwell::Blend::Factor::SourceColor:
     case Maxwell::Blend::Factor::SourceColorGL:
-        return vk::BlendFactor::eSrcColor;
+        return VK_BLEND_FACTOR_SRC_COLOR;
     case Maxwell::Blend::Factor::OneMinusSourceColor:
     case Maxwell::Blend::Factor::OneMinusSourceColorGL:
-        return vk::BlendFactor::eOneMinusSrcColor;
+        return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
     case Maxwell::Blend::Factor::SourceAlpha:
     case Maxwell::Blend::Factor::SourceAlphaGL:
-        return vk::BlendFactor::eSrcAlpha;
+        return VK_BLEND_FACTOR_SRC_ALPHA;
     case Maxwell::Blend::Factor::OneMinusSourceAlpha:
     case Maxwell::Blend::Factor::OneMinusSourceAlphaGL:
-        return vk::BlendFactor::eOneMinusSrcAlpha;
+        return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
     case Maxwell::Blend::Factor::DestAlpha:
     case Maxwell::Blend::Factor::DestAlphaGL:
-        return vk::BlendFactor::eDstAlpha;
+        return VK_BLEND_FACTOR_DST_ALPHA;
     case Maxwell::Blend::Factor::OneMinusDestAlpha:
     case Maxwell::Blend::Factor::OneMinusDestAlphaGL:
-        return vk::BlendFactor::eOneMinusDstAlpha;
+        return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
     case Maxwell::Blend::Factor::DestColor:
     case Maxwell::Blend::Factor::DestColorGL:
-        return vk::BlendFactor::eDstColor;
+        return VK_BLEND_FACTOR_DST_COLOR;
     case Maxwell::Blend::Factor::OneMinusDestColor:
     case Maxwell::Blend::Factor::OneMinusDestColorGL:
-        return vk::BlendFactor::eOneMinusDstColor;
+        return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
     case Maxwell::Blend::Factor::SourceAlphaSaturate:
     case Maxwell::Blend::Factor::SourceAlphaSaturateGL:
-        return vk::BlendFactor::eSrcAlphaSaturate;
+        return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
     case Maxwell::Blend::Factor::Source1Color:
     case Maxwell::Blend::Factor::Source1ColorGL:
-        return vk::BlendFactor::eSrc1Color;
+        return VK_BLEND_FACTOR_SRC1_COLOR;
     case Maxwell::Blend::Factor::OneMinusSource1Color:
     case Maxwell::Blend::Factor::OneMinusSource1ColorGL:
-        return vk::BlendFactor::eOneMinusSrc1Color;
+        return VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR;
     case Maxwell::Blend::Factor::Source1Alpha:
     case Maxwell::Blend::Factor::Source1AlphaGL:
-        return vk::BlendFactor::eSrc1Alpha;
+        return VK_BLEND_FACTOR_SRC1_ALPHA;
     case Maxwell::Blend::Factor::OneMinusSource1Alpha:
     case Maxwell::Blend::Factor::OneMinusSource1AlphaGL:
-        return vk::BlendFactor::eOneMinusSrc1Alpha;
+        return VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
     case Maxwell::Blend::Factor::ConstantColor:
     case Maxwell::Blend::Factor::ConstantColorGL:
-        return vk::BlendFactor::eConstantColor;
+        return VK_BLEND_FACTOR_CONSTANT_COLOR;
     case Maxwell::Blend::Factor::OneMinusConstantColor:
     case Maxwell::Blend::Factor::OneMinusConstantColorGL:
-        return vk::BlendFactor::eOneMinusConstantColor;
+        return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
     case Maxwell::Blend::Factor::ConstantAlpha:
     case Maxwell::Blend::Factor::ConstantAlphaGL:
-        return vk::BlendFactor::eConstantAlpha;
+        return VK_BLEND_FACTOR_CONSTANT_ALPHA;
     case Maxwell::Blend::Factor::OneMinusConstantAlpha:
     case Maxwell::Blend::Factor::OneMinusConstantAlphaGL:
-        return vk::BlendFactor::eOneMinusConstantAlpha;
+        return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA;
     }
     UNIMPLEMENTED_MSG("Unimplemented blend factor={}", static_cast<u32>(factor));
     return {};
 }
 
-vk::FrontFace FrontFace(Maxwell::FrontFace front_face) {
+VkFrontFace FrontFace(Maxwell::FrontFace front_face) {
     switch (front_face) {
     case Maxwell::FrontFace::ClockWise:
-        return vk::FrontFace::eClockwise;
+        return VK_FRONT_FACE_CLOCKWISE;
     case Maxwell::FrontFace::CounterClockWise:
-        return vk::FrontFace::eCounterClockwise;
+        return VK_FRONT_FACE_COUNTER_CLOCKWISE;
     }
     UNIMPLEMENTED_MSG("Unimplemented front face={}", static_cast<u32>(front_face));
     return {};
 }
 
-vk::CullModeFlags CullFace(Maxwell::CullFace cull_face) {
+VkCullModeFlags CullFace(Maxwell::CullFace cull_face) {
     switch (cull_face) {
     case Maxwell::CullFace::Front:
-        return vk::CullModeFlagBits::eFront;
+        return VK_CULL_MODE_FRONT_BIT;
     case Maxwell::CullFace::Back:
-        return vk::CullModeFlagBits::eBack;
+        return VK_CULL_MODE_BACK_BIT;
     case Maxwell::CullFace::FrontAndBack:
-        return vk::CullModeFlagBits::eFrontAndBack;
+        return VK_CULL_MODE_FRONT_AND_BACK;
     }
     UNIMPLEMENTED_MSG("Unimplemented cull face={}", static_cast<u32>(cull_face));
     return {};
 }
 
-vk::ComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle) {
+VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle) {
     switch (swizzle) {
     case Tegra::Texture::SwizzleSource::Zero:
-        return vk::ComponentSwizzle::eZero;
+        return VK_COMPONENT_SWIZZLE_ZERO;
     case Tegra::Texture::SwizzleSource::R:
-        return vk::ComponentSwizzle::eR;
+        return VK_COMPONENT_SWIZZLE_R;
     case Tegra::Texture::SwizzleSource::G:
-        return vk::ComponentSwizzle::eG;
+        return VK_COMPONENT_SWIZZLE_G;
     case Tegra::Texture::SwizzleSource::B:
-        return vk::ComponentSwizzle::eB;
+        return VK_COMPONENT_SWIZZLE_B;
     case Tegra::Texture::SwizzleSource::A:
-        return vk::ComponentSwizzle::eA;
+        return VK_COMPONENT_SWIZZLE_A;
     case Tegra::Texture::SwizzleSource::OneInt:
     case Tegra::Texture::SwizzleSource::OneFloat:
-        return vk::ComponentSwizzle::eOne;
+        return VK_COMPONENT_SWIZZLE_ONE;
     }
     UNIMPLEMENTED_MSG("Unimplemented swizzle source={}", static_cast<u32>(swizzle));
     return {};

src/video_core/renderer_vulkan/maxwell_to_vk.h

@@ -6,8 +6,8 @@
 
 #include "common/common_types.h"
 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 #include "video_core/surface.h"
 #include "video_core/textures/texture.h"
 
@@ -18,46 +18,45 @@ using PixelFormat = VideoCore::Surface::PixelFormat;
 namespace Sampler {
 
-vk::Filter Filter(Tegra::Texture::TextureFilter filter);
+VkFilter Filter(Tegra::Texture::TextureFilter filter);
 
-vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter);
+VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter);
 
-vk::SamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
-                                Tegra::Texture::TextureFilter filter);
+VkSamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
+                              Tegra::Texture::TextureFilter filter);
 
-vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func);
+VkCompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func);
 
 } // namespace Sampler
 
 struct FormatInfo {
-    vk::Format format;
+    VkFormat format;
     bool attachable;
     bool storage;
 };
 
 FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFormat pixel_format);
 
-vk::ShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage);
+VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage);
 
-vk::PrimitiveTopology PrimitiveTopology(const VKDevice& device,
-                                        Maxwell::PrimitiveTopology topology);
+VkPrimitiveTopology PrimitiveTopology(const VKDevice& device, Maxwell::PrimitiveTopology topology);
 
-vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size);
+VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size);
 
-vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison);
+VkCompareOp ComparisonOp(Maxwell::ComparisonOp comparison);
 
-vk::IndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format);
+VkIndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format);
 
-vk::StencilOp StencilOp(Maxwell::StencilOp stencil_op);
+VkStencilOp StencilOp(Maxwell::StencilOp stencil_op);
 
-vk::BlendOp BlendEquation(Maxwell::Blend::Equation equation);
+VkBlendOp BlendEquation(Maxwell::Blend::Equation equation);
 
-vk::BlendFactor BlendFactor(Maxwell::Blend::Factor factor);
+VkBlendFactor BlendFactor(Maxwell::Blend::Factor factor);
 
-vk::FrontFace FrontFace(Maxwell::FrontFace front_face);
+VkFrontFace FrontFace(Maxwell::FrontFace front_face);
 
-vk::CullModeFlags CullFace(Maxwell::CullFace cull_face);
+VkCullModeFlags CullFace(Maxwell::CullFace cull_face);
 
-vk::ComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle);
+VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle);
 
 } // namespace Vulkan::MaxwellToVK

src/video_core/renderer_vulkan/renderer_vulkan.cpp

@@ -24,7 +24,6 @@
 #include "core/settings.h"
 #include "core/telemetry_session.h"
 #include "video_core/gpu.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/renderer_vulkan.h"
 #include "video_core/renderer_vulkan/vk_blit_screen.h"
 #include "video_core/renderer_vulkan/vk_device.h"
@@ -34,8 +33,9 @@
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_state_tracker.h"
 #include "video_core/renderer_vulkan/vk_swapchain.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 
-// Include these late to avoid changing Vulkan-Hpp's dynamic dispatcher size
+// Include these late to avoid polluting previous headers
 #ifdef _WIN32
 #include <windows.h>
 // ensure include order
@@ -54,20 +54,19 @@ namespace {
 using Core::Frontend::WindowSystemType;

-VkBool32 DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT severity_,
+VkBool32 DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
                        VkDebugUtilsMessageTypeFlagsEXT type,
                        const VkDebugUtilsMessengerCallbackDataEXT* data,
                        [[maybe_unused]] void* user_data) {
-    const auto severity{static_cast<vk::DebugUtilsMessageSeverityFlagBitsEXT>(severity_)};
     const char* message{data->pMessage};
-    if (severity & vk::DebugUtilsMessageSeverityFlagBitsEXT::eError) {
+    if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
         LOG_CRITICAL(Render_Vulkan, "{}", message);
-    } else if (severity & vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning) {
+    } else if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) {
         LOG_WARNING(Render_Vulkan, "{}", message);
-    } else if (severity & vk::DebugUtilsMessageSeverityFlagBitsEXT::eInfo) {
+    } else if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) {
         LOG_INFO(Render_Vulkan, "{}", message);
-    } else if (severity & vk::DebugUtilsMessageSeverityFlagBitsEXT::eVerbose) {
+    } else if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT) {
         LOG_DEBUG(Render_Vulkan, "{}", message);
     }
     return VK_FALSE;
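The callback above is registered later through instance.TryCreateDebugCallback. For reference, a hypothetical raw-API registration using the same severity and type masks the removed vk-hpp code requested (this helper is a sketch; the `create` pointer must be fetched with vkGetInstanceProcAddr, and DebugCallback is assumed to be in scope):

    #include <vulkan/vulkan.h>

    // Returns VK_NULL_HANDLE on failure.
    VkDebugUtilsMessengerEXT RegisterCallback(VkInstance instance,
                                              PFN_vkCreateDebugUtilsMessengerEXT create) {
        VkDebugUtilsMessengerCreateInfoEXT ci{};
        ci.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
        ci.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
                             VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
                             VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT |
                             VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT;
        ci.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
                         VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
                         VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
        ci.pfnUserCallback = DebugCallback;
        VkDebugUtilsMessengerEXT messenger = VK_NULL_HANDLE;
        if (create(instance, &ci, nullptr, &messenger) != VK_SUCCESS) {
            return VK_NULL_HANDLE;
        }
        return messenger;
    }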
@@ -94,22 +93,24 @@ Common::DynamicLibrary OpenVulkanLibrary() {
     return library;
 }

-UniqueInstance CreateInstance(Common::DynamicLibrary& library, vk::DispatchLoaderDynamic& dld,
+vk::Instance CreateInstance(Common::DynamicLibrary& library, vk::InstanceDispatch& dld,
                             WindowSystemType window_type = WindowSystemType::Headless,
                             bool enable_layers = false) {
     if (!library.IsOpen()) {
         LOG_ERROR(Render_Vulkan, "Vulkan library not available");
-        return UniqueInstance{};
+        return {};
     }
-    PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
-    if (!library.GetSymbol("vkGetInstanceProcAddr", &vkGetInstanceProcAddr)) {
+    if (!library.GetSymbol("vkGetInstanceProcAddr", &dld.vkGetInstanceProcAddr)) {
         LOG_ERROR(Render_Vulkan, "vkGetInstanceProcAddr not present in Vulkan");
-        return UniqueInstance{};
+        return {};
     }
-    dld.init(vkGetInstanceProcAddr);
+    if (!vk::Load(dld)) {
+        LOG_ERROR(Render_Vulkan, "Failed to load Vulkan function pointers");
+        return {};
+    }

     std::vector<const char*> extensions;
-    extensions.reserve(4);
+    extensions.reserve(6);
     switch (window_type) {
     case Core::Frontend::WindowSystemType::Headless:
         break;
@@ -136,45 +137,39 @@ UniqueInstance CreateInstance(Common::DynamicLibrary& library, vk::DispatchLoade
     if (enable_layers) {
         extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
     }
+    extensions.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);

-    u32 num_properties;
-    if (vk::enumerateInstanceExtensionProperties(nullptr, &num_properties, nullptr, dld) !=
-        vk::Result::eSuccess) {
-        LOG_ERROR(Render_Vulkan, "Failed to query number of extension properties");
-        return UniqueInstance{};
-    }
-    std::vector<vk::ExtensionProperties> properties(num_properties);
-    if (vk::enumerateInstanceExtensionProperties(nullptr, &num_properties, properties.data(),
-                                                 dld) != vk::Result::eSuccess) {
+    const std::optional properties = vk::EnumerateInstanceExtensionProperties(dld);
+    if (!properties) {
         LOG_ERROR(Render_Vulkan, "Failed to query extension properties");
-        return UniqueInstance{};
+        return {};
     }
     for (const char* extension : extensions) {
         const auto it =
-            std::find_if(properties.begin(), properties.end(), [extension](const auto& prop) {
+            std::find_if(properties->begin(), properties->end(), [extension](const auto& prop) {
                 return !std::strcmp(extension, prop.extensionName);
             });
-        if (it == properties.end()) {
+        if (it == properties->end()) {
             LOG_ERROR(Render_Vulkan, "Required instance extension {} is not available", extension);
-            return UniqueInstance{};
+            return {};
         }
     }

-    const vk::ApplicationInfo application_info("yuzu Emulator", VK_MAKE_VERSION(0, 1, 0),
-                                               "yuzu Emulator", VK_MAKE_VERSION(0, 1, 0),
-                                               VK_API_VERSION_1_1);
-    const std::array layers = {"VK_LAYER_LUNARG_standard_validation"};
-    const vk::InstanceCreateInfo instance_ci(
-        {}, &application_info, enable_layers ? static_cast<u32>(layers.size()) : 0, layers.data(),
-        static_cast<u32>(extensions.size()), extensions.data());
-    vk::Instance unsafe_instance;
-    if (vk::createInstance(&instance_ci, nullptr, &unsafe_instance, dld) != vk::Result::eSuccess) {
-        LOG_ERROR(Render_Vulkan, "Failed to create Vulkan instance");
-        return UniqueInstance{};
-    }
-    dld.init(unsafe_instance);
-    return UniqueInstance(unsafe_instance, {nullptr, dld});
+    static constexpr std::array layers_data{"VK_LAYER_LUNARG_standard_validation"};
+    vk::Span<const char*> layers = layers_data;
+    if (!enable_layers) {
+        layers = {};
+    }
+    vk::Instance instance = vk::Instance::Create(layers, extensions, dld);
+    if (!instance) {
+        LOG_ERROR(Render_Vulkan, "Failed to create Vulkan instance");
+        return {};
+    }
+    if (!vk::Load(*instance, dld)) {
+        LOG_ERROR(Render_Vulkan, "Failed to load Vulkan instance function pointers");
+    }
+    return instance;
 }

 std::string GetReadableVersion(u32 version) {
@@ -187,14 +182,14 @@ std::string GetDriverVersion(const VKDevice& device) {
     // https://github.com/SaschaWillems/vulkan.gpuinfo.org/blob/5dddea46ea1120b0df14eef8f15ff8e318e35462/functions.php#L308-L314
     const u32 version = device.GetDriverVersion();

-    if (device.GetDriverID() == vk::DriverIdKHR::eNvidiaProprietary) {
+    if (device.GetDriverID() == VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR) {
         const u32 major = (version >> 22) & 0x3ff;
         const u32 minor = (version >> 14) & 0x0ff;
         const u32 secondary = (version >> 6) & 0x0ff;
         const u32 tertiary = version & 0x003f;
         return fmt::format("{}.{}.{}.{}", major, minor, secondary, tertiary);
     }
-    if (device.GetDriverID() == vk::DriverIdKHR::eIntelProprietaryWindows) {
+    if (device.GetDriverID() == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR) {
         const u32 major = version >> 14;
         const u32 minor = version & 0x3fff;
         return fmt::format("{}.{}", major, minor);
@@ -307,10 +302,8 @@ void RendererVulkan::ShutDown() {
     if (!device) {
         return;
     }
-    const auto dev = device->GetLogical();
-    const auto& dld = device->GetDispatchLoader();
-    if (dev && dld.vkDeviceWaitIdle) {
-        dev.waitIdle(dld);
+    if (const auto& dev = device->GetLogical()) {
+        dev.WaitIdle();
     }

     rasterizer.reset();
@@ -326,23 +319,11 @@ bool RendererVulkan::CreateDebugCallback() {
     if (!Settings::values.renderer_debug) {
         return true;
     }
-    const vk::DebugUtilsMessengerCreateInfoEXT callback_ci(
-        {},
-        vk::DebugUtilsMessageSeverityFlagBitsEXT::eError |
-            vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning |
-            vk::DebugUtilsMessageSeverityFlagBitsEXT::eInfo |
-            vk::DebugUtilsMessageSeverityFlagBitsEXT::eVerbose,
-        vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral |
-            vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation |
-            vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance,
-        &DebugCallback, nullptr);
-    vk::DebugUtilsMessengerEXT unsafe_callback;
-    if (instance->createDebugUtilsMessengerEXT(&callback_ci, nullptr, &unsafe_callback, dld) !=
-        vk::Result::eSuccess) {
+    debug_callback = instance.TryCreateDebugCallback(DebugCallback);
+    if (!debug_callback) {
         LOG_ERROR(Render_Vulkan, "Failed to create debug callback");
         return false;
     }
-    debug_callback = UniqueDebugUtilsMessengerEXT(unsafe_callback, {*instance, nullptr, dld});
     return true;
 }
@@ -357,8 +338,8 @@ bool RendererVulkan::CreateSurface() {
                                               nullptr, 0, nullptr, hWnd};
         const auto vkCreateWin32SurfaceKHR = reinterpret_cast<PFN_vkCreateWin32SurfaceKHR>(
             dld.vkGetInstanceProcAddr(*instance, "vkCreateWin32SurfaceKHR"));
-        if (!vkCreateWin32SurfaceKHR || vkCreateWin32SurfaceKHR(instance.get(), &win32_ci, nullptr,
-                                                                &unsafe_surface) != VK_SUCCESS) {
+        if (!vkCreateWin32SurfaceKHR ||
+            vkCreateWin32SurfaceKHR(*instance, &win32_ci, nullptr, &unsafe_surface) != VK_SUCCESS) {
             LOG_ERROR(Render_Vulkan, "Failed to initialize Win32 surface");
             return false;
         }
@@ -372,8 +353,8 @@ bool RendererVulkan::CreateSurface() {
             reinterpret_cast<Window>(window_info.render_surface)};
         const auto vkCreateXlibSurfaceKHR = reinterpret_cast<PFN_vkCreateXlibSurfaceKHR>(
             dld.vkGetInstanceProcAddr(*instance, "vkCreateXlibSurfaceKHR"));
-        if (!vkCreateXlibSurfaceKHR || vkCreateXlibSurfaceKHR(instance.get(), &xlib_ci, nullptr,
-                                                              &unsafe_surface) != VK_SUCCESS) {
+        if (!vkCreateXlibSurfaceKHR ||
+            vkCreateXlibSurfaceKHR(*instance, &xlib_ci, nullptr, &unsafe_surface) != VK_SUCCESS) {
             LOG_ERROR(Render_Vulkan, "Failed to initialize Xlib surface");
             return false;
         }
@@ -386,7 +367,7 @@ bool RendererVulkan::CreateSurface() {
         const auto vkCreateWaylandSurfaceKHR = reinterpret_cast<PFN_vkCreateWaylandSurfaceKHR>(
             dld.vkGetInstanceProcAddr(*instance, "vkCreateWaylandSurfaceKHR"));
         if (!vkCreateWaylandSurfaceKHR ||
-            vkCreateWaylandSurfaceKHR(instance.get(), &wayland_ci, nullptr, &unsafe_surface) !=
+            vkCreateWaylandSurfaceKHR(*instance, &wayland_ci, nullptr, &unsafe_surface) !=
                 VK_SUCCESS) {
             LOG_ERROR(Render_Vulkan, "Failed to initialize Wayland surface");
             return false;
@@ -398,26 +379,30 @@ bool RendererVulkan::CreateSurface() {
         return false;
     }

-    surface = UniqueSurfaceKHR(unsafe_surface, {*instance, nullptr, dld});
+    surface = vk::SurfaceKHR(unsafe_surface, *instance, dld);
     return true;
 }

 bool RendererVulkan::PickDevices() {
-    const auto devices = instance->enumeratePhysicalDevices(dld);
+    const auto devices = instance.EnumeratePhysicalDevices();
+    if (!devices) {
+        LOG_ERROR(Render_Vulkan, "Failed to enumerate physical devices");
+        return false;
+    }
+
     const s32 device_index = Settings::values.vulkan_device;
-    if (device_index < 0 || device_index >= static_cast<s32>(devices.size())) {
+    if (device_index < 0 || device_index >= static_cast<s32>(devices->size())) {
         LOG_ERROR(Render_Vulkan, "Invalid device index {}!", device_index);
         return false;
     }
-    const vk::PhysicalDevice physical_device = devices[static_cast<std::size_t>(device_index)];
+    const vk::PhysicalDevice physical_device((*devices)[static_cast<std::size_t>(device_index)],
+                                             dld);

-    if (!VKDevice::IsSuitable(physical_device, *surface, dld)) {
+    if (!VKDevice::IsSuitable(physical_device, *surface)) {
         return false;
     }

-    device = std::make_unique<VKDevice>(dld, physical_device, *surface);
-    return device->Create(*instance);
+    device = std::make_unique<VKDevice>(*instance, physical_device, *surface, dld);
+    return device->Create();
 }

 void RendererVulkan::Report() const {
@@ -444,30 +429,22 @@ void RendererVulkan::Report() const {
 }

 std::vector<std::string> RendererVulkan::EnumerateDevices() {
-    // Avoid putting DispatchLoaderDynamic, it's too large
-    auto dld_memory = std::make_unique<vk::DispatchLoaderDynamic>();
-    auto& dld = *dld_memory;
-
+    vk::InstanceDispatch dld;
     Common::DynamicLibrary library = OpenVulkanLibrary();
-    UniqueInstance instance = CreateInstance(library, dld);
+    vk::Instance instance = CreateInstance(library, dld);
     if (!instance) {
         return {};
     }

-    u32 num_devices;
-    if (instance->enumeratePhysicalDevices(&num_devices, nullptr, dld) != vk::Result::eSuccess) {
-        return {};
-    }
-    std::vector<vk::PhysicalDevice> devices(num_devices);
-    if (instance->enumeratePhysicalDevices(&num_devices, devices.data(), dld) !=
-        vk::Result::eSuccess) {
+    const std::optional physical_devices = instance.EnumeratePhysicalDevices();
+    if (!physical_devices) {
         return {};
     }

     std::vector<std::string> names;
-    names.reserve(num_devices);
-    for (auto& device : devices) {
-        names.push_back(device.getProperties(dld).deviceName);
+    names.reserve(physical_devices->size());
+    for (const auto& device : *physical_devices) {
+        names.push_back(vk::PhysicalDevice(device, dld).GetProperties().deviceName);
     }
     return names;
 }
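The file above loads Vulkan function pointers in three stages: the loader symbol vkGetInstanceProcAddr from the shared library, global entry points via vk::Load(dld), and instance-level entry points via vk::Load(*instance, dld). A self-contained raw-Vulkan sketch of the same ordering; the Dispatch struct and helper names below are illustrative assumptions, not the wrapper's actual implementation:

    #include <vulkan/vulkan.h>

    struct Dispatch {
        PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = nullptr;           // stage 1: library symbol
        PFN_vkCreateInstance vkCreateInstance = nullptr;                     // stage 2: global level
        PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices = nullptr; // stage 3: instance level
    };

    bool LoadGlobal(Dispatch& dld) {
        // Global entry points are queried with a null instance.
        dld.vkCreateInstance = reinterpret_cast<PFN_vkCreateInstance>(
            dld.vkGetInstanceProcAddr(nullptr, "vkCreateInstance"));
        return dld.vkCreateInstance != nullptr;
    }

    bool LoadInstance(VkInstance instance, Dispatch& dld) {
        // Instance entry points require the freshly created instance.
        dld.vkEnumeratePhysicalDevices = reinterpret_cast<PFN_vkEnumeratePhysicalDevices>(
            dld.vkGetInstanceProcAddr(instance, "vkEnumeratePhysicalDevices"));
        return dld.vkEnumeratePhysicalDevices != nullptr;
    }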

View File

@@ -12,7 +12,7 @@
 #include "common/dynamic_library.h"
 #include "video_core/renderer_base.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Core {
 class System;
@@ -61,14 +61,14 @@ private:
     Core::System& system;

     Common::DynamicLibrary library;
-    vk::DispatchLoaderDynamic dld;
+    vk::InstanceDispatch dld;

-    UniqueInstance instance;
-    UniqueSurfaceKHR surface;
+    vk::Instance instance;
+    vk::SurfaceKHR surface;

     VKScreenInfo screen_info;

-    UniqueDebugUtilsMessengerEXT debug_callback;
+    vk::DebugCallback debug_callback;
     std::unique_ptr<VKDevice> device;
     std::unique_ptr<VKSwapchain> swapchain;
     std::unique_ptr<VKMemoryManager> memory_manager;
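The Unique* members above were vulkan.hpp unique handles; their replacements are the wrapper's own RAII types. A stripped-down sketch of the idea behind such a type (this Surface class is hypothetical; wrapper.h's real types differ):

    #include <utility>
    #include <vulkan/vulkan.h>

    // Owns a VkSurfaceKHR and destroys it through a dynamically loaded entry point.
    class Surface {
    public:
        Surface() = default;
        Surface(VkSurfaceKHR handle_, VkInstance owner_, PFN_vkDestroySurfaceKHR destroy_)
            : handle{handle_}, owner{owner_}, destroy{destroy_} {}
        Surface(Surface&& rhs) noexcept
            : handle{std::exchange(rhs.handle, VkSurfaceKHR{})}, owner{rhs.owner},
              destroy{rhs.destroy} {}
        Surface(const Surface&) = delete;
        Surface& operator=(const Surface&) = delete;
        ~Surface() {
            if (handle != VK_NULL_HANDLE) {
                destroy(owner, handle, nullptr); // entry point loaded at instance creation
            }
        }
        VkSurfaceKHR operator*() const { return handle; }

    private:
        VkSurfaceKHR handle = VK_NULL_HANDLE;
        VkInstance owner = VK_NULL_HANDLE;
        PFN_vkDestroySurfaceKHR destroy = nullptr;
    };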

View File

@@ -20,7 +20,6 @@
 #include "video_core/gpu.h"
 #include "video_core/morton.h"
 #include "video_core/rasterizer_interface.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/renderer_vulkan.h"
 #include "video_core/renderer_vulkan/vk_blit_screen.h"
 #include "video_core/renderer_vulkan/vk_device.h"
@@ -30,6 +29,7 @@
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_shader_util.h"
 #include "video_core/renderer_vulkan/vk_swapchain.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 #include "video_core/surface.h"

 namespace Vulkan {
@@ -140,16 +140,25 @@ struct ScreenRectVertex {
     std::array<f32, 2> position;
     std::array<f32, 2> tex_coord;

-    static vk::VertexInputBindingDescription GetDescription() {
-        return vk::VertexInputBindingDescription(0, sizeof(ScreenRectVertex),
-                                                 vk::VertexInputRate::eVertex);
+    static VkVertexInputBindingDescription GetDescription() {
+        VkVertexInputBindingDescription description;
+        description.binding = 0;
+        description.stride = sizeof(ScreenRectVertex);
+        description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
+        return description;
     }

-    static std::array<vk::VertexInputAttributeDescription, 2> GetAttributes() {
-        return {vk::VertexInputAttributeDescription(0, 0, vk::Format::eR32G32Sfloat,
-                                                    offsetof(ScreenRectVertex, position)),
-                vk::VertexInputAttributeDescription(1, 0, vk::Format::eR32G32Sfloat,
-                                                    offsetof(ScreenRectVertex, tex_coord))};
+    static std::array<VkVertexInputAttributeDescription, 2> GetAttributes() {
+        std::array<VkVertexInputAttributeDescription, 2> attributes;
+        attributes[0].location = 0;
+        attributes[0].binding = 0;
+        attributes[0].format = VK_FORMAT_R32G32_SFLOAT;
+        attributes[0].offset = offsetof(ScreenRectVertex, position);
+        attributes[1].location = 1;
+        attributes[1].binding = 0;
+        attributes[1].format = VK_FORMAT_R32G32_SFLOAT;
+        attributes[1].offset = offsetof(ScreenRectVertex, tex_coord);
+        return attributes;
     }
 };
@@ -172,16 +181,16 @@ std::size_t GetSizeInBytes(const Tegra::FramebufferConfig& framebuffer) {
            static_cast<std::size_t>(framebuffer.height) * GetBytesPerPixel(framebuffer);
 }

-vk::Format GetFormat(const Tegra::FramebufferConfig& framebuffer) {
+VkFormat GetFormat(const Tegra::FramebufferConfig& framebuffer) {
     switch (framebuffer.pixel_format) {
     case Tegra::FramebufferConfig::PixelFormat::ABGR8:
-        return vk::Format::eA8B8G8R8UnormPack32;
+        return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
     case Tegra::FramebufferConfig::PixelFormat::RGB565:
-        return vk::Format::eR5G6B5UnormPack16;
+        return VK_FORMAT_R5G6B5_UNORM_PACK16;
     default:
         UNIMPLEMENTED_MSG("Unknown framebuffer pixel format: {}",
                           static_cast<u32>(framebuffer.pixel_format));
-        return vk::Format::eA8B8G8R8UnormPack32;
+        return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
     }
 }
@@ -219,7 +228,7 @@ void VKBlitScreen::Recreate() {
     CreateDynamicResources();
 }

-std::tuple<VKFence&, vk::Semaphore> VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
-                                                       bool use_accelerated) {
+std::tuple<VKFence&, VkSemaphore> VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
+                                                     bool use_accelerated) {
     RefreshResources(framebuffer);
@@ -255,46 +264,76 @@ std::tuple<VKFence&, vk::Semaphore> VKBlitScreen::Draw(const Tegra::FramebufferC
                           framebuffer.stride, block_height_log2, framebuffer.height, 0, 1, 1,
                           map.GetAddress() + image_offset, host_ptr);

-        blit_image->Transition(0, 1, 0, 1, vk::PipelineStageFlagBits::eTransfer,
-                               vk::AccessFlagBits::eTransferWrite,
-                               vk::ImageLayout::eTransferDstOptimal);
+        blit_image->Transition(0, 1, 0, 1, VK_PIPELINE_STAGE_TRANSFER_BIT,
+                               VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

-        const vk::BufferImageCopy copy(image_offset, 0, 0,
-                                       {vk::ImageAspectFlagBits::eColor, 0, 0, 1}, {0, 0, 0},
-                                       {framebuffer.width, framebuffer.height, 1});
-        scheduler.Record([buffer_handle = *buffer, image = blit_image->GetHandle(),
-                          copy](auto cmdbuf, auto& dld) {
-            cmdbuf.copyBufferToImage(buffer_handle, image, vk::ImageLayout::eTransferDstOptimal,
-                                     {copy}, dld);
-        });
+        VkBufferImageCopy copy;
+        copy.bufferOffset = image_offset;
+        copy.bufferRowLength = 0;
+        copy.bufferImageHeight = 0;
+        copy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+        copy.imageSubresource.mipLevel = 0;
+        copy.imageSubresource.baseArrayLayer = 0;
+        copy.imageSubresource.layerCount = 1;
+        copy.imageOffset.x = 0;
+        copy.imageOffset.y = 0;
+        copy.imageOffset.z = 0;
+        copy.imageExtent.width = framebuffer.width;
+        copy.imageExtent.height = framebuffer.height;
+        copy.imageExtent.depth = 1;
+        scheduler.Record(
+            [buffer = *buffer, image = *blit_image->GetHandle(), copy](vk::CommandBuffer cmdbuf) {
+                cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, copy);
+            });
     }
     map.Release();

-    blit_image->Transition(0, 1, 0, 1, vk::PipelineStageFlagBits::eFragmentShader,
-                           vk::AccessFlagBits::eShaderRead,
-                           vk::ImageLayout::eShaderReadOnlyOptimal);
+    blit_image->Transition(0, 1, 0, 1, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+                           VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

     scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index],
                       descriptor_set = descriptor_sets[image_index], buffer = *buffer,
                       size = swapchain.GetSize(), pipeline = *pipeline,
-                      layout = *pipeline_layout](auto cmdbuf, auto& dld) {
-        const vk::ClearValue clear_color{std::array{0.0f, 0.0f, 0.0f, 1.0f}};
-        const vk::RenderPassBeginInfo renderpass_bi(renderpass, framebuffer, {{0, 0}, size}, 1,
-                                                    &clear_color);
-
-        cmdbuf.beginRenderPass(renderpass_bi, vk::SubpassContents::eInline, dld);
-        cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline, dld);
-        cmdbuf.setViewport(
-            0,
-            {{0.0f, 0.0f, static_cast<f32>(size.width), static_cast<f32>(size.height), 0.0f, 1.0f}},
-            dld);
-        cmdbuf.setScissor(0, {{{0, 0}, size}}, dld);
-
-        cmdbuf.bindVertexBuffers(0, {buffer}, {offsetof(BufferData, vertices)}, dld);
-        cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, layout, 0, {descriptor_set}, {},
-                                  dld);
-        cmdbuf.draw(4, 1, 0, 0, dld);
-        cmdbuf.endRenderPass(dld);
+                      layout = *pipeline_layout](vk::CommandBuffer cmdbuf) {
+        VkClearValue clear_color;
+        clear_color.color.float32[0] = 0.0f;
+        clear_color.color.float32[1] = 0.0f;
+        clear_color.color.float32[2] = 0.0f;
+        clear_color.color.float32[3] = 0.0f;
+
+        VkRenderPassBeginInfo renderpass_bi;
+        renderpass_bi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+        renderpass_bi.pNext = nullptr;
+        renderpass_bi.renderPass = renderpass;
+        renderpass_bi.framebuffer = framebuffer;
+        renderpass_bi.renderArea.offset.x = 0;
+        renderpass_bi.renderArea.offset.y = 0;
+        renderpass_bi.renderArea.extent = size;
+        renderpass_bi.clearValueCount = 1;
+        renderpass_bi.pClearValues = &clear_color;
+
+        VkViewport viewport;
+        viewport.x = 0.0f;
+        viewport.y = 0.0f;
+        viewport.width = static_cast<float>(size.width);
+        viewport.height = static_cast<float>(size.height);
+        viewport.minDepth = 0.0f;
+        viewport.maxDepth = 1.0f;
+
+        VkRect2D scissor;
+        scissor.offset.x = 0;
+        scissor.offset.y = 0;
+        scissor.extent = size;
+
+        cmdbuf.BeginRenderPass(renderpass_bi, VK_SUBPASS_CONTENTS_INLINE);
+        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+        cmdbuf.SetViewport(0, viewport);
+        cmdbuf.SetScissor(0, scissor);
+        cmdbuf.BindVertexBuffer(0, buffer, offsetof(BufferData, vertices));
+        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set, {});
+        cmdbuf.Draw(4, 1, 0, 0);
+        cmdbuf.EndRenderPass();
     });

     return {scheduler.GetFence(), *semaphores[image_index]};
@@ -334,165 +373,295 @@ void VKBlitScreen::CreateShaders() {
 }

 void VKBlitScreen::CreateSemaphores() {
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-
     semaphores.resize(image_count);
-    for (std::size_t i = 0; i < image_count; ++i) {
-        semaphores[i] = dev.createSemaphoreUnique({}, nullptr, dld);
-    }
+    std::generate(semaphores.begin(), semaphores.end(),
+                  [this] { return device.GetLogical().CreateSemaphore(); });
 }
 void VKBlitScreen::CreateDescriptorPool() {
-    const std::array<vk::DescriptorPoolSize, 2> pool_sizes{
-        vk::DescriptorPoolSize{vk::DescriptorType::eUniformBuffer, static_cast<u32>(image_count)},
-        vk::DescriptorPoolSize{vk::DescriptorType::eCombinedImageSampler,
-                               static_cast<u32>(image_count)}};
-    const vk::DescriptorPoolCreateInfo pool_ci(
-        {}, static_cast<u32>(image_count), static_cast<u32>(pool_sizes.size()), pool_sizes.data());
-    const auto dev = device.GetLogical();
-    descriptor_pool = dev.createDescriptorPoolUnique(pool_ci, nullptr, device.GetDispatchLoader());
+    std::array<VkDescriptorPoolSize, 2> pool_sizes;
+    pool_sizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+    pool_sizes[0].descriptorCount = static_cast<u32>(image_count);
+    pool_sizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+    pool_sizes[1].descriptorCount = static_cast<u32>(image_count);
+
+    VkDescriptorPoolCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
+    ci.maxSets = static_cast<u32>(image_count);
+    ci.poolSizeCount = static_cast<u32>(pool_sizes.size());
+    ci.pPoolSizes = pool_sizes.data();
+    descriptor_pool = device.GetLogical().CreateDescriptorPool(ci);
 }
 void VKBlitScreen::CreateRenderPass() {
-    const vk::AttachmentDescription color_attachment(
-        {}, swapchain.GetImageFormat(), vk::SampleCountFlagBits::e1, vk::AttachmentLoadOp::eClear,
-        vk::AttachmentStoreOp::eStore, vk::AttachmentLoadOp::eDontCare,
-        vk::AttachmentStoreOp::eDontCare, vk::ImageLayout::eUndefined,
-        vk::ImageLayout::ePresentSrcKHR);
+    VkAttachmentDescription color_attachment;
+    color_attachment.flags = 0;
+    color_attachment.format = swapchain.GetImageFormat();
+    color_attachment.samples = VK_SAMPLE_COUNT_1_BIT;
+    color_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+    color_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+    color_attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+    color_attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+    color_attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+    color_attachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

-    const vk::AttachmentReference color_attachment_ref(0, vk::ImageLayout::eColorAttachmentOptimal);
+    VkAttachmentReference color_attachment_ref;
+    color_attachment_ref.attachment = 0;
+    color_attachment_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

-    const vk::SubpassDescription subpass_description({}, vk::PipelineBindPoint::eGraphics, 0,
-                                                     nullptr, 1, &color_attachment_ref, nullptr,
-                                                     nullptr, 0, nullptr);
+    VkSubpassDescription subpass_description;
+    subpass_description.flags = 0;
+    subpass_description.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+    subpass_description.inputAttachmentCount = 0;
+    subpass_description.pInputAttachments = nullptr;
+    subpass_description.colorAttachmentCount = 1;
+    subpass_description.pColorAttachments = &color_attachment_ref;
+    subpass_description.pResolveAttachments = nullptr;
+    subpass_description.pDepthStencilAttachment = nullptr;
+    subpass_description.preserveAttachmentCount = 0;
+    subpass_description.pPreserveAttachments = nullptr;

-    const vk::SubpassDependency dependency(
-        VK_SUBPASS_EXTERNAL, 0, vk::PipelineStageFlagBits::eColorAttachmentOutput,
-        vk::PipelineStageFlagBits::eColorAttachmentOutput, {},
-        vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite, {});
+    VkSubpassDependency dependency;
+    dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
+    dependency.dstSubpass = 0;
+    dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+    dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+    dependency.srcAccessMask = 0;
+    dependency.dstAccessMask =
+        VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+    dependency.dependencyFlags = 0;

-    const vk::RenderPassCreateInfo renderpass_ci({}, 1, &color_attachment, 1, &subpass_description,
-                                                 1, &dependency);
+    VkRenderPassCreateInfo renderpass_ci;
+    renderpass_ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+    renderpass_ci.pNext = nullptr;
+    renderpass_ci.flags = 0;
+    renderpass_ci.attachmentCount = 1;
+    renderpass_ci.pAttachments = &color_attachment;
+    renderpass_ci.subpassCount = 1;
+    renderpass_ci.pSubpasses = &subpass_description;
+    renderpass_ci.dependencyCount = 1;
+    renderpass_ci.pDependencies = &dependency;

-    const auto dev = device.GetLogical();
-    renderpass = dev.createRenderPassUnique(renderpass_ci, nullptr, device.GetDispatchLoader());
+    renderpass = device.GetLogical().CreateRenderPass(renderpass_ci);
 }
 void VKBlitScreen::CreateDescriptorSetLayout() {
-    const std::array<vk::DescriptorSetLayoutBinding, 2> layout_bindings{
-        vk::DescriptorSetLayoutBinding(0, vk::DescriptorType::eUniformBuffer, 1,
-                                       vk::ShaderStageFlagBits::eVertex, nullptr),
-        vk::DescriptorSetLayoutBinding(1, vk::DescriptorType::eCombinedImageSampler, 1,
-                                       vk::ShaderStageFlagBits::eFragment, nullptr)};
-    const vk::DescriptorSetLayoutCreateInfo descriptor_layout_ci(
-        {}, static_cast<u32>(layout_bindings.size()), layout_bindings.data());
+    std::array<VkDescriptorSetLayoutBinding, 2> layout_bindings;
+    layout_bindings[0].binding = 0;
+    layout_bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+    layout_bindings[0].descriptorCount = 1;
+    layout_bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
+    layout_bindings[0].pImmutableSamplers = nullptr;
+    layout_bindings[1].binding = 1;
+    layout_bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+    layout_bindings[1].descriptorCount = 1;
+    layout_bindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
+    layout_bindings[1].pImmutableSamplers = nullptr;

-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    descriptor_set_layout = dev.createDescriptorSetLayoutUnique(descriptor_layout_ci, nullptr, dld);
+    VkDescriptorSetLayoutCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.bindingCount = static_cast<u32>(layout_bindings.size());
+    ci.pBindings = layout_bindings.data();
+    descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout(ci);
 }
 void VKBlitScreen::CreateDescriptorSets() {
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-
-    descriptor_sets.resize(image_count);
-    for (std::size_t i = 0; i < image_count; ++i) {
-        const vk::DescriptorSetLayout layout = *descriptor_set_layout;
-        const vk::DescriptorSetAllocateInfo descriptor_set_ai(*descriptor_pool, 1, &layout);
-        const vk::Result result =
-            dev.allocateDescriptorSets(&descriptor_set_ai, &descriptor_sets[i], dld);
-        ASSERT(result == vk::Result::eSuccess);
-    }
+    const std::vector layouts(image_count, *descriptor_set_layout);
+
+    VkDescriptorSetAllocateInfo ai;
+    ai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+    ai.pNext = nullptr;
+    ai.descriptorPool = *descriptor_pool;
+    ai.descriptorSetCount = static_cast<u32>(image_count);
+    ai.pSetLayouts = layouts.data();
+    descriptor_sets = descriptor_pool.Allocate(ai);
 }
 void VKBlitScreen::CreatePipelineLayout() {
-    const vk::PipelineLayoutCreateInfo pipeline_layout_ci({}, 1, &descriptor_set_layout.get(), 0,
-                                                          nullptr);
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    pipeline_layout = dev.createPipelineLayoutUnique(pipeline_layout_ci, nullptr, dld);
+    VkPipelineLayoutCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.setLayoutCount = 1;
+    ci.pSetLayouts = descriptor_set_layout.address();
+    ci.pushConstantRangeCount = 0;
+    ci.pPushConstantRanges = nullptr;
+    pipeline_layout = device.GetLogical().CreatePipelineLayout(ci);
 }
 void VKBlitScreen::CreateGraphicsPipeline() {
-    const std::array shader_stages = {
-        vk::PipelineShaderStageCreateInfo({}, vk::ShaderStageFlagBits::eVertex, *vertex_shader,
-                                          "main", nullptr),
-        vk::PipelineShaderStageCreateInfo({}, vk::ShaderStageFlagBits::eFragment, *fragment_shader,
-                                          "main", nullptr)};
+    std::array<VkPipelineShaderStageCreateInfo, 2> shader_stages;
+    shader_stages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+    shader_stages[0].pNext = nullptr;
+    shader_stages[0].flags = 0;
+    shader_stages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
+    shader_stages[0].module = *vertex_shader;
+    shader_stages[0].pName = "main";
+    shader_stages[0].pSpecializationInfo = nullptr;
+    shader_stages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+    shader_stages[1].pNext = nullptr;
+    shader_stages[1].flags = 0;
+    shader_stages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+    shader_stages[1].module = *fragment_shader;
+    shader_stages[1].pName = "main";
+    shader_stages[1].pSpecializationInfo = nullptr;

     const auto vertex_binding_description = ScreenRectVertex::GetDescription();
     const auto vertex_attrs_description = ScreenRectVertex::GetAttributes();

-    const vk::PipelineVertexInputStateCreateInfo vertex_input(
-        {}, 1, &vertex_binding_description, static_cast<u32>(vertex_attrs_description.size()),
-        vertex_attrs_description.data());
+    VkPipelineVertexInputStateCreateInfo vertex_input_ci;
+    vertex_input_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+    vertex_input_ci.pNext = nullptr;
+    vertex_input_ci.flags = 0;
+    vertex_input_ci.vertexBindingDescriptionCount = 1;
+    vertex_input_ci.pVertexBindingDescriptions = &vertex_binding_description;
+    vertex_input_ci.vertexAttributeDescriptionCount = u32{vertex_attrs_description.size()};
+    vertex_input_ci.pVertexAttributeDescriptions = vertex_attrs_description.data();

-    const vk::PipelineInputAssemblyStateCreateInfo input_assembly(
-        {}, vk::PrimitiveTopology::eTriangleStrip, false);
+    VkPipelineInputAssemblyStateCreateInfo input_assembly_ci;
+    input_assembly_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+    input_assembly_ci.pNext = nullptr;
+    input_assembly_ci.flags = 0;
+    input_assembly_ci.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
+    input_assembly_ci.primitiveRestartEnable = VK_FALSE;

-    // Set a dummy viewport, it's going to be replaced by dynamic states.
-    const vk::Viewport viewport(0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f);
-    const vk::Rect2D scissor({0, 0}, {1, 1});
-
-    const vk::PipelineViewportStateCreateInfo viewport_state({}, 1, &viewport, 1, &scissor);
+    VkPipelineViewportStateCreateInfo viewport_state_ci;
+    viewport_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+    viewport_state_ci.pNext = nullptr;
+    viewport_state_ci.flags = 0;
+    viewport_state_ci.viewportCount = 1;
+    viewport_state_ci.scissorCount = 1;

-    const vk::PipelineRasterizationStateCreateInfo rasterizer(
-        {}, false, false, vk::PolygonMode::eFill, vk::CullModeFlagBits::eNone,
-        vk::FrontFace::eClockwise, false, 0.0f, 0.0f, 0.0f, 1.0f);
+    VkPipelineRasterizationStateCreateInfo rasterization_ci;
+    rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+    rasterization_ci.pNext = nullptr;
+    rasterization_ci.flags = 0;
+    rasterization_ci.depthClampEnable = VK_FALSE;
+    rasterization_ci.rasterizerDiscardEnable = VK_FALSE;
+    rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL;
+    rasterization_ci.cullMode = VK_CULL_MODE_NONE;
+    rasterization_ci.frontFace = VK_FRONT_FACE_CLOCKWISE;
+    rasterization_ci.depthBiasEnable = VK_FALSE;
+    rasterization_ci.depthBiasConstantFactor = 0.0f;
+    rasterization_ci.depthBiasClamp = 0.0f;
+    rasterization_ci.depthBiasSlopeFactor = 0.0f;
+    rasterization_ci.lineWidth = 1.0f;

-    const vk::PipelineMultisampleStateCreateInfo multisampling({}, vk::SampleCountFlagBits::e1,
-                                                               false, 0.0f, nullptr, false, false);
+    VkPipelineMultisampleStateCreateInfo multisampling_ci;
+    multisampling_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+    multisampling_ci.pNext = nullptr;
+    multisampling_ci.flags = 0;
+    multisampling_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
+    multisampling_ci.sampleShadingEnable = VK_FALSE;
+    multisampling_ci.minSampleShading = 0.0f;
+    multisampling_ci.pSampleMask = nullptr;
+    multisampling_ci.alphaToCoverageEnable = VK_FALSE;
+    multisampling_ci.alphaToOneEnable = VK_FALSE;

-    const vk::PipelineColorBlendAttachmentState color_blend_attachment(
-        false, vk::BlendFactor::eZero, vk::BlendFactor::eZero, vk::BlendOp::eAdd,
-        vk::BlendFactor::eZero, vk::BlendFactor::eZero, vk::BlendOp::eAdd,
-        vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG |
-            vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA);
+    VkPipelineColorBlendAttachmentState color_blend_attachment;
+    color_blend_attachment.blendEnable = VK_FALSE;
+    color_blend_attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+    color_blend_attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+    color_blend_attachment.colorBlendOp = VK_BLEND_OP_ADD;
+    color_blend_attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+    color_blend_attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+    color_blend_attachment.alphaBlendOp = VK_BLEND_OP_ADD;
+    color_blend_attachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
+                                            VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;

-    const vk::PipelineColorBlendStateCreateInfo color_blending(
-        {}, false, vk::LogicOp::eCopy, 1, &color_blend_attachment, {0.0f, 0.0f, 0.0f, 0.0f});
+    VkPipelineColorBlendStateCreateInfo color_blend_ci;
+    color_blend_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+    color_blend_ci.flags = 0;
+    color_blend_ci.pNext = nullptr;
+    color_blend_ci.logicOpEnable = VK_FALSE;
+    color_blend_ci.logicOp = VK_LOGIC_OP_COPY;
+    color_blend_ci.attachmentCount = 1;
+    color_blend_ci.pAttachments = &color_blend_attachment;
+    color_blend_ci.blendConstants[0] = 0.0f;
+    color_blend_ci.blendConstants[1] = 0.0f;
+    color_blend_ci.blendConstants[2] = 0.0f;
+    color_blend_ci.blendConstants[3] = 0.0f;

-    const std::array<vk::DynamicState, 2> dynamic_states = {vk::DynamicState::eViewport,
-                                                            vk::DynamicState::eScissor};
+    static constexpr std::array dynamic_states = {VK_DYNAMIC_STATE_VIEWPORT,
+                                                  VK_DYNAMIC_STATE_SCISSOR};

-    const vk::PipelineDynamicStateCreateInfo dynamic_state(
-        {}, static_cast<u32>(dynamic_states.size()), dynamic_states.data());
+    VkPipelineDynamicStateCreateInfo dynamic_state_ci;
+    dynamic_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+    dynamic_state_ci.pNext = nullptr;
+    dynamic_state_ci.flags = 0;
+    dynamic_state_ci.dynamicStateCount = static_cast<u32>(dynamic_states.size());
+    dynamic_state_ci.pDynamicStates = dynamic_states.data();

-    const vk::GraphicsPipelineCreateInfo pipeline_ci(
-        {}, static_cast<u32>(shader_stages.size()), shader_stages.data(), &vertex_input,
-        &input_assembly, nullptr, &viewport_state, &rasterizer, &multisampling, nullptr,
-        &color_blending, &dynamic_state, *pipeline_layout, *renderpass, 0, nullptr, 0);
-
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    pipeline = dev.createGraphicsPipelineUnique({}, pipeline_ci, nullptr, dld);
+    VkGraphicsPipelineCreateInfo pipeline_ci;
+    pipeline_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+    pipeline_ci.pNext = nullptr;
+    pipeline_ci.flags = 0;
+    pipeline_ci.stageCount = static_cast<u32>(shader_stages.size());
+    pipeline_ci.pStages = shader_stages.data();
+    pipeline_ci.pVertexInputState = &vertex_input_ci;
+    pipeline_ci.pInputAssemblyState = &input_assembly_ci;
+    pipeline_ci.pTessellationState = nullptr;
+    pipeline_ci.pViewportState = &viewport_state_ci;
+    pipeline_ci.pRasterizationState = &rasterization_ci;
+    pipeline_ci.pMultisampleState = &multisampling_ci;
+    pipeline_ci.pDepthStencilState = nullptr;
+    pipeline_ci.pColorBlendState = &color_blend_ci;
+    pipeline_ci.pDynamicState = &dynamic_state_ci;
+    pipeline_ci.layout = *pipeline_layout;
+    pipeline_ci.renderPass = *renderpass;
+    pipeline_ci.subpass = 0;
+    pipeline_ci.basePipelineHandle = 0;
+    pipeline_ci.basePipelineIndex = 0;
+
+    pipeline = device.GetLogical().CreateGraphicsPipeline(pipeline_ci);
 }
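A note on the viewport state above: the new code sets viewportCount and scissorCount but leaves pViewports and pScissors unset. That is valid only because VK_DYNAMIC_STATE_VIEWPORT and VK_DYNAMIC_STATE_SCISSOR are listed in the dynamic state, which tells the driver to ignore those pointers; the actual rectangles come from vkCmdSetViewport/vkCmdSetScissor at record time. A minimal standalone sketch of the pattern:

    #include <vulkan/vulkan.h>

    VkPipelineViewportStateCreateInfo MakeDynamicViewportState() {
        // The pointers may stay null because the matching states are dynamic and
        // vkCmdSetViewport / vkCmdSetScissor supply the rectangles at record time.
        VkPipelineViewportStateCreateInfo state{};
        state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
        state.viewportCount = 1;
        state.scissorCount = 1;
        return state;
    }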
 void VKBlitScreen::CreateSampler() {
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    const vk::SamplerCreateInfo sampler_ci(
-        {}, vk::Filter::eLinear, vk::Filter::eLinear, vk::SamplerMipmapMode::eLinear,
-        vk::SamplerAddressMode::eClampToBorder, vk::SamplerAddressMode::eClampToBorder,
-        vk::SamplerAddressMode::eClampToBorder, 0.0f, false, 0.0f, false, vk::CompareOp::eNever,
-        0.0f, 0.0f, vk::BorderColor::eFloatOpaqueBlack, false);
-    sampler = dev.createSamplerUnique(sampler_ci, nullptr, dld);
+    VkSamplerCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.magFilter = VK_FILTER_LINEAR;
+    ci.minFilter = VK_FILTER_NEAREST;
+    ci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
+    ci.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
+    ci.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
+    ci.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
+    ci.mipLodBias = 0.0f;
+    ci.anisotropyEnable = VK_FALSE;
+    ci.maxAnisotropy = 0.0f;
+    ci.compareEnable = VK_FALSE;
+    ci.compareOp = VK_COMPARE_OP_NEVER;
+    ci.minLod = 0.0f;
+    ci.maxLod = 0.0f;
+    ci.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
+    ci.unnormalizedCoordinates = VK_FALSE;
+    sampler = device.GetLogical().CreateSampler(ci);
 }
 void VKBlitScreen::CreateFramebuffers() {
-    const vk::Extent2D size{swapchain.GetSize()};
+    const VkExtent2D size{swapchain.GetSize()};
+    framebuffers.clear();
     framebuffers.resize(image_count);

-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
+    VkFramebufferCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.renderPass = *renderpass;
+    ci.attachmentCount = 1;
+    ci.width = size.width;
+    ci.height = size.height;
+    ci.layers = 1;

     for (std::size_t i = 0; i < image_count; ++i) {
-        const vk::ImageView image_view{swapchain.GetImageViewIndex(i)};
-        const vk::FramebufferCreateInfo framebuffer_ci({}, *renderpass, 1, &image_view, size.width,
-                                                       size.height, 1);
-        framebuffers[i] = dev.createFramebufferUnique(framebuffer_ci, nullptr, dld);
+        const VkImageView image_view{swapchain.GetImageViewIndex(i)};
+        ci.pAttachments = &image_view;
+        framebuffers[i] = device.GetLogical().CreateFramebuffer(ci);
     }
 }
@@ -507,54 +676,86 @@ void VKBlitScreen::ReleaseRawImages() {
 }

 void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer) {
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-
-    const vk::BufferCreateInfo buffer_ci({}, CalculateBufferSize(framebuffer),
-                                         vk::BufferUsageFlagBits::eTransferSrc |
-                                             vk::BufferUsageFlagBits::eVertexBuffer |
-                                             vk::BufferUsageFlagBits::eUniformBuffer,
-                                         vk::SharingMode::eExclusive, 0, nullptr);
-    buffer = dev.createBufferUnique(buffer_ci, nullptr, dld);
-    buffer_commit = memory_manager.Commit(*buffer, true);
+    VkBufferCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.size = CalculateBufferSize(framebuffer);
+    ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
+               VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    ci.queueFamilyIndexCount = 0;
+    ci.pQueueFamilyIndices = nullptr;
+
+    buffer = device.GetLogical().CreateBuffer(ci);
+    buffer_commit = memory_manager.Commit(buffer, true);
 }
 void VKBlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer) {
     raw_images.resize(image_count);
     raw_buffer_commits.resize(image_count);

-    const auto format = GetFormat(framebuffer);
-    for (std::size_t i = 0; i < image_count; ++i) {
-        const vk::ImageCreateInfo image_ci(
-            {}, vk::ImageType::e2D, format, {framebuffer.width, framebuffer.height, 1}, 1, 1,
-            vk::SampleCountFlagBits::e1, vk::ImageTiling::eOptimal,
-            vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled,
-            vk::SharingMode::eExclusive, 0, nullptr, vk::ImageLayout::eUndefined);
-
-        raw_images[i] =
-            std::make_unique<VKImage>(device, scheduler, image_ci, vk::ImageAspectFlagBits::eColor);
+    VkImageCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.imageType = VK_IMAGE_TYPE_2D;
+    ci.format = GetFormat(framebuffer);
+    ci.extent.width = framebuffer.width;
+    ci.extent.height = framebuffer.height;
+    ci.extent.depth = 1;
+    ci.mipLevels = 1;
+    ci.arrayLayers = 1;
+    ci.samples = VK_SAMPLE_COUNT_1_BIT;
+    ci.tiling = VK_IMAGE_TILING_LINEAR;
+    ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    ci.queueFamilyIndexCount = 0;
+    ci.pQueueFamilyIndices = nullptr;
+    ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+    for (std::size_t i = 0; i < image_count; ++i) {
+        raw_images[i] = std::make_unique<VKImage>(device, scheduler, ci, VK_IMAGE_ASPECT_COLOR_BIT);
         raw_buffer_commits[i] = memory_manager.Commit(raw_images[i]->GetHandle(), false);
     }
 }
-void VKBlitScreen::UpdateDescriptorSet(std::size_t image_index, vk::ImageView image_view) const {
-    const vk::DescriptorSet descriptor_set = descriptor_sets[image_index];
-
-    const vk::DescriptorBufferInfo buffer_info(*buffer, offsetof(BufferData, uniform),
-                                               sizeof(BufferData::uniform));
-    const vk::WriteDescriptorSet ubo_write(descriptor_set, 0, 0, 1,
-                                           vk::DescriptorType::eUniformBuffer, nullptr,
-                                           &buffer_info, nullptr);
-
-    const vk::DescriptorImageInfo image_info(*sampler, image_view,
-                                             vk::ImageLayout::eShaderReadOnlyOptimal);
-    const vk::WriteDescriptorSet sampler_write(descriptor_set, 1, 0, 1,
-                                               vk::DescriptorType::eCombinedImageSampler,
-                                               &image_info, nullptr, nullptr);
-
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    dev.updateDescriptorSets({ubo_write, sampler_write}, {}, dld);
+void VKBlitScreen::UpdateDescriptorSet(std::size_t image_index, VkImageView image_view) const {
+    VkDescriptorBufferInfo buffer_info;
+    buffer_info.buffer = *buffer;
+    buffer_info.offset = offsetof(BufferData, uniform);
+    buffer_info.range = sizeof(BufferData::uniform);
+
+    VkWriteDescriptorSet ubo_write;
+    ubo_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+    ubo_write.pNext = nullptr;
+    ubo_write.dstSet = descriptor_sets[image_index];
+    ubo_write.dstBinding = 0;
+    ubo_write.dstArrayElement = 0;
+    ubo_write.descriptorCount = 1;
+    ubo_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+    ubo_write.pImageInfo = nullptr;
+    ubo_write.pBufferInfo = &buffer_info;
+    ubo_write.pTexelBufferView = nullptr;
+
+    VkDescriptorImageInfo image_info;
+    image_info.sampler = *sampler;
+    image_info.imageView = image_view;
+    image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+    VkWriteDescriptorSet sampler_write;
+    sampler_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+    sampler_write.pNext = nullptr;
+    sampler_write.dstSet = descriptor_sets[image_index];
+    sampler_write.dstBinding = 1;
+    sampler_write.dstArrayElement = 0;
+    sampler_write.descriptorCount = 1;
+    sampler_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+    sampler_write.pImageInfo = &image_info;
+    sampler_write.pBufferInfo = nullptr;
+    sampler_write.pTexelBufferView = nullptr;
+
+    device.GetLogical().UpdateDescriptorSets(std::array{ubo_write, sampler_write}, {});
 }

 void VKBlitScreen::SetUniformData(BufferData& data,
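The rewritten functions above assign every member of each Vk*CreateInfo explicitly, because C structs have no constructors and reading an unset field is undefined behavior. An alternative style, shown here only for contrast and not used by this commit, is value-initialization, which zeroes pNext, flags, and every field not assigned afterwards:

    #include <vulkan/vulkan.h>

    VkSamplerCreateInfo MakeDefaultSamplerCI() {
        VkSamplerCreateInfo ci{}; // zeroes all fields not set below
        ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
        ci.magFilter = VK_FILTER_LINEAR;
        ci.minFilter = VK_FILTER_LINEAR;
        ci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
        ci.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
        ci.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
        ci.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
        ci.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
        return ci;
    }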

View File

@@ -8,9 +8,9 @@
 #include <memory>
 #include <tuple>

-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_memory_manager.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Core {
 class System;
@@ -49,7 +49,7 @@ public:
     void Recreate();

-    std::tuple<VKFence&, vk::Semaphore> Draw(const Tegra::FramebufferConfig& framebuffer,
-                                             bool use_accelerated);
+    std::tuple<VKFence&, VkSemaphore> Draw(const Tegra::FramebufferConfig& framebuffer,
+                                           bool use_accelerated);

 private:
@@ -74,7 +74,7 @@ private:
     void CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer);
     void CreateRawImages(const Tegra::FramebufferConfig& framebuffer);

-    void UpdateDescriptorSet(std::size_t image_index, vk::ImageView image_view) const;
+    void UpdateDescriptorSet(std::size_t image_index, VkImageView image_view) const;
     void SetUniformData(BufferData& data, const Tegra::FramebufferConfig& framebuffer) const;
     void SetVertexData(BufferData& data, const Tegra::FramebufferConfig& framebuffer) const;
@@ -93,23 +93,23 @@ private:
     const std::size_t image_count;
     const VKScreenInfo& screen_info;

-    UniqueShaderModule vertex_shader;
-    UniqueShaderModule fragment_shader;
-    UniqueDescriptorPool descriptor_pool;
-    UniqueDescriptorSetLayout descriptor_set_layout;
-    UniquePipelineLayout pipeline_layout;
-    UniquePipeline pipeline;
-    UniqueRenderPass renderpass;
-    std::vector<UniqueFramebuffer> framebuffers;
-    std::vector<vk::DescriptorSet> descriptor_sets;
-    UniqueSampler sampler;
+    vk::ShaderModule vertex_shader;
+    vk::ShaderModule fragment_shader;
+    vk::DescriptorPool descriptor_pool;
+    vk::DescriptorSetLayout descriptor_set_layout;
+    vk::PipelineLayout pipeline_layout;
+    vk::Pipeline pipeline;
+    vk::RenderPass renderpass;
+    std::vector<vk::Framebuffer> framebuffers;
+    vk::DescriptorSets descriptor_sets;
+    vk::Sampler sampler;

-    UniqueBuffer buffer;
+    vk::Buffer buffer;
     VKMemoryCommit buffer_commit;

     std::vector<std::unique_ptr<VKFenceWatch>> watches;

-    std::vector<UniqueSemaphore> semaphores;
+    std::vector<vk::Semaphore> semaphores;
     std::vector<std::unique_ptr<VKImage>> raw_images;
     std::vector<VKMemoryCommit> raw_buffer_commits;

     u32 raw_width = 0;
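A side note on the member order above: C++ destroys members in reverse declaration order, so RAII handles allocated out of another object (descriptor_sets out of descriptor_pool) are declared after their parent and therefore die first. A toy illustration with hypothetical types:

    struct Pool {
        ~Pool() { /* frees the pool and everything still allocated from it */ }
    };
    struct Sets {
        ~Sets() { /* returns sets to the pool; must run while the pool is alive */ }
    };
    struct Owner {
        Pool pool; // declared first, destroyed last
        Sets sets; // declared last, destroyed first
    };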

View File

@@ -11,32 +11,31 @@
 #include "common/assert.h"
 #include "common/bit_util.h"
 #include "core/core.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_stream_buffer.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

 namespace {

-const auto BufferUsage =
-    vk::BufferUsageFlagBits::eVertexBuffer | vk::BufferUsageFlagBits::eIndexBuffer |
-    vk::BufferUsageFlagBits::eUniformBuffer | vk::BufferUsageFlagBits::eStorageBuffer;
+constexpr VkBufferUsageFlags BUFFER_USAGE =
+    VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
+    VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

-const auto UploadPipelineStage =
-    vk::PipelineStageFlagBits::eTransfer | vk::PipelineStageFlagBits::eVertexInput |
-    vk::PipelineStageFlagBits::eVertexShader | vk::PipelineStageFlagBits::eFragmentShader |
-    vk::PipelineStageFlagBits::eComputeShader;
+constexpr VkPipelineStageFlags UPLOAD_PIPELINE_STAGE =
+    VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
+    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+    VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;

-const auto UploadAccessBarriers =
-    vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eShaderRead |
-    vk::AccessFlagBits::eUniformRead | vk::AccessFlagBits::eVertexAttributeRead |
-    vk::AccessFlagBits::eIndexRead;
+constexpr VkAccessFlags UPLOAD_ACCESS_BARRIERS =
+    VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT |
+    VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDEX_READ_BIT;

-auto CreateStreamBuffer(const VKDevice& device, VKScheduler& scheduler) {
-    return std::make_unique<VKStreamBuffer>(device, scheduler, BufferUsage);
+std::unique_ptr<VKStreamBuffer> CreateStreamBuffer(const VKDevice& device, VKScheduler& scheduler) {
+    return std::make_unique<VKStreamBuffer>(device, scheduler, BUFFER_USAGE);
 }

 } // Anonymous namespace
@@ -44,15 +43,18 @@ auto CreateStreamBuffer(const VKDevice& device, VKScheduler& scheduler) {
 CachedBufferBlock::CachedBufferBlock(const VKDevice& device, VKMemoryManager& memory_manager,
                                      VAddr cpu_addr, std::size_t size)
     : VideoCommon::BufferBlock{cpu_addr, size} {
-    const vk::BufferCreateInfo buffer_ci({}, static_cast<vk::DeviceSize>(size),
-                                         BufferUsage | vk::BufferUsageFlagBits::eTransferSrc |
-                                             vk::BufferUsageFlagBits::eTransferDst,
-                                         vk::SharingMode::eExclusive, 0, nullptr);
+    VkBufferCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.size = static_cast<VkDeviceSize>(size);
+    ci.usage = BUFFER_USAGE | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    ci.queueFamilyIndexCount = 0;
+    ci.pQueueFamilyIndices = nullptr;

-    const auto& dld{device.GetDispatchLoader()};
-    const auto dev{device.GetLogical()};
-    buffer.handle = dev.createBufferUnique(buffer_ci, nullptr, dld);
-    buffer.commit = memory_manager.Commit(*buffer.handle, false);
+    buffer.handle = device.GetLogical().CreateBuffer(ci);
+    buffer.commit = memory_manager.Commit(buffer.handle, false);
 }

 CachedBufferBlock::~CachedBufferBlock() = default;
@@ -60,7 +62,7 @@ CachedBufferBlock::~CachedBufferBlock() = default;
 VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
                              const VKDevice& device, VKMemoryManager& memory_manager,
                              VKScheduler& scheduler, VKStagingBufferPool& staging_pool)
-    : VideoCommon::BufferCache<Buffer, vk::Buffer, VKStreamBuffer>{rasterizer, system,
+    : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer, system,
                                                                   CreateStreamBuffer(device,
                                                                                      scheduler)},
       device{device}, memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{
@ -72,18 +74,18 @@ Buffer VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
     return std::make_shared<CachedBufferBlock>(device, memory_manager, cpu_addr, size);
 }

-const vk::Buffer* VKBufferCache::ToHandle(const Buffer& buffer) {
+const VkBuffer* VKBufferCache::ToHandle(const Buffer& buffer) {
     return buffer->GetHandle();
 }

-const vk::Buffer* VKBufferCache::GetEmptyBuffer(std::size_t size) {
+const VkBuffer* VKBufferCache::GetEmptyBuffer(std::size_t size) {
     size = std::max(size, std::size_t(4));
     const auto& empty = staging_pool.GetUnusedBuffer(size, false);
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf, auto& dld) {
-        cmdbuf.fillBuffer(buffer, 0, size, 0, dld);
+    scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf) {
+        cmdbuf.FillBuffer(buffer, 0, size, 0);
     });
-    return &*empty.handle;
+    return empty.handle.address();
 }
 void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,

@@ -93,14 +95,21 @@ void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, st

     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset,
-                      size](auto cmdbuf, auto& dld) {
-        cmdbuf.copyBuffer(staging, buffer, {{0, offset, size}}, dld);
-        cmdbuf.pipelineBarrier(
-            vk::PipelineStageFlagBits::eTransfer, UploadPipelineStage, {}, {},
-            {vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferWrite, UploadAccessBarriers,
-                                     VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, buffer,
-                                     offset, size)},
-            {}, dld);
+                      size](vk::CommandBuffer cmdbuf) {
+        cmdbuf.CopyBuffer(staging, buffer, VkBufferCopy{0, offset, size});
+
+        VkBufferMemoryBarrier barrier;
+        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+        barrier.pNext = nullptr;
+        barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+        barrier.dstAccessMask = UPLOAD_ACCESS_BARRIERS;
+        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barrier.buffer = buffer;
+        barrier.offset = offset;
+        barrier.size = size;
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, UPLOAD_PIPELINE_STAGE, 0, {},
+                               barrier, {});
     });
 }
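The lambda above is the standard Vulkan upload idiom: copy from the staging buffer, then issue a buffer memory barrier so the transfer write is visible to every stage that may read the data afterwards. The same sequence against the raw API looks like the following sketch, with a single consumer stage and access mask standing in for UPLOAD_PIPELINE_STAGE and UPLOAD_ACCESS_BARRIERS:

    void UploadWithBarrier(VkCommandBuffer cmd, VkBuffer staging, VkBuffer dst,
                           VkDeviceSize offset, VkDeviceSize size) {
        const VkBufferCopy copy{0, offset, size}; // srcOffset, dstOffset, size
        vkCmdCopyBuffer(cmd, staging, dst, 1, &copy);

        VkBufferMemoryBarrier barrier{};
        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
        barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; // stand-in consumer access
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.buffer = dst;
        barrier.offset = offset;
        barrier.size = size;
        vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT,
                             VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 1, &barrier,
                             0, nullptr);
    }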
@@ -109,16 +118,23 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset,

     const auto& staging = staging_pool.GetUnusedBuffer(size, true);
     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset,
-                      size](auto cmdbuf, auto& dld) {
-        cmdbuf.pipelineBarrier(
-            vk::PipelineStageFlagBits::eVertexShader | vk::PipelineStageFlagBits::eFragmentShader |
-                vk::PipelineStageFlagBits::eComputeShader,
-            vk::PipelineStageFlagBits::eTransfer, {}, {},
-            {vk::BufferMemoryBarrier(vk::AccessFlagBits::eShaderWrite,
-                                     vk::AccessFlagBits::eTransferRead, VK_QUEUE_FAMILY_IGNORED,
-                                     VK_QUEUE_FAMILY_IGNORED, buffer, offset, size)},
-            {}, dld);
-        cmdbuf.copyBuffer(buffer, staging, {{offset, 0, size}}, dld);
+                      size](vk::CommandBuffer cmdbuf) {
+        VkBufferMemoryBarrier barrier;
+        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+        barrier.pNext = nullptr;
+        barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
+        barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barrier.buffer = buffer;
+        barrier.offset = offset;
+        barrier.size = size;
+
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
+                                   VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+                                   VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+                               VK_PIPELINE_STAGE_TRANSFER_BIT, 0, {}, barrier, {});
+        cmdbuf.CopyBuffer(buffer, staging, VkBufferCopy{offset, 0, size});
     });

     scheduler.Finish();
@@ -129,17 +145,30 @@ void VKBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t
                               std::size_t dst_offset, std::size_t size) {
     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([src_buffer = *src->GetHandle(), dst_buffer = *dst->GetHandle(), src_offset,
-                      dst_offset, size](auto cmdbuf, auto& dld) {
-        cmdbuf.copyBuffer(src_buffer, dst_buffer, {{src_offset, dst_offset, size}}, dld);
-        cmdbuf.pipelineBarrier(
-            vk::PipelineStageFlagBits::eTransfer, UploadPipelineStage, {}, {},
-            {vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferRead,
-                                     vk::AccessFlagBits::eShaderWrite, VK_QUEUE_FAMILY_IGNORED,
-                                     VK_QUEUE_FAMILY_IGNORED, src_buffer, src_offset, size),
-             vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferWrite, UploadAccessBarriers,
-                                     VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, dst_buffer,
-                                     dst_offset, size)},
-            {}, dld);
+                      dst_offset, size](vk::CommandBuffer cmdbuf) {
+        cmdbuf.CopyBuffer(src_buffer, dst_buffer, VkBufferCopy{src_offset, dst_offset, size});
+
+        std::array<VkBufferMemoryBarrier, 2> barriers;
+        barriers[0].sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+        barriers[0].pNext = nullptr;
+        barriers[0].srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+        barriers[0].dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
+        barriers[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barriers[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barriers[0].buffer = src_buffer;
+        barriers[0].offset = src_offset;
+        barriers[0].size = size;
+        barriers[1].sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+        barriers[1].pNext = nullptr;
+        barriers[1].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+        barriers[1].dstAccessMask = UPLOAD_ACCESS_BARRIERS;
+        barriers[1].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barriers[1].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barriers[1].buffer = dst_buffer;
+        barriers[1].offset = dst_offset;
+        barriers[1].size = size;
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, UPLOAD_PIPELINE_STAGE, 0, {},
+                               barriers, {});
     });
 }
View File: src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -11,11 +11,11 @@
 #include "common/common_types.h"
 #include "video_core/buffer_cache/buffer_cache.h"
 #include "video_core/rasterizer_cache.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_memory_manager.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
 #include "video_core/renderer_vulkan/vk_stream_buffer.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 namespace Core {
 class System;

@@ -33,8 +33,8 @@ public:
                        VAddr cpu_addr, std::size_t size);
     ~CachedBufferBlock();

-    const vk::Buffer* GetHandle() const {
-        return &*buffer.handle;
+    const VkBuffer* GetHandle() const {
+        return buffer.handle.address();
     }

 private:

@@ -43,21 +43,21 @@ private:

 using Buffer = std::shared_ptr<CachedBufferBlock>;
-class VKBufferCache final : public VideoCommon::BufferCache<Buffer, vk::Buffer, VKStreamBuffer> {
+class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> {
 public:
     explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
                            const VKDevice& device, VKMemoryManager& memory_manager,
                            VKScheduler& scheduler, VKStagingBufferPool& staging_pool);
     ~VKBufferCache();

-    const vk::Buffer* GetEmptyBuffer(std::size_t size) override;
+    const VkBuffer* GetEmptyBuffer(std::size_t size) override;

 protected:
     void WriteBarrier() override {}

     Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;

-    const vk::Buffer* ToHandle(const Buffer& buffer) override;
+    const VkBuffer* ToHandle(const Buffer& buffer) override;

     void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
                          const u8* data) override;
View File: src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -10,13 +10,13 @@
 #include "common/alignment.h"
 #include "common/assert.h"
 #include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_compute_pass.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
 #include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {
@@ -114,6 +114,35 @@ constexpr u8 quad_array[] = {
     0xf9, 0x00, 0x02, 0x00, 0x4c, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x4b, 0x00, 0x00, 0x00,
     0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};

+VkDescriptorSetLayoutBinding BuildQuadArrayPassDescriptorSetLayoutBinding() {
+    VkDescriptorSetLayoutBinding binding;
+    binding.binding = 0;
+    binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+    binding.descriptorCount = 1;
+    binding.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
+    binding.pImmutableSamplers = nullptr;
+    return binding;
+}
+
+VkDescriptorUpdateTemplateEntryKHR BuildQuadArrayPassDescriptorUpdateTemplateEntry() {
+    VkDescriptorUpdateTemplateEntryKHR entry;
+    entry.dstBinding = 0;
+    entry.dstArrayElement = 0;
+    entry.descriptorCount = 1;
+    entry.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+    entry.offset = 0;
+    entry.stride = sizeof(DescriptorUpdateEntry);
+    return entry;
+}
+
+VkPushConstantRange BuildQuadArrayPassPushConstantRange() {
+    VkPushConstantRange range;
+    range.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
+    range.offset = 0;
+    range.size = sizeof(u32);
+    return range;
+}
+
 // Uint8 SPIR-V module. Generated from the "shaders/" directory.
 constexpr u8 uint8_pass[] = {
     0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x07, 0x00, 0x08, 0x00, 0x2f, 0x00, 0x00, 0x00,

@@ -191,53 +220,111 @@ constexpr u8 uint8_pass[] = {
     0xf9, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00,
     0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};
+std::array<VkDescriptorSetLayoutBinding, 2> BuildUint8PassDescriptorSetBindings() {
+    std::array<VkDescriptorSetLayoutBinding, 2> bindings;
+    bindings[0].binding = 0;
+    bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+    bindings[0].descriptorCount = 1;
+    bindings[0].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
+    bindings[0].pImmutableSamplers = nullptr;
+    bindings[1].binding = 1;
+    bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+    bindings[1].descriptorCount = 1;
+    bindings[1].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
+    bindings[1].pImmutableSamplers = nullptr;
+    return bindings;
+}
+
+VkDescriptorUpdateTemplateEntryKHR BuildUint8PassDescriptorUpdateTemplateEntry() {
+    VkDescriptorUpdateTemplateEntryKHR entry;
+    entry.dstBinding = 0;
+    entry.dstArrayElement = 0;
+    entry.descriptorCount = 2;
+    entry.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+    entry.offset = 0;
+    entry.stride = sizeof(DescriptorUpdateEntry);
+    return entry;
+}
+
 } // Anonymous namespace
 VKComputePass::VKComputePass(const VKDevice& device, VKDescriptorPool& descriptor_pool,
-                             const std::vector<vk::DescriptorSetLayoutBinding>& bindings,
-                             const std::vector<vk::DescriptorUpdateTemplateEntry>& templates,
-                             const std::vector<vk::PushConstantRange> push_constants,
-                             std::size_t code_size, const u8* code) {
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-
-    const vk::DescriptorSetLayoutCreateInfo descriptor_layout_ci(
-        {}, static_cast<u32>(bindings.size()), bindings.data());
-    descriptor_set_layout = dev.createDescriptorSetLayoutUnique(descriptor_layout_ci, nullptr, dld);
-
-    const vk::PipelineLayoutCreateInfo pipeline_layout_ci({}, 1, &*descriptor_set_layout,
-                                                          static_cast<u32>(push_constants.size()),
-                                                          push_constants.data());
-    layout = dev.createPipelineLayoutUnique(pipeline_layout_ci, nullptr, dld);
+                             vk::Span<VkDescriptorSetLayoutBinding> bindings,
+                             vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
+                             vk::Span<VkPushConstantRange> push_constants, std::size_t code_size,
+                             const u8* code) {
+    VkDescriptorSetLayoutCreateInfo descriptor_layout_ci;
+    descriptor_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+    descriptor_layout_ci.pNext = nullptr;
+    descriptor_layout_ci.flags = 0;
+    descriptor_layout_ci.bindingCount = bindings.size();
+    descriptor_layout_ci.pBindings = bindings.data();
+    descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout(descriptor_layout_ci);
+
+    VkPipelineLayoutCreateInfo pipeline_layout_ci;
+    pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+    pipeline_layout_ci.pNext = nullptr;
+    pipeline_layout_ci.flags = 0;
+    pipeline_layout_ci.setLayoutCount = 1;
+    pipeline_layout_ci.pSetLayouts = descriptor_set_layout.address();
+    pipeline_layout_ci.pushConstantRangeCount = push_constants.size();
+    pipeline_layout_ci.pPushConstantRanges = push_constants.data();
+    layout = device.GetLogical().CreatePipelineLayout(pipeline_layout_ci);

     if (!templates.empty()) {
-        const vk::DescriptorUpdateTemplateCreateInfo template_ci(
-            {}, static_cast<u32>(templates.size()), templates.data(),
-            vk::DescriptorUpdateTemplateType::eDescriptorSet, *descriptor_set_layout,
-            vk::PipelineBindPoint::eGraphics, *layout, 0);
-        descriptor_template = dev.createDescriptorUpdateTemplateUnique(template_ci, nullptr, dld);
+        VkDescriptorUpdateTemplateCreateInfoKHR template_ci;
+        template_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
+        template_ci.pNext = nullptr;
+        template_ci.flags = 0;
+        template_ci.descriptorUpdateEntryCount = templates.size();
+        template_ci.pDescriptorUpdateEntries = templates.data();
+        template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
+        template_ci.descriptorSetLayout = *descriptor_set_layout;
+        template_ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+        template_ci.pipelineLayout = *layout;
+        template_ci.set = 0;
+        descriptor_template = device.GetLogical().CreateDescriptorUpdateTemplateKHR(template_ci);

         descriptor_allocator.emplace(descriptor_pool, *descriptor_set_layout);
     }

     auto code_copy = std::make_unique<u32[]>(code_size / sizeof(u32) + 1);
     std::memcpy(code_copy.get(), code, code_size);
-    const vk::ShaderModuleCreateInfo module_ci({}, code_size, code_copy.get());
-    module = dev.createShaderModuleUnique(module_ci, nullptr, dld);

-    const vk::PipelineShaderStageCreateInfo stage_ci({}, vk::ShaderStageFlagBits::eCompute, *module,
-                                                     "main", nullptr);
+    VkShaderModuleCreateInfo module_ci;
+    module_ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+    module_ci.pNext = nullptr;
+    module_ci.flags = 0;
+    module_ci.codeSize = code_size;
+    module_ci.pCode = code_copy.get();
+    module = device.GetLogical().CreateShaderModule(module_ci);

-    const vk::ComputePipelineCreateInfo pipeline_ci({}, stage_ci, *layout, nullptr, 0);
-    pipeline = dev.createComputePipelineUnique(nullptr, pipeline_ci, nullptr, dld);
+    VkComputePipelineCreateInfo pipeline_ci;
+    pipeline_ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
+    pipeline_ci.pNext = nullptr;
+    pipeline_ci.flags = 0;
+    pipeline_ci.layout = *layout;
+    pipeline_ci.basePipelineHandle = nullptr;
+    pipeline_ci.basePipelineIndex = 0;
+
+    VkPipelineShaderStageCreateInfo& stage_ci = pipeline_ci.stage;
+    stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+    stage_ci.pNext = nullptr;
+    stage_ci.flags = 0;
+    stage_ci.stage = VK_SHADER_STAGE_COMPUTE_BIT;
+    stage_ci.module = *module;
+    stage_ci.pName = "main";
+    stage_ci.pSpecializationInfo = nullptr;
+
+    pipeline = device.GetLogical().CreateComputePipeline(pipeline_ci);
 }
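The code_copy buffer exists because VkShaderModuleCreateInfo::pCode demands u32-aligned storage while the embedded SPIR-V blobs are u8 arrays; the extra word keeps a byte count that is not a multiple of four inside the allocation. As arithmetic: for code_size = 1498, 1498 / 4 + 1 = 375 words, i.e. 1500 bytes of storage for 1498 bytes of code. A hypothetical helper capturing the same intent:

    std::unique_ptr<u32[]> AlignSpirv(const u8* code, std::size_t code_size) {
        // Round the word count up so a trailing partial word still fits.
        auto words = std::make_unique<u32[]>(code_size / sizeof(u32) + 1);
        std::memcpy(words.get(), code, code_size);
        return words;
    }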
 VKComputePass::~VKComputePass() = default;

-vk::DescriptorSet VKComputePass::CommitDescriptorSet(
-    VKUpdateDescriptorQueue& update_descriptor_queue, VKFence& fence) {
+VkDescriptorSet VKComputePass::CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue,
+                                                   VKFence& fence) {
     if (!descriptor_template) {
-        return {};
+        return nullptr;
     }
     const auto set = descriptor_allocator->Commit(fence);
     update_descriptor_queue.Send(*descriptor_template, set);
@@ -248,25 +335,21 @@ QuadArrayPass::QuadArrayPass(const VKDevice& device, VKScheduler& scheduler,
                              VKDescriptorPool& descriptor_pool,
                              VKStagingBufferPool& staging_buffer_pool,
                              VKUpdateDescriptorQueue& update_descriptor_queue)
-    : VKComputePass(device, descriptor_pool,
-                    {vk::DescriptorSetLayoutBinding(0, vk::DescriptorType::eStorageBuffer, 1,
-                                                    vk::ShaderStageFlagBits::eCompute, nullptr)},
-                    {vk::DescriptorUpdateTemplateEntry(0, 0, 1, vk::DescriptorType::eStorageBuffer,
-                                                       0, sizeof(DescriptorUpdateEntry))},
-                    {vk::PushConstantRange(vk::ShaderStageFlagBits::eCompute, 0, sizeof(u32))},
-                    std::size(quad_array), quad_array),
+    : VKComputePass(device, descriptor_pool, BuildQuadArrayPassDescriptorSetLayoutBinding(),
+                    BuildQuadArrayPassDescriptorUpdateTemplateEntry(),
+                    BuildQuadArrayPassPushConstantRange(), std::size(quad_array), quad_array),
       scheduler{scheduler}, staging_buffer_pool{staging_buffer_pool},
       update_descriptor_queue{update_descriptor_queue} {}

 QuadArrayPass::~QuadArrayPass() = default;
-std::pair<const vk::Buffer&, vk::DeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
+std::pair<const VkBuffer*, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
     const u32 num_triangle_vertices = num_vertices * 6 / 4;
     const std::size_t staging_size = num_triangle_vertices * sizeof(u32);
     auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);

     update_descriptor_queue.Acquire();
-    update_descriptor_queue.AddBuffer(&*buffer.handle, 0, staging_size);
+    update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size);
     const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());

     scheduler.RequestOutsideRenderPassOperationContext();
@@ -274,66 +357,72 @@ std::pair<const vk::Buffer&, vk::DeviceSize> QuadArrayPass::Assemble(u32 num_ver

     ASSERT(num_vertices % 4 == 0);
     const u32 num_quads = num_vertices / 4;
     scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, num_quads,
-                      first, set](auto cmdbuf, auto& dld) {
+                      first, set](vk::CommandBuffer cmdbuf) {
         constexpr u32 dispatch_size = 1024;
-        cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline, dld);
-        cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, layout, 0, {set}, {}, dld);
-        cmdbuf.pushConstants(layout, vk::ShaderStageFlagBits::eCompute, 0, sizeof(first), &first,
-                             dld);
-        cmdbuf.dispatch(Common::AlignUp(num_quads, dispatch_size) / dispatch_size, 1, 1, dld);
+        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
+        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
+        cmdbuf.PushConstants(layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(first), &first);
+        cmdbuf.Dispatch(Common::AlignUp(num_quads, dispatch_size) / dispatch_size, 1, 1);

-        const vk::BufferMemoryBarrier barrier(
-            vk::AccessFlagBits::eShaderWrite, vk::AccessFlagBits::eVertexAttributeRead,
-            VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, buffer, 0,
-            static_cast<vk::DeviceSize>(num_quads) * 6 * sizeof(u32));
-        cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eComputeShader,
-                               vk::PipelineStageFlagBits::eVertexInput, {}, {}, {barrier}, {}, dld);
+        VkBufferMemoryBarrier barrier;
+        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+        barrier.pNext = nullptr;
+        barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
+        barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barrier.buffer = buffer;
+        barrier.offset = 0;
+        barrier.size = static_cast<VkDeviceSize>(num_quads) * 6 * sizeof(u32);
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {});
     });
-    return {*buffer.handle, 0};
+    return {buffer.handle.address(), 0};
 }
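Assemble turns a quad vertex range into a triangle index list on the GPU: every four quad vertices become six indices, so num_vertices = 8 yields num_triangle_vertices = 12. A CPU-side reference of the expansion, assuming the usual {0, 1, 2} {0, 2, 3} split (the authoritative version is the quad_array SPIR-V above):

    std::vector<u32> ExpandQuads(u32 num_vertices, u32 first) {
        std::vector<u32> indices;
        indices.reserve(num_vertices * 6 / 4);
        for (u32 quad = 0; quad < num_vertices / 4; ++quad) {
            const u32 base = first + quad * 4;
            for (const u32 offset : {0u, 1u, 2u, 0u, 2u, 3u}) {
                indices.push_back(base + offset); // two triangles per quad
            }
        }
        return indices;
    }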
 Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler,
                      VKDescriptorPool& descriptor_pool, VKStagingBufferPool& staging_buffer_pool,
                      VKUpdateDescriptorQueue& update_descriptor_queue)
-    : VKComputePass(device, descriptor_pool,
-                    {vk::DescriptorSetLayoutBinding(0, vk::DescriptorType::eStorageBuffer, 1,
-                                                    vk::ShaderStageFlagBits::eCompute, nullptr),
-                     vk::DescriptorSetLayoutBinding(1, vk::DescriptorType::eStorageBuffer, 1,
-                                                    vk::ShaderStageFlagBits::eCompute, nullptr)},
-                    {vk::DescriptorUpdateTemplateEntry(0, 0, 2, vk::DescriptorType::eStorageBuffer,
-                                                       0, sizeof(DescriptorUpdateEntry))},
-                    {}, std::size(uint8_pass), uint8_pass),
+    : VKComputePass(device, descriptor_pool, BuildUint8PassDescriptorSetBindings(),
+                    BuildUint8PassDescriptorUpdateTemplateEntry(), {}, std::size(uint8_pass),
+                    uint8_pass),
       scheduler{scheduler}, staging_buffer_pool{staging_buffer_pool},
       update_descriptor_queue{update_descriptor_queue} {}

 Uint8Pass::~Uint8Pass() = default;
-std::pair<const vk::Buffer*, u64> Uint8Pass::Assemble(u32 num_vertices, vk::Buffer src_buffer,
-                                                      u64 src_offset) {
+std::pair<const VkBuffer*, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
+                                                    u64 src_offset) {
     const auto staging_size = static_cast<u32>(num_vertices * sizeof(u16));
     auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);

     update_descriptor_queue.Acquire();
     update_descriptor_queue.AddBuffer(&src_buffer, src_offset, num_vertices);
-    update_descriptor_queue.AddBuffer(&*buffer.handle, 0, staging_size);
+    update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size);
     const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());

     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set,
-                      num_vertices](auto cmdbuf, auto& dld) {
+                      num_vertices](vk::CommandBuffer cmdbuf) {
         constexpr u32 dispatch_size = 1024;
-        cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline, dld);
-        cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, layout, 0, {set}, {}, dld);
-        cmdbuf.dispatch(Common::AlignUp(num_vertices, dispatch_size) / dispatch_size, 1, 1, dld);
+        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
+        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
+        cmdbuf.Dispatch(Common::AlignUp(num_vertices, dispatch_size) / dispatch_size, 1, 1);

-        const vk::BufferMemoryBarrier barrier(
-            vk::AccessFlagBits::eShaderWrite, vk::AccessFlagBits::eVertexAttributeRead,
-            VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, buffer, 0,
-            static_cast<vk::DeviceSize>(num_vertices) * sizeof(u16));
-        cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eComputeShader,
-                               vk::PipelineStageFlagBits::eVertexInput, {}, {}, {barrier}, {}, dld);
+        VkBufferMemoryBarrier barrier;
+        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+        barrier.pNext = nullptr;
+        barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
+        barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barrier.buffer = buffer;
+        barrier.offset = 0;
+        barrier.size = static_cast<VkDeviceSize>(num_vertices * sizeof(u16));
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {});
     });
-    return {&*buffer.handle, 0};
+    return {buffer.handle.address(), 0};
 }

 } // namespace Vulkan
View File: src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -8,8 +8,8 @@
 #include <utility>
 #include <vector>

 #include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {
@@ -22,24 +22,24 @@ class VKUpdateDescriptorQueue;

 class VKComputePass {
 public:
     explicit VKComputePass(const VKDevice& device, VKDescriptorPool& descriptor_pool,
-                           const std::vector<vk::DescriptorSetLayoutBinding>& bindings,
-                           const std::vector<vk::DescriptorUpdateTemplateEntry>& templates,
-                           const std::vector<vk::PushConstantRange> push_constants,
-                           std::size_t code_size, const u8* code);
+                           vk::Span<VkDescriptorSetLayoutBinding> bindings,
+                           vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
+                           vk::Span<VkPushConstantRange> push_constants, std::size_t code_size,
+                           const u8* code);
     ~VKComputePass();

 protected:
-    vk::DescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue,
-                                          VKFence& fence);
+    VkDescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue,
+                                        VKFence& fence);

-    UniqueDescriptorUpdateTemplate descriptor_template;
-    UniquePipelineLayout layout;
-    UniquePipeline pipeline;
+    vk::DescriptorUpdateTemplateKHR descriptor_template;
+    vk::PipelineLayout layout;
+    vk::Pipeline pipeline;

 private:
-    UniqueDescriptorSetLayout descriptor_set_layout;
+    vk::DescriptorSetLayout descriptor_set_layout;
     std::optional<DescriptorAllocator> descriptor_allocator;
-    UniqueShaderModule module;
+    vk::ShaderModule module;
 };

 class QuadArrayPass final : public VKComputePass {

@@ -50,7 +50,7 @@ public:
                    VKUpdateDescriptorQueue& update_descriptor_queue);
     ~QuadArrayPass();

-    std::pair<const vk::Buffer&, vk::DeviceSize> Assemble(u32 num_vertices, u32 first);
+    std::pair<const VkBuffer*, VkDeviceSize> Assemble(u32 num_vertices, u32 first);

 private:
     VKScheduler& scheduler;

@@ -65,8 +65,7 @@ public:
                 VKUpdateDescriptorQueue& update_descriptor_queue);
     ~Uint8Pass();

-    std::pair<const vk::Buffer*, u64> Assemble(u32 num_vertices, vk::Buffer src_buffer,
-                                               u64 src_offset);
+    std::pair<const VkBuffer*, u64> Assemble(u32 num_vertices, VkBuffer src_buffer, u64 src_offset);

 private:
     VKScheduler& scheduler;
View File: src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -5,7 +5,6 @@
 #include <memory>
 #include <vector>

-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_compute_pipeline.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
 #include "video_core/renderer_vulkan/vk_device.h"

@@ -14,6 +13,7 @@
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_shader_decompiler.h"
 #include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {
@@ -30,7 +30,7 @@ VKComputePipeline::VKComputePipeline(const VKDevice& device, VKScheduler& schedu

 VKComputePipeline::~VKComputePipeline() = default;

-vk::DescriptorSet VKComputePipeline::CommitDescriptorSet() {
+VkDescriptorSet VKComputePipeline::CommitDescriptorSet() {
     if (!descriptor_template) {
         return {};
     }

@@ -39,74 +39,109 @@ vk::DescriptorSet VKComputePipeline::CommitDescriptorSet() {
     return set;
 }
-UniqueDescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
-    std::vector<vk::DescriptorSetLayoutBinding> bindings;
+vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
+    std::vector<VkDescriptorSetLayoutBinding> bindings;
     u32 binding = 0;
-    const auto AddBindings = [&](vk::DescriptorType descriptor_type, std::size_t num_entries) {
+    const auto add_bindings = [&](VkDescriptorType descriptor_type, std::size_t num_entries) {
         // TODO(Rodrigo): Maybe make individual bindings here?
         for (u32 bindpoint = 0; bindpoint < static_cast<u32>(num_entries); ++bindpoint) {
-            bindings.emplace_back(binding++, descriptor_type, 1, vk::ShaderStageFlagBits::eCompute,
-                                  nullptr);
+            VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
+            entry.binding = binding++;
+            entry.descriptorType = descriptor_type;
+            entry.descriptorCount = 1;
+            entry.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
+            entry.pImmutableSamplers = nullptr;
         }
     };
-    AddBindings(vk::DescriptorType::eUniformBuffer, entries.const_buffers.size());
-    AddBindings(vk::DescriptorType::eStorageBuffer, entries.global_buffers.size());
-    AddBindings(vk::DescriptorType::eUniformTexelBuffer, entries.texel_buffers.size());
-    AddBindings(vk::DescriptorType::eCombinedImageSampler, entries.samplers.size());
-    AddBindings(vk::DescriptorType::eStorageImage, entries.images.size());
+    add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
+    add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size());
+    add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.texel_buffers.size());
+    add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size());
+    add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());

-    const vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_ci(
-        {}, static_cast<u32>(bindings.size()), bindings.data());
-
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    return dev.createDescriptorSetLayoutUnique(descriptor_set_layout_ci, nullptr, dld);
+    VkDescriptorSetLayoutCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.bindingCount = static_cast<u32>(bindings.size());
+    ci.pBindings = bindings.data();
+    return device.GetLogical().CreateDescriptorSetLayout(ci);
 }
-UniquePipelineLayout VKComputePipeline::CreatePipelineLayout() const {
-    const vk::PipelineLayoutCreateInfo layout_ci({}, 1, &*descriptor_set_layout, 0, nullptr);
-    const auto dev = device.GetLogical();
-    return dev.createPipelineLayoutUnique(layout_ci, nullptr, device.GetDispatchLoader());
+vk::PipelineLayout VKComputePipeline::CreatePipelineLayout() const {
+    VkPipelineLayoutCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.setLayoutCount = 1;
+    ci.pSetLayouts = descriptor_set_layout.address();
+    ci.pushConstantRangeCount = 0;
+    ci.pPushConstantRanges = nullptr;
+    return device.GetLogical().CreatePipelineLayout(ci);
 }

-UniqueDescriptorUpdateTemplate VKComputePipeline::CreateDescriptorUpdateTemplate() const {
-    std::vector<vk::DescriptorUpdateTemplateEntry> template_entries;
+vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplate() const {
+    std::vector<VkDescriptorUpdateTemplateEntryKHR> template_entries;
     u32 binding = 0;
     u32 offset = 0;
     FillDescriptorUpdateTemplateEntries(entries, binding, offset, template_entries);
     if (template_entries.empty()) {
         // If the shader doesn't use descriptor sets, skip template creation.
-        return UniqueDescriptorUpdateTemplate{};
+        return {};
     }

-    const vk::DescriptorUpdateTemplateCreateInfo template_ci(
-        {}, static_cast<u32>(template_entries.size()), template_entries.data(),
-        vk::DescriptorUpdateTemplateType::eDescriptorSet, *descriptor_set_layout,
-        vk::PipelineBindPoint::eGraphics, *layout, DESCRIPTOR_SET);
-
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    return dev.createDescriptorUpdateTemplateUnique(template_ci, nullptr, dld);
+    VkDescriptorUpdateTemplateCreateInfoKHR ci;
+    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.descriptorUpdateEntryCount = static_cast<u32>(template_entries.size());
+    ci.pDescriptorUpdateEntries = template_entries.data();
+    ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
+    ci.descriptorSetLayout = *descriptor_set_layout;
+    ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+    ci.pipelineLayout = *layout;
+    ci.set = DESCRIPTOR_SET;
+    return device.GetLogical().CreateDescriptorUpdateTemplateKHR(ci);
 }
-UniqueShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
-    const vk::ShaderModuleCreateInfo module_ci({}, code.size() * sizeof(u32), code.data());
-    const auto dev = device.GetLogical();
-    return dev.createShaderModuleUnique(module_ci, nullptr, device.GetDispatchLoader());
+vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
+    VkShaderModuleCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.codeSize = code.size() * sizeof(u32);
+    ci.pCode = code.data();
+    return device.GetLogical().CreateShaderModule(ci);
 }

-UniquePipeline VKComputePipeline::CreatePipeline() const {
-    vk::PipelineShaderStageCreateInfo shader_stage_ci({}, vk::ShaderStageFlagBits::eCompute,
-                                                      *shader_module, "main", nullptr);
-    vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
+vk::Pipeline VKComputePipeline::CreatePipeline() const {
+    VkComputePipelineCreateInfo ci;
+    VkPipelineShaderStageCreateInfo& stage_ci = ci.stage;
+    stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+    stage_ci.pNext = nullptr;
+    stage_ci.flags = 0;
+    stage_ci.stage = VK_SHADER_STAGE_COMPUTE_BIT;
+    stage_ci.module = *shader_module;
+    stage_ci.pName = "main";
+    stage_ci.pSpecializationInfo = nullptr;
+
+    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
+    subgroup_size_ci.sType =
+        VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT;
+    subgroup_size_ci.pNext = nullptr;
     subgroup_size_ci.requiredSubgroupSize = GuestWarpSize;

-    if (entries.uses_warps && device.IsGuestWarpSizeSupported(vk::ShaderStageFlagBits::eCompute)) {
-        shader_stage_ci.pNext = &subgroup_size_ci;
+    if (entries.uses_warps && device.IsGuestWarpSizeSupported(VK_SHADER_STAGE_COMPUTE_BIT)) {
+        stage_ci.pNext = &subgroup_size_ci;
     }

-    const vk::ComputePipelineCreateInfo create_info({}, shader_stage_ci, *layout, {}, 0);
-    const auto dev = device.GetLogical();
-    return dev.createComputePipelineUnique({}, create_info, nullptr, device.GetDispatchLoader());
+    ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.layout = *layout;
+    ci.basePipelineHandle = nullptr;
+    ci.basePipelineIndex = 0;
+    return device.GetLogical().CreateComputePipeline(ci);
 }

 } // namespace Vulkan
View File: src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -7,9 +7,9 @@
 #include <memory>

 #include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
 #include "video_core/renderer_vulkan/vk_shader_decompiler.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {
@@ -25,42 +25,42 @@ public:
               const SPIRVShader& shader);
     ~VKComputePipeline();

-    vk::DescriptorSet CommitDescriptorSet();
+    VkDescriptorSet CommitDescriptorSet();

-    vk::Pipeline GetHandle() const {
+    VkPipeline GetHandle() const {
         return *pipeline;
     }

-    vk::PipelineLayout GetLayout() const {
+    VkPipelineLayout GetLayout() const {
         return *layout;
     }

-    const ShaderEntries& GetEntries() {
+    const ShaderEntries& GetEntries() const {
         return entries;
     }

 private:
-    UniqueDescriptorSetLayout CreateDescriptorSetLayout() const;
-    UniquePipelineLayout CreatePipelineLayout() const;
-    UniqueDescriptorUpdateTemplate CreateDescriptorUpdateTemplate() const;
-    UniqueShaderModule CreateShaderModule(const std::vector<u32>& code) const;
-    UniquePipeline CreatePipeline() const;
+    vk::DescriptorSetLayout CreateDescriptorSetLayout() const;
+    vk::PipelineLayout CreatePipelineLayout() const;
+    vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate() const;
+    vk::ShaderModule CreateShaderModule(const std::vector<u32>& code) const;
+    vk::Pipeline CreatePipeline() const;

     const VKDevice& device;
     VKScheduler& scheduler;
     ShaderEntries entries;

-    UniqueDescriptorSetLayout descriptor_set_layout;
+    vk::DescriptorSetLayout descriptor_set_layout;
     DescriptorAllocator descriptor_allocator;
     VKUpdateDescriptorQueue& update_descriptor_queue;
-    UniquePipelineLayout layout;
-    UniqueDescriptorUpdateTemplate descriptor_template;
-    UniqueShaderModule shader_module;
-    UniquePipeline pipeline;
+    vk::PipelineLayout layout;
+    vk::DescriptorUpdateTemplateKHR descriptor_template;
+    vk::ShaderModule shader_module;
+    vk::Pipeline pipeline;
 };

 } // namespace Vulkan
View File: src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -6,10 +6,10 @@
 #include <vector>

 #include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {
@@ -17,19 +17,18 @@ namespace Vulkan {

 constexpr std::size_t SETS_GROW_RATE = 0x20;

 DescriptorAllocator::DescriptorAllocator(VKDescriptorPool& descriptor_pool,
-                                         vk::DescriptorSetLayout layout)
+                                         VkDescriptorSetLayout layout)
     : VKFencedPool{SETS_GROW_RATE}, descriptor_pool{descriptor_pool}, layout{layout} {}

 DescriptorAllocator::~DescriptorAllocator() = default;

-vk::DescriptorSet DescriptorAllocator::Commit(VKFence& fence) {
-    return *descriptors[CommitResource(fence)];
+VkDescriptorSet DescriptorAllocator::Commit(VKFence& fence) {
+    const std::size_t index = CommitResource(fence);
+    return descriptors_allocations[index / SETS_GROW_RATE][index % SETS_GROW_RATE];
 }

 void DescriptorAllocator::Allocate(std::size_t begin, std::size_t end) {
-    auto new_sets = descriptor_pool.AllocateDescriptors(layout, end - begin);
-    descriptors.insert(descriptors.end(), std::make_move_iterator(new_sets.begin()),
-                       std::make_move_iterator(new_sets.end()));
+    descriptors_allocations.push_back(descriptor_pool.AllocateDescriptors(layout, end - begin));
 }
 VKDescriptorPool::VKDescriptorPool(const VKDevice& device)

@@ -37,53 +36,50 @@ VKDescriptorPool::VKDescriptorPool(const VKDevice& device)

 VKDescriptorPool::~VKDescriptorPool() = default;

-vk::DescriptorPool VKDescriptorPool::AllocateNewPool() {
+vk::DescriptorPool* VKDescriptorPool::AllocateNewPool() {
     static constexpr u32 num_sets = 0x20000;
-    static constexpr vk::DescriptorPoolSize pool_sizes[] = {
-        {vk::DescriptorType::eUniformBuffer, num_sets * 90},
-        {vk::DescriptorType::eStorageBuffer, num_sets * 60},
-        {vk::DescriptorType::eUniformTexelBuffer, num_sets * 64},
-        {vk::DescriptorType::eCombinedImageSampler, num_sets * 64},
-        {vk::DescriptorType::eStorageImage, num_sets * 40}};
+    static constexpr VkDescriptorPoolSize pool_sizes[] = {
+        {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, num_sets * 90},
+        {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60},
+        {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
+        {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
+        {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40}};

-    const vk::DescriptorPoolCreateInfo create_info(
-        vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, num_sets,
-        static_cast<u32>(std::size(pool_sizes)), std::data(pool_sizes));
-    const auto dev = device.GetLogical();
-    return *pools.emplace_back(
-        dev.createDescriptorPoolUnique(create_info, nullptr, device.GetDispatchLoader()));
+    VkDescriptorPoolCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
+    ci.maxSets = num_sets;
+    ci.poolSizeCount = static_cast<u32>(std::size(pool_sizes));
+    ci.pPoolSizes = std::data(pool_sizes);
+    return &pools.emplace_back(device.GetLogical().CreateDescriptorPool(ci));
 }

-std::vector<UniqueDescriptorSet> VKDescriptorPool::AllocateDescriptors(
-    vk::DescriptorSetLayout layout, std::size_t count) {
-    std::vector layout_copies(count, layout);
-    vk::DescriptorSetAllocateInfo allocate_info(active_pool, static_cast<u32>(count),
-                                                layout_copies.data());
+vk::DescriptorSets VKDescriptorPool::AllocateDescriptors(VkDescriptorSetLayout layout,
+                                                         std::size_t count) {
+    const std::vector layout_copies(count, layout);
+    VkDescriptorSetAllocateInfo ai;
+    ai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+    ai.pNext = nullptr;
+    ai.descriptorPool = **active_pool;
+    ai.descriptorSetCount = static_cast<u32>(count);
+    ai.pSetLayouts = layout_copies.data();

-    std::vector<vk::DescriptorSet> sets(count);
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    switch (const auto result = dev.allocateDescriptorSets(&allocate_info, sets.data(), dld)) {
-    case vk::Result::eSuccess:
-        break;
-    case vk::Result::eErrorOutOfPoolMemory:
-        active_pool = AllocateNewPool();
-        allocate_info.descriptorPool = active_pool;
-        if (dev.allocateDescriptorSets(&allocate_info, sets.data(), dld) == vk::Result::eSuccess) {
-            break;
-        }
-        [[fallthrough]];
-    default:
-        vk::throwResultException(result, "vk::Device::allocateDescriptorSetsUnique");
-    }
-
-    vk::PoolFree deleter(dev, active_pool, dld);
-    std::vector<UniqueDescriptorSet> unique_sets;
-    unique_sets.reserve(count);
-    for (const auto set : sets) {
-        unique_sets.push_back(UniqueDescriptorSet{set, deleter});
-    }
-    return unique_sets;
+    vk::DescriptorSets sets = active_pool->Allocate(ai);
+    if (!sets.IsOutOfPoolMemory()) {
+        return sets;
+    }
+
+    // Our current pool is out of memory. Allocate a new one and retry
+    active_pool = AllocateNewPool();
+    ai.descriptorPool = **active_pool;
+    sets = active_pool->Allocate(ai);
+    if (!sets.IsOutOfPoolMemory()) {
+        return sets;
+    }
+
+    // After allocating a new pool, we are out of memory again. We can't handle this from here.
+    throw vk::Exception(VK_ERROR_OUT_OF_POOL_MEMORY);
 }

 } // namespace Vulkan
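The allocation strategy is try, grow, retry once: only VK_ERROR_OUT_OF_POOL_MEMORY is recoverable, and a second failure right after creating a fresh pool is treated as fatal. Expressed against the raw API the pattern reads roughly as follows (a sketch; every other error code is simply returned):

    VkResult AllocateWithRetry(VkDevice device, VkDescriptorSetAllocateInfo& ai,
                               VkDescriptorSet* sets, VkDescriptorPool fresh_pool) {
        const VkResult result = vkAllocateDescriptorSets(device, &ai, sets);
        if (result != VK_ERROR_OUT_OF_POOL_MEMORY) {
            return result;
        }
        ai.descriptorPool = fresh_pool; // switch to a newly created pool
        return vkAllocateDescriptorSets(device, &ai, sets);
    }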
View File: src/video_core/renderer_vulkan/vk_descriptor_pool.h
@@ -8,8 +8,8 @@
 #include <vector>

 #include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

@@ -17,21 +17,21 @@ class VKDescriptorPool;

 class DescriptorAllocator final : public VKFencedPool {
 public:
-    explicit DescriptorAllocator(VKDescriptorPool& descriptor_pool, vk::DescriptorSetLayout layout);
+    explicit DescriptorAllocator(VKDescriptorPool& descriptor_pool, VkDescriptorSetLayout layout);
     ~DescriptorAllocator() override;

     DescriptorAllocator(const DescriptorAllocator&) = delete;

-    vk::DescriptorSet Commit(VKFence& fence);
+    VkDescriptorSet Commit(VKFence& fence);

 protected:
     void Allocate(std::size_t begin, std::size_t end) override;

 private:
     VKDescriptorPool& descriptor_pool;
-    const vk::DescriptorSetLayout layout;
+    const VkDescriptorSetLayout layout;

-    std::vector<UniqueDescriptorSet> descriptors;
+    std::vector<vk::DescriptorSets> descriptors_allocations;
 };

 class VKDescriptorPool final {

@@ -42,15 +42,14 @@ public:
     ~VKDescriptorPool();

 private:
-    vk::DescriptorPool AllocateNewPool();
+    vk::DescriptorPool* AllocateNewPool();

-    std::vector<UniqueDescriptorSet> AllocateDescriptors(vk::DescriptorSetLayout layout,
-                                                         std::size_t count);
+    vk::DescriptorSets AllocateDescriptors(VkDescriptorSetLayout layout, std::size_t count);

     const VKDevice& device;

-    std::vector<UniqueDescriptorPool> pools;
-    vk::DescriptorPool active_pool;
+    std::vector<vk::DescriptorPool> pools;
+    vk::DescriptorPool* active_pool;
 };

 } // namespace Vulkan
View File: src/video_core/renderer_vulkan/vk_device.cpp
@@ -6,15 +6,15 @@
 #include <chrono>
 #include <cstdlib>
 #include <optional>
-#include <set>
 #include <string_view>
 #include <thread>
+#include <unordered_set>
 #include <vector>

 #include "common/assert.h"
 #include "core/settings.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {
@@ -22,49 +22,43 @@ namespace {

 namespace Alternatives {

-constexpr std::array Depth24UnormS8Uint = {vk::Format::eD32SfloatS8Uint,
-                                           vk::Format::eD16UnormS8Uint, vk::Format{}};
-constexpr std::array Depth16UnormS8Uint = {vk::Format::eD24UnormS8Uint,
-                                           vk::Format::eD32SfloatS8Uint, vk::Format{}};
+constexpr std::array Depth24UnormS8_UINT = {VK_FORMAT_D32_SFLOAT_S8_UINT,
+                                            VK_FORMAT_D16_UNORM_S8_UINT, VkFormat{}};
+constexpr std::array Depth16UnormS8_UINT = {VK_FORMAT_D24_UNORM_S8_UINT,
+                                            VK_FORMAT_D32_SFLOAT_S8_UINT, VkFormat{}};

 } // namespace Alternatives

+constexpr std::array REQUIRED_EXTENSIONS = {
+    VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+    VK_KHR_16BIT_STORAGE_EXTENSION_NAME,
+    VK_KHR_8BIT_STORAGE_EXTENSION_NAME,
+    VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME,
+    VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
+    VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME,
+    VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME,
+    VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME,
+    VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME,
+};
 template <typename T>
 void SetNext(void**& next, T& data) {
     *next = &data;
     next = &data.pNext;
 }
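SetNext keeps a cursor to the last pNext slot so each extension struct is appended to the feature chain in constant time. After the two calls in this sketch, features2.pNext points at a and a.pNext points at b, which is exactly the shape the Create function below builds (sTypes elided for brevity):

    VkPhysicalDeviceFeatures2 features2{};
    void** next = &features2.pNext;

    VkPhysicalDevice16BitStorageFeaturesKHR a{};
    VkPhysicalDevice8BitStorageFeaturesKHR b{};
    SetNext(next, a); // features2.pNext = &a; cursor -> &a.pNext
    SetNext(next, b); // a.pNext = &b;        cursor -> &b.pNext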
-template <typename T>
-T GetFeatures(vk::PhysicalDevice physical, const vk::DispatchLoaderDynamic& dld) {
-    vk::PhysicalDeviceFeatures2 features;
-    T extension_features;
-    features.pNext = &extension_features;
-    physical.getFeatures2(&features, dld);
-    return extension_features;
-}
-
-template <typename T>
-T GetProperties(vk::PhysicalDevice physical, const vk::DispatchLoaderDynamic& dld) {
-    vk::PhysicalDeviceProperties2 properties;
-    T extension_properties;
-    properties.pNext = &extension_properties;
-    physical.getProperties2(&properties, dld);
-    return extension_properties;
-}
-
-constexpr const vk::Format* GetFormatAlternatives(vk::Format format) {
+constexpr const VkFormat* GetFormatAlternatives(VkFormat format) {
     switch (format) {
-    case vk::Format::eD24UnormS8Uint:
-        return Alternatives::Depth24UnormS8Uint.data();
-    case vk::Format::eD16UnormS8Uint:
-        return Alternatives::Depth16UnormS8Uint.data();
+    case VK_FORMAT_D24_UNORM_S8_UINT:
+        return Alternatives::Depth24UnormS8_UINT.data();
+    case VK_FORMAT_D16_UNORM_S8_UINT:
+        return Alternatives::Depth16UnormS8_UINT.data();
     default:
         return nullptr;
     }
 }
-vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properties, FormatType format_type) {
+VkFormatFeatureFlags GetFormatFeatures(VkFormatProperties properties, FormatType format_type) {
     switch (format_type) {
     case FormatType::Linear:
         return properties.linearTilingFeatures;

@@ -77,79 +71,220 @@ vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properties, Format
     }
 }
+std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(
+    vk::PhysicalDevice physical, const vk::InstanceDispatch& dld) {
+    static constexpr std::array formats{VK_FORMAT_A8B8G8R8_UNORM_PACK32,
+                                        VK_FORMAT_A8B8G8R8_UINT_PACK32,
+                                        VK_FORMAT_A8B8G8R8_SNORM_PACK32,
+                                        VK_FORMAT_A8B8G8R8_SRGB_PACK32,
+                                        VK_FORMAT_B5G6R5_UNORM_PACK16,
+                                        VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+                                        VK_FORMAT_A1R5G5B5_UNORM_PACK16,
+                                        VK_FORMAT_R32G32B32A32_SFLOAT,
+                                        VK_FORMAT_R32G32B32A32_UINT,
+                                        VK_FORMAT_R32G32_SFLOAT,
+                                        VK_FORMAT_R32G32_UINT,
+                                        VK_FORMAT_R16G16B16A16_UINT,
+                                        VK_FORMAT_R16G16B16A16_SNORM,
+                                        VK_FORMAT_R16G16B16A16_UNORM,
+                                        VK_FORMAT_R16G16_UNORM,
+                                        VK_FORMAT_R16G16_SNORM,
+                                        VK_FORMAT_R16G16_SFLOAT,
+                                        VK_FORMAT_R16_UNORM,
+                                        VK_FORMAT_R8G8B8A8_SRGB,
+                                        VK_FORMAT_R8G8_UNORM,
+                                        VK_FORMAT_R8G8_SNORM,
+                                        VK_FORMAT_R8_UNORM,
+                                        VK_FORMAT_R8_UINT,
+                                        VK_FORMAT_B10G11R11_UFLOAT_PACK32,
+                                        VK_FORMAT_R32_SFLOAT,
+                                        VK_FORMAT_R32_UINT,
+                                        VK_FORMAT_R32_SINT,
+                                        VK_FORMAT_R16_SFLOAT,
+                                        VK_FORMAT_R16G16B16A16_SFLOAT,
+                                        VK_FORMAT_B8G8R8A8_UNORM,
+                                        VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+                                        VK_FORMAT_D32_SFLOAT,
+                                        VK_FORMAT_D16_UNORM,
+                                        VK_FORMAT_D16_UNORM_S8_UINT,
+                                        VK_FORMAT_D24_UNORM_S8_UINT,
+                                        VK_FORMAT_D32_SFLOAT_S8_UINT,
+                                        VK_FORMAT_BC1_RGBA_UNORM_BLOCK,
+                                        VK_FORMAT_BC2_UNORM_BLOCK,
+                                        VK_FORMAT_BC3_UNORM_BLOCK,
+                                        VK_FORMAT_BC4_UNORM_BLOCK,
+                                        VK_FORMAT_BC5_UNORM_BLOCK,
+                                        VK_FORMAT_BC5_SNORM_BLOCK,
+                                        VK_FORMAT_BC7_UNORM_BLOCK,
+                                        VK_FORMAT_BC6H_UFLOAT_BLOCK,
+                                        VK_FORMAT_BC6H_SFLOAT_BLOCK,
+                                        VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
+                                        VK_FORMAT_BC2_SRGB_BLOCK,
+                                        VK_FORMAT_BC3_SRGB_BLOCK,
+                                        VK_FORMAT_BC7_SRGB_BLOCK,
+                                        VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
+                                        VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
+                                        VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
+                                        VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
+                                        VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
+                                        VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
+                                        VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
+                                        VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
+                                        VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
+                                        VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
+                                        VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
+                                        VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
+                                        VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
+                                        VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
+                                        VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
+                                        VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
+                                        VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
+                                        VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
+                                        VK_FORMAT_E5B9G9R9_UFLOAT_PACK32};
+    std::unordered_map<VkFormat, VkFormatProperties> format_properties;
+    for (const auto format : formats) {
+        format_properties.emplace(format, physical.GetFormatProperties(format));
+    }
+    return format_properties;
+}
 } // Anonymous namespace

-VKDevice::VKDevice(const vk::DispatchLoaderDynamic& dld, vk::PhysicalDevice physical,
-                   vk::SurfaceKHR surface)
-    : dld{dld}, physical{physical}, properties{physical.getProperties(dld)},
-      format_properties{GetFormatProperties(dld, physical)} {
+VKDevice::VKDevice(VkInstance instance, vk::PhysicalDevice physical, VkSurfaceKHR surface,
+                   const vk::InstanceDispatch& dld)
+    : dld{dld}, physical{physical}, properties{physical.GetProperties()},
+      format_properties{GetFormatProperties(physical, dld)} {
     SetupFamilies(surface);
     SetupFeatures();
 }

 VKDevice::~VKDevice() = default;

-bool VKDevice::Create(vk::Instance instance) {
+bool VKDevice::Create() {
     const auto queue_cis = GetDeviceQueueCreateInfos();
     const std::vector extensions = LoadExtensions();

-    vk::PhysicalDeviceFeatures2 features2;
+    VkPhysicalDeviceFeatures2 features2;
+    features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+    features2.pNext = nullptr;
     void** next = &features2.pNext;
     auto& features = features2.features;
-    features.vertexPipelineStoresAndAtomics = true;
+    features.robustBufferAccess = false;
+    features.fullDrawIndexUint32 = false;
+    features.imageCubeArray = false;
     features.independentBlend = true;
-    features.depthClamp = true;
-    features.samplerAnisotropy = true;
-    features.largePoints = true;
-    features.multiViewport = true;
-    features.depthBiasClamp = true;
     features.geometryShader = true;
     features.tessellationShader = true;
+    features.sampleRateShading = false;
+    features.dualSrcBlend = false;
+    features.logicOp = false;
+    features.multiDrawIndirect = false;
+    features.drawIndirectFirstInstance = false;
+    features.depthClamp = true;
+    features.depthBiasClamp = true;
+    features.fillModeNonSolid = false;
+    features.depthBounds = false;
+    features.wideLines = false;
+    features.largePoints = true;
+    features.alphaToOne = false;
+    features.multiViewport = true;
+    features.samplerAnisotropy = true;
+    features.textureCompressionETC2 = false;
+    features.textureCompressionASTC_LDR = is_optimal_astc_supported;
+    features.textureCompressionBC = false;
     features.occlusionQueryPrecise = true;
+    features.pipelineStatisticsQuery = false;
+    features.vertexPipelineStoresAndAtomics = true;
     features.fragmentStoresAndAtomics = true;
+    features.shaderTessellationAndGeometryPointSize = false;
     features.shaderImageGatherExtended = true;
+    features.shaderStorageImageExtendedFormats = false;
+    features.shaderStorageImageMultisample = false;
     features.shaderStorageImageReadWithoutFormat = is_formatless_image_load_supported;
     features.shaderStorageImageWriteWithoutFormat = true;
-    features.textureCompressionASTC_LDR = is_optimal_astc_supported;
+    features.shaderUniformBufferArrayDynamicIndexing = false;
+    features.shaderSampledImageArrayDynamicIndexing = false;
+    features.shaderStorageBufferArrayDynamicIndexing = false;
+    features.shaderStorageImageArrayDynamicIndexing = false;
+    features.shaderClipDistance = false;
+    features.shaderCullDistance = false;
+    features.shaderFloat64 = false;
+    features.shaderInt64 = false;
+    features.shaderInt16 = false;
+    features.shaderResourceResidency = false;
+    features.shaderResourceMinLod = false;
+    features.sparseBinding = false;
+    features.sparseResidencyBuffer = false;
+    features.sparseResidencyImage2D = false;
+    features.sparseResidencyImage3D = false;
+    features.sparseResidency2Samples = false;
+    features.sparseResidency4Samples = false;
+    features.sparseResidency8Samples = false;
+    features.sparseResidency16Samples = false;
+    features.sparseResidencyAliased = false;
+    features.variableMultisampleRate = false;
+    features.inheritedQueries = false;
vk::PhysicalDevice16BitStorageFeaturesKHR bit16_storage; VkPhysicalDevice16BitStorageFeaturesKHR bit16_storage;
bit16_storage.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR;
bit16_storage.pNext = nullptr;
bit16_storage.storageBuffer16BitAccess = false;
bit16_storage.uniformAndStorageBuffer16BitAccess = true; bit16_storage.uniformAndStorageBuffer16BitAccess = true;
bit16_storage.storagePushConstant16 = false;
bit16_storage.storageInputOutput16 = false;
SetNext(next, bit16_storage); SetNext(next, bit16_storage);
vk::PhysicalDevice8BitStorageFeaturesKHR bit8_storage; VkPhysicalDevice8BitStorageFeaturesKHR bit8_storage;
bit8_storage.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR;
bit8_storage.pNext = nullptr;
bit8_storage.storageBuffer8BitAccess = false;
bit8_storage.uniformAndStorageBuffer8BitAccess = true; bit8_storage.uniformAndStorageBuffer8BitAccess = true;
bit8_storage.storagePushConstant8 = false;
SetNext(next, bit8_storage); SetNext(next, bit8_storage);
vk::PhysicalDeviceHostQueryResetFeaturesEXT host_query_reset; VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset;
host_query_reset.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
host_query_reset.hostQueryReset = true; host_query_reset.hostQueryReset = true;
SetNext(next, host_query_reset); SetNext(next, host_query_reset);
vk::PhysicalDeviceFloat16Int8FeaturesKHR float16_int8; VkPhysicalDeviceFloat16Int8FeaturesKHR float16_int8;
if (is_float16_supported) { if (is_float16_supported) {
float16_int8.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR;
float16_int8.pNext = nullptr;
float16_int8.shaderFloat16 = true; float16_int8.shaderFloat16 = true;
float16_int8.shaderInt8 = false;
SetNext(next, float16_int8); SetNext(next, float16_int8);
} else { } else {
LOG_INFO(Render_Vulkan, "Device doesn't support float16 natively"); LOG_INFO(Render_Vulkan, "Device doesn't support float16 natively");
} }
vk::PhysicalDeviceUniformBufferStandardLayoutFeaturesKHR std430_layout; VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR std430_layout;
if (khr_uniform_buffer_standard_layout) { if (khr_uniform_buffer_standard_layout) {
std430_layout.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR;
std430_layout.pNext = nullptr;
std430_layout.uniformBufferStandardLayout = true; std430_layout.uniformBufferStandardLayout = true;
SetNext(next, std430_layout); SetNext(next, std430_layout);
} else { } else {
LOG_INFO(Render_Vulkan, "Device doesn't support packed UBOs"); LOG_INFO(Render_Vulkan, "Device doesn't support packed UBOs");
} }
vk::PhysicalDeviceIndexTypeUint8FeaturesEXT index_type_uint8; VkPhysicalDeviceIndexTypeUint8FeaturesEXT index_type_uint8;
if (ext_index_type_uint8) { if (ext_index_type_uint8) {
index_type_uint8.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT;
index_type_uint8.pNext = nullptr;
index_type_uint8.indexTypeUint8 = true; index_type_uint8.indexTypeUint8 = true;
SetNext(next, index_type_uint8); SetNext(next, index_type_uint8);
} else { } else {
LOG_INFO(Render_Vulkan, "Device doesn't support uint8 indexes"); LOG_INFO(Render_Vulkan, "Device doesn't support uint8 indexes");
} }
vk::PhysicalDeviceTransformFeedbackFeaturesEXT transform_feedback; VkPhysicalDeviceTransformFeedbackFeaturesEXT transform_feedback;
if (ext_transform_feedback) { if (ext_transform_feedback) {
transform_feedback.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT;
transform_feedback.pNext = nullptr;
transform_feedback.transformFeedback = true; transform_feedback.transformFeedback = true;
transform_feedback.geometryStreams = true; transform_feedback.geometryStreams = true;
SetNext(next, transform_feedback); SetNext(next, transform_feedback);
@ -161,60 +296,48 @@ bool VKDevice::Create(vk::Instance instance) {
LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted"); LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted");
} }
vk::DeviceCreateInfo device_ci({}, static_cast<u32>(queue_cis.size()), queue_cis.data(), 0, logical = vk::Device::Create(physical, queue_cis, extensions, features2, dld);
nullptr, static_cast<u32>(extensions.size()), extensions.data(), if (!logical) {
nullptr); LOG_ERROR(Render_Vulkan, "Failed to create logical device");
device_ci.pNext = &features2;
vk::Device unsafe_logical;
if (physical.createDevice(&device_ci, nullptr, &unsafe_logical, dld) != vk::Result::eSuccess) {
LOG_CRITICAL(Render_Vulkan, "Logical device failed to be created!");
return false; return false;
} }
dld.init(instance, dld.vkGetInstanceProcAddr, unsafe_logical);
logical = UniqueDevice(unsafe_logical, {nullptr, dld});
CollectTelemetryParameters(); CollectTelemetryParameters();
graphics_queue = logical->getQueue(graphics_family, 0, dld); graphics_queue = logical.GetQueue(graphics_family);
present_queue = logical->getQueue(present_family, 0, dld); present_queue = logical.GetQueue(present_family);
return true; return true;
} }
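The SetNext(next, chain) calls above splice each feature struct into the VkPhysicalDeviceFeatures2 pNext chain. The helper itself lives earlier in this file's anonymous namespace and is outside this hunk; a minimal sketch consistent with how it is used here, assuming each struct carries a writable pNext member:

    template <typename T>
    void SetNext(void**& next, T& data) {
        *next = &data;      // hook the new struct onto the end of the chain
        next = &data.pNext; // future structs will be appended after this one
    }

Because next always points at the last unfilled pNext slot, structs can be appended conditionally (as with float16_int8 above) without breaking the chain.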
VkFormat VKDevice::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
                                      FormatType format_type) const {
    if (IsFormatSupported(wanted_format, wanted_usage, format_type)) {
        return wanted_format;
    }
    // The wanted format is not supported by hardware, search for alternatives
    const VkFormat* alternatives = GetFormatAlternatives(wanted_format);
    if (alternatives == nullptr) {
        UNREACHABLE_MSG("Format={} with usage={} and type={} has no defined alternatives and host "
                        "hardware does not support it",
                        wanted_format, wanted_usage, format_type);
        return wanted_format;
    }

    std::size_t i = 0;
    for (VkFormat alternative = *alternatives; alternative; alternative = alternatives[++i]) {
        if (!IsFormatSupported(alternative, wanted_usage, format_type)) {
            continue;
        }
        LOG_WARNING(Render_Vulkan,
                    "Emulating format={} with alternative format={} with usage={} and type={}",
                    wanted_format, alternative, wanted_usage, format_type);
        return alternative;
    }

    // No alternatives found, panic
    UNREACHABLE_MSG("Format={} with usage={} and type={} is not supported by the host hardware and "
                    "doesn't support any of the alternatives",
                    wanted_format, wanted_usage, format_type);
    return wanted_format;
}
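The loop above relies on GetFormatAlternatives (defined elsewhere in this file) returning a zero-terminated array: VK_FORMAT_UNDEFINED has the value 0, so the plain truth test on alternative ends the scan. An illustrative table in that shape (entries here are hypothetical, not this file's actual data):

    constexpr VkFormat D24S8_ALTERNATIVES[] = {
        VK_FORMAT_D32_SFLOAT_S8_UINT, // preferred fallback
        VK_FORMAT_D16_UNORM_S8_UINT,  // last resort
        VK_FORMAT_UNDEFINED,          // sentinel: terminates the loop above
    };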
@@ -228,38 +351,39 @@ void VKDevice::ReportLoss() const {
        return;
    }

    [[maybe_unused]] const std::vector data = graphics_queue.GetCheckpointDataNV(dld);
    // Catch here in debug builds (or with optimizations disabled) the last graphics pipeline to be
    // executed. It can be done on a debugger by evaluating the expression:
    // *(VKGraphicsPipeline*)data[0]
}
bool VKDevice::IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const {
    // Disable for now to avoid converting ASTC twice.
    static constexpr std::array astc_formats = {
        VK_FORMAT_ASTC_4x4_UNORM_BLOCK,   VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
        VK_FORMAT_ASTC_5x4_UNORM_BLOCK,   VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
        VK_FORMAT_ASTC_5x5_UNORM_BLOCK,   VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
        VK_FORMAT_ASTC_6x5_UNORM_BLOCK,   VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
        VK_FORMAT_ASTC_6x6_UNORM_BLOCK,   VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
        VK_FORMAT_ASTC_8x5_UNORM_BLOCK,   VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
        VK_FORMAT_ASTC_8x6_UNORM_BLOCK,   VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
        VK_FORMAT_ASTC_8x8_UNORM_BLOCK,   VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
        VK_FORMAT_ASTC_10x5_UNORM_BLOCK,  VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
        VK_FORMAT_ASTC_10x6_UNORM_BLOCK,  VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
        VK_FORMAT_ASTC_10x8_UNORM_BLOCK,  VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
        VK_FORMAT_ASTC_10x10_UNORM_BLOCK, VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
        VK_FORMAT_ASTC_12x10_UNORM_BLOCK, VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
        VK_FORMAT_ASTC_12x12_UNORM_BLOCK, VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
    };
    if (!features.textureCompressionASTC_LDR) {
        return false;
    }
    const auto format_feature_usage{
        VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_BLIT_SRC_BIT |
        VK_FORMAT_FEATURE_BLIT_DST_BIT | VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
        VK_FORMAT_FEATURE_TRANSFER_DST_BIT};
    for (const auto format : astc_formats) {
        const auto format_properties{physical.GetFormatProperties(format)};
        if (!(format_properties.optimalTilingFeatures & format_feature_usage)) {
            return false;
        }
@@ -267,61 +391,49 @@ bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features
    return true;
}
bool VKDevice::IsFormatSupported(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
                                 FormatType format_type) const {
    const auto it = format_properties.find(wanted_format);
    if (it == format_properties.end()) {
        UNIMPLEMENTED_MSG("Unimplemented format query={}", wanted_format);
        return true;
    }
    const auto supported_usage = GetFormatFeatures(it->second, format_type);
    return (supported_usage & wanted_usage) == wanted_usage;
}
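The final line is a bitmask subset test: the format qualifies only when every requested feature bit is present in the supported set. A worked example with illustrative values:

    // supported = SAMPLED | BLIT_SRC | BLIT_DST
    // wanted    = SAMPLED | BLIT_SRC       -> (supported & wanted) == wanted -> true
    // wanted    = SAMPLED | STORAGE_IMAGE  -> intersection lacks STORAGE_IMAGE -> false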
bool VKDevice::IsSuitable(vk::PhysicalDevice physical, VkSurfaceKHR surface) {
    bool is_suitable = true;

    std::bitset<REQUIRED_EXTENSIONS.size()> available_extensions;
    for (const auto& prop : physical.EnumerateDeviceExtensionProperties()) {
        for (std::size_t i = 0; i < REQUIRED_EXTENSIONS.size(); ++i) {
            if (available_extensions[i]) {
                continue;
            }
            const std::string_view name{prop.extensionName};
            available_extensions[i] = name == REQUIRED_EXTENSIONS[i];
        }
    }
    if (!available_extensions.all()) {
        for (std::size_t i = 0; i < REQUIRED_EXTENSIONS.size(); ++i) {
            if (available_extensions[i]) {
                continue;
            }
            LOG_ERROR(Render_Vulkan, "Missing required extension: {}", REQUIRED_EXTENSIONS[i]);
            is_suitable = false;
        }
    }

    bool has_graphics{}, has_present{};
    const std::vector queue_family_properties = physical.GetQueueFamilyProperties();
    for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) {
        const auto& family = queue_family_properties[i];
        if (family.queueCount == 0) {
            continue;
        }
        has_graphics |= family.queueFlags & VK_QUEUE_GRAPHICS_BIT;
        has_present |= physical.GetSurfaceSupportKHR(i, surface);
    }
    if (!has_graphics || !has_present) {
        LOG_ERROR(Render_Vulkan, "Device lacks a graphics and present queue");
@@ -329,7 +441,7 @@ bool VKDevice::IsSuitable(vk::PhysicalDevice physical, VkSurfaceKHR surface,
    }

    // TODO(Rodrigo): Check if the device matches all requirements.
    const auto properties{physical.GetProperties()};
    const auto& limits{properties.limits};

    constexpr u32 required_ubo_size = 65536;
@@ -346,7 +458,7 @@ bool VKDevice::IsSuitable(vk::PhysicalDevice physical, VkSurfaceKHR surface,
        is_suitable = false;
    }

    const auto features{physical.GetFeatures()};
    const std::array feature_report = {
        std::make_pair(features.vertexPipelineStoresAndAtomics, "vertexPipelineStoresAndAtomics"),
        std::make_pair(features.independentBlend, "independentBlend"),
@@ -380,7 +492,7 @@ bool VKDevice::IsSuitable(vk::PhysicalDevice physical, VkSurfaceKHR surface,

std::vector<const char*> VKDevice::LoadExtensions() {
    std::vector<const char*> extensions;
    const auto Test = [&](const VkExtensionProperties& extension,
                          std::optional<std::reference_wrapper<bool>> status, const char* name,
                          bool push) {
        if (extension.extensionName != std::string_view(name)) {
@@ -394,22 +506,13 @@ std::vector<const char*> VKDevice::LoadExtensions() {
        }
    };

    extensions.reserve(7 + REQUIRED_EXTENSIONS.size());
    extensions.insert(extensions.begin(), REQUIRED_EXTENSIONS.begin(), REQUIRED_EXTENSIONS.end());

    bool has_khr_shader_float16_int8{};
    bool has_ext_subgroup_size_control{};
    bool has_ext_transform_feedback{};
    for (const auto& extension : physical.EnumerateDeviceExtensionProperties()) {
        Test(extension, khr_uniform_buffer_standard_layout,
             VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, true);
        Test(extension, has_khr_shader_float16_int8, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME,
@@ -429,38 +532,67 @@ std::vector<const char*> VKDevice::LoadExtensions() {
        }
    }

    VkPhysicalDeviceFeatures2KHR features;
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR;

    VkPhysicalDeviceProperties2KHR properties;
    properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR;

    if (has_khr_shader_float16_int8) {
        VkPhysicalDeviceFloat16Int8FeaturesKHR float16_int8_features;
        float16_int8_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR;
        float16_int8_features.pNext = nullptr;
        features.pNext = &float16_int8_features;
        physical.GetFeatures2KHR(features);

        is_float16_supported = float16_int8_features.shaderFloat16;
        extensions.push_back(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME);
    }

    if (has_ext_subgroup_size_control) {
        VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroup_features;
        subgroup_features.sType =
            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
        subgroup_features.pNext = nullptr;
        features.pNext = &subgroup_features;
        physical.GetFeatures2KHR(features);

        VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_properties;
        subgroup_properties.sType =
            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
        subgroup_properties.pNext = nullptr;
        properties.pNext = &subgroup_properties;
        physical.GetProperties2KHR(properties);

        is_warp_potentially_bigger = subgroup_properties.maxSubgroupSize > GuestWarpSize;

        if (subgroup_features.subgroupSizeControl &&
            subgroup_properties.minSubgroupSize <= GuestWarpSize &&
            subgroup_properties.maxSubgroupSize >= GuestWarpSize) {
            extensions.push_back(VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME);
            guest_warp_stages = subgroup_properties.requiredSubgroupSizeStages;
        }
    } else {
        is_warp_potentially_bigger = true;
    }

    if (has_ext_transform_feedback) {
        VkPhysicalDeviceTransformFeedbackFeaturesEXT tfb_features;
        tfb_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT;
        tfb_features.pNext = nullptr;
        features.pNext = &tfb_features;
        physical.GetFeatures2KHR(features);

        VkPhysicalDeviceTransformFeedbackPropertiesEXT tfb_properties;
        tfb_properties.sType =
            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT;
        tfb_properties.pNext = nullptr;
        properties.pNext = &tfb_properties;
        physical.GetProperties2KHR(properties);

        if (tfb_features.transformFeedback && tfb_features.geometryStreams &&
            tfb_properties.maxTransformFeedbackStreams >= 4 &&
            tfb_properties.maxTransformFeedbackBuffers && tfb_properties.transformFeedbackQueries &&
            tfb_properties.transformFeedbackDraw) {
            extensions.push_back(VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME);
            ext_transform_feedback = true;
        }
@@ -469,10 +601,10 @@ std::vector<const char*> VKDevice::LoadExtensions() {
    return extensions;
}
void VKDevice::SetupFamilies(VkSurfaceKHR surface) {
    std::optional<u32> graphics_family_, present_family_;

    const std::vector queue_family_properties = physical.GetQueueFamilyProperties();
    for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) {
        if (graphics_family_ && present_family_)
            break;
@@ -481,10 +613,10 @@ void VKDevice::SetupFamilies(vk::SurfaceKHR surface) {
        if (queue_family.queueCount == 0)
            continue;

        if (queue_family.queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_family_ = i;
        }
        if (physical.GetSurfaceSupportKHR(i, surface)) {
            present_family_ = i;
        }
    }
@@ -495,120 +627,48 @@ void VKDevice::SetupFamilies(vk::SurfaceKHR surface) {
}
void VKDevice::SetupFeatures() {
    const auto supported_features{physical.GetFeatures()};
    is_formatless_image_load_supported = supported_features.shaderStorageImageReadWithoutFormat;
    is_optimal_astc_supported = IsOptimalAstcSupported(supported_features);
}

void VKDevice::CollectTelemetryParameters() {
    VkPhysicalDeviceDriverPropertiesKHR driver;
    driver.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR;
    driver.pNext = nullptr;

    VkPhysicalDeviceProperties2KHR properties;
    properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR;
    properties.pNext = &driver;
    physical.GetProperties2KHR(properties);

    driver_id = driver.driverID;
    vendor_name = driver.driverName;

    const std::vector extensions = physical.EnumerateDeviceExtensionProperties();
    reported_extensions.reserve(std::size(extensions));
    for (const auto& extension : extensions) {
        reported_extensions.push_back(extension.extensionName);
    }
}
std::vector<VkDeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const {
    static constexpr float QUEUE_PRIORITY = 1.0f;

    std::unordered_set<u32> unique_queue_families = {graphics_family, present_family};
    std::vector<VkDeviceQueueCreateInfo> queue_cis;

    for (const u32 queue_family : unique_queue_families) {
        VkDeviceQueueCreateInfo& ci = queue_cis.emplace_back();
        ci.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        ci.pNext = nullptr;
        ci.flags = 0;
        ci.queueFamilyIndex = queue_family;
        ci.queueCount = 1;
        ci.pQueuePriorities = &QUEUE_PRIORITY;
    }

    return queue_cis;
}
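On most consumer GPUs the graphics and present families share one index, so the unordered_set collapses the pair to a single element and only one VkDeviceQueueCreateInfo is emitted; two infos appear only on hardware where presentation lives in a separate queue family.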
} // namespace Vulkan

View File

@@ -8,8 +8,9 @@

#include <string_view>
#include <unordered_map>
#include <vector>

#include "common/common_types.h"
#include "video_core/renderer_vulkan/wrapper.h"

namespace Vulkan {

@@ -22,12 +23,12 @@ const u32 GuestWarpSize = 32;
/// Handles data specific to a physical device.
class VKDevice final {
public:
    explicit VKDevice(VkInstance instance, vk::PhysicalDevice physical, VkSurfaceKHR surface,
                      const vk::InstanceDispatch& dld);
    ~VKDevice();

    /// Initializes the device. Returns true on success.
    bool Create();

    /**
     * Returns a format supported by the device for the passed requirements.
@@ -36,20 +37,20 @@ public:
     * @param format_type Format type usage.
     * @returns A format supported by the device.
     */
    VkFormat GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
                                FormatType format_type) const;

    /// Reports a device loss.
    void ReportLoss() const;

    /// Returns the dispatch loader with direct function pointers of the device.
    const vk::DeviceDispatch& GetDispatchLoader() const {
        return dld;
    }

    /// Returns the logical device.
    const vk::Device& GetLogical() const {
        return logical;
    }

    /// Returns the physical device.
@@ -79,7 +80,7 @@ public:
    /// Returns true if the device is integrated with the host CPU.
    bool IsIntegrated() const {
        return properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

    /// Returns the current Vulkan API version provided in Vulkan-formatted version numbers.
@@ -98,27 +99,27 @@ public:
    }

    /// Returns the driver ID.
    VkDriverIdKHR GetDriverID() const {
        return driver_id;
    }

    /// Returns uniform buffer alignment requirement.
    VkDeviceSize GetUniformBufferAlignment() const {
        return properties.limits.minUniformBufferOffsetAlignment;
    }

    /// Returns storage alignment requirement.
    VkDeviceSize GetStorageBufferAlignment() const {
        return properties.limits.minStorageBufferOffsetAlignment;
    }

    /// Returns the maximum range for storage buffers.
    VkDeviceSize GetMaxStorageBufferRange() const {
        return properties.limits.maxStorageBufferRange;
    }

    /// Returns the maximum size for push constants.
    VkDeviceSize GetMaxPushConstantsSize() const {
        return properties.limits.maxPushConstantsSize;
    }
@@ -138,8 +139,8 @@ public:
    }

    /// Returns true if the device can be forced to use the guest warp size.
    bool IsGuestWarpSizeSupported(VkShaderStageFlagBits stage) const {
        return guest_warp_stages & stage;
    }

    /// Returns true if formatless image load is supported.
@@ -188,15 +189,14 @@ public:
    }

    /// Checks if the physical device is suitable.
    static bool IsSuitable(vk::PhysicalDevice physical, VkSurfaceKHR surface);

private:
    /// Loads extensions into a vector and stores available ones in this object.
    std::vector<const char*> LoadExtensions();

    /// Sets up queue families.
    void SetupFamilies(VkSurfaceKHR surface);

    /// Sets up device features.
    void SetupFeatures();
@@ -205,29 +205,25 @@ private:
    void CollectTelemetryParameters();

    /// Returns a list of queue initialization descriptors.
    std::vector<VkDeviceQueueCreateInfo> GetDeviceQueueCreateInfos() const;

    /// Returns true if ASTC textures are natively supported.
    bool IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const;

    /// Returns true if a format is supported.
    bool IsFormatSupported(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
                           FormatType format_type) const;

    vk::DeviceDispatch dld;                 ///< Device function pointers.
    vk::PhysicalDevice physical;            ///< Physical device.
    VkPhysicalDeviceProperties properties;  ///< Device properties.
    vk::Device logical;                     ///< Logical device.
    vk::Queue graphics_queue;               ///< Main graphics queue.
    vk::Queue present_queue;                ///< Main present queue.
    u32 graphics_family{};                  ///< Main graphics queue family index.
    u32 present_family{};                   ///< Main present queue family index.
    VkDriverIdKHR driver_id{};              ///< Driver ID.
    VkShaderStageFlags guest_warp_stages{}; ///< Stages where the guest warp size can be forced.
    bool is_optimal_astc_supported{};       ///< Support for native ASTC.
    bool is_float16_supported{};            ///< Support for float16 arithmetics.
    bool is_warp_potentially_bigger{};      ///< Host warp size can be bigger than guest.
@@ -244,7 +240,7 @@ private:
    std::vector<std::string> reported_extensions; ///< Reported Vulkan extensions.

    /// Format properties dictionary.
    std::unordered_map<VkFormat, VkFormatProperties> format_properties;
};

} // namespace Vulkan

View File

@@ -2,11 +2,13 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <array>
#include <cstring>
#include <vector>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/microprofile.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
@@ -16,6 +18,7 @@
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/renderer_vulkan/wrapper.h"

namespace Vulkan {

@@ -23,21 +26,26 @@ MICROPROFILE_DECLARE(Vulkan_PipelineCache);
namespace {

VkStencilOpState GetStencilFaceState(const FixedPipelineState::StencilFace& face) {
    VkStencilOpState state;
    state.failOp = MaxwellToVK::StencilOp(face.action_stencil_fail);
    state.passOp = MaxwellToVK::StencilOp(face.action_depth_pass);
    state.depthFailOp = MaxwellToVK::StencilOp(face.action_depth_fail);
    state.compareOp = MaxwellToVK::ComparisonOp(face.test_func);
    state.compareMask = 0;
    state.writeMask = 0;
    state.reference = 0;
    return state;
}
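The zeroed compareMask, writeMask and reference fields are placeholders: those three values are exposed as Vulkan dynamic state and overwritten at draw time (see the dynamic state list later in this file), so only the stencil ops and compare op baked here need to be correct.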
bool SupportsPrimitiveRestart(VkPrimitiveTopology topology) {
    static constexpr std::array unsupported_topologies = {
        VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
        VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
        VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
        VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
        VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
        VK_PRIMITIVE_TOPOLOGY_PATCH_LIST};
    return std::find(std::begin(unsupported_topologies), std::end(unsupported_topologies),
                     topology) == std::end(unsupported_topologies);
}
@@ -49,7 +57,7 @@ VKGraphicsPipeline::VKGraphicsPipeline(const VKDevice& device, VKScheduler& sche
                                       VKUpdateDescriptorQueue& update_descriptor_queue,
                                       VKRenderPassCache& renderpass_cache,
                                       const GraphicsPipelineCacheKey& key,
                                       vk::Span<VkDescriptorSetLayoutBinding> bindings,
                                       const SPIRVProgram& program)
    : device{device}, scheduler{scheduler}, fixed_state{key.fixed_state}, hash{key.Hash()},
      descriptor_set_layout{CreateDescriptorSetLayout(bindings)},
@@ -63,7 +71,7 @@ VKGraphicsPipeline::VKGraphicsPipeline(const VKDevice& device, VKScheduler& sche

VKGraphicsPipeline::~VKGraphicsPipeline() = default;

VkDescriptorSet VKGraphicsPipeline::CommitDescriptorSet() {
    if (!descriptor_template) {
        return {};
    }
@@ -72,27 +80,32 @@ vk::DescriptorSet VKGraphicsPipeline::CommitDescriptorSet() {
    return set;
}
vk::DescriptorSetLayout VKGraphicsPipeline::CreateDescriptorSetLayout(
    vk::Span<VkDescriptorSetLayoutBinding> bindings) const {
    VkDescriptorSetLayoutCreateInfo ci;
    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    ci.pNext = nullptr;
    ci.flags = 0;
    ci.bindingCount = bindings.size();
    ci.pBindings = bindings.data();
    return device.GetLogical().CreateDescriptorSetLayout(ci);
}
vk::PipelineLayout VKGraphicsPipeline::CreatePipelineLayout() const {
    VkPipelineLayoutCreateInfo ci;
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    ci.pNext = nullptr;
    ci.flags = 0;
    ci.setLayoutCount = 1;
    ci.pSetLayouts = descriptor_set_layout.address();
    ci.pushConstantRangeCount = 0;
    ci.pPushConstantRanges = nullptr;
    return device.GetLogical().CreatePipelineLayout(ci);
}
vk::DescriptorUpdateTemplateKHR VKGraphicsPipeline::CreateDescriptorUpdateTemplate(
    const SPIRVProgram& program) const {
    std::vector<VkDescriptorUpdateTemplateEntry> template_entries;
    u32 binding = 0;
    u32 offset = 0;
    for (const auto& stage : program) {
@@ -102,37 +115,46 @@ UniqueDescriptorUpdateTemplate VKGraphicsPipeline::CreateDescriptorUpdateTemplat
    }

    if (template_entries.empty()) {
        // If the shader doesn't use descriptor sets, skip template creation.
        return {};
    }

    VkDescriptorUpdateTemplateCreateInfoKHR ci;
    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
    ci.pNext = nullptr;
    ci.flags = 0;
    ci.descriptorUpdateEntryCount = static_cast<u32>(template_entries.size());
    ci.pDescriptorUpdateEntries = template_entries.data();
    ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
    ci.descriptorSetLayout = *descriptor_set_layout;
    ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
    ci.pipelineLayout = *layout;
    ci.set = DESCRIPTOR_SET;
    return device.GetLogical().CreateDescriptorUpdateTemplateKHR(ci);
}
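For context, the payoff of the template (a sketch of the assumed caller pattern, not code from this diff): instead of building one VkWriteDescriptorSet per binding, the backend packs all resource handles into a contiguous buffer laid out to match the template entries and updates the whole set with a single raw Vulkan call:

    // payload: bytes arranged per the template's offsets and strides
    vkUpdateDescriptorSetWithTemplateKHR(device, set, descriptor_template, payload.data());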
std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules(
    const SPIRVProgram& program) const {
    VkShaderModuleCreateInfo ci;
    ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    ci.pNext = nullptr;
    ci.flags = 0;

    std::vector<vk::ShaderModule> modules;
    modules.reserve(Maxwell::MaxShaderStage);
    for (std::size_t i = 0; i < Maxwell::MaxShaderStage; ++i) {
        const auto& stage = program[i];
        if (!stage) {
            continue;
        }

        ci.codeSize = stage->code.size() * sizeof(u32);
        ci.pCode = stage->code.data();
        modules.push_back(device.GetLogical().CreateShaderModule(ci));
    }
    return modules;
}
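Note the design choice here: a single VkShaderModuleCreateInfo is declared once and reused across the loop; only codeSize and pCode change per stage, since sType, pNext and flags are identical for every module.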
vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpass_params,
                                                const SPIRVProgram& program) const {
    const auto& vi = fixed_state.vertex_input;
    const auto& ia = fixed_state.input_assembly;
@@ -141,19 +163,26 @@ UniquePipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& render
    const auto& ts = fixed_state.tessellation;
    const auto& rs = fixed_state.rasterizer;

    std::vector<VkVertexInputBindingDescription> vertex_bindings;
    std::vector<VkVertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors;
    for (std::size_t i = 0; i < vi.num_bindings; ++i) {
        const auto& binding = vi.bindings[i];
        const bool instanced = binding.divisor != 0;
        const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;

        auto& vertex_binding = vertex_bindings.emplace_back();
        vertex_binding.binding = binding.index;
        vertex_binding.stride = binding.stride;
        vertex_binding.inputRate = rate;

        if (instanced) {
            auto& binding_divisor = vertex_binding_divisors.emplace_back();
            binding_divisor.binding = binding.index;
            binding_divisor.divisor = binding.divisor;
        }
    }

    std::vector<VkVertexInputAttributeDescription> vertex_attributes;
    const auto& input_attributes = program[0]->entries.attributes;
    for (std::size_t i = 0; i < vi.num_attributes; ++i) {
        const auto& attribute = vi.attributes[i];
@@ -161,109 +190,194 @@ UniquePipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& render
            // Skip attributes not used by the vertex shaders.
            continue;
        }

        auto& vertex_attribute = vertex_attributes.emplace_back();
        vertex_attribute.location = attribute.index;
        vertex_attribute.binding = attribute.buffer;
        vertex_attribute.format = MaxwellToVK::VertexFormat(attribute.type, attribute.size);
        vertex_attribute.offset = attribute.offset;
    }

    VkPipelineVertexInputStateCreateInfo vertex_input_ci;
    vertex_input_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
    vertex_input_ci.pNext = nullptr;
    vertex_input_ci.flags = 0;
    vertex_input_ci.vertexBindingDescriptionCount = static_cast<u32>(vertex_bindings.size());
    vertex_input_ci.pVertexBindingDescriptions = vertex_bindings.data();
    vertex_input_ci.vertexAttributeDescriptionCount = static_cast<u32>(vertex_attributes.size());
    vertex_input_ci.pVertexAttributeDescriptions = vertex_attributes.data();

    VkPipelineVertexInputDivisorStateCreateInfoEXT input_divisor_ci;
    input_divisor_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT;
    input_divisor_ci.pNext = nullptr;
    input_divisor_ci.vertexBindingDivisorCount = static_cast<u32>(vertex_binding_divisors.size());
    input_divisor_ci.pVertexBindingDivisors = vertex_binding_divisors.data();
    if (!vertex_binding_divisors.empty()) {
        vertex_input_ci.pNext = &input_divisor_ci;
    }

    VkPipelineInputAssemblyStateCreateInfo input_assembly_ci;
    input_assembly_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
    input_assembly_ci.pNext = nullptr;
    input_assembly_ci.flags = 0;
    input_assembly_ci.topology = MaxwellToVK::PrimitiveTopology(device, ia.topology);
    input_assembly_ci.primitiveRestartEnable =
        ia.primitive_restart_enable && SupportsPrimitiveRestart(input_assembly_ci.topology);

    VkPipelineTessellationStateCreateInfo tessellation_ci;
    tessellation_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
    tessellation_ci.pNext = nullptr;
    tessellation_ci.flags = 0;
    tessellation_ci.patchControlPoints = ts.patch_control_points;

    VkPipelineViewportStateCreateInfo viewport_ci;
    viewport_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
    viewport_ci.pNext = nullptr;
    viewport_ci.flags = 0;
    viewport_ci.viewportCount = Maxwell::NumViewports;
    viewport_ci.pViewports = nullptr;
    viewport_ci.scissorCount = Maxwell::NumViewports;
    viewport_ci.pScissors = nullptr;

    VkPipelineRasterizationStateCreateInfo rasterization_ci;
    rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
    rasterization_ci.pNext = nullptr;
    rasterization_ci.flags = 0;
    rasterization_ci.depthClampEnable = rs.depth_clamp_enable;
    rasterization_ci.rasterizerDiscardEnable = VK_FALSE;
    rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL;
    rasterization_ci.cullMode =
        rs.cull_enable ? MaxwellToVK::CullFace(rs.cull_face) : VK_CULL_MODE_NONE;
    rasterization_ci.frontFace = MaxwellToVK::FrontFace(rs.front_face);
    rasterization_ci.depthBiasEnable = rs.depth_bias_enable;
    rasterization_ci.depthBiasConstantFactor = 0.0f;
    rasterization_ci.depthBiasClamp = 0.0f;
    rasterization_ci.depthBiasSlopeFactor = 0.0f;
    rasterization_ci.lineWidth = 1.0f;

    VkPipelineMultisampleStateCreateInfo multisample_ci;
    multisample_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
    multisample_ci.pNext = nullptr;
    multisample_ci.flags = 0;
    multisample_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
    multisample_ci.sampleShadingEnable = VK_FALSE;
    multisample_ci.minSampleShading = 0.0f;
    multisample_ci.pSampleMask = nullptr;
    multisample_ci.alphaToCoverageEnable = VK_FALSE;
    multisample_ci.alphaToOneEnable = VK_FALSE;

    VkPipelineDepthStencilStateCreateInfo depth_stencil_ci;
    depth_stencil_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
    depth_stencil_ci.pNext = nullptr;
    depth_stencil_ci.flags = 0;
    depth_stencil_ci.depthTestEnable = ds.depth_test_enable;
    depth_stencil_ci.depthWriteEnable = ds.depth_write_enable;
    depth_stencil_ci.depthCompareOp = ds.depth_test_enable
                                          ? MaxwellToVK::ComparisonOp(ds.depth_test_function)
                                          : VK_COMPARE_OP_ALWAYS;
    depth_stencil_ci.depthBoundsTestEnable = ds.depth_bounds_enable;
    depth_stencil_ci.stencilTestEnable = ds.stencil_enable;
    depth_stencil_ci.front = GetStencilFaceState(ds.front_stencil);
    depth_stencil_ci.back = GetStencilFaceState(ds.back_stencil);
    depth_stencil_ci.minDepthBounds = 0.0f;
    depth_stencil_ci.maxDepthBounds = 0.0f;

    std::array<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
    const std::size_t num_attachments =
        std::min(cd.attachments_count, renderpass_params.color_attachments.size());
    for (std::size_t i = 0; i < num_attachments; ++i) {
        static constexpr std::array component_table = {
            VK_COLOR_COMPONENT_R_BIT, VK_COLOR_COMPONENT_G_BIT, VK_COLOR_COMPONENT_B_BIT,
            VK_COLOR_COMPONENT_A_BIT};
        const auto& blend = cd.attachments[i];

        VkColorComponentFlags color_components = 0;
        for (std::size_t j = 0; j < component_table.size(); ++j) {
            if (blend.components[j]) {
                color_components |= component_table[j];
            }
        }

        VkPipelineColorBlendAttachmentState& attachment = cb_attachments[i];
        attachment.blendEnable = blend.enable;
        attachment.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.src_rgb_func);
        attachment.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.dst_rgb_func);
        attachment.colorBlendOp = MaxwellToVK::BlendEquation(blend.rgb_equation);
        attachment.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.src_a_func);
        attachment.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.dst_a_func);
attachment.alphaBlendOp = MaxwellToVK::BlendEquation(blend.a_equation);
attachment.colorWriteMask = color_components;
}
vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci; VkPipelineColorBlendStateCreateInfo color_blend_ci;
color_blend_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
color_blend_ci.pNext = nullptr;
color_blend_ci.flags = 0;
color_blend_ci.logicOpEnable = VK_FALSE;
color_blend_ci.logicOp = VK_LOGIC_OP_COPY;
color_blend_ci.attachmentCount = static_cast<u32>(num_attachments);
color_blend_ci.pAttachments = cb_attachments.data();
std::memset(color_blend_ci.blendConstants, 0, sizeof(color_blend_ci.blendConstants));
static constexpr std::array dynamic_states = {
VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
VK_DYNAMIC_STATE_DEPTH_BIAS, VK_DYNAMIC_STATE_BLEND_CONSTANTS,
VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, VK_DYNAMIC_STATE_STENCIL_REFERENCE};
VkPipelineDynamicStateCreateInfo dynamic_state_ci;
dynamic_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamic_state_ci.pNext = nullptr;
dynamic_state_ci.flags = 0;
dynamic_state_ci.dynamicStateCount = static_cast<u32>(dynamic_states.size());
dynamic_state_ci.pDynamicStates = dynamic_states.data();
VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
subgroup_size_ci.sType =
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT;
subgroup_size_ci.pNext = nullptr;
subgroup_size_ci.requiredSubgroupSize = GuestWarpSize; subgroup_size_ci.requiredSubgroupSize = GuestWarpSize;
std::vector<vk::PipelineShaderStageCreateInfo> shader_stages; std::vector<VkPipelineShaderStageCreateInfo> shader_stages;
std::size_t module_index = 0; std::size_t module_index = 0;
for (std::size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) { for (std::size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
if (!program[stage]) { if (!program[stage]) {
continue; continue;
} }
const auto stage_enum = static_cast<Tegra::Engines::ShaderType>(stage); VkPipelineShaderStageCreateInfo& stage_ci = shader_stages.emplace_back();
const auto vk_stage = MaxwellToVK::ShaderStage(stage_enum); stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
auto& stage_ci = shader_stages.emplace_back(vk::PipelineShaderStageCreateFlags{}, vk_stage, stage_ci.pNext = nullptr;
*modules[module_index++], "main", nullptr); stage_ci.flags = 0;
if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(vk_stage)) { stage_ci.stage = MaxwellToVK::ShaderStage(static_cast<Tegra::Engines::ShaderType>(stage));
stage_ci.module = *modules[module_index++];
stage_ci.pName = "main";
stage_ci.pSpecializationInfo = nullptr;
if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(stage_ci.stage)) {
stage_ci.pNext = &subgroup_size_ci; stage_ci.pNext = &subgroup_size_ci;
} }
} }
const vk::GraphicsPipelineCreateInfo create_info( VkGraphicsPipelineCreateInfo ci;
{}, static_cast<u32>(shader_stages.size()), shader_stages.data(), &vertex_input_ci, ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
&input_assembly_ci, &tessellation_ci, &viewport_ci, &rasterizer_ci, &multisampling_ci, ci.pNext = nullptr;
&depth_stencil_ci, &color_blending_ci, &dynamic_state_ci, *layout, renderpass, 0, {}, 0); ci.flags = 0;
ci.stageCount = static_cast<u32>(shader_stages.size());
const auto dev = device.GetLogical(); ci.pStages = shader_stages.data();
const auto& dld = device.GetDispatchLoader(); ci.pVertexInputState = &vertex_input_ci;
return dev.createGraphicsPipelineUnique(nullptr, create_info, nullptr, dld); ci.pInputAssemblyState = &input_assembly_ci;
ci.pTessellationState = &tessellation_ci;
ci.pViewportState = &viewport_ci;
ci.pRasterizationState = &rasterization_ci;
ci.pMultisampleState = &multisample_ci;
ci.pDepthStencilState = &depth_stencil_ci;
ci.pColorBlendState = &color_blend_ci;
ci.pDynamicState = &dynamic_state_ci;
ci.layout = *layout;
ci.renderPass = renderpass;
ci.subpass = 0;
ci.basePipelineHandle = nullptr;
ci.basePipelineIndex = 0;
return device.GetLogical().CreateGraphicsPipeline(ci);
} }
} // namespace Vulkan } // namespace Vulkan
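
Note on the pattern above: the removed Vulkan-Hpp constructors value-initialized every member, while the raw Vk* structs are trivially constructible, so each sType, pNext, flags and payload field now has to be written out by hand, and a forgotten field is uninitialized memory rather than zero. A minimal sketch of a helper that would restore value-initialization (hypothetical, not part of this change, which deliberately spells the fields out instead):

    #include <vulkan/vulkan.h>

    // Hypothetical helper: zero-initialize the struct (pNext, flags and all
    // payload members become 0/nullptr), then stamp the structure type tag.
    template <typename T>
    T MakeStruct(VkStructureType struct_type) {
        T result{};
        result.sType = struct_type;
        return result;
    }

    // Usage sketch:
    //   auto viewport_ci = MakeStruct<VkPipelineViewportStateCreateInfo>(
    //       VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO);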

View File

@ -11,12 +11,12 @@
 #include <vector>

 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/fixed_pipeline_state.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
 #include "video_core/renderer_vulkan/vk_renderpass_cache.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
 #include "video_core/renderer_vulkan/vk_shader_decompiler.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {
@ -39,36 +39,36 @@ public:
                        VKUpdateDescriptorQueue& update_descriptor_queue,
                        VKRenderPassCache& renderpass_cache,
                        const GraphicsPipelineCacheKey& key,
-                       const std::vector<vk::DescriptorSetLayoutBinding>& bindings,
+                       vk::Span<VkDescriptorSetLayoutBinding> bindings,
                        const SPIRVProgram& program);
     ~VKGraphicsPipeline();

-    vk::DescriptorSet CommitDescriptorSet();
+    VkDescriptorSet CommitDescriptorSet();

-    vk::Pipeline GetHandle() const {
+    VkPipeline GetHandle() const {
         return *pipeline;
     }

-    vk::PipelineLayout GetLayout() const {
+    VkPipelineLayout GetLayout() const {
         return *layout;
     }

-    vk::RenderPass GetRenderPass() const {
+    VkRenderPass GetRenderPass() const {
         return renderpass;
     }

 private:
-    UniqueDescriptorSetLayout CreateDescriptorSetLayout(
-        const std::vector<vk::DescriptorSetLayoutBinding>& bindings) const;
+    vk::DescriptorSetLayout CreateDescriptorSetLayout(
+        vk::Span<VkDescriptorSetLayoutBinding> bindings) const;

-    UniquePipelineLayout CreatePipelineLayout() const;
+    vk::PipelineLayout CreatePipelineLayout() const;

-    UniqueDescriptorUpdateTemplate CreateDescriptorUpdateTemplate(
-        const SPIRVProgram& program) const;
+    vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate(
+        const SPIRVProgram& program) const;

-    std::vector<UniqueShaderModule> CreateShaderModules(const SPIRVProgram& program) const;
+    std::vector<vk::ShaderModule> CreateShaderModules(const SPIRVProgram& program) const;

-    UniquePipeline CreatePipeline(const RenderPassParams& renderpass_params,
-                                  const SPIRVProgram& program) const;
+    vk::Pipeline CreatePipeline(const RenderPassParams& renderpass_params,
+                                const SPIRVProgram& program) const;

     const VKDevice& device;
@ -76,15 +76,15 @@ private:
     const FixedPipelineState fixed_state;
     const u64 hash;

-    UniqueDescriptorSetLayout descriptor_set_layout;
+    vk::DescriptorSetLayout descriptor_set_layout;
     DescriptorAllocator descriptor_allocator;
     VKUpdateDescriptorQueue& update_descriptor_queue;
-    UniquePipelineLayout layout;
-    UniqueDescriptorUpdateTemplate descriptor_template;
-    std::vector<UniqueShaderModule> modules;
+    vk::PipelineLayout layout;
+    vk::DescriptorUpdateTemplateKHR descriptor_template;
+    std::vector<vk::ShaderModule> modules;

-    vk::RenderPass renderpass;
-    UniquePipeline pipeline;
+    VkRenderPass renderpass;
+    vk::Pipeline pipeline;
 };

 } // namespace Vulkan
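
The Unique* aliases from the deleted declarations.h are replaced here by owning handle types from the project's new wrapper.h. As a rough mental model only (the real wrapper is more general and also carries the owning device and a dynamically loaded dispatch table), such a move-only handle can be sketched as:

    #include <utility>
    #include <vulkan/vulkan.h>

    // Sketch of a move-only owning handle; illustrative, not the wrapper.h code.
    class Pipeline {
    public:
        Pipeline() = default;
        Pipeline(VkDevice owner, VkPipeline pipeline) : device{owner}, handle{pipeline} {}
        Pipeline(const Pipeline&) = delete;
        Pipeline(Pipeline&& rhs) noexcept
            : device{rhs.device}, handle{std::exchange(rhs.handle, VK_NULL_HANDLE)} {}
        Pipeline& operator=(Pipeline&& rhs) noexcept {
            Release();
            device = rhs.device;
            handle = std::exchange(rhs.handle, VK_NULL_HANDLE);
            return *this;
        }
        ~Pipeline() {
            Release();
        }

        VkPipeline operator*() const {
            return handle; // mirrors the *pipeline / *layout accesses above
        }

    private:
        void Release() {
            if (handle != VK_NULL_HANDLE) {
                vkDestroyPipeline(device, handle, nullptr);
            }
        }

        VkDevice device = VK_NULL_HANDLE;
        VkPipeline handle = VK_NULL_HANDLE;
    };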

View File

@ -6,22 +6,21 @@
 #include <vector>

 #include "common/assert.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_image.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

-VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler,
-                 const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask)
+VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler, const VkImageCreateInfo& image_ci,
+                 VkImageAspectFlags aspect_mask)
     : device{device}, scheduler{scheduler}, format{image_ci.format}, aspect_mask{aspect_mask},
       image_num_layers{image_ci.arrayLayers}, image_num_levels{image_ci.mipLevels} {
     UNIMPLEMENTED_IF_MSG(image_ci.queueFamilyIndexCount != 0,
                          "Queue family tracking is not implemented");

-    const auto dev = device.GetLogical();
-    image = dev.createImageUnique(image_ci, nullptr, device.GetDispatchLoader());
+    image = device.GetLogical().CreateImage(image_ci);

     const u32 num_ranges = image_num_layers * image_num_levels;
     barriers.resize(num_ranges);
@ -31,8 +30,8 @@ VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler,
 VKImage::~VKImage() = default;

 void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
-                         vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access,
-                         vk::ImageLayout new_layout) {
+                         VkPipelineStageFlags new_stage_mask, VkAccessFlags new_access,
+                         VkImageLayout new_layout) {
     if (!HasChanged(base_layer, num_layers, base_level, num_levels, new_access, new_layout)) {
         return;
     }
@ -43,9 +42,21 @@ void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num
             const u32 layer = base_layer + layer_it;
             const u32 level = base_level + level_it;
             auto& state = GetSubrangeState(layer, level);
-            barriers[cursor] = vk::ImageMemoryBarrier(
-                state.access, new_access, state.layout, new_layout, VK_QUEUE_FAMILY_IGNORED,
-                VK_QUEUE_FAMILY_IGNORED, *image, {aspect_mask, level, 1, layer, 1});
+            auto& barrier = barriers[cursor];
+            barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+            barrier.pNext = nullptr;
+            barrier.srcAccessMask = state.access;
+            barrier.dstAccessMask = new_access;
+            barrier.oldLayout = state.layout;
+            barrier.newLayout = new_layout;
+            barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+            barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+            barrier.image = *image;
+            barrier.subresourceRange.aspectMask = aspect_mask;
+            barrier.subresourceRange.baseMipLevel = level;
+            barrier.subresourceRange.levelCount = 1;
+            barrier.subresourceRange.baseArrayLayer = layer;
+            barrier.subresourceRange.layerCount = 1;
             state.access = new_access;
             state.layout = new_layout;
         }
@ -53,16 +64,16 @@ void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([barriers = barriers, cursor](auto cmdbuf, auto& dld) {
+    scheduler.Record([barriers = barriers, cursor](vk::CommandBuffer cmdbuf) {
         // TODO(Rodrigo): Implement a way to use the latest stage across subresources.
-        constexpr auto stage_stub = vk::PipelineStageFlagBits::eAllCommands;
-        cmdbuf.pipelineBarrier(stage_stub, stage_stub, {}, 0, nullptr, 0, nullptr,
-                               static_cast<u32>(cursor), barriers.data(), dld);
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                               VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, {}, {},
+                               vk::Span(barriers.data(), cursor));
     });
 }

 bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
-                         vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept {
+                         VkAccessFlags new_access, VkImageLayout new_layout) noexcept {
     const bool is_full_range = base_layer == 0 && num_layers == image_num_layers &&
                                base_level == 0 && num_levels == image_num_levels;
     if (!is_full_range) {
@ -91,11 +102,21 @@ bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num
 void VKImage::CreatePresentView() {
     // Image type has to be 2D to be presented.
-    const vk::ImageViewCreateInfo image_view_ci({}, *image, vk::ImageViewType::e2D, format, {},
-                                                {aspect_mask, 0, 1, 0, 1});
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    present_view = dev.createImageViewUnique(image_view_ci, nullptr, dld);
+    VkImageViewCreateInfo image_view_ci;
+    image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+    image_view_ci.pNext = nullptr;
+    image_view_ci.flags = 0;
+    image_view_ci.image = *image;
+    image_view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
+    image_view_ci.format = format;
+    image_view_ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
+                                VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
+    image_view_ci.subresourceRange.aspectMask = aspect_mask;
+    image_view_ci.subresourceRange.baseMipLevel = 0;
+    image_view_ci.subresourceRange.levelCount = 1;
+    image_view_ci.subresourceRange.baseArrayLayer = 0;
+    image_view_ci.subresourceRange.layerCount = 1;
+    present_view = device.GetLogical().CreateImageView(image_view_ci);
 }

 VKImage::SubrangeState& VKImage::GetSubrangeState(u32 layer, u32 level) noexcept {
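
In raw Vulkan terms, the recorded lambda amounts to a single vkCmdPipelineBarrier call carrying only image barriers (a sketch; the vk::CommandBuffer wrapper presumably forwards the same arguments through its dispatch table):

    #include <vulkan/vulkan.h>

    // No global or buffer barriers are needed here, only the per-subresource
    // image barriers filled in above.
    void RecordTransition(VkCommandBuffer cmdbuf, const VkImageMemoryBarrier* barriers,
                          uint32_t num_barriers) {
        vkCmdPipelineBarrier(cmdbuf, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                             VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr,
                             num_barriers, barriers);
    }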

View File

@ -8,7 +8,7 @@
 #include <vector>

 #include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {
@ -18,16 +18,16 @@ class VKScheduler;
 class VKImage {
 public:
     explicit VKImage(const VKDevice& device, VKScheduler& scheduler,
-                     const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask);
+                     const VkImageCreateInfo& image_ci, VkImageAspectFlags aspect_mask);
     ~VKImage();

     /// Records in the passed command buffer an image transition and updates the state of the image.
     void Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
-                    vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access,
-                    vk::ImageLayout new_layout);
+                    VkPipelineStageFlags new_stage_mask, VkAccessFlags new_access,
+                    VkImageLayout new_layout);

     /// Returns a view compatible with presentation, the image has to be 2D.
-    vk::ImageView GetPresentView() {
+    VkImageView GetPresentView() {
         if (!present_view) {
             CreatePresentView();
         }
@ -35,28 +35,28 @@ public:
     }

     /// Returns the Vulkan image handler.
-    vk::Image GetHandle() const {
-        return *image;
+    const vk::Image& GetHandle() const {
+        return image;
     }

     /// Returns the Vulkan format for this image.
-    vk::Format GetFormat() const {
+    VkFormat GetFormat() const {
         return format;
     }

     /// Returns the Vulkan aspect mask.
-    vk::ImageAspectFlags GetAspectMask() const {
+    VkImageAspectFlags GetAspectMask() const {
         return aspect_mask;
     }

 private:
     struct SubrangeState final {
-        vk::AccessFlags access{};                             ///< Current access bits.
-        vk::ImageLayout layout = vk::ImageLayout::eUndefined; ///< Current image layout.
+        VkAccessFlags access = 0;                         ///< Current access bits.
+        VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED; ///< Current image layout.
     };

     bool HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
-                    vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept;
+                    VkAccessFlags new_access, VkImageLayout new_layout) noexcept;

     /// Creates a presentation view.
     void CreatePresentView();
@ -67,15 +67,15 @@ private:
     const VKDevice& device; ///< Device handler.
     VKScheduler& scheduler; ///< Device scheduler.
-    const vk::Format format;                ///< Vulkan format.
-    const vk::ImageAspectFlags aspect_mask; ///< Vulkan aspect mask.
+    const VkFormat format;                ///< Vulkan format.
+    const VkImageAspectFlags aspect_mask; ///< Vulkan aspect mask.
     const u32 image_num_layers; ///< Number of layers.
     const u32 image_num_levels; ///< Number of mipmap levels.

-    UniqueImage image;            ///< Image handle.
-    UniqueImageView present_view; ///< Image view compatible with presentation.
+    vk::Image image;            ///< Image handle.
+    vk::ImageView present_view; ///< Image view compatible with presentation.

-    std::vector<vk::ImageMemoryBarrier> barriers; ///< Pool of barriers.
+    std::vector<VkImageMemoryBarrier> barriers; ///< Pool of barriers.
     std::vector<SubrangeState> subrange_states;   ///< Current subrange state.
     bool state_diverged = false; ///< True when subresources mismatch in layout.
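
The per-subresource SubrangeState above is kept in a flat vector with one entry per (layer, level) pair. The usual flattening for that layout is sketched below; the formula is an assumption for illustration, not a quote of the project's accessor:

    #include <cstddef>
    #include <cstdint>

    // Assumed indexing: one state slot per (layer, level), layers outermost.
    std::size_t SubresourceIndex(std::uint32_t layer, std::uint32_t level,
                                 std::uint32_t num_levels) {
        return static_cast<std::size_t>(layer) * num_levels + level;
    }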

View File

@ -11,9 +11,9 @@
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_memory_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {
@ -30,17 +30,11 @@ u64 GetAllocationChunkSize(u64 required_size) {
 class VKMemoryAllocation final {
 public:
     explicit VKMemoryAllocation(const VKDevice& device, vk::DeviceMemory memory,
-                                vk::MemoryPropertyFlags properties, u64 allocation_size, u32 type)
-        : device{device}, memory{memory}, properties{properties}, allocation_size{allocation_size},
-          shifted_type{ShiftType(type)} {}
+                                VkMemoryPropertyFlags properties, u64 allocation_size, u32 type)
+        : device{device}, memory{std::move(memory)}, properties{properties},
+          allocation_size{allocation_size}, shifted_type{ShiftType(type)} {}

-    ~VKMemoryAllocation() {
-        const auto dev = device.GetLogical();
-        const auto& dld = device.GetDispatchLoader();
-        dev.free(memory, nullptr, dld);
-    }
-
-    VKMemoryCommit Commit(vk::DeviceSize commit_size, vk::DeviceSize alignment) {
+    VKMemoryCommit Commit(VkDeviceSize commit_size, VkDeviceSize alignment) {
         auto found = TryFindFreeSection(free_iterator, allocation_size,
                                         static_cast<u64>(commit_size), static_cast<u64>(alignment));
         if (!found) {
@ -73,9 +67,8 @@ public:
     }

     /// Returns whether this allocation is compatible with the arguments.
-    bool IsCompatible(vk::MemoryPropertyFlags wanted_properties, u32 type_mask) const {
-        return (wanted_properties & properties) != vk::MemoryPropertyFlagBits(0) &&
-               (type_mask & shifted_type) != 0;
+    bool IsCompatible(VkMemoryPropertyFlags wanted_properties, u32 type_mask) const {
+        return (wanted_properties & properties) && (type_mask & shifted_type) != 0;
     }

 private:
@ -113,7 +106,7 @@ private:
     const VKDevice& device;        ///< Vulkan device.
     const vk::DeviceMemory memory; ///< Vulkan memory allocation handler.
-    const vk::MemoryPropertyFlags properties; ///< Vulkan properties.
+    const VkMemoryPropertyFlags properties; ///< Vulkan properties.
     const u64 allocation_size; ///< Size of this allocation.
     const u32 shifted_type;    ///< Stored Vulkan type of this allocation, shifted.
@ -125,22 +118,20 @@ private:
 };

 VKMemoryManager::VKMemoryManager(const VKDevice& device)
-    : device{device}, properties{device.GetPhysical().getMemoryProperties(
-                          device.GetDispatchLoader())},
+    : device{device}, properties{device.GetPhysical().GetMemoryProperties()},
       is_memory_unified{GetMemoryUnified(properties)} {}

 VKMemoryManager::~VKMemoryManager() = default;

-VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& requirements,
+VKMemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements,
                                        bool host_visible) {
     const u64 chunk_size = GetAllocationChunkSize(requirements.size);

     // When a host visible commit is asked, search for host visible and coherent, otherwise search
     // for a fast device local type.
-    const vk::MemoryPropertyFlags wanted_properties =
-        host_visible
-            ? vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent
-            : vk::MemoryPropertyFlagBits::eDeviceLocal;
+    const VkMemoryPropertyFlags wanted_properties =
+        host_visible ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
+                     : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

     if (auto commit = TryAllocCommit(requirements, wanted_properties)) {
         return commit;
@ -161,23 +152,19 @@ VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& requirement
     return commit;
 }

-VKMemoryCommit VKMemoryManager::Commit(vk::Buffer buffer, bool host_visible) {
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    auto commit = Commit(dev.getBufferMemoryRequirements(buffer, dld), host_visible);
-    dev.bindBufferMemory(buffer, commit->GetMemory(), commit->GetOffset(), dld);
+VKMemoryCommit VKMemoryManager::Commit(const vk::Buffer& buffer, bool host_visible) {
+    auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), host_visible);
+    buffer.BindMemory(commit->GetMemory(), commit->GetOffset());
     return commit;
 }

-VKMemoryCommit VKMemoryManager::Commit(vk::Image image, bool host_visible) {
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    auto commit = Commit(dev.getImageMemoryRequirements(image, dld), host_visible);
-    dev.bindImageMemory(image, commit->GetMemory(), commit->GetOffset(), dld);
+VKMemoryCommit VKMemoryManager::Commit(const vk::Image& image, bool host_visible) {
+    auto commit = Commit(device.GetLogical().GetImageMemoryRequirements(*image), host_visible);
+    image.BindMemory(commit->GetMemory(), commit->GetOffset());
     return commit;
 }

-bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask,
+bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask,
                                   u64 size) {
     const u32 type = [&] {
         for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
@ -191,24 +178,26 @@ bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32
         return 0U;
     }();

-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-
     // Try to allocate found type.
-    const vk::MemoryAllocateInfo memory_ai(size, type);
-    vk::DeviceMemory memory;
-    if (const auto res = dev.allocateMemory(&memory_ai, nullptr, &memory, dld);
-        res != vk::Result::eSuccess) {
-        LOG_CRITICAL(Render_Vulkan, "Device allocation failed with code {}!", vk::to_string(res));
+    VkMemoryAllocateInfo memory_ai;
+    memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+    memory_ai.pNext = nullptr;
+    memory_ai.allocationSize = size;
+    memory_ai.memoryTypeIndex = type;
+
+    vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory(memory_ai);
+    if (!memory) {
+        LOG_CRITICAL(Render_Vulkan, "Device allocation failed!");
         return false;
     }

-    allocations.push_back(
-        std::make_unique<VKMemoryAllocation>(device, memory, wanted_properties, size, type));
+    allocations.push_back(std::make_unique<VKMemoryAllocation>(device, std::move(memory),
                                                               wanted_properties, size, type));
     return true;
 }

-VKMemoryCommit VKMemoryManager::TryAllocCommit(const vk::MemoryRequirements& requirements,
-                                               vk::MemoryPropertyFlags wanted_properties) {
+VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requirements,
+                                               VkMemoryPropertyFlags wanted_properties) {
     for (auto& allocation : allocations) {
         if (!allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) {
             continue;
@ -220,10 +209,9 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const vk::MemoryRequirements& req
     return {};
 }

-/*static*/ bool VKMemoryManager::GetMemoryUnified(
-    const vk::PhysicalDeviceMemoryProperties& properties) {
+bool VKMemoryManager::GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties) {
     for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) {
-        if (!(properties.memoryHeaps[heap_index].flags & vk::MemoryHeapFlagBits::eDeviceLocal)) {
+        if (!(properties.memoryHeaps[heap_index].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)) {
             // Memory is considered unified when heaps are device local only.
             return false;
         }
@ -232,23 +220,19 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const vk::MemoryRequirements& req
 }

 VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
-                                       vk::DeviceMemory memory, u64 begin, u64 end)
-    : device{device}, interval{begin, end}, memory{memory}, allocation{allocation} {}
+                                       const vk::DeviceMemory& memory, u64 begin, u64 end)
+    : device{device}, memory{memory}, interval{begin, end}, allocation{allocation} {}

 VKMemoryCommitImpl::~VKMemoryCommitImpl() {
     allocation->Free(this);
 }

 MemoryMap VKMemoryCommitImpl::Map(u64 size, u64 offset_) const {
-    const auto dev = device.GetLogical();
-    const auto address = reinterpret_cast<u8*>(
-        dev.mapMemory(memory, interval.first + offset_, size, {}, device.GetDispatchLoader()));
-    return MemoryMap{this, address};
+    return MemoryMap{this, memory.Map(interval.first + offset_, size)};
 }

 void VKMemoryCommitImpl::Unmap() const {
-    const auto dev = device.GetLogical();
-    dev.unmapMemory(memory, device.GetDispatchLoader());
+    memory.Unmap();
 }

 MemoryMap VKMemoryCommitImpl::Map() const {
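
The type-picking lambda in AllocMemory follows the pattern the Vulkan specification recommends: a memory type is usable when the resource's memoryTypeBits allow its index and its propertyFlags contain every wanted bit. A self-contained sketch of that pattern (the function name and return type are illustrative assumptions):

    #include <optional>
    #include <vulkan/vulkan.h>

    std::optional<uint32_t> FindMemoryType(const VkPhysicalDeviceMemoryProperties& props,
                                           uint32_t type_mask, VkMemoryPropertyFlags wanted) {
        for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
            const bool allowed_by_resource = (type_mask & (1u << i)) != 0;
            const bool has_all_wanted_bits =
                (props.memoryTypes[i].propertyFlags & wanted) == wanted;
            if (allowed_by_resource && has_all_wanted_bits) {
                return i;
            }
        }
        return std::nullopt; // caller falls back or fails the allocation
    }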

View File

@ -8,7 +8,7 @@
 #include <utility>
 #include <vector>

 #include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {
@ -32,13 +32,13 @@ public:
      * memory. When passing false, it will try to allocate device local memory.
      * @returns A memory commit.
      */
-    VKMemoryCommit Commit(const vk::MemoryRequirements& reqs, bool host_visible);
+    VKMemoryCommit Commit(const VkMemoryRequirements& reqs, bool host_visible);

     /// Commits memory required by the buffer and binds it.
-    VKMemoryCommit Commit(vk::Buffer buffer, bool host_visible);
+    VKMemoryCommit Commit(const vk::Buffer& buffer, bool host_visible);

     /// Commits memory required by the image and binds it.
-    VKMemoryCommit Commit(vk::Image image, bool host_visible);
+    VKMemoryCommit Commit(const vk::Image& image, bool host_visible);

     /// Returns true if the memory allocations are done always in host visible and coherent memory.
     bool IsMemoryUnified() const {
@ -47,17 +47,17 @@ public:
 private:
     /// Allocates a chunk of memory.
-    bool AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
+    bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);

     /// Tries to allocate a memory commit.
-    VKMemoryCommit TryAllocCommit(const vk::MemoryRequirements& requirements,
-                                  vk::MemoryPropertyFlags wanted_properties);
+    VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
+                                  VkMemoryPropertyFlags wanted_properties);

     /// Returns true if the device uses an unified memory model.
-    static bool GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& properties);
+    static bool GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties);

     const VKDevice& device; ///< Device handler.
-    const vk::PhysicalDeviceMemoryProperties properties; ///< Physical device properties.
+    const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
     const bool is_memory_unified; ///< True if memory model is unified.
     std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
 };
@ -68,7 +68,7 @@ class VKMemoryCommitImpl final {
 public:
     explicit VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
-                                vk::DeviceMemory memory, u64 begin, u64 end);
+                                const vk::DeviceMemory& memory, u64 begin, u64 end);
     ~VKMemoryCommitImpl();

     /// Maps a memory region and returns a pointer to it.
@ -80,13 +80,13 @@ public:
     MemoryMap Map() const;

     /// Returns the Vulkan memory handler.
-    vk::DeviceMemory GetMemory() const {
-        return memory;
+    VkDeviceMemory GetMemory() const {
+        return *memory;
     }

     /// Returns the start position of the commit relative to the allocation.
-    vk::DeviceSize GetOffset() const {
-        return static_cast<vk::DeviceSize>(interval.first);
+    VkDeviceSize GetOffset() const {
+        return static_cast<VkDeviceSize>(interval.first);
     }

 private:
@ -94,8 +94,8 @@ private:
     void Unmap() const;

     const VKDevice& device; ///< Vulkan device.
+    const vk::DeviceMemory& memory; ///< Vulkan device memory handler.
     std::pair<u64, u64> interval{}; ///< Interval where the commit exists.
-    vk::DeviceMemory memory; ///< Vulkan device memory handler.
     VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
 };
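
The member reorder at the end of VKMemoryCommitImpl is not cosmetic: C++ initializes members in declaration order, not in the order the constructor's initializer list names them, so the declaration of memory is moved to match the constructor above. A minimal example of the trap this convention avoids (a general C++ illustration, unrelated to the codebase):

    // a is declared first, so a{b + 1} runs before b{v} and reads b uninitialized.
    struct Example {
        Example(int v) : b{v}, a{b + 1} {}
        int a;
        int b;
    };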

View File

@ -13,7 +13,6 @@
 #include "video_core/engines/kepler_compute.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/memory_manager.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/fixed_pipeline_state.h"
 #include "video_core/renderer_vulkan/maxwell_to_vk.h"
 #include "video_core/renderer_vulkan/vk_compute_pipeline.h"
@ -26,6 +25,7 @@
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 #include "video_core/shader/compiler_settings.h"

 namespace Vulkan {
@ -36,12 +36,11 @@ using Tegra::Engines::ShaderType;

 namespace {

-// C++20's using enum
-constexpr auto eUniformBuffer = vk::DescriptorType::eUniformBuffer;
-constexpr auto eStorageBuffer = vk::DescriptorType::eStorageBuffer;
-constexpr auto eUniformTexelBuffer = vk::DescriptorType::eUniformTexelBuffer;
-constexpr auto eCombinedImageSampler = vk::DescriptorType::eCombinedImageSampler;
-constexpr auto eStorageImage = vk::DescriptorType::eStorageImage;
+constexpr VkDescriptorType UNIFORM_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+constexpr VkDescriptorType STORAGE_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+constexpr VkDescriptorType UNIFORM_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+constexpr VkDescriptorType COMBINED_IMAGE_SAMPLER = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;

 constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
     VideoCommon::Shader::CompileDepth::FullDecompile};
@ -126,32 +125,37 @@ ShaderType GetShaderType(Maxwell::ShaderProgram program) {
     }
 }

-template <vk::DescriptorType descriptor_type, class Container>
-void AddBindings(std::vector<vk::DescriptorSetLayoutBinding>& bindings, u32& binding,
-                 vk::ShaderStageFlags stage_flags, const Container& container) {
+template <VkDescriptorType descriptor_type, class Container>
+void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& binding,
+                 VkShaderStageFlags stage_flags, const Container& container) {
     const u32 num_entries = static_cast<u32>(std::size(container));
     for (std::size_t i = 0; i < num_entries; ++i) {
         u32 count = 1;
-        if constexpr (descriptor_type == eCombinedImageSampler) {
+        if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
             // Combined image samplers can be arrayed.
             count = container[i].Size();
         }
-        bindings.emplace_back(binding++, descriptor_type, count, stage_flags, nullptr);
+        VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
+        entry.binding = binding++;
+        entry.descriptorType = descriptor_type;
+        entry.descriptorCount = count;
+        entry.stageFlags = stage_flags;
+        entry.pImmutableSamplers = nullptr;
     }
 }

 u32 FillDescriptorLayout(const ShaderEntries& entries,
-                         std::vector<vk::DescriptorSetLayoutBinding>& bindings,
+                         std::vector<VkDescriptorSetLayoutBinding>& bindings,
                          Maxwell::ShaderProgram program_type, u32 base_binding) {
     const ShaderType stage = GetStageFromProgram(program_type);
-    const vk::ShaderStageFlags flags = MaxwellToVK::ShaderStage(stage);
+    const VkShaderStageFlags flags = MaxwellToVK::ShaderStage(stage);

     u32 binding = base_binding;
-    AddBindings<eUniformBuffer>(bindings, binding, flags, entries.const_buffers);
-    AddBindings<eStorageBuffer>(bindings, binding, flags, entries.global_buffers);
-    AddBindings<eUniformTexelBuffer>(bindings, binding, flags, entries.texel_buffers);
-    AddBindings<eCombinedImageSampler>(bindings, binding, flags, entries.samplers);
-    AddBindings<eStorageImage>(bindings, binding, flags, entries.images);
+    AddBindings<UNIFORM_BUFFER>(bindings, binding, flags, entries.const_buffers);
+    AddBindings<STORAGE_BUFFER>(bindings, binding, flags, entries.global_buffers);
+    AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.texel_buffers);
+    AddBindings<COMBINED_IMAGE_SAMPLER>(bindings, binding, flags, entries.samplers);
+    AddBindings<STORAGE_IMAGE>(bindings, binding, flags, entries.images);
     return binding;
 }
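
Downstream, the bindings collected by FillDescriptorLayout feed the standard layout-creation call. A raw-Vulkan sketch of that step (the project routes it through wrapper.h instead, and error handling is elided here):

    #include <vector>
    #include <vulkan/vulkan.h>

    VkDescriptorSetLayout MakeDescriptorSetLayout(
        VkDevice device, const std::vector<VkDescriptorSetLayoutBinding>& bindings) {
        VkDescriptorSetLayoutCreateInfo ci{};
        ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        ci.bindingCount = static_cast<uint32_t>(bindings.size());
        ci.pBindings = bindings.data();

        VkDescriptorSetLayout layout = VK_NULL_HANDLE;
        vkCreateDescriptorSetLayout(device, &ci, nullptr, &layout); // check the VkResult in real code
        return layout;
    }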
@ -318,7 +322,7 @@ void VKPipelineCache::Unregister(const Shader& shader) {
     RasterizerCache::Unregister(shader);
 }

-std::pair<SPIRVProgram, std::vector<vk::DescriptorSetLayoutBinding>>
+std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>>
 VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
     const auto& fixed_state = key.fixed_state;
     auto& memory_manager = system.GPU().MemoryManager();
@ -335,7 +339,7 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
     specialization.ndc_minus_one_to_one = fixed_state.rasterizer.ndc_minus_one_to_one;

     SPIRVProgram program;
-    std::vector<vk::DescriptorSetLayoutBinding> bindings;
+    std::vector<VkDescriptorSetLayoutBinding> bindings;

     for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
         const auto program_enum = static_cast<Maxwell::ShaderProgram>(index);
@ -371,32 +375,49 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
     return {std::move(program), std::move(bindings)};
 }

-template <vk::DescriptorType descriptor_type, class Container>
-void AddEntry(std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries, u32& binding,
+template <VkDescriptorType descriptor_type, class Container>
+void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u32& binding,
               u32& offset, const Container& container) {
     static constexpr u32 entry_size = static_cast<u32>(sizeof(DescriptorUpdateEntry));
     const u32 count = static_cast<u32>(std::size(container));

-    if constexpr (descriptor_type == eCombinedImageSampler) {
+    if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
         for (u32 i = 0; i < count; ++i) {
             const u32 num_samplers = container[i].Size();
-            template_entries.emplace_back(binding, 0, num_samplers, descriptor_type, offset,
-                                          entry_size);
+            VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
+            entry.dstBinding = binding;
+            entry.dstArrayElement = 0;
+            entry.descriptorCount = num_samplers;
+            entry.descriptorType = descriptor_type;
+            entry.offset = offset;
+            entry.stride = entry_size;

             ++binding;
             offset += num_samplers * entry_size;
         }
         return;
     }

-    if constexpr (descriptor_type == eUniformTexelBuffer) {
+    if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER) {
         // Nvidia has a bug where updating multiple uniform texels at once causes the driver to
         // crash.
         for (u32 i = 0; i < count; ++i) {
-            template_entries.emplace_back(binding + i, 0, 1, descriptor_type,
-                                          offset + i * entry_size, entry_size);
+            VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
+            entry.dstBinding = binding + i;
+            entry.dstArrayElement = 0;
+            entry.descriptorCount = 1;
+            entry.descriptorType = descriptor_type;
+            entry.offset = offset + i * entry_size;
+            entry.stride = entry_size;
         }
     } else if (count > 0) {
-        template_entries.emplace_back(binding, 0, count, descriptor_type, offset, entry_size);
+        VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
+        entry.dstBinding = binding;
+        entry.dstArrayElement = 0;
+        entry.descriptorCount = count;
+        entry.descriptorType = descriptor_type;
+        entry.offset = offset;
+        entry.stride = entry_size;
     }
     offset += count * entry_size;
     binding += count;
@ -404,12 +425,12 @@ void AddEntry(std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries,

 void FillDescriptorUpdateTemplateEntries(
     const ShaderEntries& entries, u32& binding, u32& offset,
-    std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries) {
-    AddEntry<eUniformBuffer>(template_entries, offset, binding, entries.const_buffers);
-    AddEntry<eStorageBuffer>(template_entries, offset, binding, entries.global_buffers);
-    AddEntry<eUniformTexelBuffer>(template_entries, offset, binding, entries.texel_buffers);
-    AddEntry<eCombinedImageSampler>(template_entries, offset, binding, entries.samplers);
-    AddEntry<eStorageImage>(template_entries, offset, binding, entries.images);
+    std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries) {
+    AddEntry<UNIFORM_BUFFER>(template_entries, offset, binding, entries.const_buffers);
+    AddEntry<STORAGE_BUFFER>(template_entries, offset, binding, entries.global_buffers);
+    AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.texel_buffers);
+    AddEntry<COMBINED_IMAGE_SAMPLER>(template_entries, offset, binding, entries.samplers);
+    AddEntry<STORAGE_IMAGE>(template_entries, offset, binding, entries.images);
 }

 } // namespace Vulkan
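
Each template entry describes where the descriptors of one binding live inside a packed payload buffer: descriptorCount descriptors are read starting at offset, stepping by stride bytes. At update time the whole set is then written with a single call (a sketch; in this codebase the call goes through the wrapper, and the VK_KHR_descriptor_update_template entry point is normally loaded at runtime rather than called through the prototype):

    #include <vulkan/vulkan.h>

    void ApplyTemplate(VkDevice device, VkDescriptorSet set,
                       VkDescriptorUpdateTemplateKHR update_template, const void* payload) {
        // One call replaces a batch of individual vkUpdateDescriptorSets writes.
        vkUpdateDescriptorSetWithTemplateKHR(device, set, update_template, payload);
    }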

View File

@ -19,12 +19,12 @@
 #include "video_core/engines/const_buffer_engine_interface.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/rasterizer_cache.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/fixed_pipeline_state.h"
 #include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
 #include "video_core/renderer_vulkan/vk_renderpass_cache.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
 #include "video_core/renderer_vulkan/vk_shader_decompiler.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 #include "video_core/shader/registry.h"
 #include "video_core/shader/shader_ir.h"
 #include "video_core/surface.h"
@ -172,7 +172,7 @@ protected:
     void FlushObjectInner(const Shader& object) override {}

 private:
-    std::pair<SPIRVProgram, std::vector<vk::DescriptorSetLayoutBinding>> DecompileShaders(
+    std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> DecompileShaders(
         const GraphicsPipelineCacheKey& key);

     Core::System& system;
@ -194,6 +194,6 @@ private:
 void FillDescriptorUpdateTemplateEntries(
     const ShaderEntries& entries, u32& binding, u32& offset,
-    std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries);
+    std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries);

 } // namespace Vulkan

View File

@ -8,19 +8,19 @@
 #include <utility>
 #include <vector>

-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_query_cache.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

 namespace {

-constexpr std::array QUERY_TARGETS = {vk::QueryType::eOcclusion};
+constexpr std::array QUERY_TARGETS = {VK_QUERY_TYPE_OCCLUSION};

-constexpr vk::QueryType GetTarget(VideoCore::QueryType type) {
+constexpr VkQueryType GetTarget(VideoCore::QueryType type) {
     return QUERY_TARGETS[static_cast<std::size_t>(type)];
 }
@ -35,29 +35,34 @@ void QueryPool::Initialize(const VKDevice& device_, VideoCore::QueryType type_)
     type = type_;
 }

-std::pair<vk::QueryPool, std::uint32_t> QueryPool::Commit(VKFence& fence) {
+std::pair<VkQueryPool, u32> QueryPool::Commit(VKFence& fence) {
     std::size_t index;
     do {
         index = CommitResource(fence);
     } while (usage[index]);
     usage[index] = true;

-    return {*pools[index / GROW_STEP], static_cast<std::uint32_t>(index % GROW_STEP)};
+    return {*pools[index / GROW_STEP], static_cast<u32>(index % GROW_STEP)};
 }

 void QueryPool::Allocate(std::size_t begin, std::size_t end) {
     usage.resize(end);

-    const auto dev = device->GetLogical();
-    const u32 size = static_cast<u32>(end - begin);
-    const vk::QueryPoolCreateInfo query_pool_ci({}, GetTarget(type), size, {});
-    pools.push_back(dev.createQueryPoolUnique(query_pool_ci, nullptr, device->GetDispatchLoader()));
+    VkQueryPoolCreateInfo query_pool_ci;
+    query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
+    query_pool_ci.pNext = nullptr;
+    query_pool_ci.flags = 0;
+    query_pool_ci.queryType = GetTarget(type);
+    query_pool_ci.queryCount = static_cast<u32>(end - begin);
+    query_pool_ci.pipelineStatistics = 0;
+    pools.push_back(device->GetLogical().CreateQueryPool(query_pool_ci));
 }

-void QueryPool::Reserve(std::pair<vk::QueryPool, std::uint32_t> query) {
+void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
     const auto it =
-        std::find_if(std::begin(pools), std::end(pools),
-                     [query_pool = query.first](auto& pool) { return query_pool == *pool; });
+        std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
+            return query_pool == *pool;
+        });
     ASSERT(it != std::end(pools));

     const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
@ -76,12 +81,11 @@ VKQueryCache::VKQueryCache(Core::System& system, VideoCore::RasterizerInterface&

 VKQueryCache::~VKQueryCache() = default;

-std::pair<vk::QueryPool, std::uint32_t> VKQueryCache::AllocateQuery(VideoCore::QueryType type) {
+std::pair<VkQueryPool, u32> VKQueryCache::AllocateQuery(VideoCore::QueryType type) {
     return query_pools[static_cast<std::size_t>(type)].Commit(scheduler.GetFence());
 }

-void VKQueryCache::Reserve(VideoCore::QueryType type,
-                           std::pair<vk::QueryPool, std::uint32_t> query) {
+void VKQueryCache::Reserve(VideoCore::QueryType type, std::pair<VkQueryPool, u32> query) {
     query_pools[static_cast<std::size_t>(type)].Reserve(query);
 }
@ -89,10 +93,10 @@ HostCounter::HostCounter(VKQueryCache& cache, std::shared_ptr<HostCounter> depen
                          VideoCore::QueryType type)
     : VideoCommon::HostCounterBase<VKQueryCache, HostCounter>{std::move(dependency)}, cache{cache},
       type{type}, query{cache.AllocateQuery(type)}, ticks{cache.Scheduler().Ticks()} {
-    const auto dev = cache.Device().GetLogical();
-    cache.Scheduler().Record([dev, query = query](vk::CommandBuffer cmdbuf, auto& dld) {
-        dev.resetQueryPoolEXT(query.first, query.second, 1, dld);
-        cmdbuf.beginQuery(query.first, query.second, vk::QueryControlFlagBits::ePrecise, dld);
+    const vk::Device* logical = &cache.Device().GetLogical();
+    cache.Scheduler().Record([logical, query = query](vk::CommandBuffer cmdbuf) {
+        logical->ResetQueryPoolEXT(query.first, query.second, 1);
+        cmdbuf.BeginQuery(query.first, query.second, VK_QUERY_CONTROL_PRECISE_BIT);
     });
 }
@ -101,22 +105,16 @@ HostCounter::~HostCounter() {
 }

 void HostCounter::EndQuery() {
-    cache.Scheduler().Record([query = query](auto cmdbuf, auto& dld) {
-        cmdbuf.endQuery(query.first, query.second, dld);
-    });
+    cache.Scheduler().Record(
+        [query = query](vk::CommandBuffer cmdbuf) { cmdbuf.EndQuery(query.first, query.second); });
 }

 u64 HostCounter::BlockingQuery() const {
     if (ticks >= cache.Scheduler().Ticks()) {
         cache.Scheduler().Flush();
     }
-
-    const auto dev = cache.Device().GetLogical();
-    const auto& dld = cache.Device().GetDispatchLoader();
-    u64 value;
-    dev.getQueryPoolResults(query.first, query.second, 1, sizeof(value), &value, sizeof(value),
-                            vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait, dld);
-    return value;
+    return cache.Device().GetLogical().GetQueryResult<u64>(
+        query.first, query.second, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
 }

 } // namespace Vulkan
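
For reference, the full occlusion-query round trip in raw Vulkan terms (a sketch; the cache above spreads these steps across scheduler callbacks and resets from the host through VK_EXT_host_query_reset instead of the in-command-buffer reset used here):

    #include <cstdint>
    #include <vulkan/vulkan.h>

    uint64_t RunOcclusionQuery(VkDevice device, VkCommandBuffer cmdbuf, VkQueryPool pool,
                               uint32_t query) {
        vkCmdResetQueryPool(cmdbuf, pool, query, 1);                        // queries must be reset before reuse
        vkCmdBeginQuery(cmdbuf, pool, query, VK_QUERY_CONTROL_PRECISE_BIT); // exact passed-sample counts
        // ... record the draws to be counted ...
        vkCmdEndQuery(cmdbuf, pool, query);

        // After the command buffer has been submitted:
        uint64_t result = 0;
        vkGetQueryPoolResults(device, pool, query, 1, sizeof(result), &result, sizeof(result),
                              VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
        return result;
    }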

View File

@@ -12,8 +12,8 @@

#include "common/common_types.h"
#include "video_core/query_cache.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"

namespace VideoCore {
class RasterizerInterface;
@@ -36,9 +36,9 @@ public:
    void Initialize(const VKDevice& device, VideoCore::QueryType type);

-    std::pair<vk::QueryPool, std::uint32_t> Commit(VKFence& fence);
+    std::pair<VkQueryPool, u32> Commit(VKFence& fence);

-    void Reserve(std::pair<vk::QueryPool, std::uint32_t> query);
+    void Reserve(std::pair<VkQueryPool, u32> query);

protected:
    void Allocate(std::size_t begin, std::size_t end) override;
@@ -49,7 +49,7 @@ private:
    const VKDevice* device = nullptr;
    VideoCore::QueryType type = {};

-    std::vector<UniqueQueryPool> pools;
+    std::vector<vk::QueryPool> pools;
    std::vector<bool> usage;
};

@@ -61,9 +61,9 @@ public:
                          const VKDevice& device, VKScheduler& scheduler);
    ~VKQueryCache();

-    std::pair<vk::QueryPool, std::uint32_t> AllocateQuery(VideoCore::QueryType type);
+    std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type);

-    void Reserve(VideoCore::QueryType type, std::pair<vk::QueryPool, std::uint32_t> query);
+    void Reserve(VideoCore::QueryType type, std::pair<VkQueryPool, u32> query);

    const VKDevice& Device() const noexcept {
        return device;
@@ -91,7 +91,7 @@ private:
    VKQueryCache& cache;
    const VideoCore::QueryType type;
-    const std::pair<vk::QueryPool, std::uint32_t> query;
+    const std::pair<VkQueryPool, u32> query;
    const u64 ticks;
};
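Note: UniqueQueryPool came from Vulkan-Hpp; the new vk::QueryPool is the project's own owning handle. A minimal sketch of that RAII pattern, assuming only the statically linked C entry points (the real wrapper also threads a dispatch table through):

#include <utility>
#include <vulkan/vulkan.h>

// Owning handle sketch: destroys the pool when it goes out of scope, is
// movable but not copyable, and still exposes the raw handle via operator*.
class OwnedQueryPool {
public:
    OwnedQueryPool() = default;
    OwnedQueryPool(VkDevice device, VkQueryPool handle) : device{device}, handle{handle} {}

    ~OwnedQueryPool() {
        if (handle != VK_NULL_HANDLE) {
            vkDestroyQueryPool(device, handle, nullptr);
        }
    }

    OwnedQueryPool(OwnedQueryPool&& rhs) noexcept
        : device{rhs.device}, handle{std::exchange(rhs.handle, VK_NULL_HANDLE)} {}

    OwnedQueryPool& operator=(OwnedQueryPool&& rhs) noexcept {
        std::swap(device, rhs.device);
        std::swap(handle, rhs.handle);
        return *this;
    }

    OwnedQueryPool(const OwnedQueryPool&) = delete;
    OwnedQueryPool& operator=(const OwnedQueryPool&) = delete;

    VkQueryPool operator*() const noexcept {
        return handle;
    }

private:
    VkDevice device = VK_NULL_HANDLE;
    VkQueryPool handle = VK_NULL_HANDLE;
};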
View File
@@ -19,7 +19,6 @@
#include "core/memory.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/renderer_vulkan.h"
@@ -39,6 +38,7 @@
#include "video_core/renderer_vulkan/vk_state_tracker.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"

namespace Vulkan {
@@ -60,32 +60,39 @@ namespace {
constexpr auto ComputeShaderIndex = static_cast<std::size_t>(Tegra::Engines::ShaderType::Compute);

-vk::Viewport GetViewportState(const VKDevice& device, const Maxwell& regs, std::size_t index) {
-    const auto& viewport = regs.viewport_transform[index];
-    const float x = viewport.translate_x - viewport.scale_x;
-    const float y = viewport.translate_y - viewport.scale_y;
-    const float width = viewport.scale_x * 2.0f;
-    const float height = viewport.scale_y * 2.0f;
+VkViewport GetViewportState(const VKDevice& device, const Maxwell& regs, std::size_t index) {
+    const auto& src = regs.viewport_transform[index];
+    VkViewport viewport;
+    viewport.x = src.translate_x - src.scale_x;
+    viewport.y = src.translate_y - src.scale_y;
+    viewport.width = src.scale_x * 2.0f;
+    viewport.height = src.scale_y * 2.0f;

    const float reduce_z = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne;
-    float near = viewport.translate_z - viewport.scale_z * reduce_z;
-    float far = viewport.translate_z + viewport.scale_z;
+    viewport.minDepth = src.translate_z - src.scale_z * reduce_z;
+    viewport.maxDepth = src.translate_z + src.scale_z;
    if (!device.IsExtDepthRangeUnrestrictedSupported()) {
-        near = std::clamp(near, 0.0f, 1.0f);
-        far = std::clamp(far, 0.0f, 1.0f);
+        viewport.minDepth = std::clamp(viewport.minDepth, 0.0f, 1.0f);
+        viewport.maxDepth = std::clamp(viewport.maxDepth, 0.0f, 1.0f);
    }
-
-    return vk::Viewport(x, y, width != 0 ? width : 1.0f, height != 0 ? height : 1.0f, near, far);
+    return viewport;
}

-constexpr vk::Rect2D GetScissorState(const Maxwell& regs, std::size_t index) {
-    const auto& scissor = regs.scissor_test[index];
-    if (!scissor.enable) {
-        return {{0, 0}, {INT32_MAX, INT32_MAX}};
+VkRect2D GetScissorState(const Maxwell& regs, std::size_t index) {
+    const auto& src = regs.scissor_test[index];
+    VkRect2D scissor;
+    if (src.enable) {
+        scissor.offset.x = static_cast<s32>(src.min_x);
+        scissor.offset.y = static_cast<s32>(src.min_y);
+        scissor.extent.width = src.max_x - src.min_x;
+        scissor.extent.height = src.max_y - src.min_y;
+    } else {
+        scissor.offset.x = 0;
+        scissor.offset.y = 0;
+        scissor.extent.width = std::numeric_limits<s32>::max();
+        scissor.extent.height = std::numeric_limits<s32>::max();
    }
-    const u32 width = scissor.max_x - scissor.min_x;
-    const u32 height = scissor.max_y - scissor.min_y;
-    return {{static_cast<s32>(scissor.min_x), static_cast<s32>(scissor.min_y)}, {width, height}};
+    return scissor;
}

std::array<GPUVAddr, Maxwell::MaxShaderProgram> GetShaderAddresses(
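Note: the viewport conversion itself is unchanged by the rewrite: Maxwell stores viewports as center (translate) plus half-extent (scale), while Vulkan wants corner plus extent. A worked example with illustrative values:

#include <vulkan/vulkan.h>

// Illustrative values, not from a real title: a 1280x720 viewport centered at
// (640, 360) is stored by Maxwell as translate = center, scale = half-extent.
inline VkViewport MakeExampleViewport() {
    const float translate_x = 640.0f, translate_y = 360.0f;
    const float scale_x = 640.0f, scale_y = 360.0f;

    VkViewport viewport{};
    viewport.x = translate_x - scale_x; // 640 - 640 = 0
    viewport.y = translate_y - scale_y; // 360 - 360 = 0
    viewport.width = scale_x * 2.0f;    // 1280
    viewport.height = scale_y * 2.0f;   // 720
    return viewport;
}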
@@ -97,8 +104,8 @@ std::array<GPUVAddr, Maxwell::MaxShaderProgram> GetShaderAddresses(
    return addresses;
}

-void TransitionImages(const std::vector<ImageView>& views, vk::PipelineStageFlags pipeline_stage,
-                      vk::AccessFlags access) {
+void TransitionImages(const std::vector<ImageView>& views, VkPipelineStageFlags pipeline_stage,
+                      VkAccessFlags access) {
    for (auto& [view, layout] : views) {
        view->Transition(*layout, pipeline_stage, access);
    }
@@ -127,13 +134,13 @@ Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry
class BufferBindings final {
public:
-    void AddVertexBinding(const vk::Buffer* buffer, vk::DeviceSize offset) {
+    void AddVertexBinding(const VkBuffer* buffer, VkDeviceSize offset) {
        vertex.buffer_ptrs[vertex.num_buffers] = buffer;
        vertex.offsets[vertex.num_buffers] = offset;
        ++vertex.num_buffers;
    }

-    void SetIndexBinding(const vk::Buffer* buffer, vk::DeviceSize offset, vk::IndexType type) {
+    void SetIndexBinding(const VkBuffer* buffer, VkDeviceSize offset, VkIndexType type) {
        index.buffer = buffer;
        index.offset = offset;
        index.type = type;
@@ -217,14 +224,14 @@ private:
    // Some of these fields are intentionally left uninitialized to avoid initializing them twice.
    struct {
        std::size_t num_buffers = 0;
-        std::array<const vk::Buffer*, Maxwell::NumVertexArrays> buffer_ptrs;
-        std::array<vk::DeviceSize, Maxwell::NumVertexArrays> offsets;
+        std::array<const VkBuffer*, Maxwell::NumVertexArrays> buffer_ptrs;
+        std::array<VkDeviceSize, Maxwell::NumVertexArrays> offsets;
    } vertex;

    struct {
-        const vk::Buffer* buffer = nullptr;
-        vk::DeviceSize offset;
-        vk::IndexType type;
+        const VkBuffer* buffer = nullptr;
+        VkDeviceSize offset;
+        VkIndexType type;
    } index;

    template <std::size_t N>
@@ -243,38 +250,35 @@ private:
            return;
        }

-        std::array<vk::Buffer, N> buffers;
+        std::array<VkBuffer, N> buffers;
        std::transform(vertex.buffer_ptrs.begin(), vertex.buffer_ptrs.begin() + N, buffers.begin(),
                       [](const auto ptr) { return *ptr; });

-        std::array<vk::DeviceSize, N> offsets;
+        std::array<VkDeviceSize, N> offsets;
        std::copy(vertex.offsets.begin(), vertex.offsets.begin() + N, offsets.begin());

        if constexpr (is_indexed) {
            // Indexed draw
            scheduler.Record([buffers, offsets, index_buffer = *index.buffer,
                              index_offset = index.offset,
-                              index_type = index.type](auto cmdbuf, auto& dld) {
-                cmdbuf.bindIndexBuffer(index_buffer, index_offset, index_type, dld);
-                cmdbuf.bindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data(),
-                                         dld);
+                              index_type = index.type](vk::CommandBuffer cmdbuf) {
+                cmdbuf.BindIndexBuffer(index_buffer, index_offset, index_type);
+                cmdbuf.BindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data());
            });
        } else {
            // Array draw
-            scheduler.Record([buffers, offsets](auto cmdbuf, auto& dld) {
-                cmdbuf.bindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data(),
-                                         dld);
+            scheduler.Record([buffers, offsets](vk::CommandBuffer cmdbuf) {
+                cmdbuf.BindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data());
            });
        }
    }
};

-void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf,
-                                            const vk::DispatchLoaderDynamic& dld) const {
+void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf) const {
    if (is_indexed) {
-        cmdbuf.drawIndexed(num_vertices, num_instances, 0, base_vertex, base_instance, dld);
+        cmdbuf.DrawIndexed(num_vertices, num_instances, 0, base_vertex, base_instance);
    } else {
-        cmdbuf.draw(num_vertices, num_instances, base_vertex, base_instance, dld);
+        cmdbuf.Draw(num_vertices, num_instances, base_vertex, base_instance);
    }
}
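Note: recording lambdas now receive a single vk::CommandBuffer because the wrapper object carries its dispatch table internally, which is what removes the `auto& dld` parameter throughout this file. A sketch of that idea with hypothetical names (the real wrapper.h is more complete):

#include <cstdint>
#include <vulkan/vulkan.h>

// Hypothetical dispatch table: function pointers resolved once per device.
struct DeviceDispatch {
    PFN_vkCmdDraw vkCmdDraw = nullptr;
    PFN_vkCmdDrawIndexed vkCmdDrawIndexed = nullptr;
};

// Non-owning command buffer wrapper: bundles the handle with its dispatch
// table so recording lambdas no longer need a separate loader argument.
class CommandBufferSketch {
public:
    CommandBufferSketch(VkCommandBuffer handle, const DeviceDispatch& dld)
        : handle{handle}, dld{&dld} {}

    void Draw(uint32_t num_vertices, uint32_t num_instances, uint32_t first_vertex,
              uint32_t first_instance) const {
        dld->vkCmdDraw(handle, num_vertices, num_instances, first_vertex, first_instance);
    }

    void DrawIndexed(uint32_t num_indices, uint32_t num_instances, uint32_t first_index,
                     int32_t vertex_offset, uint32_t first_instance) const {
        dld->vkCmdDrawIndexed(handle, num_indices, num_instances, first_index, vertex_offset,
                              first_instance);
    }

private:
    VkCommandBuffer handle;
    const DeviceDispatch* dld;
};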
@@ -337,7 +341,7 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
    const auto renderpass = pipeline.GetRenderPass();
    const auto [framebuffer, render_area] = ConfigureFramebuffers(renderpass);
-    scheduler.RequestRenderpass({renderpass, framebuffer, {{0, 0}, render_area}, 0, nullptr});
+    scheduler.RequestRenderpass(renderpass, framebuffer, render_area);

    UpdateDynamicStates();

@@ -345,19 +349,19 @@
    if (device.IsNvDeviceDiagnosticCheckpoints()) {
        scheduler.Record(
-            [&pipeline](auto cmdbuf, auto& dld) { cmdbuf.setCheckpointNV(&pipeline, dld); });
+            [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(&pipeline); });
    }

    BeginTransformFeedback();

    const auto pipeline_layout = pipeline.GetLayout();
    const auto descriptor_set = pipeline.CommitDescriptorSet();
-    scheduler.Record([pipeline_layout, descriptor_set, draw_params](auto cmdbuf, auto& dld) {
+    scheduler.Record([pipeline_layout, descriptor_set, draw_params](vk::CommandBuffer cmdbuf) {
        if (descriptor_set) {
-            cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline_layout,
-                                      DESCRIPTOR_SET, 1, &descriptor_set, 0, nullptr, dld);
+            cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
+                                      DESCRIPTOR_SET, descriptor_set, {});
        }
-        draw_params.Draw(cmdbuf, dld);
+        draw_params.Draw(cmdbuf);
    });

    EndTransformFeedback();

@@ -389,48 +393,54 @@ void RasterizerVulkan::Clear() {
    DEBUG_ASSERT(texceptions.none());
    SetupImageTransitions(0, color_attachments, zeta_attachment);

-    const vk::RenderPass renderpass = renderpass_cache.GetRenderPass(GetRenderPassParams(0));
+    const VkRenderPass renderpass = renderpass_cache.GetRenderPass(GetRenderPassParams(0));
    const auto [framebuffer, render_area] = ConfigureFramebuffers(renderpass);
-    scheduler.RequestRenderpass({renderpass, framebuffer, {{0, 0}, render_area}, 0, nullptr});
+    scheduler.RequestRenderpass(renderpass, framebuffer, render_area);

-    const auto& scissor = regs.scissor_test[0];
-    const vk::Offset2D scissor_offset(scissor.min_x, scissor.min_y);
-    vk::Extent2D scissor_extent{scissor.max_x - scissor.min_x, scissor.max_y - scissor.min_y};
-    scissor_extent.width = std::min(scissor_extent.width, render_area.width);
-    scissor_extent.height = std::min(scissor_extent.height, render_area.height);
-
-    const u32 layer = regs.clear_buffers.layer;
-    const vk::ClearRect clear_rect({scissor_offset, scissor_extent}, layer, 1);
+    VkClearRect clear_rect;
+    clear_rect.baseArrayLayer = regs.clear_buffers.layer;
+    clear_rect.layerCount = 1;
+    clear_rect.rect = GetScissorState(regs, 0);
+    clear_rect.rect.extent.width = std::min(clear_rect.rect.extent.width, render_area.width);
+    clear_rect.rect.extent.height = std::min(clear_rect.rect.extent.height, render_area.height);

    if (use_color) {
-        const std::array clear_color = {regs.clear_color[0], regs.clear_color[1],
-                                        regs.clear_color[2], regs.clear_color[3]};
-        const vk::ClearValue clear_value{clear_color};
+        VkClearValue clear_value;
+        std::memcpy(clear_value.color.float32, regs.clear_color, sizeof(regs.clear_color));
+
        const u32 color_attachment = regs.clear_buffers.RT;
-        scheduler.Record([color_attachment, clear_value, clear_rect](auto cmdbuf, auto& dld) {
-            const vk::ClearAttachment attachment(vk::ImageAspectFlagBits::eColor, color_attachment,
-                                                 clear_value);
-            cmdbuf.clearAttachments(1, &attachment, 1, &clear_rect, dld);
+        scheduler.Record([color_attachment, clear_value, clear_rect](vk::CommandBuffer cmdbuf) {
+            VkClearAttachment attachment;
+            attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+            attachment.colorAttachment = color_attachment;
+            attachment.clearValue = clear_value;
+            cmdbuf.ClearAttachments(attachment, clear_rect);
        });
    }

    if (!use_depth && !use_stencil) {
        return;
    }
-    vk::ImageAspectFlags aspect_flags;
+    VkImageAspectFlags aspect_flags = 0;
    if (use_depth) {
-        aspect_flags |= vk::ImageAspectFlagBits::eDepth;
+        aspect_flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
    }
    if (use_stencil) {
-        aspect_flags |= vk::ImageAspectFlagBits::eStencil;
+        aspect_flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
    }

    scheduler.Record([clear_depth = regs.clear_depth, clear_stencil = regs.clear_stencil,
-                      clear_rect, aspect_flags](auto cmdbuf, auto& dld) {
-        const vk::ClearDepthStencilValue clear_zeta(clear_depth, clear_stencil);
-        const vk::ClearValue clear_value{clear_zeta};
-        const vk::ClearAttachment attachment(aspect_flags, 0, clear_value);
-        cmdbuf.clearAttachments(1, &attachment, 1, &clear_rect, dld);
+                      clear_rect, aspect_flags](vk::CommandBuffer cmdbuf) {
+        VkClearValue clear_value;
+        clear_value.depthStencil.depth = clear_depth;
+        clear_value.depthStencil.stencil = clear_stencil;

+        VkClearAttachment attachment;
+        attachment.aspectMask = aspect_flags;
+        attachment.colorAttachment = 0;
+        attachment.clearValue.depthStencil.depth = clear_depth;
+        attachment.clearValue.depthStencil.stencil = clear_stencil;
+        cmdbuf.ClearAttachments(attachment, clear_rect);
    });
}
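Note: both before and after the rewrite, Clear() goes through vkCmdClearAttachments, which clears inside the current render pass and needs no layout transition. A raw-API sketch of the depth/stencil path, illustrative only:

#include <cstdint>
#include <vulkan/vulkan.h>

// Clear depth and stencil of the bound framebuffer region; the command
// requires an active render pass, which RequestRenderpass guarantees above.
inline void ClearDepthStencil(VkCommandBuffer cmdbuf, VkClearRect rect, float depth,
                              uint32_t stencil) {
    VkClearAttachment attachment{};
    attachment.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.clearValue.depthStencil = {depth, stencil};
    vkCmdClearAttachments(cmdbuf, 1, &attachment, 1, &rect);
}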
@@ -463,24 +473,24 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
    buffer_cache.Unmap();

-    TransitionImages(sampled_views, vk::PipelineStageFlagBits::eComputeShader,
-                     vk::AccessFlagBits::eShaderRead);
-    TransitionImages(image_views, vk::PipelineStageFlagBits::eComputeShader,
-                     vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite);
+    TransitionImages(sampled_views, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+                     VK_ACCESS_SHADER_READ_BIT);
+    TransitionImages(image_views, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+                     VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);

    if (device.IsNvDeviceDiagnosticCheckpoints()) {
        scheduler.Record(
-            [&pipeline](auto cmdbuf, auto& dld) { cmdbuf.setCheckpointNV(nullptr, dld); });
+            [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(nullptr); });
    }

    scheduler.Record([grid_x = launch_desc.grid_dim_x, grid_y = launch_desc.grid_dim_y,
                      grid_z = launch_desc.grid_dim_z, pipeline_handle = pipeline.GetHandle(),
                      layout = pipeline.GetLayout(),
-                      descriptor_set = pipeline.CommitDescriptorSet()](auto cmdbuf, auto& dld) {
-        cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline_handle, dld);
-        cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, layout, DESCRIPTOR_SET, 1,
-                                  &descriptor_set, 0, nullptr, dld);
-        cmdbuf.dispatch(grid_x, grid_y, grid_z, dld);
+                      descriptor_set = pipeline.CommitDescriptorSet()](vk::CommandBuffer cmdbuf) {
+        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_handle);
+        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, DESCRIPTOR_SET,
+                                  descriptor_set, {});
+        cmdbuf.Dispatch(grid_x, grid_y, grid_z);
    });
}

@@ -625,13 +635,13 @@ bool RasterizerVulkan::WalkAttachmentOverlaps(const CachedSurfaceView& attachmen
            continue;
        }
        overlap = true;
-        *layout = vk::ImageLayout::eGeneral;
+        *layout = VK_IMAGE_LAYOUT_GENERAL;
    }
    return overlap;
}

-std::tuple<vk::Framebuffer, vk::Extent2D> RasterizerVulkan::ConfigureFramebuffers(
-    vk::RenderPass renderpass) {
+std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
+    VkRenderPass renderpass) {
    FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(),
                            std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()};

@@ -658,15 +668,20 @@ std::tuple<vk::Framebuffer, vk::Extent2D> RasterizerVulkan::ConfigureFramebuffer
    const auto [fbentry, is_cache_miss] = framebuffer_cache.try_emplace(key);
    auto& framebuffer = fbentry->second;
    if (is_cache_miss) {
-        const vk::FramebufferCreateInfo framebuffer_ci(
-            {}, key.renderpass, static_cast<u32>(key.views.size()), key.views.data(), key.width,
-            key.height, key.layers);
-        const auto dev = device.GetLogical();
-        const auto& dld = device.GetDispatchLoader();
-        framebuffer = dev.createFramebufferUnique(framebuffer_ci, nullptr, dld);
+        VkFramebufferCreateInfo framebuffer_ci;
+        framebuffer_ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+        framebuffer_ci.pNext = nullptr;
+        framebuffer_ci.flags = 0;
+        framebuffer_ci.renderPass = key.renderpass;
+        framebuffer_ci.attachmentCount = static_cast<u32>(key.views.size());
+        framebuffer_ci.pAttachments = key.views.data();
+        framebuffer_ci.width = key.width;
+        framebuffer_ci.height = key.height;
+        framebuffer_ci.layers = key.layers;
+        framebuffer = device.GetLogical().CreateFramebuffer(framebuffer_ci);
    }

-    return {*framebuffer, vk::Extent2D{key.width, key.height}};
+    return {*framebuffer, VkExtent2D{key.width, key.height}};
}

RasterizerVulkan::DrawParameters RasterizerVulkan::SetupGeometry(FixedPipelineState& fixed_state,
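Note: framebuffers are immutable bindings of render pass, attachments and dimensions, so ConfigureFramebuffers memoizes them. The shape of that memoization, sketched generically (GetOrCreate is a hypothetical helper, not code from this patch):

#include <unordered_map>

// try_emplace default-constructs the slot on a miss, so the expensive
// creation runs exactly once per key. Key must be hashable and comparable.
template <typename Map, typename Key, typename Create>
const auto& GetOrCreate(Map& cache, const Key& key, Create&& create) {
    const auto [it, is_cache_miss] = cache.try_emplace(key);
    if (is_cache_miss) {
        it->second = create();
    }
    return it->second;
}

ConfigureFramebuffers above and VKRenderPassCache::GetRenderPass below both follow this shape.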
@@ -714,10 +729,9 @@ void RasterizerVulkan::SetupShaderDescriptors(
void RasterizerVulkan::SetupImageTransitions(
    Texceptions texceptions, const std::array<View, Maxwell::NumRenderTargets>& color_attachments,
    const View& zeta_attachment) {
-    TransitionImages(sampled_views, vk::PipelineStageFlagBits::eAllGraphics,
-                     vk::AccessFlagBits::eShaderRead);
-    TransitionImages(image_views, vk::PipelineStageFlagBits::eAllGraphics,
-                     vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite);
+    TransitionImages(sampled_views, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_ACCESS_SHADER_READ_BIT);
+    TransitionImages(image_views, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+                     VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);

    for (std::size_t rt = 0; rt < std::size(color_attachments); ++rt) {
        const auto color_attachment = color_attachments[rt];
@@ -725,19 +739,19 @@ void RasterizerVulkan::SetupImageTransitions(
            continue;
        }
        const auto image_layout =
-            texceptions[rt] ? vk::ImageLayout::eGeneral : vk::ImageLayout::eColorAttachmentOptimal;
-        color_attachment->Transition(
-            image_layout, vk::PipelineStageFlagBits::eColorAttachmentOutput,
-            vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite);
+            texceptions[rt] ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+        color_attachment->Transition(image_layout, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+                                     VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+                                         VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
    }

    if (zeta_attachment != nullptr) {
        const auto image_layout = texceptions[ZETA_TEXCEPTION_INDEX]
-                                      ? vk::ImageLayout::eGeneral
-                                      : vk::ImageLayout::eDepthStencilAttachmentOptimal;
-        zeta_attachment->Transition(image_layout, vk::PipelineStageFlagBits::eLateFragmentTests,
-                                    vk::AccessFlagBits::eDepthStencilAttachmentRead |
-                                        vk::AccessFlagBits::eDepthStencilAttachmentWrite);
+                                      ? VK_IMAGE_LAYOUT_GENERAL
+                                      : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+        zeta_attachment->Transition(image_layout, VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
+                                    VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+                                        VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
    }
}

@@ -773,9 +787,9 @@ void RasterizerVulkan::BeginTransformFeedback() {
    const std::size_t size = binding.buffer_size;
    const auto [buffer, offset] = buffer_cache.UploadMemory(gpu_addr, size, 4, true);

-    scheduler.Record([buffer = *buffer, offset = offset, size](auto cmdbuf, auto& dld) {
-        cmdbuf.bindTransformFeedbackBuffersEXT(0, {buffer}, {offset}, {size}, dld);
-        cmdbuf.beginTransformFeedbackEXT(0, {}, {}, dld);
+    scheduler.Record([buffer = *buffer, offset = offset, size](vk::CommandBuffer cmdbuf) {
+        cmdbuf.BindTransformFeedbackBuffersEXT(0, 1, &buffer, &offset, &size);
+        cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr);
    });
}

@@ -786,7 +800,7 @@ void RasterizerVulkan::EndTransformFeedback() {
    }

    scheduler.Record(
-        [](auto cmdbuf, auto& dld) { cmdbuf.endTransformFeedbackEXT(0, {}, {}, dld); });
+        [](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); });
}

void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex_input,
@@ -837,7 +851,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar
    } else {
        const auto [buffer, offset] =
            quad_array_pass.Assemble(params.num_vertices, params.base_vertex);
-        buffer_bindings.SetIndexBinding(&buffer, offset, vk::IndexType::eUint32);
+        buffer_bindings.SetIndexBinding(buffer, offset, VK_INDEX_TYPE_UINT32);
        params.base_vertex = 0;
        params.num_vertices = params.num_vertices * 6 / 4;
        params.is_indexed = true;
@@ -1022,7 +1036,7 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
    update_descriptor_queue.AddSampledImage(sampler, image_view);

    const auto image_layout = update_descriptor_queue.GetLastImageLayout();
-    *image_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
+    *image_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    sampled_views.push_back(ImageView{std::move(view), image_layout});
}

@@ -1039,7 +1053,7 @@ void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const Ima
    update_descriptor_queue.AddImage(image_view);

    const auto image_layout = update_descriptor_queue.GetLastImageLayout();
-    *image_layout = vk::ImageLayout::eGeneral;
+    *image_layout = VK_IMAGE_LAYOUT_GENERAL;
    image_views.push_back(ImageView{std::move(view), image_layout});
}

@@ -1056,9 +1070,7 @@ void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& reg
        GetViewportState(device, regs, 10), GetViewportState(device, regs, 11),
        GetViewportState(device, regs, 12), GetViewportState(device, regs, 13),
        GetViewportState(device, regs, 14), GetViewportState(device, regs, 15)};
-    scheduler.Record([viewports](auto cmdbuf, auto& dld) {
-        cmdbuf.setViewport(0, static_cast<u32>(viewports.size()), viewports.data(), dld);
-    });
+    scheduler.Record([viewports](vk::CommandBuffer cmdbuf) { cmdbuf.SetViewport(0, viewports); });
}

void RasterizerVulkan::UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1072,9 +1084,7 @@ void RasterizerVulkan::UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs
        GetScissorState(regs, 9), GetScissorState(regs, 10), GetScissorState(regs, 11),
        GetScissorState(regs, 12), GetScissorState(regs, 13), GetScissorState(regs, 14),
        GetScissorState(regs, 15)};
-    scheduler.Record([scissors](auto cmdbuf, auto& dld) {
-        cmdbuf.setScissor(0, static_cast<u32>(scissors.size()), scissors.data(), dld);
-    });
+    scheduler.Record([scissors](vk::CommandBuffer cmdbuf) { cmdbuf.SetScissor(0, scissors); });
}

void RasterizerVulkan::UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1082,8 +1092,8 @@ void RasterizerVulkan::UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs) {
        return;
    }
    scheduler.Record([constant = regs.polygon_offset_units, clamp = regs.polygon_offset_clamp,
-                      factor = regs.polygon_offset_factor](auto cmdbuf, auto& dld) {
-        cmdbuf.setDepthBias(constant, clamp, factor / 2.0f, dld);
+                      factor = regs.polygon_offset_factor](vk::CommandBuffer cmdbuf) {
+        cmdbuf.SetDepthBias(constant, clamp, factor / 2.0f);
    });
}

@@ -1093,9 +1103,8 @@ void RasterizerVulkan::UpdateBlendConstants(Tegra::Engines::Maxwell3D::Regs& reg
    }
    const std::array blend_color = {regs.blend_color.r, regs.blend_color.g, regs.blend_color.b,
                                    regs.blend_color.a};
-    scheduler.Record([blend_color](auto cmdbuf, auto& dld) {
-        cmdbuf.setBlendConstants(blend_color.data(), dld);
-    });
+    scheduler.Record(
+        [blend_color](vk::CommandBuffer cmdbuf) { cmdbuf.SetBlendConstants(blend_color.data()); });
}

void RasterizerVulkan::UpdateDepthBounds(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1103,7 +1112,7 @@ void RasterizerVulkan::UpdateDepthBounds(Tegra::Engines::Maxwell3D::Regs& regs)
        return;
    }
    scheduler.Record([min = regs.depth_bounds[0], max = regs.depth_bounds[1]](
-                         auto cmdbuf, auto& dld) { cmdbuf.setDepthBounds(min, max, dld); });
+                         vk::CommandBuffer cmdbuf) { cmdbuf.SetDepthBounds(min, max); });
}

void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1116,24 +1125,24 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
            [front_ref = regs.stencil_front_func_ref, front_write_mask = regs.stencil_front_mask,
             front_test_mask = regs.stencil_front_func_mask, back_ref = regs.stencil_back_func_ref,
             back_write_mask = regs.stencil_back_mask,
-             back_test_mask = regs.stencil_back_func_mask](auto cmdbuf, auto& dld) {
+             back_test_mask = regs.stencil_back_func_mask](vk::CommandBuffer cmdbuf) {
                // Front face
-                cmdbuf.setStencilReference(vk::StencilFaceFlagBits::eFront, front_ref, dld);
-                cmdbuf.setStencilWriteMask(vk::StencilFaceFlagBits::eFront, front_write_mask, dld);
-                cmdbuf.setStencilCompareMask(vk::StencilFaceFlagBits::eFront, front_test_mask, dld);
+                cmdbuf.SetStencilReference(VK_STENCIL_FACE_FRONT_BIT, front_ref);
+                cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_FRONT_BIT, front_write_mask);
+                cmdbuf.SetStencilCompareMask(VK_STENCIL_FACE_FRONT_BIT, front_test_mask);

                // Back face
-                cmdbuf.setStencilReference(vk::StencilFaceFlagBits::eBack, back_ref, dld);
-                cmdbuf.setStencilWriteMask(vk::StencilFaceFlagBits::eBack, back_write_mask, dld);
-                cmdbuf.setStencilCompareMask(vk::StencilFaceFlagBits::eBack, back_test_mask, dld);
+                cmdbuf.SetStencilReference(VK_STENCIL_FACE_BACK_BIT, back_ref);
+                cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_BACK_BIT, back_write_mask);
+                cmdbuf.SetStencilCompareMask(VK_STENCIL_FACE_BACK_BIT, back_test_mask);
            });
    } else {
        // Front face defines both faces
        scheduler.Record([ref = regs.stencil_back_func_ref, write_mask = regs.stencil_back_mask,
-                          test_mask = regs.stencil_back_func_mask](auto cmdbuf, auto& dld) {
-            cmdbuf.setStencilReference(vk::StencilFaceFlagBits::eFrontAndBack, ref, dld);
-            cmdbuf.setStencilWriteMask(vk::StencilFaceFlagBits::eFrontAndBack, write_mask, dld);
-            cmdbuf.setStencilCompareMask(vk::StencilFaceFlagBits::eFrontAndBack, test_mask, dld);
+                          test_mask = regs.stencil_back_func_mask](vk::CommandBuffer cmdbuf) {
+            cmdbuf.SetStencilReference(VK_STENCIL_FACE_FRONT_AND_BACK, ref);
+            cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_FRONT_AND_BACK, write_mask);
+            cmdbuf.SetStencilCompareMask(VK_STENCIL_FACE_FRONT_AND_BACK, test_mask);
        });
    }
}
View File
@@ -17,7 +17,6 @@
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_accelerated.h"
#include "video_core/rasterizer_interface.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pass.h"
@@ -32,6 +31,7 @@
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"

namespace Core {
class System;
@@ -49,11 +49,10 @@ namespace Vulkan {
struct VKScreenInfo;

-using ImageViewsPack =
-    boost::container::static_vector<vk::ImageView, Maxwell::NumRenderTargets + 1>;
+using ImageViewsPack = boost::container::static_vector<VkImageView, Maxwell::NumRenderTargets + 1>;

struct FramebufferCacheKey {
-    vk::RenderPass renderpass{};
+    VkRenderPass renderpass{};
    u32 width = 0;
    u32 height = 0;
    u32 layers = 0;
@@ -101,7 +100,7 @@ class BufferBindings;

struct ImageView {
    View view;
-    vk::ImageLayout* layout = nullptr;
+    VkImageLayout* layout = nullptr;
};

class RasterizerVulkan final : public VideoCore::RasterizerAccelerated {
@@ -137,7 +136,7 @@ public:
private:
    struct DrawParameters {
-        void Draw(vk::CommandBuffer cmdbuf, const vk::DispatchLoaderDynamic& dld) const;
+        void Draw(vk::CommandBuffer cmdbuf) const;

        u32 base_instance = 0;
        u32 num_instances = 0;
@@ -154,7 +153,7 @@ private:
    Texceptions UpdateAttachments();

-    std::tuple<vk::Framebuffer, vk::Extent2D> ConfigureFramebuffers(vk::RenderPass renderpass);
+    std::tuple<VkFramebuffer, VkExtent2D> ConfigureFramebuffers(VkRenderPass renderpass);

    /// Setups geometry buffers and state.
    DrawParameters SetupGeometry(FixedPipelineState& fixed_state, BufferBindings& buffer_bindings,
@@ -272,7 +271,7 @@ private:
    u32 draw_counter = 0;

    // TODO(Rodrigo): Invalidate on image destruction
-    std::unordered_map<FramebufferCacheKey, UniqueFramebuffer> framebuffer_cache;
+    std::unordered_map<FramebufferCacheKey, vk::Framebuffer> framebuffer_cache;
};

} // namespace Vulkan
View File
@@ -6,10 +6,10 @@
#include <vector>

#include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
+#include "video_core/renderer_vulkan/wrapper.h"

namespace Vulkan {

@@ -17,7 +17,7 @@ VKRenderPassCache::VKRenderPassCache(const VKDevice& device) : device{device} {}
VKRenderPassCache::~VKRenderPassCache() = default;

-vk::RenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) {
+VkRenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) {
    const auto [pair, is_cache_miss] = cache.try_emplace(params);
    auto& entry = pair->second;
    if (is_cache_miss) {
@@ -26,9 +26,9 @@ vk::RenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params)
    return *entry;
}

-UniqueRenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const {
-    std::vector<vk::AttachmentDescription> descriptors;
-    std::vector<vk::AttachmentReference> color_references;
+vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const {
+    std::vector<VkAttachmentDescription> descriptors;
+    std::vector<VkAttachmentReference> color_references;

    for (std::size_t rt = 0; rt < params.color_attachments.size(); ++rt) {
        const auto attachment = params.color_attachments[rt];
@@ -39,16 +39,25 @@ UniqueRenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& par
        // TODO(Rodrigo): Add eMayAlias when it's needed.
        const auto color_layout = attachment.is_texception
-                                      ? vk::ImageLayout::eGeneral
-                                      : vk::ImageLayout::eColorAttachmentOptimal;
-        descriptors.emplace_back(vk::AttachmentDescriptionFlagBits::eMayAlias, format.format,
-                                 vk::SampleCountFlagBits::e1, vk::AttachmentLoadOp::eLoad,
-                                 vk::AttachmentStoreOp::eStore, vk::AttachmentLoadOp::eDontCare,
-                                 vk::AttachmentStoreOp::eDontCare, color_layout, color_layout);
-        color_references.emplace_back(static_cast<u32>(rt), color_layout);
+                                      ? VK_IMAGE_LAYOUT_GENERAL
+                                      : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+        VkAttachmentDescription& descriptor = descriptors.emplace_back();
+        descriptor.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
+        descriptor.format = format.format;
+        descriptor.samples = VK_SAMPLE_COUNT_1_BIT;
+        descriptor.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+        descriptor.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+        descriptor.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+        descriptor.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+        descriptor.initialLayout = color_layout;
+        descriptor.finalLayout = color_layout;
+
+        VkAttachmentReference& reference = color_references.emplace_back();
+        reference.attachment = static_cast<u32>(rt);
+        reference.layout = color_layout;
    }

-    vk::AttachmentReference zeta_attachment_ref;
+    VkAttachmentReference zeta_attachment_ref;
    if (params.has_zeta) {
        const auto format =
            MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.zeta_pixel_format);
@@ -56,45 +65,68 @@ UniqueRenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& par
                   static_cast<u32>(params.zeta_pixel_format));
        const auto zeta_layout = params.zeta_texception
-                                     ? vk::ImageLayout::eGeneral
-                                     : vk::ImageLayout::eDepthStencilAttachmentOptimal;
-        descriptors.emplace_back(vk::AttachmentDescriptionFlags{}, format.format,
-                                 vk::SampleCountFlagBits::e1, vk::AttachmentLoadOp::eLoad,
-                                 vk::AttachmentStoreOp::eStore, vk::AttachmentLoadOp::eLoad,
-                                 vk::AttachmentStoreOp::eStore, zeta_layout, zeta_layout);
-        zeta_attachment_ref =
-            vk::AttachmentReference(static_cast<u32>(params.color_attachments.size()), zeta_layout);
+                                     ? VK_IMAGE_LAYOUT_GENERAL
+                                     : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+        VkAttachmentDescription& descriptor = descriptors.emplace_back();
+        descriptor.flags = 0;
+        descriptor.format = format.format;
+        descriptor.samples = VK_SAMPLE_COUNT_1_BIT;
+        descriptor.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+        descriptor.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+        descriptor.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+        descriptor.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+        descriptor.initialLayout = zeta_layout;
+        descriptor.finalLayout = zeta_layout;
+
+        zeta_attachment_ref.attachment = static_cast<u32>(params.color_attachments.size());
+        zeta_attachment_ref.layout = zeta_layout;
    }

-    const vk::SubpassDescription subpass_description(
-        {}, vk::PipelineBindPoint::eGraphics, 0, nullptr, static_cast<u32>(color_references.size()),
-        color_references.data(), nullptr, params.has_zeta ? &zeta_attachment_ref : nullptr, 0,
-        nullptr);
+    VkSubpassDescription subpass_description;
+    subpass_description.flags = 0;
+    subpass_description.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+    subpass_description.inputAttachmentCount = 0;
+    subpass_description.pInputAttachments = nullptr;
+    subpass_description.colorAttachmentCount = static_cast<u32>(color_references.size());
+    subpass_description.pColorAttachments = color_references.data();
+    subpass_description.pResolveAttachments = nullptr;
+    subpass_description.pDepthStencilAttachment = params.has_zeta ? &zeta_attachment_ref : nullptr;
+    subpass_description.preserveAttachmentCount = 0;
+    subpass_description.pPreserveAttachments = nullptr;

-    vk::AccessFlags access;
-    vk::PipelineStageFlags stage;
+    VkAccessFlags access = 0;
+    VkPipelineStageFlags stage = 0;
    if (!color_references.empty()) {
-        access |=
-            vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite;
-        stage |= vk::PipelineStageFlagBits::eColorAttachmentOutput;
+        access |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+        stage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    if (params.has_zeta) {
-        access |= vk::AccessFlagBits::eDepthStencilAttachmentRead |
-                  vk::AccessFlagBits::eDepthStencilAttachmentWrite;
-        stage |= vk::PipelineStageFlagBits::eLateFragmentTests;
+        access |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+        stage |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    }

-    const vk::SubpassDependency subpass_dependency(VK_SUBPASS_EXTERNAL, 0, stage, stage, {}, access,
-                                                   {});
+    VkSubpassDependency subpass_dependency;
+    subpass_dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
+    subpass_dependency.dstSubpass = 0;
+    subpass_dependency.srcStageMask = stage;
+    subpass_dependency.dstStageMask = stage;
+    subpass_dependency.srcAccessMask = 0;
+    subpass_dependency.dstAccessMask = access;
+    subpass_dependency.dependencyFlags = 0;

-    const vk::RenderPassCreateInfo create_info({}, static_cast<u32>(descriptors.size()),
-                                               descriptors.data(), 1, &subpass_description, 1,
-                                               &subpass_dependency);
-
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    return dev.createRenderPassUnique(create_info, nullptr, dld);
+    VkRenderPassCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.attachmentCount = static_cast<u32>(descriptors.size());
+    ci.pAttachments = descriptors.data();
+    ci.subpassCount = 1;
+    ci.pSubpasses = &subpass_description;
+    ci.dependencyCount = 1;
+    ci.pDependencies = &subpass_dependency;
+    return device.GetLogical().CreateRenderPass(ci);
}

} // namespace Vulkan
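Note: unlike the Vulkan-Hpp constructors they replace, these C structs start uninitialized, so every member must be written or the result is undefined behavior; the patch therefore assigns all fields explicitly, including pNext and flags. A defensive alternative, reusing the names from the hunk above, is value initialization (a style choice shown for illustration, not what this patch does):

// Value initialization zeroes every member (sType becomes 0, pNext null), so
// only non-zero fields need explicit writes; sType must still be set by hand.
VkRenderPassCreateInfo ci{};
ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
ci.attachmentCount = static_cast<uint32_t>(descriptors.size());
ci.pAttachments = descriptors.data();
ci.subpassCount = 1;
ci.pSubpasses = &subpass_description;
ci.dependencyCount = 1;
ci.pDependencies = &subpass_dependency;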
View File
@@ -12,7 +12,7 @@
#include <boost/functional/hash.hpp>

#include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/surface.h"

namespace Vulkan {
@@ -85,13 +85,13 @@ public:
    explicit VKRenderPassCache(const VKDevice& device);
    ~VKRenderPassCache();

-    vk::RenderPass GetRenderPass(const RenderPassParams& params);
+    VkRenderPass GetRenderPass(const RenderPassParams& params);

private:
-    UniqueRenderPass CreateRenderPass(const RenderPassParams& params) const;
+    vk::RenderPass CreateRenderPass(const RenderPassParams& params) const;

    const VKDevice& device;
-    std::unordered_map<RenderPassParams, UniqueRenderPass> cache;
+    std::unordered_map<RenderPassParams, vk::RenderPass> cache;
};

} // namespace Vulkan
View File
@@ -6,83 +6,83 @@
#include <optional>

#include "common/assert.h"
#include "common/logging/log.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"

namespace Vulkan {

+namespace {
+
// TODO(Rodrigo): Fine tune these numbers.
constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
constexpr std::size_t FENCES_GROW_STEP = 0x40;

+VkFenceCreateInfo BuildFenceCreateInfo() {
+    VkFenceCreateInfo fence_ci;
+    fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+    fence_ci.pNext = nullptr;
+    fence_ci.flags = 0;
+    return fence_ci;
+}
+
+} // Anonymous namespace
+
class CommandBufferPool final : public VKFencedPool {
public:
    CommandBufferPool(const VKDevice& device)
        : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}

    void Allocate(std::size_t begin, std::size_t end) override {
-        const auto dev = device.GetLogical();
-        const auto& dld = device.GetDispatchLoader();
-        const u32 graphics_family = device.GetGraphicsFamily();
-
-        auto pool = std::make_unique<Pool>();
-
        // Command buffers are going to be commited, recorded, executed every single usage cycle.
        // They are also going to be reseted when commited.
-        const auto pool_flags = vk::CommandPoolCreateFlagBits::eTransient |
-                                vk::CommandPoolCreateFlagBits::eResetCommandBuffer;
-        const vk::CommandPoolCreateInfo cmdbuf_pool_ci(pool_flags, graphics_family);
-        pool->handle = dev.createCommandPoolUnique(cmdbuf_pool_ci, nullptr, dld);
+        VkCommandPoolCreateInfo command_pool_ci;
+        command_pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+        command_pool_ci.pNext = nullptr;
+        command_pool_ci.flags =
+            VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+        command_pool_ci.queueFamilyIndex = device.GetGraphicsFamily();

-        const vk::CommandBufferAllocateInfo cmdbuf_ai(*pool->handle,
-                                                      vk::CommandBufferLevel::ePrimary,
-                                                      static_cast<u32>(COMMAND_BUFFER_POOL_SIZE));
-        pool->cmdbufs =
-            dev.allocateCommandBuffersUnique<std::allocator<UniqueCommandBuffer>>(cmdbuf_ai, dld);
-
-        pools.push_back(std::move(pool));
+        Pool& pool = pools.emplace_back();
+        pool.handle = device.GetLogical().CreateCommandPool(command_pool_ci);
+        pool.cmdbufs = pool.handle.Allocate(COMMAND_BUFFER_POOL_SIZE);
    }

-    vk::CommandBuffer Commit(VKFence& fence) {
+    VkCommandBuffer Commit(VKFence& fence) {
        const std::size_t index = CommitResource(fence);
        const auto pool_index = index / COMMAND_BUFFER_POOL_SIZE;
        const auto sub_index = index % COMMAND_BUFFER_POOL_SIZE;
-        return *pools[pool_index]->cmdbufs[sub_index];
+        return pools[pool_index].cmdbufs[sub_index];
    }

private:
    struct Pool {
-        UniqueCommandPool handle;
-        std::vector<UniqueCommandBuffer> cmdbufs;
+        vk::CommandPool handle;
+        vk::CommandBuffers cmdbufs;
    };

    const VKDevice& device;
-
-    std::vector<std::unique_ptr<Pool>> pools;
+    std::vector<Pool> pools;
};

VKResource::VKResource() = default;

VKResource::~VKResource() = default;

-VKFence::VKFence(const VKDevice& device, UniqueFence handle)
-    : device{device}, handle{std::move(handle)} {}
+VKFence::VKFence(const VKDevice& device)
+    : device{device}, handle{device.GetLogical().CreateFence(BuildFenceCreateInfo())} {}

VKFence::~VKFence() = default;

void VKFence::Wait() {
-    static constexpr u64 timeout = std::numeric_limits<u64>::max();
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    switch (const auto result = dev.waitForFences(1, &*handle, true, timeout, dld)) {
-    case vk::Result::eSuccess:
+    switch (const VkResult result = handle.Wait()) {
+    case VK_SUCCESS:
        return;
-    case vk::Result::eErrorDeviceLost:
+    case VK_ERROR_DEVICE_LOST:
        device.ReportLoss();
        [[fallthrough]];
    default:
-        vk::throwResultException(result, "vk::waitForFences");
+        throw vk::Exception(result);
    }
}

@@ -107,13 +107,11 @@ bool VKFence::Tick(bool gpu_wait, bool owner_wait) {
        return false;
    }

-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
    if (gpu_wait) {
        // Wait for the fence if it has been requested.
-        dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
+        (void)handle.Wait();
    } else {
-        if (dev.getFenceStatus(*handle, dld) != vk::Result::eSuccess) {
+        if (handle.GetStatus() != VK_SUCCESS) {
            // Vulkan fence is not ready, not much it can do here
            return false;
        }
@@ -126,7 +124,7 @@ bool VKFence::Tick(bool gpu_wait, bool owner_wait) {
    protected_resources.clear();

    // Prepare fence for reusage.
-    dev.resetFences({*handle}, dld);
+    handle.Reset();
    is_used = false;
    return true;
}

@@ -299,21 +297,16 @@ VKFence& VKResourceManager::CommitFence() {
    return *found_fence;
}

-vk::CommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) {
+VkCommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) {
    return command_buffer_pool->Commit(fence);
}

void VKResourceManager::GrowFences(std::size_t new_fences_count) {
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    const vk::FenceCreateInfo fence_ci;
-
    const std::size_t previous_size = fences.size();
    fences.resize(previous_size + new_fences_count);

-    std::generate(fences.begin() + previous_size, fences.end(), [&]() {
-        return std::make_unique<VKFence>(device, dev.createFenceUnique(fence_ci, nullptr, dld));
-    });
+    std::generate(fences.begin() + previous_size, fences.end(),
+                  [this] { return std::make_unique<VKFence>(device); });
}

} // namespace Vulkan
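Note: vk::Fence's Wait, GetStatus and Reset map onto vkWaitForFences, vkGetFenceStatus and vkResetFences. A minimal sketch of one fence round trip with the raw API, illustrative only:

#include <cstdint>
#include <limits>
#include <vulkan/vulkan.h>

// One fence round trip: block until signaled, then reset for reuse.
// A non-blocking poll, as in VKFence::Tick, uses vkGetFenceStatus instead.
inline bool WaitAndRecycle(VkDevice device, VkFence fence) {
    const uint64_t timeout = std::numeric_limits<uint64_t>::max();
    if (vkWaitForFences(device, 1, &fence, VK_TRUE, timeout) != VK_SUCCESS) {
        return false; // e.g. VK_ERROR_DEVICE_LOST
    }
    return vkResetFences(device, 1, &fence) == VK_SUCCESS;
}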
View File
@@ -7,7 +7,7 @@
#include <cstddef>
#include <memory>
#include <vector>
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"

namespace Vulkan {

@@ -42,7 +42,7 @@ class VKFence {
    friend class VKResourceManager;

public:
-    explicit VKFence(const VKDevice& device, UniqueFence handle);
+    explicit VKFence(const VKDevice& device);
    ~VKFence();

    /**
@@ -69,7 +69,7 @@ public:
    void RedirectProtection(VKResource* old_resource, VKResource* new_resource) noexcept;

    /// Retreives the fence.
-    operator vk::Fence() const {
+    operator VkFence() const {
        return *handle;
    }

@@ -87,7 +87,7 @@ private:
    bool Tick(bool gpu_wait, bool owner_wait);

    const VKDevice& device; ///< Device handler
-    UniqueFence handle;     ///< Vulkan fence
+    vk::Fence handle;       ///< Vulkan fence
    std::vector<VKResource*> protected_resources; ///< List of resources protected by this fence
    bool is_owned = false; ///< The fence has been commited but not released yet.
    bool is_used = false;  ///< The fence has been commited but it has not been checked to be free.

@@ -181,7 +181,7 @@ public:
    VKFence& CommitFence();

    /// Commits an unused command buffer and protects it with a fence.
-    vk::CommandBuffer CommitCommandBuffer(VKFence& fence);
+    VkCommandBuffer CommitCommandBuffer(VKFence& fence);

private:
    /// Allocates new fences.
View File
@ -7,64 +7,64 @@
#include <unordered_map> #include <unordered_map>
#include "common/assert.h" #include "common/assert.h"
#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h" #include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_sampler_cache.h" #include "video_core/renderer_vulkan/vk_sampler_cache.h"
#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/textures/texture.h" #include "video_core/textures/texture.h"
namespace Vulkan { namespace Vulkan {
static std::optional<vk::BorderColor> TryConvertBorderColor(std::array<float, 4> color) { namespace {
VkBorderColor ConvertBorderColor(std::array<float, 4> color) {
// TODO(Rodrigo): Manage integer border colors // TODO(Rodrigo): Manage integer border colors
if (color == std::array<float, 4>{0, 0, 0, 0}) { if (color == std::array<float, 4>{0, 0, 0, 0}) {
-        return vk::BorderColor::eFloatTransparentBlack;
+        return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
     } else if (color == std::array<float, 4>{0, 0, 0, 1}) {
-        return vk::BorderColor::eFloatOpaqueBlack;
+        return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
     } else if (color == std::array<float, 4>{1, 1, 1, 1}) {
-        return vk::BorderColor::eFloatOpaqueWhite;
-    } else {
-        if (color[0] + color[1] + color[2] > 1.35f) {
-            // If color elements are brighter than roughly 0.5 average, use white border
-            return vk::BorderColor::eFloatOpaqueWhite;
-        }
-        if (color[3] > 0.5f) {
-            return vk::BorderColor::eFloatOpaqueBlack;
-        }
-        return vk::BorderColor::eFloatTransparentBlack;
+        return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
+    }
+    if (color[0] + color[1] + color[2] > 1.35f) {
+        // If color elements are brighter than roughly 0.5 average, use white border
+        return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
+    } else if (color[3] > 0.5f) {
+        return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
+    } else {
+        return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
     }
 }

 } // Anonymous namespace

 VKSamplerCache::VKSamplerCache(const VKDevice& device) : device{device} {}

 VKSamplerCache::~VKSamplerCache() = default;

-UniqueSampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) const {
-    const float max_anisotropy{tsc.GetMaxAnisotropy()};
-    const bool has_anisotropy{max_anisotropy > 1.0f};
-
-    const auto border_color{tsc.GetBorderColor()};
-    const auto vk_border_color{TryConvertBorderColor(border_color)};
-
-    constexpr bool unnormalized_coords{false};
-
-    const vk::SamplerCreateInfo sampler_ci(
-        {}, MaxwellToVK::Sampler::Filter(tsc.mag_filter),
-        MaxwellToVK::Sampler::Filter(tsc.min_filter),
-        MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter),
-        MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_u, tsc.mag_filter),
-        MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_v, tsc.mag_filter),
-        MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_p, tsc.mag_filter), tsc.GetLodBias(),
-        has_anisotropy, max_anisotropy, tsc.depth_compare_enabled,
-        MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func), tsc.GetMinLod(),
-        tsc.GetMaxLod(), vk_border_color.value_or(vk::BorderColor::eFloatTransparentBlack),
-        unnormalized_coords);
-
-    const auto& dld{device.GetDispatchLoader()};
-    const auto dev{device.GetLogical()};
-    return dev.createSamplerUnique(sampler_ci, nullptr, dld);
+vk::Sampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) const {
+    VkSamplerCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.magFilter = MaxwellToVK::Sampler::Filter(tsc.mag_filter);
+    ci.minFilter = MaxwellToVK::Sampler::Filter(tsc.min_filter);
+    ci.mipmapMode = MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter);
+    ci.addressModeU = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_u, tsc.mag_filter);
+    ci.addressModeV = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_v, tsc.mag_filter);
+    ci.addressModeW = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_p, tsc.mag_filter);
+    ci.mipLodBias = tsc.GetLodBias();
+    ci.anisotropyEnable = tsc.GetMaxAnisotropy() > 1.0f ? VK_TRUE : VK_FALSE;
+    ci.maxAnisotropy = tsc.GetMaxAnisotropy();
+    ci.compareEnable = tsc.depth_compare_enabled;
+    ci.compareOp = MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func);
+    ci.minLod = tsc.GetMinLod();
+    ci.maxLod = tsc.GetMaxLod();
+    ci.borderColor = ConvertBorderColor(tsc.GetBorderColor());
+    ci.unnormalizedCoordinates = VK_FALSE;
+    return device.GetLogical().CreateSampler(ci);
 }

-vk::Sampler VKSamplerCache::ToSamplerType(const UniqueSampler& sampler) const {
+VkSampler VKSamplerCache::ToSamplerType(const vk::Sampler& sampler) const {
     return *sampler;
 }
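The fallback near the top of this file approximates an arbitrary border color with one of Vulkan's three float border colors: bright colors become opaque white, otherwise alpha picks opaque or transparent black. A minimal standalone sketch of that heuristic, using a stand-in enum instead of VkBorderColor so it compiles without the Vulkan SDK (the enum and the main() driver are illustrative assumptions, not yuzu code):

#include <array>
#include <cstdio>

// Stand-in for VkBorderColor; only the three float variants matter here.
enum class BorderColor { TransparentBlack, OpaqueBlack, OpaqueWhite };

BorderColor Approximate(const std::array<float, 4>& color) {
    if (color[0] + color[1] + color[2] > 1.35f) {
        // RGB sum above 1.35f, i.e. roughly 0.5 average, reads as a bright color
        return BorderColor::OpaqueWhite;
    }
    if (color[3] > 0.5f) {
        return BorderColor::OpaqueBlack;
    }
    return BorderColor::TransparentBlack;
}

int main() {
    // A light gray maps to opaque white, a dim translucent color to transparent black.
    std::printf("%d\n", static_cast<int>(Approximate({0.9f, 0.9f, 0.9f, 1.0f})));
    std::printf("%d\n", static_cast<int>(Approximate({0.1f, 0.1f, 0.1f, 0.2f})));
}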

View File

@ -4,7 +4,7 @@

 #pragma once

-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 #include "video_core/sampler_cache.h"
 #include "video_core/textures/texture.h"

@ -12,15 +12,15 @@ namespace Vulkan {

 class VKDevice;

-class VKSamplerCache final : public VideoCommon::SamplerCache<vk::Sampler, UniqueSampler> {
+class VKSamplerCache final : public VideoCommon::SamplerCache<VkSampler, vk::Sampler> {
 public:
     explicit VKSamplerCache(const VKDevice& device);
     ~VKSamplerCache();

 protected:
-    UniqueSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const override;
-    vk::Sampler ToSamplerType(const UniqueSampler& sampler) const override;
+    vk::Sampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const override;
+    VkSampler ToSamplerType(const vk::Sampler& sampler) const override;

 private:
     const VKDevice& device;

View File

@ -10,23 +10,22 @@

 #include "common/assert.h"
 #include "common/microprofile.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_query_cache.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_state_tracker.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

 MICROPROFILE_DECLARE(Vulkan_WaitForWorker);

-void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf,
-                                           const vk::DispatchLoaderDynamic& dld) {
+void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
     auto command = first;
     while (command != nullptr) {
         auto next = command->GetNext();
-        command->Execute(cmdbuf, dld);
+        command->Execute(cmdbuf);
         command->~Command();
         command = next;
     }
@ -51,7 +50,7 @@ VKScheduler::~VKScheduler() {
     worker_thread.join();
 }

-void VKScheduler::Flush(bool release_fence, vk::Semaphore semaphore) {
+void VKScheduler::Flush(bool release_fence, VkSemaphore semaphore) {
     SubmitExecution(semaphore);
     if (release_fence) {
         current_fence->Release();
@ -59,7 +58,7 @@ void VKScheduler::Flush(bool release_fence, VkSemaphore semaphore) {
     AllocateNewContext();
 }

-void VKScheduler::Finish(bool release_fence, vk::Semaphore semaphore) {
+void VKScheduler::Finish(bool release_fence, VkSemaphore semaphore) {
     SubmitExecution(semaphore);
     current_fence->Wait();
     if (release_fence) {
@ -89,17 +88,34 @@ void VKScheduler::DispatchWork() {
     AcquireNewChunk();
 }

-void VKScheduler::RequestRenderpass(const vk::RenderPassBeginInfo& renderpass_bi) {
-    if (state.renderpass && renderpass_bi == *state.renderpass) {
+void VKScheduler::RequestRenderpass(VkRenderPass renderpass, VkFramebuffer framebuffer,
+                                    VkExtent2D render_area) {
+    if (renderpass == state.renderpass && framebuffer == state.framebuffer &&
+        render_area.width == state.render_area.width &&
+        render_area.height == state.render_area.height) {
         return;
     }
-    const bool end_renderpass = state.renderpass.has_value();
-    state.renderpass = renderpass_bi;
-    Record([renderpass_bi, end_renderpass](auto cmdbuf, auto& dld) {
+    const bool end_renderpass = state.renderpass != nullptr;
+    state.renderpass = renderpass;
+    state.framebuffer = framebuffer;
+    state.render_area = render_area;
+
+    VkRenderPassBeginInfo renderpass_bi;
+    renderpass_bi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+    renderpass_bi.pNext = nullptr;
+    renderpass_bi.renderPass = renderpass;
+    renderpass_bi.framebuffer = framebuffer;
+    renderpass_bi.renderArea.offset.x = 0;
+    renderpass_bi.renderArea.offset.y = 0;
+    renderpass_bi.renderArea.extent = render_area;
+    renderpass_bi.clearValueCount = 0;
+    renderpass_bi.pClearValues = nullptr;
+
+    Record([renderpass_bi, end_renderpass](vk::CommandBuffer cmdbuf) {
         if (end_renderpass) {
-            cmdbuf.endRenderPass(dld);
+            cmdbuf.EndRenderPass();
         }
-        cmdbuf.beginRenderPass(renderpass_bi, vk::SubpassContents::eInline, dld);
+        cmdbuf.BeginRenderPass(renderpass_bi, VK_SUBPASS_CONTENTS_INLINE);
     });
 }

@ -107,13 +123,13 @@ void VKScheduler::RequestOutsideRenderPassOperationContext() {
     EndRenderPass();
 }

-void VKScheduler::BindGraphicsPipeline(vk::Pipeline pipeline) {
+void VKScheduler::BindGraphicsPipeline(VkPipeline pipeline) {
     if (state.graphics_pipeline == pipeline) {
         return;
     }
     state.graphics_pipeline = pipeline;
-    Record([pipeline](auto cmdbuf, auto& dld) {
-        cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline, dld);
+    Record([pipeline](vk::CommandBuffer cmdbuf) {
+        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
     });
 }

@ -126,37 +142,50 @@ void VKScheduler::WorkerThread() {
         }
         auto extracted_chunk = std::move(chunk_queue.Front());
         chunk_queue.Pop();
-        extracted_chunk->ExecuteAll(current_cmdbuf, device.GetDispatchLoader());
+        extracted_chunk->ExecuteAll(current_cmdbuf);
         chunk_reserve.Push(std::move(extracted_chunk));
     } while (!quit);
 }

-void VKScheduler::SubmitExecution(vk::Semaphore semaphore) {
+void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
     EndPendingOperations();
     InvalidateState();
     WaitWorker();

     std::unique_lock lock{mutex};

-    const auto queue = device.GetGraphicsQueue();
-    const auto& dld = device.GetDispatchLoader();
-    current_cmdbuf.end(dld);
+    current_cmdbuf.End();

-    const vk::SubmitInfo submit_info(0, nullptr, nullptr, 1, &current_cmdbuf, semaphore ? 1U : 0U,
-                                     &semaphore);
-    queue.submit({submit_info}, static_cast<vk::Fence>(*current_fence), dld);
+    VkSubmitInfo submit_info;
+    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+    submit_info.pNext = nullptr;
+    submit_info.waitSemaphoreCount = 0;
+    submit_info.pWaitSemaphores = nullptr;
+    submit_info.pWaitDstStageMask = nullptr;
+    submit_info.commandBufferCount = 1;
+    submit_info.pCommandBuffers = current_cmdbuf.address();
+    submit_info.signalSemaphoreCount = semaphore ? 1 : 0;
+    submit_info.pSignalSemaphores = &semaphore;
+    device.GetGraphicsQueue().Submit(submit_info, *current_fence);
 }

 void VKScheduler::AllocateNewContext() {
     ++ticks;

+    VkCommandBufferBeginInfo cmdbuf_bi;
+    cmdbuf_bi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+    cmdbuf_bi.pNext = nullptr;
+    cmdbuf_bi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+    cmdbuf_bi.pInheritanceInfo = nullptr;
+
     std::unique_lock lock{mutex};
     current_fence = next_fence;
     next_fence = &resource_manager.CommitFence();

-    current_cmdbuf = resource_manager.CommitCommandBuffer(*current_fence);
-    current_cmdbuf.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit},
-                         device.GetDispatchLoader());
+    current_cmdbuf = vk::CommandBuffer(resource_manager.CommitCommandBuffer(*current_fence),
+                                       device.GetDispatchLoader());
+    current_cmdbuf.Begin(cmdbuf_bi);

     // Enable counters once again. These are disabled when a command buffer is finished.
     if (query_cache) {
         query_cache->UpdateCounters();
@ -177,8 +206,8 @@ void VKScheduler::EndRenderPass() {
     if (!state.renderpass) {
         return;
     }
-    state.renderpass = std::nullopt;
-    Record([](auto cmdbuf, auto& dld) { cmdbuf.endRenderPass(dld); });
+    state.renderpass = nullptr;
+    Record([](vk::CommandBuffer cmdbuf) { cmdbuf.EndRenderPass(); });
 }

 void VKScheduler::AcquireNewChunk() {
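VKScheduler::Record does not touch the GPU immediately: it stores the lambda by value in a command chunk, and the worker thread later replays every stored command against the live command buffer (CommandChunk::ExecuteAll above). A self-contained sketch of that deferral pattern, reduced to std::function and a fake command buffer; all names below are hypothetical stand-ins, not yuzu's types:

#include <functional>
#include <iostream>
#include <vector>

// Toy stand-in for vk::CommandBuffer.
struct FakeCmdBuf {
    void Log(const char* what) { std::cout << what << '\n'; }
};

class MiniScheduler {
public:
    // Capture work by value now, run it later (mirrors VKScheduler::Record).
    template <typename F>
    void Record(F&& func) {
        pending.emplace_back(std::forward<F>(func));
    }

    // Replay everything against the real recording target
    // (mirrors CommandChunk::ExecuteAll on the worker thread).
    void ExecuteAll(FakeCmdBuf& cmdbuf) {
        for (auto& command : pending) {
            command(cmdbuf);
        }
        pending.clear();
    }

private:
    std::vector<std::function<void(FakeCmdBuf&)>> pending;
};

int main() {
    MiniScheduler scheduler;
    scheduler.Record([](FakeCmdBuf& cmdbuf) { cmdbuf.Log("begin render pass"); });
    scheduler.Record([](FakeCmdBuf& cmdbuf) { cmdbuf.Log("bind pipeline"); });

    FakeCmdBuf cmdbuf;
    scheduler.ExecuteAll(cmdbuf); // runs the captured commands in order
}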

View File

@ -13,7 +13,7 @@
 #include <utility>

 #include "common/common_types.h"
 #include "common/threadsafe_queue.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

@ -49,10 +49,10 @@ public:
     ~VKScheduler();

     /// Sends the current execution context to the GPU.
-    void Flush(bool release_fence = true, vk::Semaphore semaphore = nullptr);
+    void Flush(bool release_fence = true, VkSemaphore semaphore = nullptr);

     /// Sends the current execution context to the GPU and waits for it to complete.
-    void Finish(bool release_fence = true, vk::Semaphore semaphore = nullptr);
+    void Finish(bool release_fence = true, VkSemaphore semaphore = nullptr);

     /// Waits for the worker thread to finish executing everything. After this function returns it's
     /// safe to touch worker resources.
@ -62,14 +62,15 @@ public:
     void DispatchWork();

     /// Requests to begin a renderpass.
-    void RequestRenderpass(const vk::RenderPassBeginInfo& renderpass_bi);
+    void RequestRenderpass(VkRenderPass renderpass, VkFramebuffer framebuffer,
+                           VkExtent2D render_area);

     /// Requests the current execution context to be able to execute operations only allowed
     /// outside of a renderpass.
     void RequestOutsideRenderPassOperationContext();

     /// Binds a pipeline to the current execution context.
-    void BindGraphicsPipeline(vk::Pipeline pipeline);
+    void BindGraphicsPipeline(VkPipeline pipeline);

     /// Assigns the query cache.
     void SetQueryCache(VKQueryCache& query_cache_) {
@ -101,8 +102,7 @@ private:
     public:
         virtual ~Command() = default;

-        virtual void Execute(vk::CommandBuffer cmdbuf,
-                             const vk::DispatchLoaderDynamic& dld) const = 0;
+        virtual void Execute(vk::CommandBuffer cmdbuf) const = 0;

         Command* GetNext() const {
             return next;
@ -125,9 +125,8 @@ private:
         TypedCommand(TypedCommand&&) = delete;
         TypedCommand& operator=(TypedCommand&&) = delete;

-        void Execute(vk::CommandBuffer cmdbuf,
-                     const vk::DispatchLoaderDynamic& dld) const override {
-            command(cmdbuf, dld);
+        void Execute(vk::CommandBuffer cmdbuf) const override {
+            command(cmdbuf);
         }

     private:
@ -136,7 +135,7 @@ private:
     class CommandChunk final {
     public:
-        void ExecuteAll(vk::CommandBuffer cmdbuf, const vk::DispatchLoaderDynamic& dld);
+        void ExecuteAll(vk::CommandBuffer cmdbuf);

         template <typename T>
         bool Record(T& command) {
@ -175,7 +174,7 @@ private:
     void WorkerThread();

-    void SubmitExecution(vk::Semaphore semaphore);
+    void SubmitExecution(VkSemaphore semaphore);

     void AllocateNewContext();

@ -198,8 +197,10 @@ private:
     VKFence* next_fence = nullptr;

     struct State {
-        std::optional<vk::RenderPassBeginInfo> renderpass;
-        vk::Pipeline graphics_pipeline;
+        VkRenderPass renderpass = nullptr;
+        VkFramebuffer framebuffer = nullptr;
+        VkExtent2D render_area = {0, 0};
+        VkPipeline graphics_pipeline = nullptr;
     } state;

     std::unique_ptr<CommandChunk> chunk;

View File

@ -801,7 +801,7 @@ private:
         if (IsOutputAttributeArray()) {
             const u32 num = GetNumOutputVertices();
             type = TypeArray(type, Constant(t_uint, num));
-            if (device.GetDriverID() != vk::DriverIdKHR::eIntelProprietaryWindows) {
+            if (device.GetDriverID() != VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR) {
                 // Intel's proprietary driver fails to set up defaults for arrayed output
                 // attributes.
                 varying_default = ConstantComposite(type, std::vector(num, varying_default));

View File

@ -8,27 +8,25 @@

 #include "common/alignment.h"
 #include "common/assert.h"
 #include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_shader_util.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

-UniqueShaderModule BuildShader(const VKDevice& device, std::size_t code_size, const u8* code_data) {
+vk::ShaderModule BuildShader(const VKDevice& device, std::size_t code_size, const u8* code_data) {
     // Avoid undefined behavior by copying to a staging allocation
     ASSERT(code_size % sizeof(u32) == 0);
     const auto data = std::make_unique<u32[]>(code_size / sizeof(u32));
     std::memcpy(data.get(), code_data, code_size);

-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    const vk::ShaderModuleCreateInfo shader_ci({}, code_size, data.get());
-    vk::ShaderModule shader_module;
-    if (dev.createShaderModule(&shader_ci, nullptr, &shader_module, dld) != vk::Result::eSuccess) {
-        UNREACHABLE_MSG("Shader module failed to build!");
-    }
-    return UniqueShaderModule(shader_module, vk::ObjectDestroy(dev, nullptr, dld));
+    VkShaderModuleCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.codeSize = code_size;
+    ci.pCode = data.get();
+    return device.GetLogical().CreateShaderModule(ci);
 }

 } // namespace Vulkan
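BuildShader copies the incoming byte stream into freshly allocated u32 storage because VkShaderModuleCreateInfo::pCode expects 4-byte-aligned 32-bit words, and reinterpreting an arbitrary u8 pointer would be undefined behavior. The same repacking in isolation, with no Vulkan dependency (PackSpirv and the sample bytes are illustrative):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Re-pack a byte blob into properly aligned 32-bit words,
// as required for SPIR-V code pointers.
std::vector<std::uint32_t> PackSpirv(const std::uint8_t* bytes, std::size_t size) {
    assert(size % sizeof(std::uint32_t) == 0); // SPIR-V is a stream of 32-bit words
    std::vector<std::uint32_t> words(size / sizeof(std::uint32_t));
    std::memcpy(words.data(), bytes, size); // memcpy avoids aliasing/alignment UB
    return words;
}

int main() {
    const std::uint8_t blob[8] = {0x03, 0x02, 0x23, 0x07, 0, 0, 1, 0}; // made-up bytes
    const auto words = PackSpirv(blob, sizeof(blob));
    assert(words.size() == 2);
}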

View File

@ -6,12 +6,12 @@

 #include <vector>

 #include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

 class VKDevice;

-UniqueShaderModule BuildShader(const VKDevice& device, std::size_t code_size, const u8* code_data);
+vk::ShaderModule BuildShader(const VKDevice& device, std::size_t code_size, const u8* code_data);

 } // namespace Vulkan

View File

@ -13,6 +13,7 @@
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

@ -71,17 +72,23 @@ VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_
 }

 VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) {
-    const auto usage =
-        vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst |
-        vk::BufferUsageFlagBits::eUniformBuffer | vk::BufferUsageFlagBits::eStorageBuffer |
-        vk::BufferUsageFlagBits::eIndexBuffer;
     const u32 log2 = Common::Log2Ceil64(size);
-    const vk::BufferCreateInfo buffer_ci({}, 1ULL << log2, usage, vk::SharingMode::eExclusive, 0,
-                                         nullptr);
-    const auto dev = device.GetLogical();
+
+    VkBufferCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.size = 1ULL << log2;
+    ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
+               VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
+               VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    ci.queueFamilyIndexCount = 0;
+    ci.pQueueFamilyIndices = nullptr;
+
     auto buffer = std::make_unique<VKBuffer>();
-    buffer->handle = dev.createBufferUnique(buffer_ci, nullptr, device.GetDispatchLoader());
-    buffer->commit = memory_manager.Commit(*buffer->handle, host_visible);
+    buffer->handle = device.GetLogical().CreateBuffer(ci);
+    buffer->commit = memory_manager.Commit(buffer->handle, host_visible);

     auto& entries = GetCache(host_visible)[log2].entries;
     return *entries.emplace_back(std::move(buffer), scheduler.GetFence(), epoch).buffer;
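CreateStagingBuffer rounds every request up to the next power of two and keys its cache on that exponent, so any later request that lands in the same bucket can reuse an existing buffer. A dependency-free sketch of the sizing scheme; Log2Ceil below is a local stand-in for Common::Log2Ceil64:

#include <cassert>
#include <cstdint>

// Smallest n such that (1 << n) >= size; stand-in for Common::Log2Ceil64.
std::uint32_t Log2Ceil(std::uint64_t size) {
    std::uint32_t log2 = 0;
    while ((std::uint64_t{1} << log2) < size) {
        ++log2;
    }
    return log2;
}

int main() {
    // A 1000-byte request lands in the 1024-byte bucket (log2 == 10).
    assert(Log2Ceil(1000) == 10);
    // Exact powers of two map to their own bucket.
    assert(Log2Ceil(4096) == 12);
    // Any later request up to 1024 bytes can reuse the first bucket's buffers.
}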

View File

@ -11,9 +11,9 @@

 #include "common/common_types.h"

-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_memory_manager.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

@ -22,7 +22,7 @@ class VKFenceWatch;
 class VKScheduler;

 struct VKBuffer final {
-    UniqueBuffer handle;
+    vk::Buffer handle;
     VKMemoryCommit commit;
 };
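Across this change, vulkan.hpp's UniqueHandle aliases (UniqueBuffer, UniqueSampler, and so on) are replaced by the in-tree vk:: handle types from wrapper.h, which behave as move-only RAII owners whose raw Vulkan handle is reached through operator*. A generic sketch of that ownership pattern under assumed semantics; the Owned template and its Destroy hook are illustrative, not the actual wrapper API:

#include <utility>

// Move-only RAII owner for an opaque handle, in the spirit of the
// vk:: wrapper types that replace vulkan.hpp's UniqueHandle here.
template <typename T, void (*Destroy)(T)>
class Owned {
public:
    Owned() = default;
    explicit Owned(T handle_) : handle{handle_} {}
    ~Owned() { Reset(); }

    Owned(Owned&& rhs) noexcept : handle{std::exchange(rhs.handle, T{})} {}
    Owned& operator=(Owned&& rhs) noexcept {
        Reset();
        handle = std::exchange(rhs.handle, T{});
        return *this;
    }
    Owned(const Owned&) = delete;
    Owned& operator=(const Owned&) = delete;

    T operator*() const { return handle; } // raw handle, like *buffer

private:
    void Reset() {
        if (handle != T{}) {
            Destroy(handle);
        }
    }
    T handle{};
};

// Example instantiation with a fake integer handle type.
inline void DestroyFake(int) {}

int main() {
    Owned<int, DestroyFake> a{42};
    Owned<int, DestroyFake> b = std::move(a); // ownership moves, no double free
    return *b == 42 ? 0 : 1;
}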

View File

@ -9,11 +9,11 @@

 #include "common/alignment.h"
 #include "common/assert.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_stream_buffer.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

@ -25,8 +25,8 @@ constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000;
 constexpr u64 STREAM_BUFFER_SIZE = 256 * 1024 * 1024;

 std::optional<u32> FindMemoryType(const VKDevice& device, u32 filter,
-                                  vk::MemoryPropertyFlags wanted) {
-    const auto properties = device.GetPhysical().getMemoryProperties(device.GetDispatchLoader());
+                                  VkMemoryPropertyFlags wanted) {
+    const auto properties = device.GetPhysical().GetMemoryProperties();
     for (u32 i = 0; i < properties.memoryTypeCount; i++) {
         if (!(filter & (1 << i))) {
             continue;
@ -35,13 +35,13 @@ std::optional<u32> FindMemoryType(const VKDevice& device, u32 filter,
             return i;
         }
     }
-    return {};
+    return std::nullopt;
 }

 } // Anonymous namespace

 VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
-                               vk::BufferUsageFlags usage)
+                               VkBufferUsageFlags usage)
     : device{device}, scheduler{scheduler} {
     CreateBuffers(usage);
     ReserveWatches(current_watches, WATCHES_INITIAL_RESERVE);
@ -78,17 +78,13 @@ std::tuple<u8*, u64, bool> VKStreamBuffer::Map(u64 size, u64 alignment) {
         invalidated = true;
     }

-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    const auto pointer = reinterpret_cast<u8*>(dev.mapMemory(*memory, offset, size, {}, dld));
-    return {pointer, offset, invalidated};
+    return {memory.Map(offset, size), offset, invalidated};
 }

 void VKStreamBuffer::Unmap(u64 size) {
     ASSERT_MSG(size <= mapped_size, "Reserved size is too small");

-    const auto dev = device.GetLogical();
-    dev.unmapMemory(*memory, device.GetDispatchLoader());
+    memory.Unmap();

     offset += size;

@ -101,30 +97,42 @@ void VKStreamBuffer::Unmap(u64 size) {
     watch.fence.Watch(scheduler.GetFence());
 }

-void VKStreamBuffer::CreateBuffers(vk::BufferUsageFlags usage) {
-    const vk::BufferCreateInfo buffer_ci({}, STREAM_BUFFER_SIZE, usage, vk::SharingMode::eExclusive,
-                                         0, nullptr);
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    buffer = dev.createBufferUnique(buffer_ci, nullptr, dld);
+void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) {
+    VkBufferCreateInfo buffer_ci;
+    buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+    buffer_ci.pNext = nullptr;
+    buffer_ci.flags = 0;
+    buffer_ci.size = STREAM_BUFFER_SIZE;
+    buffer_ci.usage = usage;
+    buffer_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    buffer_ci.queueFamilyIndexCount = 0;
+    buffer_ci.pQueueFamilyIndices = nullptr;

-    const auto requirements = dev.getBufferMemoryRequirements(*buffer, dld);
+    const auto& dev = device.GetLogical();
+    buffer = dev.CreateBuffer(buffer_ci);
+
+    const auto requirements = dev.GetBufferMemoryRequirements(*buffer);
     // Prefer device local host visible allocations (this should hit AMD's pinned memory).
     auto type = FindMemoryType(device, requirements.memoryTypeBits,
-                               vk::MemoryPropertyFlagBits::eHostVisible |
-                                   vk::MemoryPropertyFlagBits::eHostCoherent |
-                                   vk::MemoryPropertyFlagBits::eDeviceLocal);
+                               VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+                                   VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
     if (!type) {
         // Otherwise search for a host visible allocation.
         type = FindMemoryType(device, requirements.memoryTypeBits,
-                              vk::MemoryPropertyFlagBits::eHostVisible |
-                                  vk::MemoryPropertyFlagBits::eHostCoherent);
+                              VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+                                  VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
         ASSERT_MSG(type, "No host visible and coherent memory type found");
     }
-    const vk::MemoryAllocateInfo alloc_ci(requirements.size, *type);
-    memory = dev.allocateMemoryUnique(alloc_ci, nullptr, dld);

-    dev.bindBufferMemory(*buffer, *memory, 0, dld);
+    VkMemoryAllocateInfo memory_ai;
+    memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+    memory_ai.pNext = nullptr;
+    memory_ai.allocationSize = requirements.size;
+    memory_ai.memoryTypeIndex = *type;
+
+    memory = dev.AllocateMemory(memory_ai);
+    buffer.BindMemory(*memory, 0);
 }

 void VKStreamBuffer::ReserveWatches(std::vector<Watch>& watches, std::size_t grow_size) {
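FindMemoryType above scans the physical device's memory types, skipping any index that the resource's memoryTypeBits filter disallows and accepting the first type that carries every wanted property flag; CreateBuffers then tries a device-local host-visible combination before falling back to plain host-visible memory. The same selection logic against mocked data, so it runs without a Vulkan device (the structs and flag values are simplified stand-ins):

#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

// Simplified stand-in for VkMemoryType / VkPhysicalDeviceMemoryProperties.
struct MemoryType {
    std::uint32_t property_flags;
};

std::optional<std::uint32_t> FindMemoryType(const std::vector<MemoryType>& types,
                                            std::uint32_t filter, std::uint32_t wanted) {
    for (std::uint32_t i = 0; i < types.size(); ++i) {
        if (!(filter & (1u << i))) {
            continue; // this resource cannot live in memory type i
        }
        if ((types[i].property_flags & wanted) == wanted) {
            return i; // all requested property bits are present
        }
    }
    return std::nullopt;
}

int main() {
    constexpr std::uint32_t HostVisible = 1u << 0;
    constexpr std::uint32_t HostCoherent = 1u << 1;
    constexpr std::uint32_t DeviceLocal = 1u << 2;

    const std::vector<MemoryType> types{
        {DeviceLocal},                              // index 0
        {HostVisible | HostCoherent},               // index 1
        {HostVisible | HostCoherent | DeviceLocal}, // index 2
    };
    // Prefer pinned-style memory first, mirroring VKStreamBuffer::CreateBuffers.
    auto type = FindMemoryType(types, 0b111, HostVisible | HostCoherent | DeviceLocal);
    assert(type && *type == 2);
    // Fall back to plain host-visible memory when the filter excludes index 2.
    type = FindMemoryType(types, 0b011, HostVisible | HostCoherent | DeviceLocal);
    if (!type) {
        type = FindMemoryType(types, 0b011, HostVisible | HostCoherent);
    }
    assert(type && *type == 1);
}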

View File

@ -9,7 +9,7 @@
 #include <vector>

 #include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

@ -21,7 +21,7 @@ class VKScheduler;
 class VKStreamBuffer final {
 public:
     explicit VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
-                            vk::BufferUsageFlags usage);
+                            VkBufferUsageFlags usage);
     ~VKStreamBuffer();

     /**
@ -35,7 +35,7 @@ public:
     /// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy.
     void Unmap(u64 size);

-    vk::Buffer GetHandle() const {
+    VkBuffer GetHandle() const {
         return *buffer;
     }

@ -46,7 +46,7 @@ private:
     };

     /// Creates Vulkan buffer handles committing the required memory.
-    void CreateBuffers(vk::BufferUsageFlags usage);
+    void CreateBuffers(VkBufferUsageFlags usage);

     /// Increases the amount of watches available.
     void ReserveWatches(std::vector<Watch>& watches, std::size_t grow_size);

@ -55,11 +55,9 @@ private:
     const VKDevice& device;   ///< Vulkan device manager.
     VKScheduler& scheduler;   ///< Command scheduler.

-    const vk::AccessFlags access;                ///< Access usage of this stream buffer.
-    const vk::PipelineStageFlags pipeline_stage; ///< Pipeline usage of this stream buffer.
-
-    UniqueBuffer buffer;       ///< Mapped buffer.
-    UniqueDeviceMemory memory; ///< Memory allocation.
+    vk::Buffer buffer;       ///< Mapped buffer.
+    vk::DeviceMemory memory; ///< Memory allocation.

     u64 offset{};      ///< Buffer iterator.
     u64 mapped_size{}; ///< Size reserved for the current copy.

View File

@ -11,69 +11,64 @@
 #include "common/logging/log.h"
 #include "core/core.h"
 #include "core/frontend/framebuffer_layout.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
 #include "video_core/renderer_vulkan/vk_swapchain.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {

 namespace {

-vk::SurfaceFormatKHR ChooseSwapSurfaceFormat(const std::vector<vk::SurfaceFormatKHR>& formats,
-                                             bool srgb) {
-    if (formats.size() == 1 && formats[0].format == vk::Format::eUndefined) {
-        vk::SurfaceFormatKHR format;
-        format.format = vk::Format::eB8G8R8A8Unorm;
-        format.colorSpace = vk::ColorSpaceKHR::eSrgbNonlinear;
+VkSurfaceFormatKHR ChooseSwapSurfaceFormat(vk::Span<VkSurfaceFormatKHR> formats, bool srgb) {
+    if (formats.size() == 1 && formats[0].format == VK_FORMAT_UNDEFINED) {
+        VkSurfaceFormatKHR format;
+        format.format = VK_FORMAT_B8G8R8A8_UNORM;
+        format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
         return format;
     }
     const auto& found = std::find_if(formats.begin(), formats.end(), [srgb](const auto& format) {
-        const auto request_format = srgb ? vk::Format::eB8G8R8A8Srgb : vk::Format::eB8G8R8A8Unorm;
+        const auto request_format = srgb ? VK_FORMAT_B8G8R8A8_SRGB : VK_FORMAT_B8G8R8A8_UNORM;
         return format.format == request_format &&
-               format.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear;
+               format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
     });
     return found != formats.end() ? *found : formats[0];
 }

-vk::PresentModeKHR ChooseSwapPresentMode(const std::vector<vk::PresentModeKHR>& modes) {
+VkPresentModeKHR ChooseSwapPresentMode(vk::Span<VkPresentModeKHR> modes) {
     // Mailbox doesn't lock the application like fifo (vsync), prefer it
-    const auto& found = std::find_if(modes.begin(), modes.end(), [](const auto& mode) {
-        return mode == vk::PresentModeKHR::eMailbox;
-    });
-    return found != modes.end() ? *found : vk::PresentModeKHR::eFifo;
+    const auto found = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR);
+    return found != modes.end() ? *found : VK_PRESENT_MODE_FIFO_KHR;
 }

-vk::Extent2D ChooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width,
-                              u32 height) {
+VkExtent2D ChooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height) {
     constexpr auto undefined_size{std::numeric_limits<u32>::max()};
     if (capabilities.currentExtent.width != undefined_size) {
         return capabilities.currentExtent;
     }
-    vk::Extent2D extent = {width, height};
+    VkExtent2D extent;
     extent.width = std::max(capabilities.minImageExtent.width,
-                            std::min(capabilities.maxImageExtent.width, extent.width));
+                            std::min(capabilities.maxImageExtent.width, width));
     extent.height = std::max(capabilities.minImageExtent.height,
-                             std::min(capabilities.maxImageExtent.height, extent.height));
+                             std::min(capabilities.maxImageExtent.height, height));
     return extent;
 }

 } // Anonymous namespace

-VKSwapchain::VKSwapchain(vk::SurfaceKHR surface, const VKDevice& device)
+VKSwapchain::VKSwapchain(VkSurfaceKHR surface, const VKDevice& device)
     : surface{surface}, device{device} {}

 VKSwapchain::~VKSwapchain() = default;

 void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
-    const auto& dld = device.GetDispatchLoader();
     const auto physical_device = device.GetPhysical();
-    const auto capabilities{physical_device.getSurfaceCapabilitiesKHR(surface, dld)};
+    const auto capabilities{physical_device.GetSurfaceCapabilitiesKHR(surface)};
     if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) {
         return;
     }

-    device.GetLogical().waitIdle(dld);
+    device.GetLogical().WaitIdle();
     Destroy();

     CreateSwapchain(capabilities, width, height, srgb);
@ -84,10 +79,8 @@ void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
 }

 void VKSwapchain::AcquireNextImage() {
-    const auto dev{device.GetLogical()};
-    const auto& dld{device.GetDispatchLoader()};
-    dev.acquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
-                            *present_semaphores[frame_index], {}, &image_index, dld);
+    device.GetLogical().AcquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
+                                            *present_semaphores[frame_index], {}, &image_index);

     if (auto& fence = fences[image_index]; fence) {
         fence->Wait();
@ -96,29 +89,37 @@ }
     }
 }

-bool VKSwapchain::Present(vk::Semaphore render_semaphore, VKFence& fence) {
-    const vk::Semaphore present_semaphore{*present_semaphores[frame_index]};
-    const std::array<vk::Semaphore, 2> semaphores{present_semaphore, render_semaphore};
-    const u32 wait_semaphore_count{render_semaphore ? 2U : 1U};
-    const auto& dld{device.GetDispatchLoader()};
+bool VKSwapchain::Present(VkSemaphore render_semaphore, VKFence& fence) {
+    const VkSemaphore present_semaphore{*present_semaphores[frame_index]};
+    const std::array<VkSemaphore, 2> semaphores{present_semaphore, render_semaphore};
     const auto present_queue{device.GetPresentQueue()};
     bool recreated = false;

-    const vk::PresentInfoKHR present_info(wait_semaphore_count, semaphores.data(), 1,
-                                          &swapchain.get(), &image_index, {});
-    switch (const auto result = present_queue.presentKHR(&present_info, dld); result) {
-    case vk::Result::eSuccess:
+    VkPresentInfoKHR present_info;
+    present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+    present_info.pNext = nullptr;
+    present_info.waitSemaphoreCount = render_semaphore ? 2U : 1U;
+    present_info.pWaitSemaphores = semaphores.data();
+    present_info.swapchainCount = 1;
+    present_info.pSwapchains = swapchain.address();
+    present_info.pImageIndices = &image_index;
+    present_info.pResults = nullptr;
+
+    switch (const VkResult result = present_queue.Present(present_info)) {
+    case VK_SUCCESS:
         break;
-    case vk::Result::eErrorOutOfDateKHR:
+    case VK_SUBOPTIMAL_KHR:
+        LOG_DEBUG(Render_Vulkan, "Suboptimal swapchain");
+        break;
+    case VK_ERROR_OUT_OF_DATE_KHR:
         if (current_width > 0 && current_height > 0) {
             Create(current_width, current_height, current_srgb);
             recreated = true;
         }
         break;
     default:
-        LOG_CRITICAL(Render_Vulkan, "Vulkan failed to present swapchain due to {}!",
-                     vk::to_string(result));
-        UNREACHABLE();
+        LOG_CRITICAL(Render_Vulkan, "Failed to present with error {}", vk::ToString(result));
+        break;
     }

     ASSERT(fences[image_index] == nullptr);
@ -132,74 +133,92 @@ bool VKSwapchain::HasFramebufferChanged(const Layout::FramebufferLayout& framebu
     return framebuffer.width != current_width || framebuffer.height != current_height;
 }

-void VKSwapchain::CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width,
+void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width,
                                   u32 height, bool srgb) {
-    const auto& dld{device.GetDispatchLoader()};
     const auto physical_device{device.GetPhysical()};
-    const auto formats{physical_device.getSurfaceFormatsKHR(surface, dld)};
-    const auto present_modes{physical_device.getSurfacePresentModesKHR(surface, dld)};
+    const auto formats{physical_device.GetSurfaceFormatsKHR(surface)};
+    const auto present_modes{physical_device.GetSurfacePresentModesKHR(surface)};

-    const vk::SurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats, srgb)};
-    const vk::PresentModeKHR present_mode{ChooseSwapPresentMode(present_modes)};
+    const VkSurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats, srgb)};
+    const VkPresentModeKHR present_mode{ChooseSwapPresentMode(present_modes)};

     u32 requested_image_count{capabilities.minImageCount + 1};
     if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) {
         requested_image_count = capabilities.maxImageCount;
     }

-    vk::SwapchainCreateInfoKHR swapchain_ci(
-        {}, surface, requested_image_count, surface_format.format, surface_format.colorSpace, {}, 1,
-        vk::ImageUsageFlagBits::eColorAttachment, {}, {}, {}, capabilities.currentTransform,
-        vk::CompositeAlphaFlagBitsKHR::eOpaque, present_mode, false, {});
+    VkSwapchainCreateInfoKHR swapchain_ci;
+    swapchain_ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+    swapchain_ci.pNext = nullptr;
+    swapchain_ci.flags = 0;
+    swapchain_ci.surface = surface;
+    swapchain_ci.minImageCount = requested_image_count;
+    swapchain_ci.imageFormat = surface_format.format;
+    swapchain_ci.imageColorSpace = surface_format.colorSpace;
+    swapchain_ci.imageArrayLayers = 1;
+    swapchain_ci.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+    swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    swapchain_ci.queueFamilyIndexCount = 0;
+    swapchain_ci.pQueueFamilyIndices = nullptr;
+    swapchain_ci.preTransform = capabilities.currentTransform;
+    swapchain_ci.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+    swapchain_ci.presentMode = present_mode;
+    swapchain_ci.clipped = VK_FALSE;
+    swapchain_ci.oldSwapchain = nullptr;

     const u32 graphics_family{device.GetGraphicsFamily()};
     const u32 present_family{device.GetPresentFamily()};
     const std::array<u32, 2> queue_indices{graphics_family, present_family};
     if (graphics_family != present_family) {
-        swapchain_ci.imageSharingMode = vk::SharingMode::eConcurrent;
+        swapchain_ci.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
         swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
         swapchain_ci.pQueueFamilyIndices = queue_indices.data();
     } else {
-        swapchain_ci.imageSharingMode = vk::SharingMode::eExclusive;
+        swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
     }

     // Request the size again to reduce the possibility of a TOCTOU race condition.
-    const auto updated_capabilities = physical_device.getSurfaceCapabilitiesKHR(surface, dld);
+    const auto updated_capabilities = physical_device.GetSurfaceCapabilitiesKHR(surface);
     swapchain_ci.imageExtent = ChooseSwapExtent(updated_capabilities, width, height);
     // Don't add code between this query and the swapchain creation.
-    const auto dev{device.GetLogical()};
-    swapchain = dev.createSwapchainKHRUnique(swapchain_ci, nullptr, dld);
+    swapchain = device.GetLogical().CreateSwapchainKHR(swapchain_ci);

     extent = swapchain_ci.imageExtent;
     current_width = extent.width;
     current_height = extent.height;
     current_srgb = srgb;

-    images = dev.getSwapchainImagesKHR(*swapchain, dld);
+    images = swapchain.GetImages();
     image_count = static_cast<u32>(images.size());
     image_format = surface_format.format;
 }

 void VKSwapchain::CreateSemaphores() {
-    const auto dev{device.GetLogical()};
-    const auto& dld{device.GetDispatchLoader()};
-
     present_semaphores.resize(image_count);
-    for (std::size_t i = 0; i < image_count; i++) {
-        present_semaphores[i] = dev.createSemaphoreUnique({}, nullptr, dld);
-    }
+    std::generate(present_semaphores.begin(), present_semaphores.end(),
                  [this] { return device.GetLogical().CreateSemaphore(); });
 }

 void VKSwapchain::CreateImageViews() {
-    const auto dev{device.GetLogical()};
-    const auto& dld{device.GetDispatchLoader()};
+    VkImageViewCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    // ci.image is filled per swapchain image in the loop below
+    ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
+    ci.format = image_format;
+    ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
+                     VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
+    ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+    ci.subresourceRange.baseMipLevel = 0;
+    ci.subresourceRange.levelCount = 1;
+    ci.subresourceRange.baseArrayLayer = 0;
+    ci.subresourceRange.layerCount = 1;

     image_views.resize(image_count);
     for (std::size_t i = 0; i < image_count; i++) {
-        const vk::ImageViewCreateInfo image_view_ci({}, images[i], vk::ImageViewType::e2D,
-                                                    image_format, {},
-                                                    {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1});
-        image_views[i] = dev.createImageViewUnique(image_view_ci, nullptr, dld);
+        ci.image = images[i];
+        image_views[i] = device.GetLogical().CreateImageView(ci);
     }
 }
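ChooseSwapExtent defers to the surface's fixed currentExtent when the driver reports one and otherwise clamps the requested size into the surface's minimum and maximum image extents. The clamping in isolation, with minimal stand-in structs instead of the Vulkan headers:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>

// Minimal stand-ins for VkExtent2D / VkSurfaceCapabilitiesKHR.
struct Extent2D {
    std::uint32_t width, height;
};
struct SurfaceCaps {
    Extent2D currentExtent, minImageExtent, maxImageExtent;
};

Extent2D ChooseSwapExtent(const SurfaceCaps& caps, std::uint32_t width, std::uint32_t height) {
    constexpr auto undefined_size = std::numeric_limits<std::uint32_t>::max();
    if (caps.currentExtent.width != undefined_size) {
        return caps.currentExtent; // the surface dictates a fixed size
    }
    return {std::clamp(width, caps.minImageExtent.width, caps.maxImageExtent.width),
            std::clamp(height, caps.minImageExtent.height, caps.maxImageExtent.height)};
}

int main() {
    constexpr auto undefined_size = std::numeric_limits<std::uint32_t>::max();
    // Driver leaves the extent up to us: the request is clamped into [min, max].
    const SurfaceCaps flexible{{undefined_size, undefined_size}, {64, 64}, {2048, 2048}};
    const auto extent = ChooseSwapExtent(flexible, 4000, 32);
    assert(extent.width == 2048 && extent.height == 64);
}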

View File

@ -7,7 +7,7 @@
 #include <vector>

 #include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Layout {
 struct FramebufferLayout;
@ -20,7 +20,7 @@ class VKFence;

 class VKSwapchain {
 public:
-    explicit VKSwapchain(vk::SurfaceKHR surface, const VKDevice& device);
+    explicit VKSwapchain(VkSurfaceKHR surface, const VKDevice& device);
     ~VKSwapchain();

     /// Creates (or recreates) the swapchain with a given size.
@ -31,12 +31,12 @@ public:
     /// Presents the rendered image to the swapchain. Returns true when the swapchain had to be
     /// recreated. Takes responsibility for the ownership of the fence.
-    bool Present(vk::Semaphore render_semaphore, VKFence& fence);
+    bool Present(VkSemaphore render_semaphore, VKFence& fence);

     /// Returns true when the framebuffer layout has changed.
     bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const;

-    const vk::Extent2D& GetSize() const {
+    VkExtent2D GetSize() const {
         return extent;
     }

@ -48,15 +48,15 @@ public:
         return image_index;
     }

-    vk::Image GetImageIndex(std::size_t index) const {
+    VkImage GetImageIndex(std::size_t index) const {
         return images[index];
     }

-    vk::ImageView GetImageViewIndex(std::size_t index) const {
+    VkImageView GetImageViewIndex(std::size_t index) const {
         return *image_views[index];
     }

-    vk::Format GetImageFormat() const {
+    VkFormat GetImageFormat() const {
         return image_format;
     }

@ -65,30 +65,30 @@ public:
     }

 private:
-    void CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, u32 height,
+    void CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height,
                          bool srgb);
     void CreateSemaphores();
     void CreateImageViews();

     void Destroy();

-    const vk::SurfaceKHR surface;
+    const VkSurfaceKHR surface;
     const VKDevice& device;

-    UniqueSwapchainKHR swapchain;
+    vk::SwapchainKHR swapchain;

     std::size_t image_count{};
-    std::vector<vk::Image> images;
-    std::vector<UniqueImageView> image_views;
-    std::vector<UniqueFramebuffer> framebuffers;
+    std::vector<VkImage> images;
+    std::vector<vk::ImageView> image_views;
+    std::vector<vk::Framebuffer> framebuffers;
     std::vector<VKFence*> fences;
-    std::vector<UniqueSemaphore> present_semaphores;
+    std::vector<vk::Semaphore> present_semaphores;

     u32 image_index{};
     u32 frame_index{};

-    vk::Format image_format{};
-    vk::Extent2D extent{};
+    VkFormat image_format{};
+    VkExtent2D extent{};

     u32 current_width{};
     u32 current_height{};

View File

@ -17,7 +17,6 @@
#include "core/memory.h" #include "core/memory.h"
#include "video_core/engines/maxwell_3d.h" #include "video_core/engines/maxwell_3d.h"
#include "video_core/morton.h" #include "video_core/morton.h"
#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h" #include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_device.h" #include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h" #include "video_core/renderer_vulkan/vk_memory_manager.h"
@ -25,6 +24,7 @@
#include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h" #include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/surface.h" #include "video_core/surface.h"
#include "video_core/textures/convert.h" #include "video_core/textures/convert.h"
@ -39,18 +39,18 @@ using VideoCore::Surface::SurfaceTarget;
namespace { namespace {
vk::ImageType SurfaceTargetToImage(SurfaceTarget target) { VkImageType SurfaceTargetToImage(SurfaceTarget target) {
switch (target) { switch (target) {
case SurfaceTarget::Texture1D: case SurfaceTarget::Texture1D:
case SurfaceTarget::Texture1DArray: case SurfaceTarget::Texture1DArray:
return vk::ImageType::e1D; return VK_IMAGE_TYPE_1D;
case SurfaceTarget::Texture2D: case SurfaceTarget::Texture2D:
case SurfaceTarget::Texture2DArray: case SurfaceTarget::Texture2DArray:
case SurfaceTarget::TextureCubemap: case SurfaceTarget::TextureCubemap:
case SurfaceTarget::TextureCubeArray: case SurfaceTarget::TextureCubeArray:
return vk::ImageType::e2D; return VK_IMAGE_TYPE_2D;
case SurfaceTarget::Texture3D: case SurfaceTarget::Texture3D:
return vk::ImageType::e3D; return VK_IMAGE_TYPE_3D;
case SurfaceTarget::TextureBuffer: case SurfaceTarget::TextureBuffer:
UNREACHABLE(); UNREACHABLE();
return {}; return {};
@ -59,35 +59,35 @@ vk::ImageType SurfaceTargetToImage(SurfaceTarget target) {
return {}; return {};
} }
vk::ImageAspectFlags PixelFormatToImageAspect(PixelFormat pixel_format) { VkImageAspectFlags PixelFormatToImageAspect(PixelFormat pixel_format) {
if (pixel_format < PixelFormat::MaxColorFormat) { if (pixel_format < PixelFormat::MaxColorFormat) {
return vk::ImageAspectFlagBits::eColor; return VK_IMAGE_ASPECT_COLOR_BIT;
} else if (pixel_format < PixelFormat::MaxDepthFormat) { } else if (pixel_format < PixelFormat::MaxDepthFormat) {
return vk::ImageAspectFlagBits::eDepth; return VK_IMAGE_ASPECT_DEPTH_BIT;
} else if (pixel_format < PixelFormat::MaxDepthStencilFormat) { } else if (pixel_format < PixelFormat::MaxDepthStencilFormat) {
return vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil; return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
} else { } else {
UNREACHABLE_MSG("Invalid pixel format={}", static_cast<u32>(pixel_format)); UNREACHABLE_MSG("Invalid pixel format={}", static_cast<int>(pixel_format));
return vk::ImageAspectFlagBits::eColor; return VK_IMAGE_ASPECT_COLOR_BIT;
} }
} }
vk::ImageViewType GetImageViewType(SurfaceTarget target) { VkImageViewType GetImageViewType(SurfaceTarget target) {
switch (target) { switch (target) {
case SurfaceTarget::Texture1D: case SurfaceTarget::Texture1D:
return vk::ImageViewType::e1D; return VK_IMAGE_VIEW_TYPE_1D;
case SurfaceTarget::Texture2D: case SurfaceTarget::Texture2D:
return vk::ImageViewType::e2D; return VK_IMAGE_VIEW_TYPE_2D;
case SurfaceTarget::Texture3D: case SurfaceTarget::Texture3D:
return vk::ImageViewType::e3D; return VK_IMAGE_VIEW_TYPE_3D;
case SurfaceTarget::Texture1DArray: case SurfaceTarget::Texture1DArray:
return vk::ImageViewType::e1DArray; return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
case SurfaceTarget::Texture2DArray: case SurfaceTarget::Texture2DArray:
return vk::ImageViewType::e2DArray; return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
case SurfaceTarget::TextureCubemap: case SurfaceTarget::TextureCubemap:
return vk::ImageViewType::eCube; return VK_IMAGE_VIEW_TYPE_CUBE;
case SurfaceTarget::TextureCubeArray: case SurfaceTarget::TextureCubeArray:
return vk::ImageViewType::eCubeArray; return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
case SurfaceTarget::TextureBuffer: case SurfaceTarget::TextureBuffer:
break; break;
} }
@ -95,73 +95,88 @@ vk::ImageViewType GetImageViewType(SurfaceTarget target) {
return {}; return {};
} }
UniqueBuffer CreateBuffer(const VKDevice& device, const SurfaceParams& params, vk::Buffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
std::size_t host_memory_size) { std::size_t host_memory_size) {
// TODO(Rodrigo): Move texture buffer creation to the buffer cache // TODO(Rodrigo): Move texture buffer creation to the buffer cache
const vk::BufferCreateInfo buffer_ci({}, host_memory_size, VkBufferCreateInfo ci;
vk::BufferUsageFlagBits::eUniformTexelBuffer | ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
vk::BufferUsageFlagBits::eTransferSrc | ci.pNext = nullptr;
vk::BufferUsageFlagBits::eTransferDst, ci.flags = 0;
vk::SharingMode::eExclusive, 0, nullptr); ci.size = static_cast<VkDeviceSize>(host_memory_size);
const auto dev = device.GetLogical(); ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
const auto& dld = device.GetDispatchLoader(); VK_BUFFER_USAGE_TRANSFER_DST_BIT;
return dev.createBufferUnique(buffer_ci, nullptr, dld); ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = nullptr;
return device.GetLogical().CreateBuffer(ci);
} }
vk::BufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device, VkBufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
const SurfaceParams& params, const SurfaceParams& params, VkBuffer buffer,
vk::Buffer buffer,
std::size_t host_memory_size) { std::size_t host_memory_size) {
ASSERT(params.IsBuffer()); ASSERT(params.IsBuffer());
const auto format = VkBufferViewCreateInfo ci;
MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format; ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
return vk::BufferViewCreateInfo({}, buffer, format, 0, host_memory_size); ci.pNext = nullptr;
ci.flags = 0;
ci.buffer = buffer;
ci.format = MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format;
ci.offset = 0;
ci.range = static_cast<VkDeviceSize>(host_memory_size);
return ci;
} }
vk::ImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) { VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) {
constexpr auto sample_count = vk::SampleCountFlagBits::e1;
constexpr auto tiling = vk::ImageTiling::eOptimal;
ASSERT(!params.IsBuffer()); ASSERT(!params.IsBuffer());
const auto [format, attachable, storage] = const auto [format, attachable, storage] =
MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.pixel_format); MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.pixel_format);
auto image_usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst | VkImageCreateInfo ci;
vk::ImageUsageFlagBits::eTransferSrc; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = nullptr;
ci.flags = 0;
ci.imageType = SurfaceTargetToImage(params.target);
ci.format = format;
ci.mipLevels = params.num_levels;
ci.arrayLayers = static_cast<u32>(params.GetNumLayers());
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = nullptr;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
if (attachable) { if (attachable) {
image_usage |= params.IsPixelFormatZeta() ? vk::ImageUsageFlagBits::eDepthStencilAttachment ci.usage |= params.IsPixelFormatZeta() ? VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
: vk::ImageUsageFlagBits::eColorAttachment; : VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
} }
if (storage) { if (storage) {
image_usage |= vk::ImageUsageFlagBits::eStorage; ci.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
} }
vk::ImageCreateFlags flags;
vk::Extent3D extent;
switch (params.target) { switch (params.target) {
case SurfaceTarget::TextureCubemap: case SurfaceTarget::TextureCubemap:
case SurfaceTarget::TextureCubeArray: case SurfaceTarget::TextureCubeArray:
flags |= vk::ImageCreateFlagBits::eCubeCompatible; ci.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
[[fallthrough]]; [[fallthrough]];
case SurfaceTarget::Texture1D: case SurfaceTarget::Texture1D:
case SurfaceTarget::Texture1DArray: case SurfaceTarget::Texture1DArray:
case SurfaceTarget::Texture2D: case SurfaceTarget::Texture2D:
case SurfaceTarget::Texture2DArray: case SurfaceTarget::Texture2DArray:
extent = vk::Extent3D(params.width, params.height, 1); ci.extent = {params.width, params.height, 1};
break; break;
case SurfaceTarget::Texture3D: case SurfaceTarget::Texture3D:
extent = vk::Extent3D(params.width, params.height, params.depth); ci.extent = {params.width, params.height, params.depth};
break; break;
case SurfaceTarget::TextureBuffer: case SurfaceTarget::TextureBuffer:
UNREACHABLE(); UNREACHABLE();
} }
return vk::ImageCreateInfo(flags, SurfaceTargetToImage(params.target), format, extent, return ci;
params.num_levels, static_cast<u32>(params.GetNumLayers()),
sample_count, tiling, image_usage, vk::SharingMode::eExclusive, 0,
nullptr, vk::ImageLayout::eUndefined);
} }
} // Anonymous namespace } // Anonymous namespace
@ -175,15 +190,13 @@ CachedSurface::CachedSurface(Core::System& system, const VKDevice& device,
memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} { memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} {
if (params.IsBuffer()) { if (params.IsBuffer()) {
buffer = CreateBuffer(device, params, host_memory_size); buffer = CreateBuffer(device, params, host_memory_size);
commit = memory_manager.Commit(*buffer, false); commit = memory_manager.Commit(buffer, false);
const auto buffer_view_ci = const auto buffer_view_ci =
GenerateBufferViewCreateInfo(device, params, *buffer, host_memory_size); GenerateBufferViewCreateInfo(device, params, *buffer, host_memory_size);
format = buffer_view_ci.format; format = buffer_view_ci.format;
const auto dev = device.GetLogical(); buffer_view = device.GetLogical().CreateBufferView(buffer_view_ci);
const auto& dld = device.GetDispatchLoader();
buffer_view = dev.createBufferViewUnique(buffer_view_ci, nullptr, dld);
} else { } else {
const auto image_ci = GenerateImageCreateInfo(device, params); const auto image_ci = GenerateImageCreateInfo(device, params);
format = image_ci.format; format = image_ci.format;
@@ -221,16 +234,15 @@ void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
     // We can't copy images to buffers inside a renderpass
     scheduler.RequestOutsideRenderPassOperationContext();

-    FullTransition(vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferRead,
-                   vk::ImageLayout::eTransferSrcOptimal);
+    FullTransition(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_READ_BIT,
+                   VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);

     const auto& buffer = staging_pool.GetUnusedBuffer(host_memory_size, true);
     // TODO(Rodrigo): Do this in a single copy
     for (u32 level = 0; level < params.num_levels; ++level) {
-        scheduler.Record([image = image->GetHandle(), buffer = *buffer.handle,
-                          copy = GetBufferImageCopy(level)](auto cmdbuf, auto& dld) {
-            cmdbuf.copyImageToBuffer(image, vk::ImageLayout::eTransferSrcOptimal, buffer, {copy},
-                                     dld);
+        scheduler.Record([image = *image->GetHandle(), buffer = *buffer.handle,
+                          copy = GetBufferImageCopy(level)](vk::CommandBuffer cmdbuf) {
+            cmdbuf.CopyImageToBuffer(image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, copy);
         });
     }
     scheduler.Finish();
@@ -257,15 +269,27 @@ void CachedSurface::UploadBuffer(const std::vector<u8>& staging_buffer) {
     std::memcpy(src_buffer.commit->Map(host_memory_size), staging_buffer.data(), host_memory_size);

     scheduler.Record([src_buffer = *src_buffer.handle, dst_buffer = *buffer,
-                      size = host_memory_size](auto cmdbuf, auto& dld) {
-        const vk::BufferCopy copy(0, 0, size);
-        cmdbuf.copyBuffer(src_buffer, dst_buffer, {copy}, dld);
-
-        cmdbuf.pipelineBarrier(
-            vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eVertexShader, {}, {},
-            {vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferWrite,
-                                     vk::AccessFlagBits::eShaderRead, 0, 0, dst_buffer, 0, size)},
-            {}, dld);
+                      size = host_memory_size](vk::CommandBuffer cmdbuf) {
+        VkBufferCopy copy;
+        copy.srcOffset = 0;
+        copy.dstOffset = 0;
+        copy.size = size;
+        cmdbuf.CopyBuffer(src_buffer, dst_buffer, copy);
+
+        VkBufferMemoryBarrier barrier;
+        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+        barrier.pNext = nullptr;
+        barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+        barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barrier.buffer = dst_buffer;
+        barrier.offset = 0;
+        barrier.size = size;
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
+                               0, {}, barrier, {});
     });
 }
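The barrier above is easy to get wrong because three different enum families meet in one struct: `srcAccessMask`/`dstAccessMask` take `VK_ACCESS_*` bits, the stage masks (`VK_PIPELINE_STAGE_*`) belong to the `PipelineBarrier` call itself, and the queue family fields take an index or `VK_QUEUE_FAMILY_IGNORED` when no ownership transfer is intended. A sketch of a helper that keeps those groups straight; `MakeBufferBarrier` is not part of this patch:

// Sketch only: builds a full-range VkBufferMemoryBarrier with no queue family
// ownership transfer. Access masks are VK_ACCESS_* bits; the pipeline stage
// masks are passed to vkCmdPipelineBarrier, not stored in the barrier.
VkBufferMemoryBarrier MakeBufferBarrier(VkBuffer buffer, VkDeviceSize size,
                                        VkAccessFlags src_access, VkAccessFlags dst_access) {
    VkBufferMemoryBarrier barrier;
    barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
    barrier.pNext = nullptr;
    barrier.srcAccessMask = src_access;
    barrier.dstAccessMask = dst_access;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.buffer = buffer;
    barrier.offset = 0;
    barrier.size = size;
    return barrier;
}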
@@ -273,43 +297,49 @@ void CachedSurface::UploadImage(const std::vector<u8>& staging_buffer) {
     const auto& src_buffer = staging_pool.GetUnusedBuffer(host_memory_size, true);
     std::memcpy(src_buffer.commit->Map(host_memory_size), staging_buffer.data(), host_memory_size);

-    FullTransition(vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferWrite,
-                   vk::ImageLayout::eTransferDstOptimal);
+    FullTransition(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
+                   VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

     for (u32 level = 0; level < params.num_levels; ++level) {
-        vk::BufferImageCopy copy = GetBufferImageCopy(level);
-        if (image->GetAspectMask() ==
-            (vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil)) {
-            vk::BufferImageCopy depth = copy;
-            vk::BufferImageCopy stencil = copy;
-            depth.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eDepth;
-            stencil.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eStencil;
-            scheduler.Record([buffer = *src_buffer.handle, image = image->GetHandle(), depth,
-                              stencil](auto cmdbuf, auto& dld) {
-                cmdbuf.copyBufferToImage(buffer, image, vk::ImageLayout::eTransferDstOptimal,
-                                         {depth, stencil}, dld);
+        const VkBufferImageCopy copy = GetBufferImageCopy(level);
+        if (image->GetAspectMask() == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
+            scheduler.Record([buffer = *src_buffer.handle, image = *image->GetHandle(),
+                              copy](vk::CommandBuffer cmdbuf) {
+                std::array<VkBufferImageCopy, 2> copies = {copy, copy};
+                copies[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+                copies[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+                cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                                         copies);
             });
         } else {
-            scheduler.Record([buffer = *src_buffer.handle, image = image->GetHandle(),
-                              copy](auto cmdbuf, auto& dld) {
-                cmdbuf.copyBufferToImage(buffer, image, vk::ImageLayout::eTransferDstOptimal,
-                                         {copy}, dld);
+            scheduler.Record([buffer = *src_buffer.handle, image = *image->GetHandle(),
+                              copy](vk::CommandBuffer cmdbuf) {
+                cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, copy);
             });
         }
     }
 }

-vk::BufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const {
-    const u32 vk_depth = params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1;
-    const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level, is_converted);
-
-    return vk::BufferImageCopy(
-        mip_offset, 0, 0,
-        {image->GetAspectMask(), level, 0, static_cast<u32>(params.GetNumLayers())}, {0, 0, 0},
-        {params.GetMipWidth(level), params.GetMipHeight(level), vk_depth});
+VkBufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const {
+    VkBufferImageCopy copy;
+    copy.bufferOffset = params.GetHostMipmapLevelOffset(level, is_converted);
+    copy.bufferRowLength = 0;
+    copy.bufferImageHeight = 0;
+    copy.imageSubresource.aspectMask = image->GetAspectMask();
+    copy.imageSubresource.mipLevel = level;
+    copy.imageSubresource.baseArrayLayer = 0;
+    copy.imageSubresource.layerCount = static_cast<u32>(params.GetNumLayers());
+    copy.imageOffset.x = 0;
+    copy.imageOffset.y = 0;
+    copy.imageOffset.z = 0;
+    copy.imageExtent.width = params.GetMipWidth(level);
+    copy.imageExtent.height = params.GetMipHeight(level);
+    copy.imageExtent.depth =
+        params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1;
+    return copy;
 }

-vk::ImageSubresourceRange CachedSurface::GetImageSubresourceRange() const {
+VkImageSubresourceRange CachedSurface::GetImageSubresourceRange() const {
     return {image->GetAspectMask(), 0, params.num_levels, 0,
             static_cast<u32>(params.GetNumLayers())};
 }
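Worth noting in `GetBufferImageCopy`: setting `bufferRowLength` and `bufferImageHeight` to zero tells Vulkan the staging data is tightly packed, so the row pitch is derived from `imageExtent`. A sketch of the implied size of one level under that convention; the helper names here are hypothetical, not from this patch:

// Sketch, hypothetical names: bytes consumed from the staging buffer for one
// mip level when bufferRowLength == bufferImageHeight == 0 (tightly packed).
const std::size_t level_size = bytes_per_texel * params.GetMipWidth(level) *
                               params.GetMipHeight(level) * mip_depth * num_layers;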
@@ -321,11 +351,11 @@ CachedSurfaceView::CachedSurfaceView(const VKDevice& device, CachedSurface& surf
       aspect_mask{surface.GetAspectMask()}, device{device}, surface{surface},
       base_layer{params.base_layer}, num_layers{params.num_layers}, base_level{params.base_level},
       num_levels{params.num_levels}, image_view_type{image ? GetImageViewType(params.target)
-                                                           : vk::ImageViewType{}} {}
+                                                           : VK_IMAGE_VIEW_TYPE_1D} {}

 CachedSurfaceView::~CachedSurfaceView() = default;

-vk::ImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y_source,
+VkImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y_source,
                                            SwizzleSource z_source, SwizzleSource w_source) {
     const u32 swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source);
     if (last_image_view && last_swizzle == swizzle) {
@@ -351,37 +381,45 @@ vk::ImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource
     // Games can sample depth or stencil values on textures. This is decided by the swizzle value on
     // hardware. To emulate this on Vulkan we specify it in the aspect.
-    vk::ImageAspectFlags aspect = aspect_mask;
-    if (aspect == (vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil)) {
+    VkImageAspectFlags aspect = aspect_mask;
+    if (aspect == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
         UNIMPLEMENTED_IF(x_source != SwizzleSource::R && x_source != SwizzleSource::G);
         const bool is_first = x_source == SwizzleSource::R;
         switch (params.pixel_format) {
         case VideoCore::Surface::PixelFormat::Z24S8:
         case VideoCore::Surface::PixelFormat::Z32FS8:
-            aspect = is_first ? vk::ImageAspectFlagBits::eDepth : vk::ImageAspectFlagBits::eStencil;
+            aspect = is_first ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_STENCIL_BIT;
             break;
         case VideoCore::Surface::PixelFormat::S8Z24:
-            aspect = is_first ? vk::ImageAspectFlagBits::eStencil : vk::ImageAspectFlagBits::eDepth;
+            aspect = is_first ? VK_IMAGE_ASPECT_STENCIL_BIT : VK_IMAGE_ASPECT_DEPTH_BIT;
             break;
         default:
-            aspect = vk::ImageAspectFlagBits::eDepth;
+            aspect = VK_IMAGE_ASPECT_DEPTH_BIT;
             UNIMPLEMENTED();
         }

         // Vulkan doesn't seem to understand swizzling of a depth stencil image, use identity
-        swizzle_x = vk::ComponentSwizzle::eR;
-        swizzle_y = vk::ComponentSwizzle::eG;
-        swizzle_z = vk::ComponentSwizzle::eB;
-        swizzle_w = vk::ComponentSwizzle::eA;
+        swizzle_x = VK_COMPONENT_SWIZZLE_R;
+        swizzle_y = VK_COMPONENT_SWIZZLE_G;
+        swizzle_z = VK_COMPONENT_SWIZZLE_B;
+        swizzle_w = VK_COMPONENT_SWIZZLE_A;
     }

-    const vk::ImageViewCreateInfo image_view_ci(
-        {}, surface.GetImageHandle(), image_view_type, surface.GetImage().GetFormat(),
-        {swizzle_x, swizzle_y, swizzle_z, swizzle_w},
-        {aspect, base_level, num_levels, base_layer, num_layers});
-
-    const auto dev = device.GetLogical();
-    image_view = dev.createImageViewUnique(image_view_ci, nullptr, device.GetDispatchLoader());
+    VkImageViewCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.image = surface.GetImageHandle();
+    ci.viewType = image_view_type;
+    ci.format = surface.GetImage().GetFormat();
+    ci.components = {swizzle_x, swizzle_y, swizzle_z, swizzle_w};
+    ci.subresourceRange.aspectMask = aspect;
+    ci.subresourceRange.baseMipLevel = base_level;
+    ci.subresourceRange.levelCount = num_levels;
+    ci.subresourceRange.baseArrayLayer = base_layer;
+    ci.subresourceRange.layerCount = num_layers;
+    image_view = device.GetLogical().CreateImageView(ci);
+
     return last_image_view = *image_view;
 }
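`EncodeSwizzle` is defined elsewhere in this file, so it does not appear in the hunk; a plausible reconstruction of the key it produces for `view_cache` packs one component per byte:

// Plausible sketch, not the verbatim function: packs the four swizzle sources
// into a single u32 so it can key the unordered_map view cache above.
u32 EncodeSwizzle(SwizzleSource x_source, SwizzleSource y_source, SwizzleSource z_source,
                  SwizzleSource w_source) {
    return (static_cast<u32>(x_source) << 24) | (static_cast<u32>(y_source) << 16) |
           (static_cast<u32>(z_source) << 8) | static_cast<u32>(w_source);
}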
@@ -418,25 +456,36 @@ void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface,
     scheduler.RequestOutsideRenderPassOperationContext();

     src_surface->Transition(copy_params.source_z, copy_params.depth, copy_params.source_level, 1,
-                            vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferRead,
-                            vk::ImageLayout::eTransferSrcOptimal);
-    dst_surface->Transition(
-        dst_base_layer, num_layers, copy_params.dest_level, 1, vk::PipelineStageFlagBits::eTransfer,
-        vk::AccessFlagBits::eTransferWrite, vk::ImageLayout::eTransferDstOptimal);
-
-    const vk::ImageSubresourceLayers src_subresource(
-        src_surface->GetAspectMask(), copy_params.source_level, copy_params.source_z, num_layers);
-    const vk::ImageSubresourceLayers dst_subresource(
-        dst_surface->GetAspectMask(), copy_params.dest_level, dst_base_layer, num_layers);
-    const vk::Offset3D src_offset(copy_params.source_x, copy_params.source_y, 0);
-    const vk::Offset3D dst_offset(copy_params.dest_x, copy_params.dest_y, dst_offset_z);
-    const vk::Extent3D extent(copy_params.width, copy_params.height, extent_z);
-    const vk::ImageCopy copy(src_subresource, src_offset, dst_subresource, dst_offset, extent);
-    const vk::Image src_image = src_surface->GetImageHandle();
-    const vk::Image dst_image = dst_surface->GetImageHandle();
-    scheduler.Record([src_image, dst_image, copy](auto cmdbuf, auto& dld) {
-        cmdbuf.copyImage(src_image, vk::ImageLayout::eTransferSrcOptimal, dst_image,
-                         vk::ImageLayout::eTransferDstOptimal, {copy}, dld);
+                            VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_READ_BIT,
+                            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+    dst_surface->Transition(dst_base_layer, num_layers, copy_params.dest_level, 1,
+                            VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
+                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+    VkImageCopy copy;
+    copy.srcSubresource.aspectMask = src_surface->GetAspectMask();
+    copy.srcSubresource.mipLevel = copy_params.source_level;
+    copy.srcSubresource.baseArrayLayer = copy_params.source_z;
+    copy.srcSubresource.layerCount = num_layers;
+    copy.srcOffset.x = copy_params.source_x;
+    copy.srcOffset.y = copy_params.source_y;
+    copy.srcOffset.z = 0;
+    copy.dstSubresource.aspectMask = dst_surface->GetAspectMask();
+    copy.dstSubresource.mipLevel = copy_params.dest_level;
+    copy.dstSubresource.baseArrayLayer = dst_base_layer;
+    copy.dstSubresource.layerCount = num_layers;
+    copy.dstOffset.x = copy_params.dest_x;
+    copy.dstOffset.y = copy_params.dest_y;
+    copy.dstOffset.z = dst_offset_z;
+    copy.extent.width = copy_params.width;
+    copy.extent.height = copy_params.height;
+    copy.extent.depth = extent_z;
+
+    const VkImage src_image = src_surface->GetImageHandle();
+    const VkImage dst_image = dst_surface->GetImageHandle();
+    scheduler.Record([src_image, dst_image, copy](vk::CommandBuffer cmdbuf) {
+        cmdbuf.CopyImage(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image,
+                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, copy);
     });
 }
@@ -445,25 +494,34 @@ void VKTextureCache::ImageBlit(View& src_view, View& dst_view,
     // We can't blit inside a renderpass
     scheduler.RequestOutsideRenderPassOperationContext();

-    src_view->Transition(vk::ImageLayout::eTransferSrcOptimal, vk::PipelineStageFlagBits::eTransfer,
-                         vk::AccessFlagBits::eTransferRead);
-    dst_view->Transition(vk::ImageLayout::eTransferDstOptimal, vk::PipelineStageFlagBits::eTransfer,
-                         vk::AccessFlagBits::eTransferWrite);
+    src_view->Transition(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
+                         VK_ACCESS_TRANSFER_READ_BIT);
+    dst_view->Transition(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
+                         VK_ACCESS_TRANSFER_WRITE_BIT);

-    const auto& cfg = copy_config;
-    const auto src_top_left = vk::Offset3D(cfg.src_rect.left, cfg.src_rect.top, 0);
-    const auto src_bot_right = vk::Offset3D(cfg.src_rect.right, cfg.src_rect.bottom, 1);
-    const auto dst_top_left = vk::Offset3D(cfg.dst_rect.left, cfg.dst_rect.top, 0);
-    const auto dst_bot_right = vk::Offset3D(cfg.dst_rect.right, cfg.dst_rect.bottom, 1);
-    const vk::ImageBlit blit(src_view->GetImageSubresourceLayers(), {src_top_left, src_bot_right},
-                             dst_view->GetImageSubresourceLayers(), {dst_top_left, dst_bot_right});
+    VkImageBlit blit;
+    blit.srcSubresource = src_view->GetImageSubresourceLayers();
+    blit.srcOffsets[0].x = copy_config.src_rect.left;
+    blit.srcOffsets[0].y = copy_config.src_rect.top;
+    blit.srcOffsets[0].z = 0;
+    blit.srcOffsets[1].x = copy_config.src_rect.right;
+    blit.srcOffsets[1].y = copy_config.src_rect.bottom;
+    blit.srcOffsets[1].z = 1;
+    blit.dstSubresource = dst_view->GetImageSubresourceLayers();
+    blit.dstOffsets[0].x = copy_config.dst_rect.left;
+    blit.dstOffsets[0].y = copy_config.dst_rect.top;
+    blit.dstOffsets[0].z = 0;
+    blit.dstOffsets[1].x = copy_config.dst_rect.right;
+    blit.dstOffsets[1].y = copy_config.dst_rect.bottom;
+    blit.dstOffsets[1].z = 1;

     const bool is_linear = copy_config.filter == Tegra::Engines::Fermi2D::Filter::Linear;

     scheduler.Record([src_image = src_view->GetImage(), dst_image = dst_view->GetImage(), blit,
-                      is_linear](auto cmdbuf, auto& dld) {
-        cmdbuf.blitImage(src_image, vk::ImageLayout::eTransferSrcOptimal, dst_image,
-                         vk::ImageLayout::eTransferDstOptimal, {blit},
-                         is_linear ? vk::Filter::eLinear : vk::Filter::eNearest, dld);
+                      is_linear](vk::CommandBuffer cmdbuf) {
+        cmdbuf.BlitImage(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image,
+                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, blit,
+                         is_linear ? VK_FILTER_LINEAR : VK_FILTER_NEAREST);
     });
 }
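Both copy paths rely on the same deferred-recording contract: POD `Vk*` structs and raw handles are captured by value so the lambda stays valid when the scheduler's worker thread replays it, and the wrapper's `vk::CommandBuffer` carries its own dispatch table, which is why the old `auto& dld` parameter disappears. Roughly, each wrapper method is a thin forwarder over the loaded entry point; a sketch, with the wrapper's member names assumed rather than quoted from this PR:

// Sketch of what the wrapper presumably does internally: forward to the
// dynamically loaded function pointer using the stored dispatch table.
void CommandBuffer::BlitImage(VkImage src_image, VkImageLayout src_layout, VkImage dst_image,
                              VkImageLayout dst_layout, const VkImageBlit& blit,
                              VkFilter filter) const {
    dld->vkCmdBlitImage(handle, src_image, src_layout, dst_image, dst_layout, 1, &blit, filter);
}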
View File
@@ -13,10 +13,10 @@
 #include "common/math_util.h"
 #include "video_core/gpu.h"
 #include "video_core/rasterizer_cache.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_image.h"
 #include "video_core/renderer_vulkan/vk_memory_manager.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 #include "video_core/texture_cache/surface_base.h"
 #include "video_core/texture_cache/texture_cache.h"
 #include "video_core/textures/decoders.h"
@@ -60,15 +60,15 @@ public:
     void UploadTexture(const std::vector<u8>& staging_buffer) override;
     void DownloadTexture(std::vector<u8>& staging_buffer) override;

-    void FullTransition(vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access,
-                        vk::ImageLayout new_layout) {
+    void FullTransition(VkPipelineStageFlags new_stage_mask, VkAccessFlags new_access,
+                        VkImageLayout new_layout) {
         image->Transition(0, static_cast<u32>(params.GetNumLayers()), 0, params.num_levels,
                           new_stage_mask, new_access, new_layout);
     }

     void Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
-                    vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access,
-                    vk::ImageLayout new_layout) {
+                    VkPipelineStageFlags new_stage_mask, VkAccessFlags new_access,
+                    VkImageLayout new_layout) {
         image->Transition(base_layer, num_layers, base_level, num_levels, new_stage_mask,
                           new_access, new_layout);
     }
@@ -81,15 +81,15 @@ public:
         return *image;
     }

-    vk::Image GetImageHandle() const {
-        return image->GetHandle();
+    VkImage GetImageHandle() const {
+        return *image->GetHandle();
     }

-    vk::ImageAspectFlags GetAspectMask() const {
+    VkImageAspectFlags GetAspectMask() const {
         return image->GetAspectMask();
     }

-    vk::BufferView GetBufferViewHandle() const {
+    VkBufferView GetBufferViewHandle() const {
         return *buffer_view;
     }
@@ -104,9 +104,9 @@ private:
     void UploadImage(const std::vector<u8>& staging_buffer);

-    vk::BufferImageCopy GetBufferImageCopy(u32 level) const;
+    VkBufferImageCopy GetBufferImageCopy(u32 level) const;

-    vk::ImageSubresourceRange GetImageSubresourceRange() const;
+    VkImageSubresourceRange GetImageSubresourceRange() const;

     Core::System& system;
     const VKDevice& device;
@@ -116,11 +116,11 @@ private:
     VKStagingBufferPool& staging_pool;

     std::optional<VKImage> image;
-    UniqueBuffer buffer;
-    UniqueBufferView buffer_view;
+    vk::Buffer buffer;
+    vk::BufferView buffer_view;
     VKMemoryCommit commit;
-    vk::Format format;
+    VkFormat format = VK_FORMAT_UNDEFINED;
 };
 class CachedSurfaceView final : public VideoCommon::ViewBase {
@@ -129,7 +129,7 @@ public:
                       const ViewParams& params, bool is_proxy);
     ~CachedSurfaceView();

-    vk::ImageView GetHandle(Tegra::Texture::SwizzleSource x_source,
+    VkImageView GetHandle(Tegra::Texture::SwizzleSource x_source,
                             Tegra::Texture::SwizzleSource y_source,
                             Tegra::Texture::SwizzleSource z_source,
                             Tegra::Texture::SwizzleSource w_source);
@@ -138,7 +138,7 @@ public:
         return &surface == &rhs.surface;
     }

-    vk::ImageView GetHandle() {
+    VkImageView GetHandle() {
         return GetHandle(Tegra::Texture::SwizzleSource::R, Tegra::Texture::SwizzleSource::G,
                          Tegra::Texture::SwizzleSource::B, Tegra::Texture::SwizzleSource::A);
     }
@@ -159,24 +159,24 @@ public:
         return buffer_view;
     }

-    vk::Image GetImage() const {
+    VkImage GetImage() const {
         return image;
     }

-    vk::BufferView GetBufferView() const {
+    VkBufferView GetBufferView() const {
         return buffer_view;
     }

-    vk::ImageSubresourceRange GetImageSubresourceRange() const {
+    VkImageSubresourceRange GetImageSubresourceRange() const {
         return {aspect_mask, base_level, num_levels, base_layer, num_layers};
     }

-    vk::ImageSubresourceLayers GetImageSubresourceLayers() const {
+    VkImageSubresourceLayers GetImageSubresourceLayers() const {
         return {surface.GetAspectMask(), base_level, base_layer, num_layers};
     }

-    void Transition(vk::ImageLayout new_layout, vk::PipelineStageFlags new_stage_mask,
-                    vk::AccessFlags new_access) const {
+    void Transition(VkImageLayout new_layout, VkPipelineStageFlags new_stage_mask,
+                    VkAccessFlags new_access) const {
         surface.Transition(base_layer, num_layers, base_level, num_levels, new_stage_mask,
                            new_access, new_layout);
     }
@@ -196,9 +196,9 @@ private:
     // Store a copy of these values to avoid double dereference when reading them
     const SurfaceParams params;
-    const vk::Image image;
-    const vk::BufferView buffer_view;
-    const vk::ImageAspectFlags aspect_mask;
+    const VkImage image;
+    const VkBufferView buffer_view;
+    const VkImageAspectFlags aspect_mask;

     const VKDevice& device;
     CachedSurface& surface;
@@ -206,12 +206,12 @@ private:
     const u32 num_layers;
     const u32 base_level;
     const u32 num_levels;
-    const vk::ImageViewType image_view_type;
+    const VkImageViewType image_view_type;

-    vk::ImageView last_image_view;
-    u32 last_swizzle{};
+    VkImageView last_image_view = nullptr;
+    u32 last_swizzle = 0;

-    std::unordered_map<u32, UniqueImageView> view_cache;
+    std::unordered_map<u32, vk::ImageView> view_cache;
 };

 class VKTextureCache final : public TextureCacheBase {
View File
@@ -7,10 +7,10 @@
 #include "common/assert.h"
 #include "common/logging/log.h"

-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {
@@ -27,8 +27,8 @@ void VKUpdateDescriptorQueue::Acquire() {
     entries.clear();
 }

-void VKUpdateDescriptorQueue::Send(vk::DescriptorUpdateTemplate update_template,
-                                   vk::DescriptorSet set) {
+void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template,
+                                   VkDescriptorSet set) {
     if (payload.size() + entries.size() >= payload.max_size()) {
         LOG_WARNING(Render_Vulkan, "Payload overflow, waiting for worker thread");
         scheduler.WaitWorker();
@@ -37,20 +37,20 @@ void VKUpdateDescriptorQueue::Send(vk::DescriptorUpdateTemplate update_template,
     const auto payload_start = payload.data() + payload.size();
     for (const auto& entry : entries) {
-        if (const auto image = std::get_if<vk::DescriptorImageInfo>(&entry)) {
+        if (const auto image = std::get_if<VkDescriptorImageInfo>(&entry)) {
             payload.push_back(*image);
         } else if (const auto buffer = std::get_if<Buffer>(&entry)) {
             payload.emplace_back(*buffer->buffer, buffer->offset, buffer->size);
-        } else if (const auto texel = std::get_if<vk::BufferView>(&entry)) {
+        } else if (const auto texel = std::get_if<VkBufferView>(&entry)) {
             payload.push_back(*texel);
         } else {
             UNREACHABLE();
         }
     }

-    scheduler.Record([dev = device.GetLogical(), payload_start, set,
-                      update_template]([[maybe_unused]] auto cmdbuf, auto& dld) {
-        dev.updateDescriptorSetWithTemplate(set, update_template, payload_start, dld);
-    });
+    scheduler.Record(
+        [payload_start, set, update_template, logical = &device.GetLogical()](vk::CommandBuffer) {
+            logical->UpdateDescriptorSet(set, update_template, payload_start);
+        });
 }
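The queue batches descriptor writes on the CPU: `Acquire` reserves entry space, the `Add*` helpers append to `entries`, and `Send` flattens them into `payload` (which must stay alive until the worker thread runs the recorded lambda) before issuing the template update. A sketch of the expected call sequence from a pipeline, with hypothetical variable names:

// Sketch of a caller, names hypothetical: one Acquire/Send pair per
// descriptor set update; the scheduler serializes the actual write.
update_queue.Acquire();
update_queue.AddSampledImage(sampler, image_view);
update_queue.AddBuffer(&uniform_buffer, offset, size);
update_queue.Send(update_template, descriptor_set);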
View File
@@ -9,7 +9,7 @@
 #include <boost/container/static_vector.hpp>

 #include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"

 namespace Vulkan {
@@ -20,18 +20,18 @@ class DescriptorUpdateEntry {
 public:
     explicit DescriptorUpdateEntry() : image{} {}

-    DescriptorUpdateEntry(vk::DescriptorImageInfo image) : image{image} {}
+    DescriptorUpdateEntry(VkDescriptorImageInfo image) : image{image} {}

-    DescriptorUpdateEntry(vk::Buffer buffer, vk::DeviceSize offset, vk::DeviceSize size)
+    DescriptorUpdateEntry(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size)
         : buffer{buffer, offset, size} {}

-    DescriptorUpdateEntry(vk::BufferView texel_buffer) : texel_buffer{texel_buffer} {}
+    DescriptorUpdateEntry(VkBufferView texel_buffer) : texel_buffer{texel_buffer} {}

 private:
     union {
-        vk::DescriptorImageInfo image;
-        vk::DescriptorBufferInfo buffer;
-        vk::BufferView texel_buffer;
+        VkDescriptorImageInfo image;
+        VkDescriptorBufferInfo buffer;
+        VkBufferView texel_buffer;
     };
 };
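The union works because `vkUpdateDescriptorSetWithTemplate` walks raw memory at offsets and strides fixed when the template is created, so each slot only has to be exactly one of the three payload types. A sketch of a matching template entry; the struct and its fields are the Vulkan C API's, the local variables are hypothetical:

// Sketch: a template entry whose stride steps over one union per descriptor.
VkDescriptorUpdateTemplateEntry entry;
entry.dstBinding = binding;
entry.dstArrayElement = 0;
entry.descriptorCount = 1;
entry.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
entry.offset = payload_offset;                // byte offset into the payload
entry.stride = sizeof(DescriptorUpdateEntry); // one union per descriptor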
@@ -44,37 +44,35 @@ public:
     void Acquire();

-    void Send(vk::DescriptorUpdateTemplate update_template, vk::DescriptorSet set);
+    void Send(VkDescriptorUpdateTemplateKHR update_template, VkDescriptorSet set);

-    void AddSampledImage(vk::Sampler sampler, vk::ImageView image_view) {
-        entries.emplace_back(vk::DescriptorImageInfo{sampler, image_view, {}});
+    void AddSampledImage(VkSampler sampler, VkImageView image_view) {
+        entries.emplace_back(VkDescriptorImageInfo{sampler, image_view, {}});
     }

-    void AddImage(vk::ImageView image_view) {
-        entries.emplace_back(vk::DescriptorImageInfo{{}, image_view, {}});
+    void AddImage(VkImageView image_view) {
+        entries.emplace_back(VkDescriptorImageInfo{{}, image_view, {}});
     }

-    void AddBuffer(const vk::Buffer* buffer, u64 offset, std::size_t size) {
+    void AddBuffer(const VkBuffer* buffer, u64 offset, std::size_t size) {
         entries.push_back(Buffer{buffer, offset, size});
     }

-    void AddTexelBuffer(vk::BufferView texel_buffer) {
+    void AddTexelBuffer(VkBufferView texel_buffer) {
         entries.emplace_back(texel_buffer);
     }

-    vk::ImageLayout* GetLastImageLayout() {
-        return &std::get<vk::DescriptorImageInfo>(entries.back()).imageLayout;
+    VkImageLayout* GetLastImageLayout() {
+        return &std::get<VkDescriptorImageInfo>(entries.back()).imageLayout;
     }

 private:
     struct Buffer {
-        const vk::Buffer* buffer{};
-        u64 offset{};
-        std::size_t size{};
+        const VkBuffer* buffer = nullptr;
+        u64 offset = 0;
+        std::size_t size = 0;
     };
-    using Variant = std::variant<vk::DescriptorImageInfo, Buffer, vk::BufferView>;
-    // Old gcc versions don't consider this trivially copyable.
-    // static_assert(std::is_trivially_copyable_v<Variant>);
+    using Variant = std::variant<VkDescriptorImageInfo, Buffer, VkBufferView>;

     const VKDevice& device;
     VKScheduler& scheduler;