yuzu-emu/yuzu-mainline

nvdec: Implement VA-API hardware video acceleration (#6713)

* nvdec: VA-API

* Verify formatting

* Forgot a semicolon for Windows

* Clarify comment about AV_PIX_FMT_NV12

* Fix assert log spam from missing negation

* vic: Remove forgotten debug code

* Address lioncash's review

* Mention VA-API is Intel/AMD

* Address v1993's review

* Hopefully fix CMakeLists style this time

* vic: Improve cache locality

* vic: Fix off-by-one error

* codec: Async

* codec: Forgot the GetValue()

* nvdec: Address ameerj's review

* codec: Fall back to CPU without VA-API support

* cmake: Address lat9nq's review

* cmake: Make VA-API optional

* vaapi: Multiple GPU support

* Apply suggestions from code review

Co-authored-by: Ameer J <52414509+ameerj@users.noreply.github.com>

* nvdec: Address ameerj's review

* codec: Use anonymous instead of static

* nvdec: Remove enum and fix memory leak

* nvdec: Address ameerj's review

* codec: Remove preparation for threading

Co-authored-by: Ameer J <52414509+ameerj@users.noreply.github.com>
Authored by yzct12345 on 2021-08-04 03:43:11 +00:00, committed by GitHub
parent d16a337d98
commit 2868d4ba84
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 210 additions and 76 deletions
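
Before the per-file diffs, the overall decode path this change wires up is worth seeing in one place. The following condensed sketch restates that flow with the same FFmpeg calls used in the diff (av_hwdevice_ctx_create, a get_format callback, av_hwframe_transfer_data); the H.264 codec choice, the trimmed error handling, and the bare main() harness are illustrative assumptions, not code from the commit.

```cpp
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
}

// Pick VA-API from the decoder's candidate list if offered; otherwise return
// the first candidate, which makes FFmpeg fall back to software decoding.
static AVPixelFormat PickFormat(AVCodecContext*, const AVPixelFormat* fmts) {
    for (const AVPixelFormat* p = fmts; *p != AV_PIX_FMT_NONE; ++p) {
        if (*p == AV_PIX_FMT_VAAPI) {
            return AV_PIX_FMT_VAAPI;
        }
    }
    return *fmts;
}

int main() {
    AVBufferRef* hw_device = nullptr;
    // Create the VA-API device; the commit additionally passes DRM connection
    // options and retries across kernel drivers (i915, amdgpu).
    if (av_hwdevice_ctx_create(&hw_device, AV_HWDEVICE_TYPE_VAAPI, nullptr, nullptr, 0) < 0) {
        return 1; // no VA-API available; the commit falls back to CPU decoding here
    }
    const AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext* ctx = avcodec_alloc_context3(codec);
    ctx->hw_device_ctx = av_buffer_ref(hw_device);
    ctx->get_format = PickFormat;
    avcodec_open2(ctx, codec, nullptr);

    // ... avcodec_send_packet(ctx, &packet) with a composed frame header ...
    AVFrame* hw_frame = av_frame_alloc();
    AVFrame* sw_frame = av_frame_alloc();
    if (avcodec_receive_frame(ctx, hw_frame) == 0 && hw_frame->format == AV_PIX_FMT_VAAPI) {
        // Copy the decoded surface out of GPU memory; the commit requests NV12
        // because some Intel drivers misbehave with YUV420P.
        sw_frame->format = AV_PIX_FMT_NV12;
        av_hwframe_transfer_data(sw_frame, hw_frame, 0);
    }
    av_frame_free(&hw_frame);
    av_frame_free(&sw_frame);
    avcodec_free_context(&ctx);
    av_buffer_unref(&hw_device);
}
```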

CMakeLists.txt

@@ -583,8 +583,32 @@ if (YUZU_USE_BUNDLED_FFMPEG)
             "${FFmpeg_PREFIX};${FFmpeg_BUILD_DIR}"
             CACHE PATH "Path to FFmpeg headers" FORCE)
 
+        if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
+            Include(FindPkgConfig REQUIRED)
+            pkg_check_modules(LIBVA libva)
+        endif()
+        if(LIBVA_FOUND)
+            pkg_check_modules(LIBDRM libdrm REQUIRED)
+            find_package(X11 REQUIRED)
+            pkg_check_modules(LIBVA-DRM libva-drm REQUIRED)
+            pkg_check_modules(LIBVA-X11 libva-x11 REQUIRED)
+            set(FFmpeg_LIBVA_LIBRARIES
+                ${LIBDRM_LIBRARIES}
+                ${X11_LIBRARIES}
+                ${LIBVA-DRM_LIBRARIES}
+                ${LIBVA-X11_LIBRARIES}
+                ${LIBVA_LIBRARIES})
+            set(FFmpeg_HWACCEL_FLAGS
+                --enable-hwaccel=h264_vaapi
+                --enable-hwaccel=vp9_vaapi
+                --enable-libdrm)
+            message(STATUS "VA-API found")
+        else()
+            set(FFmpeg_HWACCEL_FLAGS --disable-vaapi)
+        endif()
+
         # `configure` parameters builds only exactly what yuzu needs from FFmpeg
-        # `--disable-{vaapi,vdpau}` is needed to avoid linking issues
+        # `--disable-vdpau` is needed to avoid linking issues
         add_custom_command(
             OUTPUT
                 ${FFmpeg_MAKEFILE}
@@ -600,15 +624,16 @@ if (YUZU_USE_BUNDLED_FFMPEG)
                 --disable-network
                 --disable-postproc
                 --disable-swresample
-                --disable-vaapi
                 --disable-vdpau
                 --enable-decoder=h264
                 --enable-decoder=vp9
                 --cc="${CMAKE_C_COMPILER}"
                 --cxx="${CMAKE_CXX_COMPILER}"
+                ${FFmpeg_HWACCEL_FLAGS}
             WORKING_DIRECTORY
                 ${FFmpeg_BUILD_DIR}
         )
+        unset(FFmpeg_HWACCEL_FLAGS)
 
         # Workaround for Ubuntu 18.04's older version of make not being able to call make as a child
         # with context of the jobserver. Also helps ninja users.
@@ -618,9 +643,10 @@ if (YUZU_USE_BUNDLED_FFMPEG)
             OUTPUT_VARIABLE
                 SYSTEM_THREADS)
 
+        set(FFmpeg_BUILD_LIBRARIES ${FFmpeg_LIBRARIES})
         add_custom_command(
             OUTPUT
-                ${FFmpeg_LIBRARIES}
+                ${FFmpeg_BUILD_LIBRARIES}
             COMMAND
                 make -j${SYSTEM_THREADS}
             WORKING_DIRECTORY
@@ -630,7 +656,12 @@ if (YUZU_USE_BUNDLED_FFMPEG)
         # ALL makes this custom target build every time
        # but it won't actually build if the DEPENDS parameter is up to date
         add_custom_target(ffmpeg-configure ALL DEPENDS ${FFmpeg_MAKEFILE})
-        add_custom_target(ffmpeg-build ALL DEPENDS ${FFmpeg_LIBRARIES} ffmpeg-configure)
+        add_custom_target(ffmpeg-build ALL DEPENDS ${FFmpeg_BUILD_LIBRARIES} ffmpeg-configure)
+        link_libraries(${FFmpeg_LIBVA_LIBRARIES})
+        set(FFmpeg_LIBRARIES ${FFmpeg_LIBVA_LIBRARIES} ${FFmpeg_BUILD_LIBRARIES}
+            CACHE PATH "Paths to FFmpeg libraries" FORCE)
+        unset(FFmpeg_BUILD_LIBRARIES)
+        unset(FFmpeg_LIBVA_LIBRARIES)
 
     if (FFmpeg_FOUND)
         message(STATUS "Found FFmpeg version ${FFmpeg_VERSION}")
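
The configure flags above fix at build time whether the bundled FFmpeg contains the VA-API hwaccels. The result can also be confirmed at runtime through public libavutil API; this small diagnostic is a hypothetical helper, not part of the commit:

```cpp
extern "C" {
#include <libavutil/hwcontext.h>
}
#include <cstdio>

// Lists every hardware device type compiled into the linked FFmpeg build.
// If FFmpeg was configured with --disable-vaapi, "vaapi" will be absent.
int main() {
    AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;
    while ((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE) {
        std::printf("supported hw device: %s\n", av_hwdevice_get_type_name(type));
    }
}
```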

src/video_core/CMakeLists.txt

@@ -1,5 +1,10 @@
 add_subdirectory(host_shaders)
 
+if(LIBVA_FOUND)
+    set_source_files_properties(command_classes/codecs/codec.cpp
+        PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1)
+endif()
+
 add_library(video_core STATIC
     buffer_cache/buffer_base.h
     buffer_cache/buffer_cache.cpp

src/video_core/command_classes/codecs/codec.cpp

@@ -2,7 +2,6 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#include <cstring>
 #include <fstream>
 #include <vector>
 #include "common/assert.h"
@@ -17,10 +16,47 @@ extern "C" {
 }
 
 namespace Tegra {
+#if defined(LIBVA_FOUND)
+// Hardware acceleration code from FFmpeg/doc/examples/hw_decode.c originally under MIT license
+namespace {
+constexpr std::array<const char*, 2> VAAPI_DRIVERS = {
+    "i915",
+    "amdgpu",
+};
+
+AVPixelFormat GetHwFormat(AVCodecContext*, const AVPixelFormat* pix_fmts) {
+    for (const AVPixelFormat* p = pix_fmts; *p != AV_PIX_FMT_NONE; ++p) {
+        if (*p == AV_PIX_FMT_VAAPI) {
+            return AV_PIX_FMT_VAAPI;
+        }
+    }
+    LOG_INFO(Service_NVDRV, "Could not find compatible GPU AV format, falling back to CPU");
+    return *pix_fmts;
+}
+
+bool CreateVaapiHwdevice(AVBufferRef** av_hw_device) {
+    AVDictionary* hwdevice_options = nullptr;
+    av_dict_set(&hwdevice_options, "connection_type", "drm", 0);
+    for (const auto& driver : VAAPI_DRIVERS) {
+        av_dict_set(&hwdevice_options, "kernel_driver", driver, 0);
+        const int hwdevice_error = av_hwdevice_ctx_create(av_hw_device, AV_HWDEVICE_TYPE_VAAPI,
+                                                          nullptr, hwdevice_options, 0);
+        if (hwdevice_error >= 0) {
+            LOG_INFO(Service_NVDRV, "Using VA-API with {}", driver);
+            av_dict_free(&hwdevice_options);
+            return true;
+        }
+        LOG_DEBUG(Service_NVDRV, "VA-API av_hwdevice_ctx_create failed {}", hwdevice_error);
+    }
+    LOG_DEBUG(Service_NVDRV, "VA-API av_hwdevice_ctx_create failed for all drivers");
+    av_dict_free(&hwdevice_options);
+    return false;
+}
+} // namespace
+#endif
+
 void AVFrameDeleter(AVFrame* ptr) {
-    av_frame_unref(ptr);
-    av_free(ptr);
+    av_frame_free(&ptr);
 }
 
 Codec::Codec(GPU& gpu_, const NvdecCommon::NvdecRegisters& regs)
@@ -32,19 +68,31 @@ Codec::~Codec() {
         return;
     }
     // Free libav memory
-    AVFrame* av_frame{nullptr};
     avcodec_send_packet(av_codec_ctx, nullptr);
-    av_frame = av_frame_alloc();
+    AVFrame* av_frame = av_frame_alloc();
     avcodec_receive_frame(av_codec_ctx, av_frame);
     avcodec_flush_buffers(av_codec_ctx);
-
-    av_frame_unref(av_frame);
-    av_free(av_frame);
+    av_frame_free(&av_frame);
     avcodec_close(av_codec_ctx);
+    av_buffer_unref(&av_hw_device);
+}
+
+void Codec::InitializeHwdec() {
+    // Prioritize integrated GPU to mitigate bandwidth bottlenecks
+#if defined(LIBVA_FOUND)
+    if (CreateVaapiHwdevice(&av_hw_device)) {
+        const auto hw_device_ctx = av_buffer_ref(av_hw_device);
+        ASSERT_MSG(hw_device_ctx, "av_buffer_ref failed");
+        av_codec_ctx->hw_device_ctx = hw_device_ctx;
+        av_codec_ctx->get_format = GetHwFormat;
+        return;
+    }
+#endif
+    // TODO more GPU accelerated decoders
 }
 
 void Codec::Initialize() {
-    AVCodecID codec{AV_CODEC_ID_NONE};
+    AVCodecID codec;
     switch (current_codec) {
     case NvdecCommon::VideoCodec::H264:
         codec = AV_CODEC_ID_H264;
@@ -53,22 +101,24 @@ void Codec::Initialize() {
         codec = AV_CODEC_ID_VP9;
         break;
     default:
+        UNIMPLEMENTED_MSG("Unknown codec {}", current_codec);
         return;
     }
     av_codec = avcodec_find_decoder(codec);
     av_codec_ctx = avcodec_alloc_context3(av_codec);
     av_opt_set(av_codec_ctx->priv_data, "tune", "zerolatency", 0);
-
-    // TODO(ameerj): libavcodec gpu hw acceleration
-
+    InitializeHwdec();
+    if (!av_codec_ctx->hw_device_ctx) {
+        LOG_INFO(Service_NVDRV, "Using FFmpeg software decoding");
+    }
     const auto av_error = avcodec_open2(av_codec_ctx, av_codec, nullptr);
     if (av_error < 0) {
         LOG_ERROR(Service_NVDRV, "avcodec_open2() Failed.");
         avcodec_close(av_codec_ctx);
+        av_buffer_unref(&av_hw_device);
         return;
     }
     initialized = true;
-    return;
 }
 
 void Codec::SetTargetCodec(NvdecCommon::VideoCodec codec) {
@@ -80,36 +130,64 @@ void Codec::SetTargetCodec(NvdecCommon::VideoCodec codec) {
 void Codec::Decode() {
     const bool is_first_frame = !initialized;
-    if (!initialized) {
+    if (is_first_frame) {
         Initialize();
     }
 
     bool vp9_hidden_frame = false;
-
-    AVPacket packet{};
-    av_init_packet(&packet);
     std::vector<u8> frame_data;
-
     if (current_codec == NvdecCommon::VideoCodec::H264) {
         frame_data = h264_decoder->ComposeFrameHeader(state, is_first_frame);
     } else if (current_codec == NvdecCommon::VideoCodec::Vp9) {
         frame_data = vp9_decoder->ComposeFrameHeader(state);
         vp9_hidden_frame = vp9_decoder->WasFrameHidden();
     }
-
+    AVPacket packet{};
+    av_init_packet(&packet);
     packet.data = frame_data.data();
     packet.size = static_cast<s32>(frame_data.size());
-
-    avcodec_send_packet(av_codec_ctx, &packet);
-
-    if (!vp9_hidden_frame) {
-        // Only receive/store visible frames
-        AVFramePtr frame = AVFramePtr{av_frame_alloc(), AVFrameDeleter};
-        avcodec_receive_frame(av_codec_ctx, frame.get());
-        av_frames.push(std::move(frame));
-        // Limit queue to 10 frames. Workaround for ZLA decode and queue spam
-        if (av_frames.size() > 10) {
-            av_frames.pop();
-        }
+    if (const int ret = avcodec_send_packet(av_codec_ctx, &packet); ret) {
+        LOG_DEBUG(Service_NVDRV, "avcodec_send_packet error {}", ret);
+        return;
+    }
+    // Only receive/store visible frames
+    if (vp9_hidden_frame) {
+        return;
+    }
+    AVFrame* hw_frame = av_frame_alloc();
+    AVFrame* sw_frame = hw_frame;
+    ASSERT_MSG(hw_frame, "av_frame_alloc hw_frame failed");
+    if (const int ret = avcodec_receive_frame(av_codec_ctx, hw_frame); ret) {
+        LOG_DEBUG(Service_NVDRV, "avcodec_receive_frame error {}", ret);
+        av_frame_free(&hw_frame);
+        return;
+    }
+    if (!hw_frame->width || !hw_frame->height) {
+        LOG_WARNING(Service_NVDRV, "Zero width or height in frame");
+        av_frame_free(&hw_frame);
+        return;
+    }
+#if defined(LIBVA_FOUND)
+    // Hardware acceleration code from FFmpeg/doc/examples/hw_decode.c under MIT license
+    if (hw_frame->format == AV_PIX_FMT_VAAPI) {
+        sw_frame = av_frame_alloc();
+        ASSERT_MSG(sw_frame, "av_frame_alloc sw_frame failed");
+        // Can't use AV_PIX_FMT_YUV420P and share code with software decoding in vic.cpp
+        // because Intel drivers crash unless using AV_PIX_FMT_NV12
+        sw_frame->format = AV_PIX_FMT_NV12;
+        const int transfer_data_ret = av_hwframe_transfer_data(sw_frame, hw_frame, 0);
+        ASSERT_MSG(!transfer_data_ret, "av_hwframe_transfer_data error {}", transfer_data_ret);
+        av_frame_free(&hw_frame);
+    }
+#endif
+    if (sw_frame->format != AV_PIX_FMT_YUV420P && sw_frame->format != AV_PIX_FMT_NV12) {
+        UNIMPLEMENTED_MSG("Unexpected video format from host graphics: {}", sw_frame->format);
+        av_frame_free(&sw_frame);
+        return;
+    }
+    av_frames.push(AVFramePtr{sw_frame, AVFrameDeleter});
+    if (av_frames.size() > 10) {
+        LOG_TRACE(Service_NVDRV, "av_frames.push overflow dropped frame");
+        av_frames.pop();
     }
 }
@@ -119,7 +197,6 @@ AVFramePtr Codec::GetCurrentFrame() {
     if (av_frames.empty()) {
         return AVFramePtr{nullptr, AVFrameDeleter};
     }
-
     AVFramePtr frame = std::move(av_frames.front());
     av_frames.pop();
     return frame;
@@ -144,6 +221,5 @@ std::string_view Codec::GetCurrentCodecName() const {
     default:
         return "Unknown";
     }
-};
+}
 
 } // namespace Tegra
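
A note on the deleter change at the top of this file: the old pairing of av_frame_unref() with av_free() releases the frame's buffers and then the struct in two steps, while av_frame_free() is the canonical single call that does both. Isolated for clarity, the smart-pointer pattern declared in codec.h looks like this (the main() harness is illustrative only):

```cpp
extern "C" {
#include <libavcodec/avcodec.h>
}
#include <memory>

// Mirrors codec.h: a unique_ptr that releases frames through av_frame_free().
void AVFrameDeleter(AVFrame* ptr) {
    av_frame_free(&ptr); // unreferences the buffers and frees the struct in one call
}
using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>;

int main() {
    // The frame is released automatically on every exit path, which is why
    // Decode() above can early-return freely after pushing or dropping frames.
    AVFramePtr frame{av_frame_alloc(), AVFrameDeleter};
    return frame ? 0 : 1;
}
```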

src/video_core/command_classes/codecs/codec.h

@@ -22,7 +22,6 @@ extern "C" {
 
 namespace Tegra {
 class GPU;
-struct VicRegisters;
 
 void AVFrameDeleter(AVFrame* ptr);
 using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>;
@@ -55,10 +54,13 @@
     [[nodiscard]] std::string_view GetCurrentCodecName() const;
 
 private:
+    void InitializeHwdec();
+
     bool initialized{};
     NvdecCommon::VideoCodec current_codec{NvdecCommon::VideoCodec::None};
 
     AVCodec* av_codec{nullptr};
+    AVBufferRef* av_hw_device{nullptr};
     AVCodecContext* av_codec_ctx{nullptr};
 
     GPU& gpu;

src/video_core/command_classes/vic.cpp

@@ -46,11 +46,8 @@ void Vic::ProcessMethod(Method method, u32 argument) {
     case Method::SetOutputSurfaceLumaOffset:
         output_surface_luma_address = arg;
         break;
-    case Method::SetOutputSurfaceChromaUOffset:
-        output_surface_chroma_u_address = arg;
-        break;
-    case Method::SetOutputSurfaceChromaVOffset:
-        output_surface_chroma_v_address = arg;
+    case Method::SetOutputSurfaceChromaOffset:
+        output_surface_chroma_address = arg;
         break;
     default:
         break;
@@ -65,11 +62,10 @@ void Vic::Execute() {
     const VicConfig config{gpu.MemoryManager().Read<u64>(config_struct_address + 0x20)};
     const AVFramePtr frame_ptr = nvdec_processor->GetFrame();
     const auto* frame = frame_ptr.get();
-    if (!frame || frame->width == 0 || frame->height == 0) {
+    if (!frame) {
         return;
     }
-    const VideoPixelFormat pixel_format =
-        static_cast<VideoPixelFormat>(config.pixel_format.Value());
+    const auto pixel_format = static_cast<VideoPixelFormat>(config.pixel_format.Value());
     switch (pixel_format) {
     case VideoPixelFormat::BGRA8:
     case VideoPixelFormat::RGBA8: {
@@ -83,16 +79,18 @@
             sws_freeContext(scaler_ctx);
             scaler_ctx = nullptr;
 
-            // FFmpeg returns all frames in YUV420, convert it into expected format
-            scaler_ctx =
-                sws_getContext(frame->width, frame->height, AV_PIX_FMT_YUV420P, frame->width,
-                               frame->height, target_format, 0, nullptr, nullptr, nullptr);
+            // Frames are decoded into either YUV420 or NV12 formats. Convert to desired format
+            scaler_ctx = sws_getContext(frame->width, frame->height,
+                                        static_cast<AVPixelFormat>(frame->format), frame->width,
+                                        frame->height, target_format, 0, nullptr, nullptr, nullptr);
 
             scaler_width = frame->width;
             scaler_height = frame->height;
         }
         // Get Converted frame
-        const std::size_t linear_size = frame->width * frame->height * 4;
+        const u32 width = static_cast<u32>(frame->width);
+        const u32 height = static_cast<u32>(frame->height);
+        const std::size_t linear_size = width * height * 4;
 
         // Only allocate frame_buffer once per stream, as the size is not expected to change
         if (!converted_frame_buffer) {
@@ -109,11 +107,10 @@
         if (blk_kind != 0) {
             // swizzle pitch linear to block linear
             const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
-            const auto size = Tegra::Texture::CalculateSize(true, 4, frame->width, frame->height, 1,
-                                                            block_height, 0);
+            const auto size =
+                Tegra::Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
             luma_buffer.resize(size);
-            Tegra::Texture::SwizzleSubrect(frame->width, frame->height, frame->width * 4,
-                                           frame->width, 4, luma_buffer.data(),
+            Tegra::Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(),
                                            converted_frame_buffer.get(), block_height, 0, 0);
 
             gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
@@ -131,41 +128,65 @@
         const std::size_t surface_height = config.surface_height_minus1 + 1;
         const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->width));
         const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->height));
-        const std::size_t half_width = frame_width / 2;
-        const std::size_t half_height = frame_height / 2;
-        const std::size_t aligned_width = (surface_width + 0xff) & ~0xff;
-
-        const auto* luma_ptr = frame->data[0];
-        const auto* chroma_b_ptr = frame->data[1];
-        const auto* chroma_r_ptr = frame->data[2];
+        const std::size_t aligned_width = (surface_width + 0xff) & ~0xffUL;
         const auto stride = static_cast<size_t>(frame->linesize[0]);
-        const auto half_stride = static_cast<size_t>(frame->linesize[1]);
 
         luma_buffer.resize(aligned_width * surface_height);
         chroma_buffer.resize(aligned_width * surface_height / 2);
 
         // Populate luma buffer
+        const u8* luma_src = frame->data[0];
         for (std::size_t y = 0; y < frame_height; ++y) {
             const std::size_t src = y * stride;
             const std::size_t dst = y * aligned_width;
-
             for (std::size_t x = 0; x < frame_width; ++x) {
-                luma_buffer[dst + x] = luma_ptr[src + x];
+                luma_buffer[dst + x] = luma_src[src + x];
             }
         }
         gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
                                        luma_buffer.size());
 
-        // Populate chroma buffer from both channels with interleaving.
-        for (std::size_t y = 0; y < half_height; ++y) {
-            const std::size_t src = y * half_stride;
-            const std::size_t dst = y * aligned_width;
-
-            for (std::size_t x = 0; x < half_width; ++x) {
-                chroma_buffer[dst + x * 2] = chroma_b_ptr[src + x];
-                chroma_buffer[dst + x * 2 + 1] = chroma_r_ptr[src + x];
+        // Chroma
+        const std::size_t half_height = frame_height / 2;
+        const auto half_stride = static_cast<size_t>(frame->linesize[1]);
+
+        switch (frame->format) {
+        case AV_PIX_FMT_YUV420P: {
+            // Frame from FFmpeg software
+            // Populate chroma buffer from both channels with interleaving.
+            const std::size_t half_width = frame_width / 2;
+            const u8* chroma_b_src = frame->data[1];
+            const u8* chroma_r_src = frame->data[2];
+            for (std::size_t y = 0; y < half_height; ++y) {
+                const std::size_t src = y * half_stride;
+                const std::size_t dst = y * aligned_width;
+                for (std::size_t x = 0; x < half_width; ++x) {
+                    chroma_buffer[dst + x * 2] = chroma_b_src[src + x];
+                    chroma_buffer[dst + x * 2 + 1] = chroma_r_src[src + x];
+                }
             }
+            break;
         }
-        gpu.MemoryManager().WriteBlock(output_surface_chroma_u_address, chroma_buffer.data(),
+        case AV_PIX_FMT_NV12: {
+            // Frame from VA-API hardware
+            // This is already interleaved so just copy
+            const u8* chroma_src = frame->data[1];
+            for (std::size_t y = 0; y < half_height; ++y) {
+                const std::size_t src = y * stride;
+                const std::size_t dst = y * aligned_width;
+                for (std::size_t x = 0; x < frame_width; ++x) {
+                    chroma_buffer[dst + x] = chroma_src[src + x];
+                }
+            }
+            break;
+        }
+        default:
+            UNREACHABLE();
+            break;
+        }
+
+        gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
                                        chroma_buffer.size());
         break;
     }
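
The switch above exists because the two decode paths hand VIC differently laid-out chroma: software-decoded YUV420P keeps U and V in separate half-resolution planes, while VA-API's NV12 arrives with the two already interleaved into a single plane. A toy illustration for a 4x4 image, using hypothetical buffers and ignoring strides:

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

int main() {
    // YUV420P (software decode): U and V are separate 2x2 planes.
    const std::array<std::uint8_t, 4> u_plane{1, 2, 3, 4};
    const std::array<std::uint8_t, 4> v_plane{5, 6, 7, 8};

    // NV12 (VA-API decode): one 2x2 plane of interleaved UV pairs.
    std::array<std::uint8_t, 8> uv_plane{};
    for (std::size_t i = 0; i < u_plane.size(); ++i) {
        uv_plane[i * 2] = u_plane[i];     // the same interleave the YUV420P branch performs
        uv_plane[i * 2 + 1] = v_plane[i]; // NV12 frames already arrive in this shape
    }
    for (const std::uint8_t b : uv_plane) {
        std::printf("%u ", static_cast<unsigned>(b)); // prints: 1 5 2 6 3 7 4 8
    }
    std::printf("\n");
}
```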

src/video_core/command_classes/vic.h

@@ -22,8 +22,8 @@
         SetControlParams = 0x1c1,
         SetConfigStructOffset = 0x1c2,
         SetOutputSurfaceLumaOffset = 0x1c8,
-        SetOutputSurfaceChromaUOffset = 0x1c9,
-        SetOutputSurfaceChromaVOffset = 0x1ca
+        SetOutputSurfaceChromaOffset = 0x1c9,
+        SetOutputSurfaceChromaUnusedOffset = 0x1ca
     };
 
     explicit Vic(GPU& gpu, std::shared_ptr<Nvdec> nvdec_processor);
@@ -64,8 +64,7 @@
 
     GPUVAddr config_struct_address{};
     GPUVAddr output_surface_luma_address{};
-    GPUVAddr output_surface_chroma_u_address{};
-    GPUVAddr output_surface_chroma_v_address{};
+    GPUVAddr output_surface_chroma_address{};
 
     SwsContext* scaler_ctx{};
     s32 scaler_width{};