Merge pull request #11278 from Kelebek1/dma_sync
Mark accelerated DMA destination buffers and images as GPU-modified
commit ae1421265a
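The change below touches both the buffer-cache and the texture-cache paths of AccelerateDMA::DmaBufferImageCopy: when an accelerated DMA copy writes to guest memory (image-to-buffer download) or to a texture (buffer-to-image upload), the destination is now flagged as GPU-modified so later accesses synchronize against the GPU copy rather than reading stale data. As a rough illustration of the buffer side, here is a minimal sketch of what obtaining a destination buffer with a MarkAsWritten post-operation amounts to; the BufferCache and Buffer types and the gpu_modified flag are simplified stand-ins for illustration, not yuzu's actual classes:

    #include <cstdint>
    #include <iostream>
    #include <unordered_map>

    enum class ObtainBufferOperation { DoNothing, MarkAsWritten };

    struct Buffer {
        bool gpu_modified = false; // CPU reads of this range must resync with the GPU copy first
    };

    class BufferCache {
    public:
        // Simplified stand-in for ObtainBuffer: look up (or create) the buffer backing an
        // address and optionally record that the GPU is about to write it.
        Buffer& ObtainBuffer(std::uint64_t address, ObtainBufferOperation post_op) {
            Buffer& buffer = buffers_[address];
            if (post_op == ObtainBufferOperation::MarkAsWritten) {
                buffer.gpu_modified = true;
            }
            return buffer;
        }

    private:
        std::unordered_map<std::uint64_t, Buffer> buffers_;
    };

    int main() {
        BufferCache cache;
        // Upload: the buffer is only read by the GPU copy, nothing to mark.
        Buffer& src = cache.ObtainBuffer(0x1000, ObtainBufferOperation::DoNothing);
        // Download: the buffer is written by the GPU copy, mark it so later CPU reads resync.
        Buffer& dst = cache.ObtainBuffer(0x2000, ObtainBufferOperation::MarkAsWritten);
        std::cout << src.gpu_modified << ' ' << dst.gpu_modified << '\n'; // prints "0 1"
    }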
@@ -1335,7 +1335,8 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
     }
     const u32 buffer_size = static_cast<u32>(buffer_operand.pitch * buffer_operand.height);
     static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize;
-    const auto post_op = VideoCommon::ObtainBufferOperation::DoNothing;
+    const auto post_op = IS_IMAGE_UPLOAD ? VideoCommon::ObtainBufferOperation::DoNothing
+                                         : VideoCommon::ObtainBufferOperation::MarkAsWritten;
     const auto [buffer, offset] =
         buffer_cache.ObtainBuffer(buffer_operand.address, buffer_size, sync_info, post_op);

@@ -1344,8 +1345,12 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
     const std::span copy_span{&copy, 1};

     if constexpr (IS_IMAGE_UPLOAD) {
+        texture_cache.PrepareImage(image_id, true, false);
         image->UploadMemory(buffer->Handle(), offset, copy_span);
     } else {
+        if (offset % BytesPerBlock(image->info.format)) {
+            return false;
+        }
         texture_cache.DownloadImageIntoBuffer(image, buffer->Handle(), offset, copy_span,
                                               buffer_operand.address, buffer_size);
     }
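Two behaviors are added in the image-copy body above: the upload path calls texture_cache.PrepareImage(image_id, true, false) before writing the image, and the download path now rejects buffer offsets that are not a multiple of the destination format's bytes-per-block, returning false so the copy is handled by the regular, non-accelerated path instead. A small sketch of that alignment guard, with illustrative stand-in formats and block sizes (the real values come from the texture cache's per-format tables):

    #include <cstdint>
    #include <iostream>

    // Stand-in formats for illustration only.
    enum class PixelFormat { R8_UNORM, R8G8B8A8_UNORM, BC1_RGBA_UNORM };

    constexpr std::uint32_t BytesPerBlock(PixelFormat format) {
        switch (format) {
        case PixelFormat::R8_UNORM:
            return 1;
        case PixelFormat::R8G8B8A8_UNORM:
            return 4;
        case PixelFormat::BC1_RGBA_UNORM:
            return 8; // block-compressed: 8 bytes per 4x4 block
        }
        return 1;
    }

    // The accelerated download only handles block-aligned buffer offsets; anything else
    // is refused so the caller can fall back to the generic copy.
    bool CanAccelerateDownload(std::uint64_t offset, PixelFormat format) {
        return offset % BytesPerBlock(format) == 0;
    }

    int main() {
        std::cout << CanAccelerateDownload(16, PixelFormat::R8G8B8A8_UNORM) << '\n'; // 1
        std::cout << CanAccelerateDownload(6, PixelFormat::BC1_RGBA_UNORM) << '\n';  // 0
    }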
@@ -830,7 +830,8 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
     }
     const u32 buffer_size = static_cast<u32>(buffer_operand.pitch * buffer_operand.height);
     static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize;
-    const auto post_op = VideoCommon::ObtainBufferOperation::DoNothing;
+    const auto post_op = IS_IMAGE_UPLOAD ? VideoCommon::ObtainBufferOperation::DoNothing
+                                         : VideoCommon::ObtainBufferOperation::MarkAsWritten;
     const auto [buffer, offset] =
         buffer_cache.ObtainBuffer(buffer_operand.address, buffer_size, sync_info, post_op);

@@ -839,8 +840,12 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
     const std::span copy_span{&copy, 1};

     if constexpr (IS_IMAGE_UPLOAD) {
+        texture_cache.PrepareImage(image_id, true, false);
         image->UploadMemory(buffer->Handle(), offset, copy_span);
     } else {
+        if (offset % BytesPerBlock(image->info.format)) {
+            return false;
+        }
         texture_cache.DownloadImageIntoBuffer(image, buffer->Handle(), offset, copy_span,
                                               buffer_operand.address, buffer_size);
     }
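The same edit is applied to the second backend's DmaBufferImageCopy, so both rasterizers mark the destination of an accelerated copy as GPU-modified. Conceptually, PrepareImage(image_id, is_modification, invalidate) tells the texture cache an image is about to be used and, when is_modification is true, records a GPU write. The toy sketch below captures only that marking step with hypothetical stand-in types; yuzu's real implementation also refreshes stale contents, synchronizes aliased images, and updates cache bookkeeping:

    #include <cstdint>
    #include <iostream>

    // Toy image record; the real ImageBase tracks far more state than this.
    struct Image {
        bool gpu_modified = false;
        std::uint64_t modification_tick = 0;
    };

    void PrepareImage(Image& image, bool is_modification, bool invalidate) {
        if (!invalidate) {
            // Non-invalidating use: existing contents would be refreshed here if stale.
        }
        if (is_modification) {
            image.gpu_modified = true; // later CPU reads must download the GPU copy first
            ++image.modification_tick;
        }
    }

    int main() {
        Image image{};
        PrepareImage(image, /*is_modification=*/true, /*invalidate=*/false);
        std::cout << image.gpu_modified << ' ' << image.modification_tick << '\n'; // prints "1 1"
    }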
@@ -243,6 +243,9 @@ public:
     /// Create channel state.
     void CreateChannel(Tegra::Control::ChannelState& channel) final override;

+    /// Prepare an image to be used
+    void PrepareImage(ImageId image_id, bool is_modification, bool invalidate);
+
     std::recursive_mutex mutex;

 private:
@@ -387,9 +390,6 @@ private:
     /// Synchronize image aliases, copying data if needed
     void SynchronizeAliases(ImageId image_id);

-    /// Prepare an image to be used
-    void PrepareImage(ImageId image_id, bool is_modification, bool invalidate);
-
     /// Prepare an image view to be used
     void PrepareImageView(ImageViewId image_view_id, bool is_modification, bool invalidate);

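In the texture cache header, the PrepareImage declaration simply moves from the private section to the public one so the rasterizers' AccelerateDMA helpers can call it. A trivial sketch of that relationship, using stand-in classes rather than the real TextureCache and AccelerateDMA interfaces:

    #include <iostream>

    class TextureCache {
    public:
        // Now public so callers outside the cache can prepare an image for a GPU write.
        void PrepareImage(int image_id, bool is_modification, bool invalidate) {
            std::cout << "prepare image " << image_id << " modification=" << is_modification
                      << " invalidate=" << invalidate << '\n';
        }
    };

    class AccelerateDMA {
    public:
        explicit AccelerateDMA(TextureCache& texture_cache) : texture_cache_{texture_cache} {}

        void UploadImage(int image_id) {
            // Would not compile while PrepareImage was private to the cache.
            texture_cache_.PrepareImage(image_id, true, false);
        }

    private:
        TextureCache& texture_cache_;
    };

    int main() {
        TextureCache cache;
        AccelerateDMA dma{cache};
        dma.UploadImage(42);
    }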