scratch_buffer: Explicitly define resize and resize_destructive functions
resize keeps previous data intact when the buffer grows.
resize_destructive destroys the previous data when the buffer grows.
This commit is contained in:
parent 64869807e2
commit c6590ad07b
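For context, the difference in a nutshell (a minimal usage sketch, not part of the commit; it assumes the Common::ScratchBuffer class and accessors shown in the diff below, and a "common/scratch_buffer.h" include path):

#include "common/scratch_buffer.h"

void example() {
    Common::ScratchBuffer<int> buf(10); // size() == 10, capacity() == 10
    buf[0] = 42;

    buf.resize(20);             // grows: reallocates and moves the old elements over, buf[0] is still 42
    buf.resize_destructive(40); // grows: reallocates without copying, previous contents are discarded
    buf.resize_destructive(5);  // 5 <= capacity(), so no reallocation: only size() changes
}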
@@ -25,7 +25,20 @@ public:
     ~ScratchBuffer() = default;

     /// This will only grow the buffer's capacity if size is greater than the current capacity.
+    /// The previously held data will remain intact.
     void resize(size_t size) {
+        if (size > buffer_capacity) {
+            auto new_buffer = Common::make_unique_for_overwrite<T[]>(size);
+            std::move(buffer.get(), buffer.get() + buffer_capacity, new_buffer.get());
+            buffer = std::move(new_buffer);
+            buffer_capacity = size;
+        }
+        last_requested_size = size;
+    }
+
+    /// This will only grow the buffer's capacity if size is greater than the current capacity.
+    /// The previously held data will be destroyed if a reallocation occurs.
+    void resize_destructive(size_t size) {
         if (size > buffer_capacity) {
             buffer_capacity = size;
             buffer = Common::make_unique_for_overwrite<T[]>(buffer_capacity);
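The grow path of resize() above relies on the three-iterator std::move algorithm from <algorithm>, which move-assigns each existing element into the fresh allocation before the old one is released. A standalone sketch of the same pattern (plain std::make_unique and illustrative sizes stand in for Common::make_unique_for_overwrite and the class members):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <memory>

int main() {
    constexpr std::size_t old_capacity = 4;
    constexpr std::size_t new_capacity = 8;

    auto buffer = std::make_unique<int[]>(old_capacity);
    for (std::size_t i = 0; i < old_capacity; ++i) {
        buffer[i] = static_cast<int>(i) + 1;
    }

    // Same idea as ScratchBuffer::resize: allocate a larger array, move the old
    // elements into it, then adopt the new allocation.
    auto new_buffer = std::make_unique<int[]>(new_capacity);
    std::move(buffer.get(), buffer.get() + old_capacity, new_buffer.get());
    buffer = std::move(new_buffer);

    assert(buffer[3] == 4); // previously held data remains intact
    return 0;
}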
@@ -61,6 +74,10 @@ public:
         return buffer[i];
     }

+    [[nodiscard]] const T& operator[](size_t i) const {
+        return buffer[i];
+    }
+
     [[nodiscard]] size_t size() const noexcept {
         return last_requested_size;
     }
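The hunk above also adds a const overload of operator[], so elements can now be read through a const ScratchBuffer reference. A small sketch of what that enables (the dump helper and its formatting are illustrative, not from the commit):

#include <cstddef>
#include <cstdio>

#include "common/scratch_buffer.h"

// Hypothetical helper: compiles only because operator[](size_t) const now exists.
template <typename T>
void dump(const Common::ScratchBuffer<T>& buf) {
    for (std::size_t i = 0; i < buf.size(); ++i) {
        std::printf("%llu ", static_cast<unsigned long long>(buf[i]));
    }
    std::printf("\n");
}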
@@ -29,7 +29,7 @@ TEST_CASE("ScratchBuffer: Basic Test", "[common]") {
     }
 }

-TEST_CASE("ScratchBuffer: Resize Grow", "[common]") {
+TEST_CASE("ScratchBuffer: resize_destructive Grow", "[common]") {
     std::array<u8, 10> payload;
     payload.fill(66);

@@ -38,14 +38,86 @@ TEST_CASE("ScratchBuffer: Resize Grow", "[common]") {
     REQUIRE(buf.capacity() == payload.size());

     // Increasing the size should reallocate the buffer
-    buf.resize(payload.size() * 2);
+    buf.resize_destructive(payload.size() * 2);
     REQUIRE(buf.size() == payload.size() * 2);
     REQUIRE(buf.capacity() == payload.size() * 2);

     // Since the buffer is not value initialized, reading its data will be garbage
 }

-TEST_CASE("ScratchBuffer: Resize Shrink", "[common]") {
+TEST_CASE("ScratchBuffer: resize_destructive Shrink", "[common]") {
     std::array<u8, 10> payload;
     payload.fill(66);

+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    // Decreasing the size should not cause a buffer reallocation
+    // This can be tested by ensuring the buffer capacity and data has not changed,
+    buf.resize_destructive(1U);
+    REQUIRE(buf.size() == 1U);
+    REQUIRE(buf.capacity() == payload.size());
+
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: resize Grow u8", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
+    ScratchBuffer<u8> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size());
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    // Increasing the size should reallocate the buffer
+    buf.resize(payload.size() * 2);
+    REQUIRE(buf.size() == payload.size() * 2);
+    REQUIRE(buf.capacity() == payload.size() * 2);
+
+    // resize() keeps the previous data intact
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: resize Grow u64", "[common]") {
+    std::array<u64, 10> payload;
+    payload.fill(6666);
+
+    ScratchBuffer<u64> buf(payload.size());
+    REQUIRE(buf.size() == payload.size());
+    REQUIRE(buf.capacity() == payload.size());
+
+    std::memcpy(buf.data(), payload.data(), payload.size() * sizeof(u64));
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+
+    // Increasing the size should reallocate the buffer
+    buf.resize(payload.size() * 2);
+    REQUIRE(buf.size() == payload.size() * 2);
+    REQUIRE(buf.capacity() == payload.size() * 2);
+
+    // resize() keeps the previous data intact
+    for (size_t i = 0; i < payload.size(); ++i) {
+        REQUIRE(buf[i] == payload[i]);
+    }
+}
+
+TEST_CASE("ScratchBuffer: resize Shrink", "[common]") {
+    std::array<u8, 10> payload;
+    payload.fill(66);
+
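The call-site changes below all share the same shape: the scratch allocation is resized and then immediately overwritten in full (via memcpy or ReadBlock), so preserving the old contents buys nothing and resize_destructive is the cheaper fit. Schematically (a sketch with placeholder names, not code from the diff):

#include <cstddef>
#include <cstring>

#include "common/scratch_buffer.h"

// Placeholder names; the shape matches the call sites changed below.
void read_into_scratch(Common::ScratchBuffer<unsigned char>& scratch, const unsigned char* src,
                       std::size_t n) {
    scratch.resize_destructive(n);       // previous contents are about to be overwritten anyway
    std::memcpy(scratch.data(), src, n); // the buffer is then filled in full
}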
@@ -1926,7 +1926,7 @@ std::span<const u8> BufferCache<P>::ImmediateBufferWithData(VAddr cpu_addr, size

 template <class P>
 std::span<u8> BufferCache<P>::ImmediateBuffer(size_t wanted_capacity) {
-    immediate_buffer_alloc.resize(wanted_capacity);
+    immediate_buffer_alloc.resize_destructive(wanted_capacity);
     return std::span<u8>(immediate_buffer_alloc.data(), wanted_capacity);
 }

@@ -74,7 +74,7 @@ bool DmaPusher::Step() {
         }

         // Push buffer non-empty, read a word
-        command_headers.resize(command_list_header.size);
+        command_headers.resize_destructive(command_list_header.size);
         if (Settings::IsGPULevelHigh()) {
             memory_manager.ReadBlock(dma_get, command_headers.data(),
                                      command_list_header.size * sizeof(u32));
@@ -24,7 +24,7 @@ void State::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
 void State::ProcessExec(const bool is_linear_) {
     write_offset = 0;
     copy_size = regs.line_length_in * regs.line_count;
-    inner_buffer.resize(copy_size);
+    inner_buffer.resize_destructive(copy_size);
     is_linear = is_linear_;
 }

@@ -70,7 +70,7 @@ void State::ProcessData(std::span<const u8> read_buffer) {
         const std::size_t dst_size = Tegra::Texture::CalculateSize(
             true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth,
             regs.dest.BlockHeight(), regs.dest.BlockDepth());
-        tmp_buffer.resize(dst_size);
+        tmp_buffer.resize_destructive(dst_size);
         memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size);
         Tegra::Texture::SwizzleSubrect(tmp_buffer, read_buffer, bytes_per_pixel, width,
                                        regs.dest.height, regs.dest.depth, x_offset, regs.dest.y,
@@ -184,8 +184,8 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
     const size_t src_size =
         CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);

-    read_buffer.resize(src_size);
-    write_buffer.resize(dst_size);
+    read_buffer.resize_destructive(src_size);
+    write_buffer.resize_destructive(dst_size);

     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
     memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
@@ -231,8 +231,8 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
         CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
     const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count;

-    read_buffer.resize(src_size);
-    write_buffer.resize(dst_size);
+    read_buffer.resize_destructive(src_size);
+    write_buffer.resize_destructive(dst_size);

     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
     if (Settings::IsGPULevelExtreme()) {
@@ -261,8 +261,8 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
     pos_x = pos_x % x_in_gob;
     pos_y = pos_y % 8;

-    read_buffer.resize(src_size);
-    write_buffer.resize(dst_size);
+    read_buffer.resize_destructive(src_size);
+    write_buffer.resize_destructive(dst_size);

     if (Settings::IsGPULevelExtreme()) {
         memory_manager.ReadBlock(regs.offset_in + offset, read_buffer.data(), src_size);
@@ -321,10 +321,10 @@ void MaxwellDMA::CopyBlockLinearToBlockLinear() {
     const u32 pitch = x_elements * bytes_per_pixel;
     const size_t mid_buffer_size = pitch * regs.line_count;

-    read_buffer.resize(src_size);
-    write_buffer.resize(dst_size);
+    read_buffer.resize_destructive(src_size);
+    write_buffer.resize_destructive(dst_size);

-    intermediate_buffer.resize(mid_buffer_size);
+    intermediate_buffer.resize_destructive(mid_buffer_size);

     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
     memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
@@ -155,7 +155,7 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
         // swizzle pitch linear to block linear
         const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
         const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
-        luma_buffer.resize(size);
+        luma_buffer.resize_destructive(size);
         std::span<const u8> frame_buff(converted_frame_buf_addr, 4 * width * height);
         Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height,
                                 block_height, 0, width * 4);
@@ -181,8 +181,8 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {

     const auto stride = static_cast<size_t>(frame->linesize[0]);

-    luma_buffer.resize(aligned_width * surface_height);
-    chroma_buffer.resize(aligned_width * surface_height / 2);
+    luma_buffer.resize_destructive(aligned_width * surface_height);
+    chroma_buffer.resize_destructive(aligned_width * surface_height / 2);

     // Populate luma buffer
     const u8* luma_src = frame->data[0];