diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index a6e87a3583..fd7764b2f8 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -187,34 +187,34 @@ void BufferCache<P>::ClearDownload(DAddr device_addr, u64 size) {
 }
 
 template <class P>
-bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
+bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dst_address, u64 amount) {
     const std::optional<DAddr> cpu_src_address = gpu_memory->GpuToCpuAddress(src_address);
-    const std::optional<DAddr> cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address);
+    const std::optional<DAddr> cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address);
@@ ... @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
     auto mirror = [&](DAddr base_address, DAddr base_address_end) {
         const u64 size = base_address_end - base_address;
         const DAddr diff = base_address - *cpu_src_address;
-        const DAddr new_base_address = *cpu_dest_address + diff;
+        const DAddr new_base_address = *cpu_dst_address + diff;
         tmp_intervals.push_back({new_base_address, size});
         uncommitted_gpu_modified_ranges.Add(new_base_address, size);
     };
     gpu_modified_ranges.ForEachInRange(*cpu_src_address, amount, mirror);
     // This subtraction in this order is important for overlapping copies.
-    gpu_modified_ranges.Subtract(*cpu_dest_address, amount);
+    gpu_modified_ranges.Subtract(*cpu_dst_address, amount);
     const bool has_new_downloads = tmp_intervals.size() != 0;
     for (const auto& pair : tmp_intervals) {
         gpu_modified_ranges.Add(pair.first, pair.second);
     }
     const auto& copy = copies[0];
     src_buffer.MarkUsage(copy.src_offset, copy.size);
-    dest_buffer.MarkUsage(copy.dst_offset, copy.size);
-    runtime.CopyBuffer(dest_buffer, src_buffer, copies, true);
+    dst_buffer.MarkUsage(copy.dst_offset, copy.size);
+    runtime.CopyBuffer(dst_buffer, src_buffer, copies, true);
     if (has_new_downloads) {
-        memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount);
+        memory_tracker.MarkRegionAsGpuModified(*cpu_dst_address, amount);
     }
     Tegra::Memory::DeviceGuestMemoryScoped<u8, Tegra::Memory::GuestMemoryFlags::UnsafeReadWrite>
@@ ... @@ bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
     const std::optional<DAddr> cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address);
     if (!cpu_dst_address) {
         return false;
     }
-    const bool dest_dirty = IsRegionRegistered(*cpu_dst_address, amount);
-    if (!dest_dirty) {
+    const bool dst_dirty = IsRegionRegistered(*cpu_dst_address, amount);
+    if (!dst_dirty) {
         return false;
     }
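
Note on the DMACopy hunks above: the sequencing around the mirror lambda is the load-bearing part. ForEachInRange first records which sub-ranges of the source are GPU-modified, translated to destination-relative addresses; Subtract then clears the modified state across the entire destination span; only afterwards are the recorded intervals added back. For a copy whose source and destination overlap, adding before subtracting would let the subtraction erase the freshly mirrored state, which is what the in-source comment about ordering is warning about. Below is a self-contained sketch of that order; the toy IntervalSet only mimics the Add/Subtract/ForEachInRange calls used in the hunk, and everything else in it is an illustrative assumption, not the emulator's actual container.

    // Toy interval set modeling the Add/Subtract/ForEachInRange shape used
    // by DMACopy above. All names and internals here are assumptions.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <iterator>
    #include <map>
    #include <utility>
    #include <vector>

    using Addr = std::uint64_t;

    class IntervalSet {
    public:
        void Add(Addr base, Addr size) {
            Addr begin = base;
            Addr end = base + size;
            auto it = ranges_.lower_bound(begin);
            if (it != ranges_.begin() && std::prev(it)->second >= begin) {
                --it; // merge with a preceding interval that reaches `begin`
            }
            while (it != ranges_.end() && it->first <= end) {
                begin = std::min(begin, it->first);
                end = std::max(end, it->second);
                it = ranges_.erase(it);
            }
            ranges_.emplace(begin, end);
        }

        void Subtract(Addr base, Addr size) {
            const Addr begin = base;
            const Addr end = base + size;
            auto it = ranges_.lower_bound(begin);
            if (it != ranges_.begin() && std::prev(it)->second > begin) {
                --it; // a preceding interval spills into [begin, end)
            }
            while (it != ranges_.end() && it->first < end) {
                const auto [r_begin, r_end] = *it;
                it = ranges_.erase(it);
                if (r_begin < begin) {
                    ranges_.emplace(r_begin, begin); // keep the left remainder
                }
                if (r_end > end) {
                    ranges_.emplace(end, r_end); // keep the right remainder
                }
            }
        }

        template <typename Func>
        void ForEachInRange(Addr base, Addr size, Func&& func) const {
            const Addr begin = base;
            const Addr end = base + size;
            for (const auto& [r_begin, r_end] : ranges_) {
                const Addr lo = std::max(r_begin, begin);
                const Addr hi = std::min(r_end, end);
                if (lo < hi) {
                    func(lo, hi);
                }
            }
        }

    private:
        std::map<Addr, Addr> ranges_; // begin -> end, kept disjoint
    };

    int main() {
        // Overlapping copy: src = [0x100, 0x300), dst = [0x200, 0x400).
        const Addr src = 0x100;
        const Addr dst = 0x200;
        const Addr amount = 0x200;

        IntervalSet gpu_modified;
        gpu_modified.Add(0x100, 0x100); // only [0x100, 0x200) of the source is dirty

        // 1) Mirror dirty source sub-ranges to destination-relative addresses.
        std::vector<std::pair<Addr, Addr>> tmp_intervals; // {base, size}
        gpu_modified.ForEachInRange(src, amount, [&](Addr begin, Addr end) {
            tmp_intervals.push_back({dst + (begin - src), end - begin});
        });

        // 2) Subtract the whole destination range BEFORE re-adding: stale
        //    dirty state under the copy destination must not survive.
        gpu_modified.Subtract(dst, amount);

        // 3) Re-add the mirrored intervals. Swapping 2) and 3) would erase
        //    them again whenever src and dst overlap.
        for (const auto& [base, size] : tmp_intervals) {
            gpu_modified.Add(base, size);
        }

        gpu_modified.ForEachInRange(0, ~Addr{0}, [](Addr begin, Addr end) {
            std::cout << std::hex << "[0x" << begin << ", 0x" << end << ")\n";
        });
    }

With the order shown, the modified set ends as [0x100, 0x300): the still-dirty source half plus its mirror in the destination. Re-adding before subtracting would instead leave only [0x100, 0x200).
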
@@ -263,10 +263,10 @@ bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
     gpu_modified_ranges.Subtract(*cpu_dst_address, size);
     const BufferId buffer = FindBuffer(*cpu_dst_address, static_cast<u32>(size));
@@ ... @@ void BufferCache<P>::UpdateIndexBuffer() {
     u32 buffer_size = Common::AlignUp(inline_index_size, CACHING_PAGESIZE);
     if (inline_buffer_id == NULL_BUFFER_ID) [[unlikely]] {
         inline_buffer_id = CreateBuffer(0, buffer_size);
-    }
-    if (slot_buffers[inline_buffer_id].SizeBytes() < buffer_size) [[unlikely]] {
+    } else if (slot_buffers[inline_buffer_id].SizeBytes() < buffer_size) [[unlikely]] {
         slot_buffers.erase(inline_buffer_id);
         inline_buffer_id = CreateBuffer(0, buffer_size);
     }
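
The UpdateIndexBuffer hunk is a small logic fix rather than a rename, and it accounts for the one-line shrink visible in the next hunk header. Previously, a buffer created in the first branch was immediately size-checked by the second if, a wasted slot lookup given that CreateBuffer(0, buffer_size) already allocates at the requested aligned size; with else if, the destroy-and-recreate path is reachable only for a pre-existing, undersized buffer. A standalone model of the resulting create-or-grow flow, using toy stand-ins (unordered_map and vector, not the cache's actual slot container):

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    using BufferId = std::uint32_t;
    constexpr BufferId NULL_BUFFER_ID = 0;

    // Toy stand-ins for the cache's buffer slots.
    std::unordered_map<BufferId, std::vector<std::byte>> slot_buffers;
    BufferId next_id = 1;

    BufferId CreateBuffer(std::size_t size) {
        const BufferId id = next_id++;
        slot_buffers.emplace(id, std::vector<std::byte>(size)); // already full size
        return id;
    }

    void EnsureInlineBuffer(BufferId& inline_buffer_id, std::size_t buffer_size) {
        if (inline_buffer_id == NULL_BUFFER_ID) {
            // Fresh buffer: created at the requested size, so no second check.
            inline_buffer_id = CreateBuffer(buffer_size);
        } else if (slot_buffers[inline_buffer_id].size() < buffer_size) {
            // Existing but undersized: destroy and recreate at the new size.
            slot_buffers.erase(inline_buffer_id);
            inline_buffer_id = CreateBuffer(buffer_size);
        }
        // Otherwise the existing buffer already fits and is reused as-is.
    }

    int main() {
        BufferId id = NULL_BUFFER_ID;
        EnsureInlineBuffer(id, 4096); // creates
        EnsureInlineBuffer(id, 2048); // fits, no-op
        EnsureInlineBuffer(id, 8192); // grows by recreating
    }
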
@@ -1529,38 +1528,38 @@ void BufferCache<P>::MappedUploadMemory([[maybe_unused]] Buffer& buffer,
 }
 
 template <class P>
-bool BufferCache<P>::InlineMemory(DAddr dest_address, size_t copy_size,
+bool BufferCache<P>::InlineMemory(DAddr dst_address, size_t copy_size,
                                   std::span<const u8> inlined_buffer) {
@@ ... @@
 template <class P>
-void BufferCache<P>::InlineMemoryImplementation(DAddr dest_address, size_t copy_size,
+void BufferCache<P>::InlineMemoryImplementation(DAddr dst_address, size_t copy_size,
                                                 std::span<const u8> inlined_buffer) {
@@ ... @@ void BufferCache<P>::InlineMemoryImplementation(DAddr dest_address, size_t copy_size,
         const bool can_reorder = runtime.CanReorderUpload(buffer, copies);
         runtime.CopyBuffer(buffer, upload_staging.buffer, copies, true, can_reorder);
     } else {
-        buffer.ImmediateUpload(buffer.Offset(dest_address), inlined_buffer.first(copy_size));
+        buffer.ImmediateUpload(buffer.Offset(dst_address), inlined_buffer.first(copy_size));
     }
 }
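
Finally, the InlineMemoryImplementation hunk shows the two upload paths: a staging-buffer copy that the runtime may schedule earlier in the stream when CanReorderUpload permits, and an ImmediateUpload fallback that writes the inlined bytes directly at the buffer-relative position of dst_address. Both paths depend on translating a guest device address into an offset inside the cached buffer's backing storage. A toy sketch of that translation, with assumed member names (the real Buffer type is backend-specific):

    #include <cassert>
    #include <cstdint>

    // Toy model: a cached buffer covering [cpu_addr, cpu_addr + size).
    struct Buffer {
        std::uint64_t cpu_addr;
        std::uint64_t size;

        // Byte offset of a guest address inside this buffer's backing
        // storage, mirroring how ImmediateUpload is aimed at dst_address.
        std::uint32_t Offset(std::uint64_t device_addr) const {
            assert(device_addr >= cpu_addr && device_addr < cpu_addr + size);
            return static_cast<std::uint32_t>(device_addr - cpu_addr);
        }
    };

    int main() {
        const Buffer buffer{0x1000'0000, 0x4000};
        // An inline write to guest address 0x1000'0040 lands 0x40 bytes in.
        assert(buffer.Offset(0x1000'0040) == 0x40);
    }

Judging from the can_reorder argument in the hunk, the point of the staging path is presumably that a reorderable copy can be hoisted ahead of already-recorded work, so an inline upload arriving mid-stream does not force a pipeline split; the immediate path is the fallback when no such staging copy is available.
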