revert [vk] StreamBuffer Changes (#2684)
All checks were successful
eden-license / license-header (pull_request) Successful in 29s
revert [vk] StreamBuffer Changes (#2684)

Tighter ring allocation + sync tracking with correct wrap/no-wrap, alignment, and per-range sync_ticks; update head/tail only when needed (wrap to 0 if the tail won't fit). Adds safer sizing under debug tools by preferring device-local host-visible heaps with a fallback. Expected result: fewer stalls, steadier reuse, higher upload throughput.

Co-authored-by: Ribbit <ribbit@placeholder.com>
Reviewed-on: #2684
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
Reviewed-by: Lizzie <lizzie@eden-emu.dev>
Reviewed-by: CamilleLaVey <camillelavey99@gmail.com>
Co-authored-by: Ribbit <ribbit@eden-emu.dev>
Co-committed-by: Ribbit <ribbit@eden-emu.dev>
This commit is contained in:
parent 3c6ef765af
commit 5f38b77b5e
1 changed file with 14 additions and 46 deletions
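The commit message refers to NUM_SYNCS regions and per-range sync_ticks without spelling out the model. As a rough mental sketch (illustrative only; the constants, RegionOf, and gpu_completed_tick below are assumptions, not the emulator's actual values or names): the stream buffer is split into NUM_SYNCS equal regions, each region remembers the scheduler tick of its last use, and a byte range may only be reused once every region it touches has had its tick signaled.

#include <array>
#include <cstddef>
#include <cstdint>

constexpr size_t NUM_SYNCS = 16;                      // assumed region count
constexpr size_t STREAM_BUFFER_SIZE = 128ull << 20;   // assumed 128 MiB stream buffer
constexpr size_t REGION_SIZE = STREAM_BUFFER_SIZE / NUM_SYNCS;

// Map a byte offset inside the stream buffer to the region that tracks it.
constexpr size_t RegionOf(size_t offset) {
    return offset / REGION_SIZE;
}

// One sync tick per region: the last scheduler tick that wrote into that region.
std::array<uint64_t, NUM_SYNCS> sync_ticks{};

// A half-open region range is "active" while any of its ticks is still pending.
bool AreRegionsActive(size_t begin, size_t end, uint64_t gpu_completed_tick) {
    for (size_t i = begin; i < end; ++i) {
        if (sync_ticks[i] > gpu_completed_tick) {
            return true; // reusing these bytes now would race the GPU
        }
    }
    return false;
}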
@@ -33,29 +33,19 @@ constexpr VkDeviceSize MAX_STREAM_BUFFER_SIZE = 128_MiB;
 size_t GetStreamBufferSize(const Device& device) {
     VkDeviceSize size{0};
     if (device.HasDebuggingToolAttached()) {
-        bool found_heap = false;
-        ForEachDeviceLocalHostVisibleHeap(device, [&size, &found_heap](size_t /*index*/, VkMemoryHeap& heap) {
+        ForEachDeviceLocalHostVisibleHeap(device, [&size](size_t index, VkMemoryHeap& heap) {
             size = (std::max)(size, heap.size);
-            found_heap = true;
         });
-        // If no suitable heap was found fall back to the default cap to avoid creating a zero-sized stream buffer.
-        if (!found_heap) {
-            size = MAX_STREAM_BUFFER_SIZE;
-        } else if (size <= 256_MiB) {
-            // If rebar is not supported, cut the max heap size to 40%. This will allow 2 captures to be
-            // loaded at the same time in RenderDoc. If rebar is supported, this shouldn't be an issue
-            // as the heap will be much larger.
+        // If rebar is not supported, cut the max heap size to 40%. This will allow 2 captures to be
+        // loaded at the same time in RenderDoc. If rebar is supported, this shouldn't be an issue
+        // as the heap will be much larger.
+        if (size <= 256_MiB) {
             size = size * 40 / 100;
         }
     } else {
         size = MAX_STREAM_BUFFER_SIZE;
     }
-
-    // Clamp to the configured maximum, align up for safety, and ensure a sane minimum so
-    // region_size (stream_buffer_size / NUM_SYNCS) never becomes zero.
-    const VkDeviceSize aligned = (std::min)(Common::AlignUp(size, MAX_ALIGNMENT), MAX_STREAM_BUFFER_SIZE);
-    const VkDeviceSize min_size = MAX_ALIGNMENT * StagingBufferPool::NUM_SYNCS;
-    return static_cast<size_t>((std::max)(aligned, min_size));
+    return (std::min)(Common::AlignUp(size, MAX_ALIGNMENT), MAX_STREAM_BUFFER_SIZE);
 }
 
 } // Anonymous namespace
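To make the restored sizing path concrete, here is a small standalone sketch of the computation above for a debugger-attached device whose largest device-local, host-visible heap is 256 MiB. Only the 128 MiB cap and the 40% cut come from the hunk itself; MAX_ALIGNMENT = 256 bytes and the heap size are assumptions for the example.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    constexpr uint64_t MiB = 1024ull * 1024ull;
    constexpr uint64_t MAX_ALIGNMENT = 256;              // assumed alignment
    constexpr uint64_t MAX_STREAM_BUFFER_SIZE = 128 * MiB;

    uint64_t size = 256 * MiB; // largest device-local, host-visible heap found (example)

    // Debugger attached and no ReBAR: keep only 40% so two RenderDoc captures can coexist.
    if (size <= 256 * MiB) {
        size = size * 40 / 100; // ~102.4 MiB
    }

    // Align up (stand-in for Common::AlignUp), then clamp to the configured maximum.
    const uint64_t aligned = (size + MAX_ALIGNMENT - 1) / MAX_ALIGNMENT * MAX_ALIGNMENT;
    const uint64_t result = std::min(aligned, MAX_STREAM_BUFFER_SIZE);
    std::printf("stream buffer size: %llu bytes (~%.1f MiB)\n",
                static_cast<unsigned long long>(result), result / double(MiB));
}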
@@ -116,53 +106,31 @@ void StagingBufferPool::TickFrame() {
 }
 
 StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
-    const size_t aligned_size = Common::AlignUp(size, MAX_ALIGNMENT);
-    const bool wraps = iterator + size >= stream_buffer_size;
-    const size_t new_iterator =
-        wraps ? aligned_size : Common::AlignUp(iterator + size, MAX_ALIGNMENT);
-    const size_t begin_region = wraps ? 0 : Region(iterator);
-    const size_t last_byte = new_iterator == 0 ? 0 : new_iterator - 1;
-    const size_t end_region = (std::min)(Region(last_byte) + 1, NUM_SYNCS);
-    const size_t guard_begin = (std::min)(Region(free_iterator) + 1, NUM_SYNCS);
-
-    if (!wraps) {
-        if (guard_begin < end_region && AreRegionsActive(guard_begin, end_region)) {
-            // Avoid waiting for the previous usages to be free
-            return GetStagingBuffer(size, MemoryUsage::Upload);
-        }
-    } else if (guard_begin < NUM_SYNCS && AreRegionsActive(guard_begin, NUM_SYNCS)) {
+    if (AreRegionsActive(Region(free_iterator) + 1,
+                         (std::min)(Region(iterator + size) + 1, NUM_SYNCS))) {
         // Avoid waiting for the previous usages to be free
         return GetStagingBuffer(size, MemoryUsage::Upload);
     }
 
     const u64 current_tick = scheduler.CurrentTick();
     std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + Region(iterator),
               current_tick);
     used_iterator = iterator;
+    free_iterator = (std::max)(free_iterator, iterator + size);
 
-    if (wraps) {
+    if (iterator + size >= stream_buffer_size) {
         std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + NUM_SYNCS,
                   current_tick);
         used_iterator = 0;
         iterator = 0;
         free_iterator = size;
-        const size_t head_last_byte = aligned_size == 0 ? 0 : aligned_size - 1;
-        const size_t head_end_region = (std::min)(Region(head_last_byte) + 1, NUM_SYNCS);
-        if (AreRegionsActive(0, head_end_region)) {
+        if (AreRegionsActive(0, Region(size) + 1)) {
             // Avoid waiting for the previous usages to be free
             return GetStagingBuffer(size, MemoryUsage::Upload);
         }
     }
-
-    std::fill(sync_ticks.begin() + begin_region, sync_ticks.begin() + end_region, current_tick);
-
-    const size_t offset = wraps ? 0 : iterator;
-    iterator = new_iterator;
-
-    if (!wraps) {
-        free_iterator = (std::max)(free_iterator, offset + size);
-    }
-
+    const size_t offset = iterator;
+    iterator = Common::AlignUp(iterator + size, MAX_ALIGNMENT);
     return StagingBufferRef{
         .buffer = *stream_buffer,
         .offset = static_cast<VkDeviceSize>(offset),
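For readers following the control flow being restored in GetStreamBuffer, the sketch below re-implements the same ring allocation as a toy, self-contained type. ToyStreamBuffer, Reserve, and gpu_finished are made-up names, the fence check is simplified to a plain tick comparison, and the constants are placeholders; it is a behavioral sketch, not the pool's real implementation.

#include <algorithm>
#include <array>
#include <cstdint>
#include <optional>

constexpr size_t NUM_SYNCS = 16;
constexpr size_t BUFFER_SIZE = 1 << 20; // 1 MiB toy buffer
constexpr size_t ALIGNMENT = 256;
constexpr size_t Region(size_t offset) { return offset / (BUFFER_SIZE / NUM_SYNCS); }

struct ToyStreamBuffer {
    std::array<uint64_t, NUM_SYNCS> sync_ticks{};
    size_t iterator = 0;       // next write position
    size_t used_iterator = 0;  // start of the range written this tick
    size_t free_iterator = 0;  // highest byte handed out so far
    uint64_t current_tick = 1;
    uint64_t gpu_finished = 0; // last tick the GPU has completed (assumption)

    bool AreRegionsActive(size_t begin, size_t end) const {
        for (size_t i = begin; i < end; ++i) {
            if (sync_ticks[i] > gpu_finished) return true;
        }
        return false;
    }

    // Returns the offset of the reservation, or std::nullopt when the caller
    // should fall back to a dedicated staging buffer instead of stalling.
    std::optional<size_t> Reserve(size_t size) {
        if (AreRegionsActive(Region(free_iterator) + 1,
                             std::min(Region(iterator + size) + 1, NUM_SYNCS))) {
            return std::nullopt;
        }
        // Mark the regions written since the last reservation with the current tick.
        std::fill(sync_ticks.begin() + Region(used_iterator),
                  sync_ticks.begin() + Region(iterator), current_tick);
        used_iterator = iterator;
        free_iterator = std::max(free_iterator, iterator + size);

        if (iterator + size >= BUFFER_SIZE) { // tail does not fit: wrap to the start
            std::fill(sync_ticks.begin() + Region(used_iterator),
                      sync_ticks.begin() + NUM_SYNCS, current_tick);
            used_iterator = 0;
            iterator = 0;
            free_iterator = size;
            if (AreRegionsActive(0, Region(size) + 1)) {
                return std::nullopt;
            }
        }
        const size_t offset = iterator;
        iterator = (iterator + size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
        return offset;
    }
};

Returning std::nullopt here models the "avoid waiting for the previous usages to be free" branches above, where the real pool hands out a dedicated upload staging buffer rather than stalling on the ring.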