diff --git a/src/common/settings.h b/src/common/settings.h
index dd9b03f28e..c6b52f7ba3 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
@@ -161,7 +161,7 @@ struct Values {
                                                 Category::LibraryApplet};
     Setting<AppletMode> photo_viewer_applet_mode{
         linkage, AppletMode::LLE, "photo_viewer_applet_mode", Category::LibraryApplet};
-    Setting<AppletMode> offline_web_applet_mode{linkage, AppletMode::HLE, "offline_web_applet_mode",
+    Setting<AppletMode> offline_web_applet_mode{linkage, AppletMode::LLE, "offline_web_applet_mode",
                                                 Category::LibraryApplet};
     Setting<AppletMode> login_share_applet_mode{linkage, AppletMode::HLE, "login_share_applet_mode",
                                                 Category::LibraryApplet};
diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h
index 6dcf7bb228..192c6e5c01 100644
--- a/src/core/device_memory_manager.h
+++ b/src/core/device_memory_manager.h
@@ -1,3 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
 // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
@@ -109,6 +112,9 @@ public:
     void ReadBlock(DAddr address, void* dest_pointer, size_t size);
     void ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size);
+#ifdef YUZU_DEBUG
+    bool ReadBlockFastChecked(DAddr address, void* dest_pointer, size_t size);
+#endif
     void WriteBlock(DAddr address, const void* src_pointer, size_t size);
     void WriteBlockUnsafe(DAddr address, const void* src_pointer, size_t size);
diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc
index 52dff5df9a..3629579c09 100644
--- a/src/core/device_memory_manager.inc
+++ b/src/core/device_memory_manager.inc
@@ -1,3 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
 // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
@@ -467,6 +470,29 @@ void DeviceMemoryManager<Traits>::ReadBlockUnsafe(DAddr address, void* dest_poin
         });
 }
 
+#ifdef YUZU_DEBUG
+template <typename Traits>
+bool DeviceMemoryManager<Traits>::ReadBlockFastChecked(DAddr address, void* dest_pointer,
+                                                       size_t size) {
+    bool success = true;
+    WalkBlock(
+        address, size,
+        [&](size_t copy_amount, DAddr current_vaddr) {
+            LOG_CRITICAL(Render, "DeviceMemory OOB/unmapped: addr=0x{:x} size={}", current_vaddr,
+                         size);
+            std::memset(dest_pointer, 0, copy_amount);
+            success = false;
+        },
+        [&](size_t copy_amount, const u8* const src_ptr) {
+            std::memcpy(dest_pointer, src_ptr, copy_amount);
+        },
+        [&](const std::size_t copy_amount) {
+            dest_pointer = static_cast<u8*>(dest_pointer) + copy_amount;
+        });
+    return success;
+}
+#endif
+
 template <typename Traits>
 void DeviceMemoryManager<Traits>::WriteBlockUnsafe(DAddr address, const void* src_pointer,
                                                    size_t size) {
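For context on the new API: ReadBlockFastChecked is a debug-only counterpart to ReadBlockUnsafe that reports whether every chunk of the range was backed by mapped memory, zero-filling and logging the chunks that were not. Below is a minimal sketch of how a caller might select between the two paths; the CopyIntoStaging helper is illustrative and not part of this patch.

// Illustrative helper, not part of the patch: choose the checked read in debug
// builds, fall back to the unchecked fast path in release builds.
#include <cstddef>
#include <span>

template <typename DeviceMemory, typename AddressType>
bool CopyIntoStaging(DeviceMemory& memory, AddressType addr, std::span<std::byte> dst) {
#ifdef YUZU_DEBUG
    // ReadBlockFastChecked zero-fills unmapped chunks, logs, and returns false.
    return memory.ReadBlockFastChecked(addr, dst.data(), dst.size());
#else
    // ReadBlockUnsafe assumes the whole range is mapped, exactly as before.
    memory.ReadBlockUnsafe(addr, dst.data(), dst.size());
    return true;
#endif
}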

diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 5223afe937..388c8034c5 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -386,11 +386,10 @@ void BufferCache<P>::BindHostComputeBuffers() {
 template <class P>
 void BufferCache<P>::SetUniformBuffersState(const std::array<u32, NUM_STAGES>& mask,
                                             const UniformBufferSizes* sizes) {
-    if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
-        if (channel_state->enabled_uniform_buffer_masks != mask) {
-            if constexpr (IS_OPENGL) {
-                channel_state->fast_bound_uniform_buffers.fill(0);
-            }
+    const bool mask_changed = channel_state->enabled_uniform_buffer_masks != mask;
+    if (mask_changed) {
+        channel_state->fast_bound_uniform_buffers.fill(0);
+        if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
             channel_state->dirty_uniform_buffers.fill(~u32{0});
             channel_state->uniform_buffer_binding_sizes.fill({});
         }
@@ -806,7 +805,7 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
             channel_state->uniform_buffer_binding_sizes[stage][binding_index] != size;
         if (should_fast_bind) {
             // We only have to bind when the currently bound buffer is not the fast version
-            channel_state->fast_bound_uniform_buffers[stage] |= 1U << binding_index;
+            channel_state->fast_bound_uniform_buffers[stage] |= 1u << binding_index;
             channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size;
             runtime.BindFastUniformBuffer(stage, binding_index, size);
         }
@@ -815,13 +814,22 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
             return;
         }
     }
-    if constexpr (IS_OPENGL) {
-        channel_state->fast_bound_uniform_buffers[stage] |= 1U << binding_index;
-        channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size;
-    }
+    channel_state->fast_bound_uniform_buffers[stage] |= 1u << binding_index;
+    channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size;
     // Stream buffer path to avoid stalling on non-Nvidia drivers or Vulkan
     const std::span<u8> span = runtime.BindMappedUniformBuffer(stage, binding_index, size);
+#ifdef YUZU_DEBUG
+    ASSERT(binding_index < NUM_GRAPHICS_UNIFORM_BUFFERS);
+    ASSERT(span.size() >= size && "UBO stream span too small");
+    if (!device_memory.ReadBlockFastChecked(device_addr, span.data(), size)) {
+        LOG_CRITICAL(Render, "DeviceMemory OOB/unmapped: addr=0x{:x} size={}", device_addr, size);
+        channel_state->fast_bound_uniform_buffers[stage] &= ~(1u << binding_index);
+        ASSERT(false);
+        return;
+    }
+#else
     device_memory.ReadBlockUnsafe(device_addr, span.data(), size);
+#endif
         return;
     }
     // Classic cached path
@@ -830,7 +838,8 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
     }
     // Skip binding if it's not needed and if the bound buffer is not the fast version
     // This exists to avoid instances where the fast buffer is bound and a GPU write happens
-    needs_bind |= HasFastUniformBufferBound(stage, binding_index);
+    const bool was_fast_bound = HasFastUniformBufferBound(stage, binding_index);
+    needs_bind |= was_fast_bound;
     if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
         needs_bind |= channel_state->uniform_buffer_binding_sizes[stage][binding_index] != size;
     }
@@ -839,9 +848,6 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
     }
     const u32 offset = buffer.Offset(device_addr);
     if constexpr (IS_OPENGL) {
-        // Fast buffer will be unbound
-        channel_state->fast_bound_uniform_buffers[stage] &= ~(1U << binding_index);
-
         // Mark the index as dirty if offset doesn't match
        const bool is_copy_bind = offset != 0 && !runtime.SupportsNonZeroUniformOffset();
         channel_state->dirty_uniform_buffers[stage] |= (is_copy_bind ? 1U : 0U) << index;
@@ -855,6 +861,7 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
     } else {
         runtime.BindUniformBuffer(buffer, offset, size);
     }
+    channel_state->fast_bound_uniform_buffers[stage] &= ~(1u << binding_index);
 }
 
 template <class P>
@@ -1789,12 +1796,7 @@ std::span<u8> BufferCache<P>::ImmediateBuffer(size_t wanted_capacity) {
 
 template <class P>
 bool BufferCache<P>::HasFastUniformBufferBound(size_t stage, u32 binding_index) const noexcept {
-    if constexpr (IS_OPENGL) {
-        return ((channel_state->fast_bound_uniform_buffers[stage] >> binding_index) & 1) != 0;
-    } else {
-        // Only OpenGL has fast uniform buffers
-        return false;
-    }
+    return ((channel_state->fast_bound_uniform_buffers[stage] >> binding_index) & 1u) != 0;
 }
 
 template <class P>
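Net effect of the buffer_cache.h changes: fast_bound_uniform_buffers becomes a backend-agnostic per-stage bitmask, where a bit is set whenever a binding takes the fast or stream path and cleared once the classic cached path rebinds that slot. A standalone sketch of the invariant, with names mirroring the channel state (the free functions are illustrative):

// Illustrative sketch of the per-stage fast-bind bitmask invariant.
#include <array>
#include <cstddef>
#include <cstdint>

struct ChannelStateSketch {
    std::array<std::uint32_t, 5> fast_bound_uniform_buffers{}; // one mask per stage
};

// Fast/stream path: remember that this slot holds the transient buffer.
void MarkFastBound(ChannelStateSketch& s, std::size_t stage, std::uint32_t binding) {
    s.fast_bound_uniform_buffers[stage] |= 1u << binding;
}

// Classic cached path: the slot holds a real cached buffer again.
void MarkClassicBound(ChannelStateSketch& s, std::size_t stage, std::uint32_t binding) {
    s.fast_bound_uniform_buffers[stage] &= ~(1u << binding);
}

bool HasFastBound(const ChannelStateSketch& s, std::size_t stage, std::uint32_t binding) {
    return ((s.fast_bound_uniform_buffers[stage] >> binding) & 1u) != 0;
}

Clearing the bit at the end of the classic path, rather than only under IS_OPENGL, is what lets HasFastUniformBufferBound drop its IS_OPENGL special case above.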

diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index 486d19fb79..09631ffd83 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -53,6 +53,7 @@ constexpr u32 NUM_COMPUTE_UNIFORM_BUFFERS = 8;
 constexpr u32 NUM_STORAGE_BUFFERS = 16;
 constexpr u32 NUM_TEXTURE_BUFFERS = 32;
 constexpr u32 NUM_STAGES = 5;
+static_assert(NUM_GRAPHICS_UNIFORM_BUFFERS <= 32, "fast bitmask must fit u32");
 
 using UniformBufferSizes = std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>;
 using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>;
@@ -137,8 +138,8 @@ public:
     u32 written_compute_texture_buffers = 0;
     u32 image_compute_texture_buffers = 0;
 
-    std::array<u32, 16> uniform_cache_hits{};
-    std::array<u32, 16> uniform_cache_shots{};
+    std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS> uniform_cache_hits{};
+    std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS> uniform_cache_shots{};
 
     u32 uniform_buffer_skip_cache_size = DEFAULT_SKIP_CACHE_SIZE;
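The new static_assert pins down why a u32 mask is safe here: the bind paths compute 1u << binding_index, and a shift of 32 or more on a 32-bit operand is undefined behaviour. A tiny sketch of the guarded pattern; the constant's value of 18 is an assumption for illustration, not taken from this diff:

// Sketch: compile-time guard keeping a u32 bitmask and its index range in sync.
#include <cstdint>

constexpr std::uint32_t kNumUniformBuffers = 18; // assumed value for the sketch
static_assert(kNumUniformBuffers <= 32, "fast bitmask must fit u32");

// Safe for any index < kNumUniformBuffers thanks to the assert above;
// a shift by >= 32 on a 32-bit operand would be undefined behaviour.
constexpr std::uint32_t BindingBit(std::uint32_t index) {
    return std::uint32_t{1} << index;
}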
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 08513d1534..0fbe707b04 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -25,35 +25,48 @@ namespace {
 
 using namespace Common::Literals;
 
-// Maximum potential alignment of a Vulkan buffer
-constexpr VkDeviceSize MAX_ALIGNMENT = 256;
+// Minimum alignment we want to enforce for the streaming ring
+constexpr VkDeviceSize MIN_STREAM_ALIGNMENT = 256;
 // Stream buffer size in bytes
 constexpr VkDeviceSize MAX_STREAM_BUFFER_SIZE = 128_MiB;
 
-size_t GetStreamBufferSize(const Device& device) {
+size_t GetStreamBufferSize(const Device& device, VkDeviceSize alignment) {
     VkDeviceSize size{0};
     if (device.HasDebuggingToolAttached()) {
-        ForEachDeviceLocalHostVisibleHeap(device, [&size](size_t index, VkMemoryHeap& heap) {
+        bool found_heap = false;
+        ForEachDeviceLocalHostVisibleHeap(device, [&size, &found_heap](size_t /*index*/,
+                                                                       VkMemoryHeap& heap) {
             size = (std::max)(size, heap.size);
+            found_heap = true;
         });
-        // If rebar is not supported, cut the max heap size to 40%. This will allow 2 captures to be
-        // loaded at the same time in RenderDoc. If rebar is supported, this shouldn't be an issue
-        // as the heap will be much larger.
-        if (size <= 256_MiB) {
+        // If no suitable heap was found, fall back to the default cap to avoid creating a
+        // zero-sized stream buffer.
+        if (!found_heap) {
+            size = MAX_STREAM_BUFFER_SIZE;
+        } else if (size <= 256_MiB) {
+            // If rebar is not supported, cut the max heap size to 40%. This will allow 2 captures
+            // to be loaded at the same time in RenderDoc. If rebar is supported, this shouldn't
+            // be an issue as the heap will be much larger.
             size = size * 40 / 100;
         }
     } else {
         size = MAX_STREAM_BUFFER_SIZE;
     }
-    return (std::min)(Common::AlignUp(size, MAX_ALIGNMENT), MAX_STREAM_BUFFER_SIZE);
+
+    // Clamp to the configured maximum, align up for safety, and ensure a sane minimum so
+    // region_size (stream_buffer_size / NUM_SYNCS) never becomes zero.
+    const VkDeviceSize aligned =
+        (std::min)(Common::AlignUp(size, alignment), MAX_STREAM_BUFFER_SIZE);
+    const VkDeviceSize min_size = alignment * StagingBufferPool::NUM_SYNCS;
+    return static_cast<size_t>((std::max)(aligned, min_size));
 }
 
 } // Anonymous namespace
 
 StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_,
                                      Scheduler& scheduler_)
     : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
-      stream_buffer_size{GetStreamBufferSize(device)}, region_size{stream_buffer_size /
-                                                                   StagingBufferPool::NUM_SYNCS} {
+      stream_alignment{std::max<VkDeviceSize>(device_.GetUniformBufferAlignment(),
+                                              MIN_STREAM_ALIGNMENT)},
+      stream_buffer_size{GetStreamBufferSize(device_, stream_alignment)},
+      region_size{stream_buffer_size / StagingBufferPool::NUM_SYNCS} {
     VkBufferCreateInfo stream_ci = {
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
@@ -106,31 +119,54 @@ void StagingBufferPool::TickFrame() {
 }
 
 StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
-    if (AreRegionsActive(Region(free_iterator) + 1,
-                         (std::min)(Region(iterator + size) + 1, NUM_SYNCS))) {
+    const size_t alignment = static_cast<size_t>(stream_alignment);
+    const size_t aligned_size = Common::AlignUp(size, alignment);
+    const bool wraps = iterator + size >= stream_buffer_size;
+    const size_t new_iterator =
+        wraps ? aligned_size : Common::AlignUp(iterator + size, alignment);
+    const size_t begin_region = wraps ? 0 : Region(iterator);
+    const size_t last_byte = new_iterator == 0 ? 0 : new_iterator - 1;
+    const size_t end_region = (std::min)(Region(last_byte) + 1, NUM_SYNCS);
+    const size_t guard_begin = (std::min)(Region(free_iterator) + 1, NUM_SYNCS);
+
+    if (!wraps) {
+        if (guard_begin < end_region && AreRegionsActive(guard_begin, end_region)) {
+            // Avoid waiting for the previous usages to be free
+            return GetStagingBuffer(size, MemoryUsage::Upload);
+        }
+    } else if (guard_begin < NUM_SYNCS && AreRegionsActive(guard_begin, NUM_SYNCS)) {
         // Avoid waiting for the previous usages to be free
         return GetStagingBuffer(size, MemoryUsage::Upload);
     }
+
     const u64 current_tick = scheduler.CurrentTick();
     std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + Region(iterator),
               current_tick);
     used_iterator = iterator;
-    free_iterator = (std::max)(free_iterator, iterator + size);
 
-    if (iterator + size >= stream_buffer_size) {
+    if (wraps) {
         std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + NUM_SYNCS,
                   current_tick);
         used_iterator = 0;
         iterator = 0;
-        free_iterator = size;
-
-        if (AreRegionsActive(0, Region(size) + 1)) {
+        free_iterator = aligned_size;
+        const size_t head_last_byte = aligned_size == 0 ? 0 : aligned_size - 1;
+        const size_t head_end_region = (std::min)(Region(head_last_byte) + 1, NUM_SYNCS);
+        if (AreRegionsActive(0, head_end_region)) {
             // Avoid waiting for the previous usages to be free
             return GetStagingBuffer(size, MemoryUsage::Upload);
         }
     }
-    const size_t offset = iterator;
-    iterator = Common::AlignUp(iterator + size, MAX_ALIGNMENT);
+
+    std::fill(sync_ticks.begin() + begin_region, sync_ticks.begin() + end_region, current_tick);
+
+    const size_t offset = wraps ? 0 : iterator;
+    iterator = new_iterator;
+
+    if (!wraps) {
+        free_iterator = (std::max)(free_iterator, offset + aligned_size);
+    }
+
     return StagingBufferRef{
         .buffer = *stream_buffer,
         .offset = static_cast<VkDeviceSize>(offset),
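To make the rewritten GetStreamBuffer easier to follow: the stream buffer is a ring split into NUM_SYNCS fence regions, and allocations now keep the iterator aligned to stream_alignment, restarting at offset 0 on wrap so an allocation never straddles the end of the buffer. A simplified standalone model of that arithmetic; fence-region tracking (sync_ticks/AreRegionsActive) is omitted and the constants are stand-ins:

// Simplified model of the ring arithmetic in GetStreamBuffer above.
#include <cstddef>

constexpr std::size_t kAlignment = 256;      // stand-in for stream_alignment
constexpr std::size_t kBufferSize = 1 << 20; // stand-in for stream_buffer_size

constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
    return (value + align - 1) / align * align;
}

struct StreamRing {
    std::size_t iterator = 0; // next free byte, always kAlignment-aligned

    // Returns the offset an allocation of `size` bytes occupies.
    std::size_t Allocate(std::size_t size) {
        const bool wraps = iterator + size >= kBufferSize;
        // On wrap the allocation restarts at offset 0; otherwise it continues
        // at the current (already aligned) iterator.
        const std::size_t offset = wraps ? 0 : iterator;
        iterator = wraps ? AlignUp(size, kAlignment) : AlignUp(iterator + size, kAlignment);
        return offset;
    }
};

Because both the buffer size and the iterator are aligned to the device's uniform-buffer alignment, every offset handed to BindMappedUniformBuffer also satisfies minUniformBufferOffsetAlignment, and the alignment * NUM_SYNCS floor in GetStreamBufferSize keeps region_size from ever reaching zero.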
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index f63a203272..5c40ca069f 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -1,3 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
 // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later
 
@@ -102,6 +105,7 @@ private:
     MemoryAllocator& memory_allocator;
     Scheduler& scheduler;
 
+    VkDeviceSize stream_alignment;
     vk::Buffer stream_buffer;
     std::span<u8> stream_pointer;
     VkDeviceSize stream_buffer_size;
diff --git a/src/yuzu/configuration/configure_debug.cpp b/src/yuzu/configuration/configure_debug.cpp
index b825348760..18f629f639 100644
--- a/src/yuzu/configuration/configure_debug.cpp
+++ b/src/yuzu/configuration/configure_debug.cpp
@@ -83,7 +83,8 @@ void ConfigureDebug::SetConfiguration() {
 #ifdef YUZU_USE_QT_WEB_ENGINE
     ui->disable_web_applet->setChecked(UISettings::values.disable_web_applet.GetValue());
 #else
-    ui->disable_web_applet->setVisible(false);
+    ui->disable_web_applet->setEnabled(false);
+    ui->disable_web_applet->setText(tr("Web applet not compiled"));
 #endif
 }
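On the configure_debug.cpp change: hiding the checkbox left no hint that the option exists, while disabling it with replacement text explains why it is unavailable. The same pattern in isolation, using standard QWidget/QCheckBox calls (the setToolTip line is an illustrative extra, not part of the patch):

// Keep unavailable controls visible but inert, and say why.
ui->disable_web_applet->setEnabled(false);
ui->disable_web_applet->setText(tr("Web applet not compiled"));
ui->disable_web_applet->setToolTip(tr("This build was compiled without QtWebEngine."));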