Compare commits

...

3 commits

Author SHA1 Message Date
bf4e271cb3 fix
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-10-04 23:59:47 +02:00
d18efb18fa [core] use memcpy instead of hand rolling aligned cases
Hand-rolling memcpy like this is generally frowned upon because the compiler has more insight into what is going on (and the hand-rolled code compiles down to a worse version of itself in assembly). This also removes some branches that are simply redundant, and may save time, especially on systems without fastmem enabled.

Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-10-04 23:59:47 +02:00
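
A minimal standalone sketch (not the emulator's code; function names are illustrative) of the two patterns this commit contrasts: a hand-rolled aligned fast path versus the plain memcpy the compiler can see through.

#include <cstdint>
#include <cstring>

// Hand-rolled: branch on alignment, then type-pun through reinterpret_cast.
// The cast risks undefined behavior on aliased storage, and the alignment
// branch is exactly the redundancy the commit message points at.
uint32_t read_u32_hand_rolled(const uint8_t* p) {
    if ((reinterpret_cast<uintptr_t>(p) & 3) == 0) {
        return *reinterpret_cast<const uint32_t*>(p); // "fast" aligned path
    }
    uint32_t v;
    std::memcpy(&v, p, sizeof v); // fallback does the same job anyway
    return v;
}

// memcpy-only: modern compilers lower this to a single (possibly unaligned)
// load on mainstream targets, with no branch and no aliasing concerns.
uint32_t read_u32_memcpy(const uint8_t* p) {
    uint32_t v;
    std::memcpy(&v, p, sizeof v);
    return v;
}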
268918aece [vk] Implement Shader Read Barrier (#2671)
Adding the shader read barrier keeps every render/compute/transfer write visible before the image is sampled, preventing read-before-writes-finish hazards. Without it you can get random stale frames, flickering post-process passes, partially updated HUD textures, and corrupted depth-to-color conversions, especially in scenes that render into an offscreen image and immediately feed that image to a shader (reflections, bloom, dynamic resolution, depth visualizers, etc.). This fix makes those render-to-texture (R2T) chains deterministic again across all Vulkan drivers.

Co-authored-by: Ribbit <ribbit@placeholder.com>
Reviewed-on: #2671
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
Reviewed-by: crueter <crueter@eden-emu.dev>
Co-authored-by: Ribbit <ribbit@eden-emu.dev>
Co-committed-by: Ribbit <ribbit@eden-emu.dev>
2025-10-04 23:58:08 +02:00
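
For reference, a condensed sketch of the dependency the new barrier expresses, written against the raw Vulkan API rather than Eden's wrappers (the function name and parameters here are illustrative; the access and stage masks mirror the RecordShaderReadBarrier helper in the diff below).

#include <vulkan/vulkan.h>

// Make every prior attachment/shader/transfer write to "image" visible to
// subsequent shader reads. The layout stays GENERAL; only a memory
// dependency is introduced.
void MakeWritesVisibleToShaders(VkCommandBuffer cmd, VkImage image,
                                VkImageSubresourceRange range) {
    const VkImageMemoryBarrier barrier{
        .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
        .pNext = nullptr,
        .srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                         VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                         VK_ACCESS_SHADER_WRITE_BIT |
                         VK_ACCESS_TRANSFER_WRITE_BIT,
        .dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
        .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
        .newLayout = VK_IMAGE_LAYOUT_GENERAL,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = image,
        .subresourceRange = range,
    };
    vkCmdPipelineBarrier(cmd,
                         // producers: anything that may have written the image
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
                             VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
                             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                             VK_PIPELINE_STAGE_TRANSFER_BIT |
                             VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
                             VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
                         // consumers: the shaders about to sample it
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                             VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                         0, 0, nullptr, 0, nullptr, 1, &barrier);
}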
5 changed files with 131 additions and 187 deletions

View file

@@ -10,6 +10,7 @@
 #include <mutex>
 #include <span>
 #include <thread>
+#include <type_traits>
 #include <vector>

 #include "common/assert.h"
@@ -681,22 +682,17 @@ struct Memory::Impl {
         }
     }

-    [[nodiscard]] u8* GetPointerImpl(u64 vaddr, auto on_unmapped, auto on_rasterizer) const {
+    template <typename F, typename G>
+    [[nodiscard]] u8* GetPointerImpl(u64 vaddr, F&& on_unmapped, G&& on_rasterizer) const {
         // AARCH64 masks the upper 16 bit of all memory accesses
-        vaddr = vaddr & 0xffffffffffffULL;
-        if (!AddressSpaceContains(*current_page_table, vaddr, 1)) [[unlikely]] {
-            on_unmapped();
-            return nullptr;
-        } else {
+        vaddr &= 0xffffffffffffULL;
+        if (AddressSpaceContains(*current_page_table, vaddr, 1)) [[likely]] {
             // Avoid adding any extra logic to this fast-path block
             const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
-            if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
+            if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) [[likely]] {
                 return reinterpret_cast<u8*>(pointer + vaddr);
             } else {
                 switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
-                case Common::PageType::Unmapped:
-                    on_unmapped();
-                    return nullptr;
                 case Common::PageType::Memory:
                     ASSERT_MSG(false, "Mapped memory page without a pointer @ 0x{:016X}", vaddr);
                     return nullptr;
@@ -707,11 +703,18 @@ struct Memory::Impl {
                     on_rasterizer();
                     return host_ptr;
                 }
+                case Common::PageType::Unmapped: [[unlikely]] {
+                    on_unmapped();
+                    return nullptr;
+                }
                 default:
                     UNREACHABLE();
                 }
                 return nullptr;
             }
+        } else {
+            on_unmapped();
+            return nullptr;
         }
     }
@@ -729,172 +732,38 @@ struct Memory::Impl {
             GetInteger(vaddr), []() {}, []() {});
     }

-    /**
-     * Reads a particular data type out of memory at the given virtual address.
-     *
-     * @param vaddr The virtual address to read the data type from.
-     *
-     * @tparam T The data type to read out of memory. This type *must* be
-     *           trivially copyable, otherwise the behavior of this function
-     *           is undefined.
-     *
-     * @returns The instance of T read from the specified virtual address.
-     */
+    /// @brief Reads a particular data type out of memory at the given virtual address.
+    /// @param vaddr The virtual address to read the data type from.
+    /// @tparam T The data type to read out of memory.
+    /// @returns The instance of T read from the specified virtual address.
     template <typename T>
-    T Read(Common::ProcessAddress vaddr) {
-        // Fast path for aligned reads of common sizes
+    inline T Read(Common::ProcessAddress vaddr) noexcept requires(std::is_trivially_copyable_v<T>) {
         const u64 addr = GetInteger(vaddr);
-        if constexpr (std::is_same_v<T, u8> || std::is_same_v<T, s8>) {
-            // 8-bit reads are always aligned
-            const u8* const ptr = GetPointerImpl(
-                addr,
-                [addr]() {
-                    LOG_ERROR(HW_Memory, "Unmapped Read8 @ 0x{:016X}", addr);
-                },
-                [&]() { HandleRasterizerDownload(addr, sizeof(T)); });
-            if (ptr) {
-                return static_cast<T>(*ptr);
-            }
-            return 0;
-        } else if constexpr (std::is_same_v<T, u16_le> || std::is_same_v<T, s16_le>) {
-            // Check alignment for 16-bit reads
-            if ((addr & 1) == 0) {
-                const u8* const ptr = GetPointerImpl(
-                    addr,
-                    [addr]() {
-                        LOG_ERROR(HW_Memory, "Unmapped Read16 @ 0x{:016X}", addr);
-                    },
-                    [&]() { HandleRasterizerDownload(addr, sizeof(T)); });
-                if (ptr) {
-                    return static_cast<T>(*reinterpret_cast<const u16*>(ptr));
-                }
-            }
-        } else if constexpr (std::is_same_v<T, u32_le> || std::is_same_v<T, s32_le>) {
-            // Check alignment for 32-bit reads
-            if ((addr & 3) == 0) {
-                const u8* const ptr = GetPointerImpl(
-                    addr,
-                    [addr]() {
-                        LOG_ERROR(HW_Memory, "Unmapped Read32 @ 0x{:016X}", addr);
-                    },
-                    [&]() { HandleRasterizerDownload(addr, sizeof(T)); });
-                if (ptr) {
-                    return static_cast<T>(*reinterpret_cast<const u32*>(ptr));
-                }
-            }
-        } else if constexpr (std::is_same_v<T, u64_le> || std::is_same_v<T, s64_le>) {
-            // Check alignment for 64-bit reads
-            if ((addr & 7) == 0) {
-                const u8* const ptr = GetPointerImpl(
-                    addr,
-                    [addr]() {
-                        LOG_ERROR(HW_Memory, "Unmapped Read64 @ 0x{:016X}", addr);
-                    },
-                    [&]() { HandleRasterizerDownload(addr, sizeof(T)); });
-                if (ptr) {
-                    return static_cast<T>(*reinterpret_cast<const u64*>(ptr));
-                }
-            }
-        }
-        // Fall back to the general case for other types or unaligned access
-        T result = 0;
-        const u8* const ptr = GetPointerImpl(
-            addr,
-            [addr]() {
-                LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, addr);
-            },
-            [&]() { HandleRasterizerDownload(addr, sizeof(T)); });
-        if (ptr) {
+        if (auto const ptr = GetPointerImpl(addr, [addr]() {
+            LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, addr);
+        }, [&]() {
+            HandleRasterizerDownload(addr, sizeof(T));
+        }); ptr) [[likely]] {
+            // It may be tempting to rewrite this particular section to use "reinterpret_cast";
+            // afterall, it's trivially copyable so surely it can be copied ov- Alignment.
+            // Remember, alignment. memcpy() will deal with all the alignment extremely fast.
+            T result{};
             std::memcpy(&result, ptr, sizeof(T));
+            return result;
         }
-        return result;
+        return T{};
     }

-    /**
-     * Writes a particular data type to memory at the given virtual address.
-     *
-     * @param vaddr The virtual address to write the data type to.
-     *
-     * @tparam T The data type to write to memory. This type *must* be
-     *           trivially copyable, otherwise the behavior of this function
-     *           is undefined.
-     */
+    /// @brief Writes a particular data type to memory at the given virtual address.
+    /// @param vaddr The virtual address to write the data type to.
+    /// @tparam T The data type to write to memory.
     template <typename T>
-    void Write(Common::ProcessAddress vaddr, const T data) {
-        // Fast path for aligned writes of common sizes
+    inline void Write(Common::ProcessAddress vaddr, const T data) noexcept requires(std::is_trivially_copyable_v<T>) {
         const u64 addr = GetInteger(vaddr);
-        if constexpr (std::is_same_v<T, u8> || std::is_same_v<T, s8>) {
-            // 8-bit writes are always aligned
-            u8* const ptr = GetPointerImpl(
-                addr,
-                [addr, data]() {
-                    LOG_ERROR(HW_Memory, "Unmapped Write8 @ 0x{:016X} = 0x{:02X}", addr,
-                              static_cast<u8>(data));
-                },
-                [&]() { HandleRasterizerWrite(addr, sizeof(T)); });
-            if (ptr) {
-                *ptr = static_cast<u8>(data);
-            }
-            return;
-        } else if constexpr (std::is_same_v<T, u16_le> || std::is_same_v<T, s16_le>) {
-            // Check alignment for 16-bit writes
-            if ((addr & 1) == 0) {
-                u8* const ptr = GetPointerImpl(
-                    addr,
-                    [addr, data]() {
-                        LOG_ERROR(HW_Memory, "Unmapped Write16 @ 0x{:016X} = 0x{:04X}", addr,
-                                  static_cast<u16>(data));
-                    },
-                    [&]() { HandleRasterizerWrite(addr, sizeof(T)); });
-                if (ptr) {
-                    *reinterpret_cast<u16*>(ptr) = static_cast<u16>(data);
-                    return;
-                }
-            }
-        } else if constexpr (std::is_same_v<T, u32_le> || std::is_same_v<T, s32_le>) {
-            // Check alignment for 32-bit writes
-            if ((addr & 3) == 0) {
-                u8* const ptr = GetPointerImpl(
-                    addr,
-                    [addr, data]() {
-                        LOG_ERROR(HW_Memory, "Unmapped Write32 @ 0x{:016X} = 0x{:08X}", addr,
-                                  static_cast<u32>(data));
-                    },
-                    [&]() { HandleRasterizerWrite(addr, sizeof(T)); });
-                if (ptr) {
-                    *reinterpret_cast<u32*>(ptr) = static_cast<u32>(data);
-                    return;
-                }
-            }
-        } else if constexpr (std::is_same_v<T, u64_le> || std::is_same_v<T, s64_le>) {
-            // Check alignment for 64-bit writes
-            if ((addr & 7) == 0) {
-                u8* const ptr = GetPointerImpl(
-                    addr,
-                    [addr, data]() {
-                        LOG_ERROR(HW_Memory, "Unmapped Write64 @ 0x{:016X} = 0x{:016X}", addr,
-                                  static_cast<u64>(data));
-                    },
-                    [&]() { HandleRasterizerWrite(addr, sizeof(T)); });
-                if (ptr) {
-                    *reinterpret_cast<u64*>(ptr) = static_cast<u64>(data);
-                    return;
-                }
-            }
-        }
-        // Fall back to the general case for other types or unaligned access
-        u8* const ptr = GetPointerImpl(
-            addr,
-            [addr, data]() {
-                LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8,
-                          addr, static_cast<u64>(data));
-            },
-            [&]() { HandleRasterizerWrite(addr, sizeof(T)); });
-        if (ptr) {
+        if (auto const ptr = GetPointerImpl(addr, [addr, data]() {
+            LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8, addr, u64(data));
+        }, [&]() { HandleRasterizerWrite(addr, sizeof(T)); }); ptr) [[likely]]
             std::memcpy(ptr, &data, sizeof(T));
-        }
     }

     template <typename T>

View file

@@ -156,6 +156,8 @@ void MaxwellDMA::Launch() {
 }

 void MaxwellDMA::CopyBlockLinearToPitch() {
     u32 bytes_per_pixel = 1;
     DMA::ImageOperand src_operand;
     src_operand.bytes_per_pixel = bytes_per_pixel;

View file

@@ -46,6 +46,38 @@ namespace Vulkan {
 using VideoCommon::ImageViewType;

 namespace {
+[[nodiscard]] VkImageAspectFlags AspectMaskFromFormat(VideoCore::Surface::PixelFormat format) {
+    using VideoCore::Surface::SurfaceType;
+    switch (VideoCore::Surface::GetFormatType(format)) {
+    case SurfaceType::ColorTexture:
+        return VK_IMAGE_ASPECT_COLOR_BIT;
+    case SurfaceType::Depth:
+        return VK_IMAGE_ASPECT_DEPTH_BIT;
+    case SurfaceType::Stencil:
+        return VK_IMAGE_ASPECT_STENCIL_BIT;
+    case SurfaceType::DepthStencil:
+        return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
+    default:
+        return VK_IMAGE_ASPECT_COLOR_BIT;
+    }
+}
+
+[[nodiscard]] VkImageSubresourceRange SubresourceRangeFromView(const ImageView& image_view) {
+    auto range = image_view.range;
+    if ((image_view.flags & VideoCommon::ImageViewFlagBits::Slice) != VideoCommon::ImageViewFlagBits{}) {
+        range.base.layer = 0;
+        range.extent.layers = 1;
+    }
+    return VkImageSubresourceRange{
+        .aspectMask = AspectMaskFromFormat(image_view.format),
+        .baseMipLevel = static_cast<u32>(range.base.level),
+        .levelCount = static_cast<u32>(range.extent.levels),
+        .baseArrayLayer = static_cast<u32>(range.base.layer),
+        .layerCount = static_cast<u32>(range.extent.layers),
+    };
+}
+
 struct PushConstants {
     std::array<float, 2> tex_scale;
     std::array<float, 2> tex_offset;
@@ -417,6 +449,40 @@ void TransitionImageLayout(vk::CommandBuffer& cmdbuf, VkImage image, VkImageLayo
                            0, barrier);
 }

+void RecordShaderReadBarrier(Scheduler& scheduler, const ImageView& image_view) {
+    const VkImage image = image_view.ImageHandle();
+    const VkImageSubresourceRange subresource_range = SubresourceRangeFromView(image_view);
+    scheduler.RequestOutsideRenderPassOperationContext();
+    scheduler.Record([image, subresource_range](vk::CommandBuffer cmdbuf) {
+        const VkImageMemoryBarrier barrier{
+            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+                             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+                             VK_ACCESS_SHADER_WRITE_BIT |
+                             VK_ACCESS_TRANSFER_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
+            .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
+            .newLayout = VK_IMAGE_LAYOUT_GENERAL,
+            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .image = image,
+            .subresourceRange = subresource_range,
+        };
+        cmdbuf.PipelineBarrier(
+            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
+                VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
+                VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+                VK_PIPELINE_STAGE_TRANSFER_BIT |
+                VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
+                VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
+            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+                VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+            0,
+            barrier);
+    });
+}
+
 void BeginRenderPass(vk::CommandBuffer& cmdbuf, const Framebuffer* framebuffer) {
     const VkRenderPass render_pass = framebuffer->RenderPass();
     const VkFramebuffer framebuffer_handle = framebuffer->Handle();
@@ -484,7 +550,7 @@ BlitImageHelper::BlitImageHelper(const Device& device_, Scheduler& scheduler_,

 BlitImageHelper::~BlitImageHelper() = default;

-void BlitImageHelper::BlitColor(const Framebuffer* dst_framebuffer, VkImageView src_view,
+void BlitImageHelper::BlitColor(const Framebuffer* dst_framebuffer, const ImageView& src_image_view,
                                 const Region2D& dst_region, const Region2D& src_region,
                                 Tegra::Engines::Fermi2D::Filter filter,
                                 Tegra::Engines::Fermi2D::Operation operation) {
@@ -496,10 +562,12 @@ void BlitImageHelper::BlitColor(const Framebuffer* dst_framebuffer, VkImageView
     const VkPipelineLayout layout = *one_texture_pipeline_layout;
     const VkSampler sampler = is_linear ? *linear_sampler : *nearest_sampler;
     const VkPipeline pipeline = FindOrEmplaceColorPipeline(key);
+    const VkImageView src_view = src_image_view.Handle(Shader::TextureType::Color2D);
+    RecordShaderReadBarrier(scheduler, src_image_view);
     scheduler.RequestRenderpass(dst_framebuffer);
     scheduler.Record([this, dst_region, src_region, pipeline, layout, sampler,
                       src_view](vk::CommandBuffer cmdbuf) {
-        // TODO: Barriers
         const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
         UpdateOneTextureDescriptorSet(device, descriptor_set, sampler, src_view);
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
@@ -538,7 +606,7 @@ void BlitImageHelper::BlitColor(const Framebuffer* dst_framebuffer, VkImageView
 }

 void BlitImageHelper::BlitDepthStencil(const Framebuffer* dst_framebuffer,
-                                       VkImageView src_depth_view, VkImageView src_stencil_view,
+                                       ImageView& src_image_view,
                                        const Region2D& dst_region, const Region2D& src_region,
                                        Tegra::Engines::Fermi2D::Filter filter,
                                        Tegra::Engines::Fermi2D::Operation operation) {
@@ -554,10 +622,13 @@ void BlitImageHelper::BlitDepthStencil(const Framebuffer* dst_framebuffer,
     const VkPipelineLayout layout = *two_textures_pipeline_layout;
     const VkSampler sampler = *nearest_sampler;
     const VkPipeline pipeline = FindOrEmplaceDepthStencilPipeline(key);
+    const VkImageView src_depth_view = src_image_view.DepthView();
+    const VkImageView src_stencil_view = src_image_view.StencilView();
+    RecordShaderReadBarrier(scheduler, src_image_view);
     scheduler.RequestRenderpass(dst_framebuffer);
     scheduler.Record([dst_region, src_region, pipeline, layout, sampler, src_depth_view,
                       src_stencil_view, this](vk::CommandBuffer cmdbuf) {
-        // TODO: Barriers
         const VkDescriptorSet descriptor_set = two_textures_descriptor_allocator.Commit();
         UpdateTwoTexturesDescriptorSet(device, descriptor_set, sampler, src_depth_view,
                                        src_stencil_view);
@@ -692,6 +763,7 @@ void BlitImageHelper::Convert(VkPipeline pipeline, const Framebuffer* dst_frameb
     const VkSampler sampler = *nearest_sampler;
     const VkExtent2D extent = GetConversionExtent(src_image_view);

+    RecordShaderReadBarrier(scheduler, src_image_view);
     scheduler.RequestRenderpass(dst_framebuffer);
     scheduler.Record([pipeline, layout, sampler, src_view, extent, this](vk::CommandBuffer cmdbuf) {
         const VkOffset2D offset{
@@ -717,7 +789,6 @@ void BlitImageHelper::Convert(VkPipeline pipeline, const Framebuffer* dst_frameb
         const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
         UpdateOneTextureDescriptorSet(device, descriptor_set, sampler, src_view);

-        // TODO: Barriers
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
         cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set,
                                   nullptr);
@@ -737,6 +808,7 @@ void BlitImageHelper::ConvertDepthStencil(VkPipeline pipeline, const Framebuffer
     const VkSampler sampler = *nearest_sampler;
     const VkExtent2D extent = GetConversionExtent(src_image_view);

+    RecordShaderReadBarrier(scheduler, src_image_view);
     scheduler.RequestRenderpass(dst_framebuffer);
     scheduler.Record([pipeline, layout, sampler, src_depth_view, src_stencil_view, extent,
                       this](vk::CommandBuffer cmdbuf) {
@@ -763,7 +835,6 @@ void BlitImageHelper::ConvertDepthStencil(VkPipeline pipeline, const Framebuffer
         const VkDescriptorSet descriptor_set = two_textures_descriptor_allocator.Commit();
         UpdateTwoTexturesDescriptorSet(device, descriptor_set, sampler, src_depth_view,
                                        src_stencil_view);
-        // TODO: Barriers
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
         cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set,
                                   nullptr);

View file

@@ -1,4 +1,7 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

 #pragma once
@@ -43,7 +46,7 @@ public:
                     StateTracker& state_tracker, DescriptorPool& descriptor_pool);
     ~BlitImageHelper();

-    void BlitColor(const Framebuffer* dst_framebuffer, VkImageView src_image_view,
+    void BlitColor(const Framebuffer* dst_framebuffer, const ImageView& src_image_view,
                    const Region2D& dst_region, const Region2D& src_region,
                    Tegra::Engines::Fermi2D::Filter filter,
                    Tegra::Engines::Fermi2D::Operation operation);
@@ -52,9 +55,9 @@ public:
                    VkImage src_image, VkSampler src_sampler, const Region2D& dst_region,
                    const Region2D& src_region, const Extent3D& src_size);

-    void BlitDepthStencil(const Framebuffer* dst_framebuffer, VkImageView src_depth_view,
-                          VkImageView src_stencil_view, const Region2D& dst_region,
-                          const Region2D& src_region, Tegra::Engines::Fermi2D::Filter filter,
+    void BlitDepthStencil(const Framebuffer* dst_framebuffer, ImageView& src_image_view,
+                          const Region2D& dst_region, const Region2D& src_region,
+                          Tegra::Engines::Fermi2D::Filter filter,
                           Tegra::Engines::Fermi2D::Operation operation);

     void ConvertD32ToR32(const Framebuffer* dst_framebuffer, const ImageView& src_image_view);

View file

@@ -1086,8 +1086,8 @@ void TextureCacheRuntime::BlitImage(Framebuffer* dst_framebuffer, ImageView& dst
         return;
     }
     if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT && !is_src_msaa && !is_dst_msaa) {
-        blit_image_helper.BlitColor(dst_framebuffer, src.Handle(Shader::TextureType::Color2D),
-                                    dst_region, src_region, filter, operation);
+        blit_image_helper.BlitColor(dst_framebuffer, src, dst_region, src_region, filter,
+                                    operation);
         return;
     }
     ASSERT(src.format == dst.format);
@@ -1106,8 +1106,8 @@ void TextureCacheRuntime::BlitImage(Framebuffer* dst_framebuffer, ImageView& dst
     }();
     if (!can_blit_depth_stencil) {
         UNIMPLEMENTED_IF(is_src_msaa || is_dst_msaa);
-        blit_image_helper.BlitDepthStencil(dst_framebuffer, src.DepthView(), src.StencilView(),
-                                           dst_region, src_region, filter, operation);
+        blit_image_helper.BlitDepthStencil(dst_framebuffer, src, dst_region, src_region,
+                                           filter, operation);
         return;
     }
 }
@@ -1968,18 +1968,17 @@ bool Image::BlitScaleHelper(bool scale_up) {
             blit_framebuffer =
                 std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent, scale_up);
         }
-        const auto color_view = blit_view->Handle(Shader::TextureType::Color2D);
-        runtime->blit_image_helper.BlitColor(blit_framebuffer.get(), color_view, dst_region,
+        runtime->blit_image_helper.BlitColor(blit_framebuffer.get(), *blit_view, dst_region,
                                              src_region, operation, BLIT_OPERATION);
     } else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
         if (!blit_framebuffer) {
             blit_framebuffer =
                 std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent, scale_up);
         }
-        runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), blit_view->DepthView(),
-                                                    blit_view->StencilView(), dst_region,
-                                                    src_region, operation, BLIT_OPERATION);
+        runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), *blit_view,
+                                                    dst_region, src_region, operation,
+                                                    BLIT_OPERATION);
     } else {
         // TODO: Use helper blits where applicable
         flags &= ~ImageFlagBits::Rescaled;