Compare commits


2 commits

Author  SHA1        Message                                                            Date
Ribbit  7bbeafc0ca  more changes might have overlooked something                       2025-10-08 20:31:00 +02:00
                    (Some checks failed: eden-license / license-header (pull_request), failing after 23s)
Ribbit  cb6da0409b  [SPIR-V] Auto apply flat interpolation to integer fragment inputs  2025-10-08 20:31:00 +02:00
15 changed files with 370 additions and 174 deletions

View file

@@ -1,3 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
 // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

@@ -109,6 +112,9 @@ public:
     void ReadBlock(DAddr address, void* dest_pointer, size_t size);
     void ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size);
+#ifdef YUZU_DEBUG
+    bool ReadBlockFastChecked(DAddr address, void* dest_pointer, size_t size);
+#endif
     void WriteBlock(DAddr address, const void* src_pointer, size_t size);
     void WriteBlockUnsafe(DAddr address, const void* src_pointer, size_t size);

View file

@@ -1,3 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
 // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

@@ -467,6 +470,29 @@ void DeviceMemoryManager<Traits>::ReadBlockUnsafe(DAddr address, void* dest_poin
         });
 }

+#ifdef YUZU_DEBUG
+template <typename Traits>
+bool DeviceMemoryManager<Traits>::ReadBlockFastChecked(DAddr address, void* dest_pointer,
+                                                       size_t size) {
+    bool success = true;
+    WalkBlock(
+        address, size,
+        [&](size_t copy_amount, DAddr current_vaddr) {
+            LOG_CRITICAL(Render, "DeviceMemory OOB/unmapped: addr=0x{:x} size={}", current_vaddr,
+                         size);
+            std::memset(dest_pointer, 0, copy_amount);
+            success = false;
+        },
+        [&](size_t copy_amount, const u8* const src_ptr) {
+            std::memcpy(dest_pointer, src_ptr, copy_amount);
+        },
+        [&](const std::size_t copy_amount) {
+            dest_pointer = static_cast<u8*>(dest_pointer) + copy_amount;
+        });
+    return success;
+}
+#endif
+
 template <typename Traits>
 void DeviceMemoryManager<Traits>::WriteBlockUnsafe(DAddr address, const void* src_pointer,
                                                    size_t size) {
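As a minimal sketch of the intended call pattern (the buffer and address names are illustrative, not from this diff): in debug builds the checked variant reports unmapped ranges instead of silently reading garbage, while release builds keep the unchecked fast path.

// Hypothetical caller; device_memory, addr and staging are assumed names.
std::array<u8, 256> staging{};
#ifdef YUZU_DEBUG
if (!device_memory.ReadBlockFastChecked(addr, staging.data(), staging.size())) {
    // The walk hit an unmapped page: the destination was zero-filled and the
    // failure logged, so bail out rather than consume garbage.
    return;
}
#else
device_memory.ReadBlockUnsafe(addr, staging.data(), staging.size());
#endif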

View file

@@ -509,9 +509,6 @@ std::vector<std::string> ProfileManager::FindOrphanedProfiles()
         good_uuids.emplace_back(uuid_string);
     }

-    // used for acnh, etc
-    good_uuids.emplace_back("00000000000000000000000000000000");
-
     // TODO: fetch save_id programmatically
     const auto path = Common::FS::GetEdenPath(Common::FS::EdenPath::NANDDir)
         / "user/save/0000000000000000";

View file

@@ -1,6 +1,3 @@
-// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
-// SPDX-License-Identifier: GPL-3.0-or-later
-
 // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

@@ -71,6 +68,42 @@ std::string ResolveURL(const std::string& url) {
     return url.substr(0, index) + "lp1" + url.substr(index + 1);
 }

+WebArgInputTLVMap ReadWebArgs(const std::vector<u8>& web_arg, WebArgHeader& web_arg_header) {
+    std::memcpy(&web_arg_header, web_arg.data(), sizeof(WebArgHeader));
+    if (web_arg.size() == sizeof(WebArgHeader)) {
+        return {};
+    }
+    WebArgInputTLVMap input_tlv_map;
+    u64 current_offset = sizeof(WebArgHeader);
+    for (std::size_t i = 0; i < web_arg_header.total_tlv_entries; ++i) {
+        if (web_arg.size() < current_offset + sizeof(WebArgInputTLV)) {
+            return input_tlv_map;
+        }
+        WebArgInputTLV input_tlv;
+        std::memcpy(&input_tlv, web_arg.data() + current_offset, sizeof(WebArgInputTLV));
+        current_offset += sizeof(WebArgInputTLV);
+        if (web_arg.size() < current_offset + input_tlv.arg_data_size) {
+            return input_tlv_map;
+        }
+        std::vector<u8> data(input_tlv.arg_data_size);
+        std::memcpy(data.data(), web_arg.data() + current_offset, input_tlv.arg_data_size);
+        current_offset += input_tlv.arg_data_size;
+        input_tlv_map.insert_or_assign(input_tlv.input_tlv_type, std::move(data));
+    }
+    return input_tlv_map;
+}
+
 FileSys::VirtualFile GetOfflineRomFS(Core::System& system, u64 title_id,
                                      FileSys::ContentRecordType nca_type) {
     if (nca_type == FileSys::ContentRecordType::Data) {

@@ -111,43 +144,6 @@ FileSys::VirtualFile GetOfflineRomFS(Core::System& system, u64 title_id,
     }
 }

-#ifdef YUZU_USE_QT_WEB_ENGINE
-WebArgInputTLVMap ReadWebArgs(const std::vector<u8>& web_arg, WebArgHeader& web_arg_header) {
-    std::memcpy(&web_arg_header, web_arg.data(), sizeof(WebArgHeader));
-    if (web_arg.size() == sizeof(WebArgHeader)) {
-        return {};
-    }
-    WebArgInputTLVMap input_tlv_map;
-    u64 current_offset = sizeof(WebArgHeader);
-    for (std::size_t i = 0; i < web_arg_header.total_tlv_entries; ++i) {
-        if (web_arg.size() < current_offset + sizeof(WebArgInputTLV)) {
-            return input_tlv_map;
-        }
-        WebArgInputTLV input_tlv;
-        std::memcpy(&input_tlv, web_arg.data() + current_offset, sizeof(WebArgInputTLV));
-        current_offset += sizeof(WebArgInputTLV);
-        if (web_arg.size() < current_offset + input_tlv.arg_data_size) {
-            return input_tlv_map;
-        }
-        std::vector<u8> data(input_tlv.arg_data_size);
-        std::memcpy(data.data(), web_arg.data() + current_offset, input_tlv.arg_data_size);
-        current_offset += input_tlv.arg_data_size;
-        input_tlv_map.insert_or_assign(input_tlv.input_tlv_type, std::move(data));
-    }
-    return input_tlv_map;
-}
-
 void ExtractSharedFonts(Core::System& system) {
     static constexpr std::array<const char*, 7> DECRYPTED_SHARED_FONTS{
         "FontStandard.ttf",

@@ -225,7 +221,6 @@ void ExtractSharedFonts(Core::System& system) {
         FileSys::VfsRawCopy(decrypted_font, out_file);
     }
 }
-#endif

 } // namespace

@@ -237,7 +232,6 @@ WebBrowser::WebBrowser(Core::System& system_, std::shared_ptr<Applet> applet_,
 WebBrowser::~WebBrowser() = default;

 void WebBrowser::Initialize() {
-#ifdef YUZU_USE_QT_WEB_ENGINE
     FrontendApplet::Initialize();

     LOG_INFO(Service_AM, "Initializing Web Browser Applet.");

@@ -290,7 +284,6 @@ void WebBrowser::Initialize() {
         ASSERT_MSG(false, "Invalid ShimKind={}", web_arg_header.shim_kind);
         break;
     }
-#endif
 }

 Result WebBrowser::GetStatus() const {

@@ -302,7 +295,6 @@ void WebBrowser::ExecuteInteractive() {
 }

 void WebBrowser::Execute() {
-#ifdef YUZU_USE_QT_WEB_ENGINE
     switch (web_arg_header.shim_kind) {
     case ShimKind::Shop:
         ExecuteShop();

@@ -330,10 +322,6 @@ void WebBrowser::Execute() {
         WebBrowserExit(WebExitReason::EndButtonPressed);
         break;
     }
-#else
-    LOG_INFO(Service_AM, "Web Browser Applet disabled, skipping.");
-    WebBrowserExit(WebExitReason::EndButtonPressed);
-#endif
 }

 void WebBrowser::ExtractOfflineRomFS() {
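For orientation, the stream parsed by ReadWebArgs above has roughly this wire layout. The field widths are assumptions inferred from the sizeof-based offset arithmetic in the loop, not confirmed by this diff:

// Assumed layout (illustrative): a fixed header, then total_tlv_entries records,
// each a fixed-size TLV header followed by arg_data_size bytes of payload.
struct WebArgHeader {
    u16 total_tlv_entries;
    u16 padding;
    ShimKind shim_kind;  // dispatched on in WebBrowser::Execute
};
struct WebArgInputTLV {
    u16 input_tlv_type;  // key used by insert_or_assign above
    u16 arg_data_size;   // payload byte count that follows this header
    u32 padding;
};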

View file

@@ -45,10 +45,6 @@ if (NOT APPLE AND ENABLE_OPENGL)
     target_compile_definitions(qt_common PUBLIC HAS_OPENGL)
 endif()

-if (UNIX AND NOT APPLE)
-    if (TARGET Qt6::GuiPrivate)
-        target_link_libraries(qt_common PRIVATE Qt6::GuiPrivate)
-    else()
-        target_include_directories(qt_common PRIVATE ${Qt6Gui_PRIVATE_INCLUDE_DIRS})
-    endif()
+if (NOT WIN32)
+    target_include_directories(qt_common PRIVATE ${Qt6Gui_PRIVATE_INCLUDE_DIRS})
 endif()

View file

@@ -98,12 +98,46 @@ Id ImageType(EmitContext& ctx, const ImageDescriptor& desc, Id sampled_type) {
     throw InvalidArgument("Invalid texture type {}", desc.type);
 }

+bool IsFragmentStage(const EmitContext& ctx) {
+    return ctx.stage == Stage::Fragment;
+}
+
+bool IsUserVaryingInput(bool is_builtin, bool has_location) {
+    return !is_builtin && has_location;
+}
+
+bool IsIntegerOrBoolType(EmitContext& ctx, Id type) {
+    return ctx.IsIntegerOrBoolType(type);
+}
+
+bool RequiresFlatDecoration(EmitContext& ctx, Id type, spv::StorageClass storage_class,
+                            bool is_builtin, bool has_location) {
+    if (!IsFragmentStage(ctx)) {
+        return false;
+    }
+    if (storage_class != spv::StorageClass::Input) {
+        return false;
+    }
+    if (!IsUserVaryingInput(is_builtin, has_location)) {
+        return false;
+    }
+    return IsIntegerOrBoolType(ctx, type);
+}
+
 Id DefineVariable(EmitContext& ctx, Id type, std::optional<spv::BuiltIn> builtin,
-                  spv::StorageClass storage_class, std::optional<Id> initializer = std::nullopt) {
+                  spv::StorageClass storage_class, std::optional<Id> initializer = std::nullopt,
+                  bool has_location = false) {
     const Id pointer_type{ctx.TypePointer(storage_class, type)};
     const Id id{ctx.AddGlobalVariable(pointer_type, storage_class, initializer)};
-    if (builtin) {
-        ctx.Decorate(id, spv::Decoration::BuiltIn, *builtin);
+    const bool is_builtin{builtin.has_value()};
+    if (is_builtin) {
+        ctx.DecorateUnique(id, spv::Decoration::BuiltIn, static_cast<u32>(*builtin));
+    }
+    // Flat only for integer/bool user varyings in fragment input; never for BuiltIns; dedupe
+    // avoids multiple identical decorations.
+    if (RequiresFlatDecoration(ctx, type, storage_class, is_builtin, has_location)) {
+        ctx.DecorateUnique(id, spv::Decoration::Flat);
     }
     ctx.interfaces.push_back(id);
     return id;

@@ -126,7 +160,8 @@ u32 NumVertices(InputTopology input_topology) {
 }

 Id DefineInput(EmitContext& ctx, Id type, bool per_invocation,
-               std::optional<spv::BuiltIn> builtin = std::nullopt) {
+               std::optional<spv::BuiltIn> builtin = std::nullopt,
+               bool has_location = false) {
     switch (ctx.stage) {
     case Stage::TessellationControl:
     case Stage::TessellationEval:

@@ -143,7 +178,7 @@ Id DefineInput(EmitContext& ctx, Id type, bool per_invocation,
     default:
         break;
     }
-    return DefineVariable(ctx, type, builtin, spv::StorageClass::Input);
+    return DefineVariable(ctx, type, builtin, spv::StorageClass::Input, std::nullopt, has_location);
 }

 Id DefineOutput(EmitContext& ctx, Id type, std::optional<u32> invocations,

@@ -170,7 +205,7 @@ void DefineGenericOutput(EmitContext& ctx, size_t index, std::optional<u32> invo
         const u32 num_components{xfb_varying ? xfb_varying->components : remainder};

         const Id id{DefineOutput(ctx, ctx.F32[num_components], invocations)};
-        ctx.Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
+        ctx.DecorateUnique(id, spv::Decoration::Location, static_cast<u32>(index));
         if (element > 0) {
             ctx.Decorate(id, spv::Decoration::Component, element);
         }
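To make the new plumbing concrete, a sketch of how the has_location flag flows for an integer user varying. The attribute index is made up; DefineInput, DecorateUnique and RequiresFlatDecoration are the functions changed above:

// Illustrative call from inside the generic-attribute loop of DefineInputs:
const Id type{ctx.U32[1]};                                     // integer varying
const Id id{DefineInput(ctx, type, true, std::nullopt, true)}; // has_location = true
ctx.DecorateUnique(id, spv::Decoration::Location, 3u);
// In a fragment shader, DefineVariable has already emitted Flat exactly once via
// RequiresFlatDecoration, so a later DecorateUnique(id, spv::Decoration::Flat) is a no-op.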
@@ -445,6 +480,94 @@ Id DescType(EmitContext& ctx, Id sampled_type, Id pointer_type, u32 count) {
     }
 }
 } // Anonymous namespace

+Id EmitContext::TypeArray(Id element_type, Id length) {
+    const Id array_type{Sirit::Module::TypeArray(element_type, length)};
+    array_element_types[array_type] = element_type;
+    type_integer_or_bool_cache[array_type] = IsIntegerOrBoolType(element_type);
+    return array_type;
+}
+
+Id EmitContext::TypeStruct(Id member) {
+    const std::array<Id, 1> members{member};
+    return TypeStruct(std::span<const Id>(members));
+}
+
+Id EmitContext::TypeStruct(std::span<const Id> members) {
+    const Id struct_type{Sirit::Module::TypeStruct(members)};
+    struct_member_types[struct_type] = std::vector<Id>(members.begin(), members.end());
+    const bool has_integer_member{
+        std::any_of(members.begin(), members.end(),
+                    [this](Id member_type) { return IsIntegerOrBoolType(member_type); })};
+    type_integer_or_bool_cache[struct_type] = has_integer_member;
+    return struct_type;
+}
+
+Id EmitContext::TypeVector(Id element_type, u32 components) {
+    const Id vector_type{
+        Sirit::Module::TypeVector(element_type, static_cast<int>(components))};
+    type_integer_or_bool_cache[vector_type] = IsIntegerOrBoolType(element_type);
+    return vector_type;
+}
+
+bool EmitContext::HasDecoration(Id id, spv::Decoration decoration,
+                                std::optional<u32> literal) const {
+    const auto list_it{decorations.find(id)};
+    if (list_it == decorations.end()) {
+        return false;
+    }
+    const auto& records{list_it->second};
+    return std::any_of(records.begin(), records.end(), [&](const DecorationRecord& record) {
+        if (record.decoration != decoration) {
+            return false;
+        }
+        if (!literal.has_value()) {
+            return true;
+        }
+        return record.literal.has_value() && record.literal.value() == literal.value();
+    });
+}
+
+void EmitContext::DecorateUnique(Id id, spv::Decoration decoration,
+                                 std::optional<u32> literal) {
+    if (decoration == spv::Decoration::Flat || decoration == spv::Decoration::NoPerspective) {
+        // SPIR-V only allows non-default interpolation decorations on user-defined inputs.
+        ASSERT_MSG(!HasDecoration(id, spv::Decoration::BuiltIn),
+                   "Interpolation decoration applied to a BuiltIn");
+    }
+    if (HasDecoration(id, decoration, literal)) {
+        return;
+    }
+    decorations[id].emplace_back(DecorationRecord{decoration, literal});
+    if (literal.has_value()) {
+        Sirit::Module::Decorate(id, decoration, literal.value());
+    } else {
+        Sirit::Module::Decorate(id, decoration);
+    }
+}
+
+bool EmitContext::IsIntegerOrBoolType(Id type) {
+    if (const auto it = type_integer_or_bool_cache.find(type);
+        it != type_integer_or_bool_cache.end()) {
+        return it->second;
+    }
+    if (const auto array_it = array_element_types.find(type);
+        array_it != array_element_types.end()) {
+        const bool result{IsIntegerOrBoolType(array_it->second)};
+        type_integer_or_bool_cache[type] = result;
+        return result;
+    }
+    if (const auto struct_it = struct_member_types.find(type);
+        struct_it != struct_member_types.end()) {
+        const bool result{std::any_of(struct_it->second.begin(), struct_it->second.end(),
+                                      [this](Id member_type) {
+                                          return IsIntegerOrBoolType(member_type);
+                                      })};
+        type_integer_or_bool_cache[type] = result;
+        return result;
+    }
+    type_integer_or_bool_cache[type] = false;
+    return false;
+}
+
 void VectorTypes::Define(Sirit::Module& sirit_ctx, Id base_type, std::string_view name) {
     defs[0] = sirit_ctx.Name(base_type, name);
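A few hedged sanity checks showing what the caches above are expected to answer, assuming an EmitContext ctx whose common types are already defined:

// Illustrative assertions, not from the diff:
ASSERT(ctx.IsIntegerOrBoolType(ctx.U32[1]));   // scalar integer
ASSERT(ctx.IsIntegerOrBoolType(ctx.U32[4]));   // vectors inherit the element class
ASSERT(!ctx.IsIntegerOrBoolType(ctx.F32[4]));  // floats stay non-flat
const Id arr{ctx.TypeArray(ctx.U32[1], ctx.Const(4u))};
ASSERT(ctx.IsIntegerOrBoolType(arr));          // arrays recurse into the element type
const Id mixed{ctx.TypeStruct(ctx.F32[1], ctx.U32[1])};
ASSERT(ctx.IsIntegerOrBoolType(mixed));        // any integer/bool member taints the struct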
@@ -532,11 +655,24 @@ Id EmitContext::BitOffset16(const IR::Value& offset) {
 void EmitContext::DefineCommonTypes(const Info& info) {
     void_id = TypeVoid();

+    const auto mark_vector_type = [this](VectorTypes& vectors, bool is_integer_or_bool) {
+        for (size_t components = 1; components <= 4; ++components) {
+            const Id type{vectors[components]};
+            if (type.value != 0) {
+                type_integer_or_bool_cache[type] = is_integer_or_bool;
+            }
+        }
+    };
+
     U1 = Name(TypeBool(), "u1");
+    type_integer_or_bool_cache[U1] = true;

     F32.Define(*this, TypeFloat(32), "f32");
+    mark_vector_type(F32, false);
     U32.Define(*this, TypeInt(32, false), "u32");
+    mark_vector_type(U32, true);
     S32.Define(*this, TypeInt(32, true), "s32");
+    mark_vector_type(S32, true);

     private_u32 = Name(TypePointer(spv::StorageClass::Private, U32[1]), "private_u32");

@@ -551,23 +687,30 @@ void EmitContext::DefineCommonTypes(const Info& info) {
         AddCapability(spv::Capability::Int8);
         U8 = Name(TypeInt(8, false), "u8");
         S8 = Name(TypeInt(8, true), "s8");
+        type_integer_or_bool_cache[U8] = true;
+        type_integer_or_bool_cache[S8] = true;
     }
     if (info.uses_int16 && profile.support_int16) {
         AddCapability(spv::Capability::Int16);
         U16 = Name(TypeInt(16, false), "u16");
         S16 = Name(TypeInt(16, true), "s16");
+        type_integer_or_bool_cache[U16] = true;
+        type_integer_or_bool_cache[S16] = true;
     }
     if (info.uses_int64 && profile.support_int64) {
         AddCapability(spv::Capability::Int64);
         U64 = Name(TypeInt(64, false), "u64");
+        type_integer_or_bool_cache[U64] = true;
     }
     if (info.uses_fp16) {
         AddCapability(spv::Capability::Float16);
         F16.Define(*this, TypeFloat(16), "f16");
+        mark_vector_type(F16, false);
     }
     if (info.uses_fp64) {
         AddCapability(spv::Capability::Float64);
         F64.Define(*this, TypeFloat(64), "f64");
+        mark_vector_type(F64, false);
     }
 }
@@ -1072,7 +1215,7 @@ void EmitContext::DefineRescalingInputUniformConstant() {
         const Id pointer_type{TypePointer(spv::StorageClass::UniformConstant, F32[4])};
         rescaling_uniform_constant =
             AddGlobalVariable(pointer_type, spv::StorageClass::UniformConstant);
-        Decorate(rescaling_uniform_constant, spv::Decoration::Location, 0u);
+        DecorateUnique(rescaling_uniform_constant, spv::Decoration::Location, 0u);

         if (profile.supported_spirv >= 0x00010400) {
             interfaces.push_back(rescaling_uniform_constant);

@@ -1449,7 +1592,6 @@ void EmitContext::DefineInputs(const IR::Program& program) {
         AddCapability(spv::Capability::GroupNonUniform);
         subgroup_local_invocation_id =
             DefineInput(*this, U32[1], false, spv::BuiltIn::SubgroupLocalInvocationId);
-        Decorate(subgroup_local_invocation_id, spv::Decoration::Flat);
     }
     if (info.uses_fswzadd) {
         const Id f32_one{Const(1.0f)};

@@ -1465,7 +1607,6 @@ void EmitContext::DefineInputs(const IR::Program& program) {
     if (loads[IR::Attribute::Layer]) {
         AddCapability(spv::Capability::Geometry);
         layer = DefineInput(*this, U32[1], false, spv::BuiltIn::Layer);
-        Decorate(layer, spv::Decoration::Flat);
     }
     if (loads.AnyComponent(IR::Attribute::PositionX)) {
         const bool is_fragment{stage == Stage::Fragment};

@@ -1541,8 +1682,8 @@ void EmitContext::DefineInputs(const IR::Program& program) {
             continue;
         }
         const Id type{GetAttributeType(*this, input_type)};
-        const Id id{DefineInput(*this, type, true)};
-        Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
+        const Id id{DefineInput(*this, type, true, std::nullopt, true)};
+        DecorateUnique(id, spv::Decoration::Location, static_cast<u32>(index));
         Name(id, fmt::format("in_attr{}", index));
         input_generics[index] = GetAttributeInfo(*this, input_type, id);

@@ -1552,16 +1693,20 @@ void EmitContext::DefineInputs(const IR::Program& program) {
         if (stage != Stage::Fragment) {
             continue;
         }
+        if (RequiresFlatDecoration(*this, type, spv::StorageClass::Input, false, true)) {
+            ASSERT_MSG(HasDecoration(id, spv::Decoration::Flat),
+                       "Flat decoration missing on integer/bool user varying input");
+            continue;
+        }
         switch (info.interpolation[index]) {
         case Interpolation::Smooth:
-            // Default
-            // Decorate(id, spv::Decoration::Smooth);
+            // Default interpolation per SPIR-V spec; no decoration emitted.
             break;
         case Interpolation::NoPerspective:
-            Decorate(id, spv::Decoration::NoPerspective);
+            DecorateUnique(id, spv::Decoration::NoPerspective);
             break;
         case Interpolation::Flat:
-            Decorate(id, spv::Decoration::Flat);
+            DecorateUnique(id, spv::Decoration::Flat);
             break;
         }
     }

@@ -1570,9 +1715,9 @@ void EmitContext::DefineInputs(const IR::Program& program) {
         if (!info.uses_patches[index]) {
             continue;
         }
-        const Id id{DefineInput(*this, F32[4], false)};
+        const Id id{DefineInput(*this, F32[4], false, std::nullopt, true)};
         Decorate(id, spv::Decoration::Patch);
-        Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
+        DecorateUnique(id, spv::Decoration::Location, static_cast<u32>(index));
         patches[index] = id;
     }
 }

@@ -1649,7 +1794,7 @@ void EmitContext::DefineOutputs(const IR::Program& program) {
             }
             const Id id{DefineOutput(*this, F32[4], std::nullopt)};
             Decorate(id, spv::Decoration::Patch);
-            Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
+            DecorateUnique(id, spv::Decoration::Location, static_cast<u32>(index));
             patches[index] = id;
         }
         break;

@@ -1659,17 +1804,19 @@ void EmitContext::DefineOutputs(const IR::Program& program) {
                 continue;
             }
             frag_color[index] = DefineOutput(*this, F32[4], std::nullopt);
-            Decorate(frag_color[index], spv::Decoration::Location, index);
+            DecorateUnique(frag_color[index], spv::Decoration::Location, index);
             Name(frag_color[index], fmt::format("frag_color{}", index));
         }
         if (info.stores_frag_depth) {
             frag_depth = DefineOutput(*this, F32[1], std::nullopt);
-            Decorate(frag_depth, spv::Decoration::BuiltIn, spv::BuiltIn::FragDepth);
+            DecorateUnique(frag_depth, spv::Decoration::BuiltIn,
+                           static_cast<u32>(spv::BuiltIn::FragDepth));
         }
         if (info.stores_sample_mask) {
             const Id array_type{TypeArray(U32[1], Const(1U))};
             sample_mask = DefineOutput(*this, array_type, std::nullopt);
-            Decorate(sample_mask, spv::Decoration::BuiltIn, spv::BuiltIn::SampleMask);
+            DecorateUnique(sample_mask, spv::Decoration::BuiltIn,
+                           static_cast<u32>(spv::BuiltIn::SampleMask));
         }
         break;
     default:

@@ -1678,3 +1825,4 @@ void EmitContext::DefineOutputs(const IR::Program& program) {
 }

 } // namespace Shader::Backend::SPIRV

View file

@@ -4,6 +4,11 @@
 #pragma once

 #include <array>
+#include <bitset>
+#include <optional>
+#include <span>
+#include <unordered_map>
+#include <vector>

 #include <sirit/sirit.h>

@@ -19,6 +24,23 @@ static std::bitset<8> clip_distance_written;

 using Sirit::Id;

+struct DecorationRecord {
+    spv::Decoration decoration;
+    std::optional<u32> literal;
+};
+
+struct IdHash {
+    std::size_t operator()(const Id& id) const noexcept {
+        return std::hash<u32>{}(id.value);
+    }
+};
+
+struct IdEqual {
+    bool operator()(const Id& lhs, const Id& rhs) const noexcept {
+        return lhs.value == rhs.value;
+    }
+};
+
 class VectorTypes {
 public:
     void Define(Sirit::Module& sirit_ctx, Id base_type, std::string_view name);

@@ -204,6 +226,23 @@ public:
         return Constant(F32[1], value);
     }

+    Id TypeArray(Id element_type, Id length);
+    Id TypeStruct(Id member);
+    Id TypeStruct(std::span<const Id> members);
+    Id TypeVector(Id element_type, u32 components);
+
+    template <typename... Members>
+    Id TypeStruct(Id first, Members... rest) {
+        const std::array<Id, sizeof...(rest) + 1> members{first, rest...};
+        return TypeStruct(std::span<const Id>(members));
+    }
+
+    [[nodiscard]] bool HasDecoration(Id id, spv::Decoration decoration,
+                                     std::optional<u32> literal = std::nullopt) const;
+    void DecorateUnique(Id id, spv::Decoration decoration,
+                        std::optional<u32> literal = std::nullopt);
+    bool IsIntegerOrBoolType(Id type);
+
     const Profile& profile;
     const RuntimeInfo& runtime_info;
     Stage stage{};

@@ -361,6 +400,11 @@ public:
     Id load_const_func_u32x2{};
     Id load_const_func_u32x4{};

+    std::unordered_map<Id, std::vector<DecorationRecord>, IdHash, IdEqual> decorations;
+    std::unordered_map<Id, bool, IdHash, IdEqual> type_integer_or_bool_cache;
+    std::unordered_map<Id, Id, IdHash, IdEqual> array_element_types;
+    std::unordered_map<Id, std::vector<Id>, IdHash, IdEqual> struct_member_types;
+
 private:
     void DefineCommonTypes(const Info& info);
     void DefineCommonConstants();
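A small sketch of the variadic TypeStruct overload declared above; the helper name is hypothetical, the member types come from EmitContext's common types:

// Forwards to the span overload; both members are integer scalars, so the struct
// is recorded in struct_member_types and classified integer-or-bool as a whole.
Id MakeCounterPair(EmitContext& ctx) {
    return ctx.TypeStruct(ctx.U32[1], ctx.U32[1]);
}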

View file

@@ -386,7 +386,8 @@ void BufferCache<P>::BindHostComputeBuffers() {
 template <class P>
 void BufferCache<P>::SetUniformBuffersState(const std::array<u32, NUM_STAGES>& mask,
                                             const UniformBufferSizes* sizes) {
-    if (channel_state->enabled_uniform_buffer_masks != mask) {
+    const bool mask_changed = channel_state->enabled_uniform_buffer_masks != mask;
+    if (mask_changed) {
         channel_state->fast_bound_uniform_buffers.fill(0);
         if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
             channel_state->dirty_uniform_buffers.fill(~u32{0});

@@ -817,7 +818,18 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
         channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size;
         // Stream buffer path to avoid stalling on non-Nvidia drivers or Vulkan
         const std::span<u8> span = runtime.BindMappedUniformBuffer(stage, binding_index, size);
+#ifdef YUZU_DEBUG
+        ASSERT(binding_index < NUM_GRAPHICS_UNIFORM_BUFFERS);
+        ASSERT(span.size() >= size && "UBO stream span too small");
+        if (!device_memory.ReadBlockFastChecked(device_addr, span.data(), size)) {
+            LOG_CRITICAL(Render, "DeviceMemory OOB/unmapped: addr=0x{:x} size={}", device_addr, size);
+            channel_state->fast_bound_uniform_buffers[stage] &= ~(1u << binding_index);
+            ASSERT(false);
+            return;
+        }
+#else
         device_memory.ReadBlockUnsafe(device_addr, span.data(), size);
+#endif
         return;
     }
     // Classic cached path

@@ -826,7 +838,8 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
     }
     // Skip binding if it's not needed and if the bound buffer is not the fast version
     // This exists to avoid instances where the fast buffer is bound and a GPU write happens
-    needs_bind |= HasFastUniformBufferBound(stage, binding_index);
+    const bool was_fast_bound = HasFastUniformBufferBound(stage, binding_index);
+    needs_bind |= was_fast_bound;
     if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
         needs_bind |= channel_state->uniform_buffer_binding_sizes[stage][binding_index] != size;
     }

View file

@@ -53,7 +53,6 @@ constexpr u32 NUM_COMPUTE_UNIFORM_BUFFERS = 8;
 constexpr u32 NUM_STORAGE_BUFFERS = 16;
 constexpr u32 NUM_TEXTURE_BUFFERS = 32;
 constexpr u32 NUM_STAGES = 5;
-
 static_assert(NUM_GRAPHICS_UNIFORM_BUFFERS <= 32, "fast bitmask must fit u32");

 using UniformBufferSizes = std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>;
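Why the static_assert matters: fast-bound uniform buffers are tracked as one u32 bitmask per stage, one bit per graphics UBO slot, so the slot count must fit in 32 bits. A minimal sketch of the bit operations the cache relies on (names are illustrative):

u32 fast_mask = 0;
const u32 binding_index = 5;
fast_mask |= 1u << binding_index;    // mark slot 5 as fast-bound
fast_mask &= ~(1u << binding_index); // clear it again, as the checked-read failure path above does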

View file

@@ -337,11 +337,6 @@ BufferCacheRuntime::BufferCacheRuntime(const Device& device_, MemoryAllocator& m
         uint8_pass = std::make_unique<Uint8Pass>(device, scheduler, descriptor_pool, staging_pool,
                                                  compute_pass_descriptor_queue);
     }
-    const u32 ubo_align = static_cast<u32>(
-        device.GetUniformBufferAlignment() //check if the device has it
-    );
-    // add the ability to change the size in settings in future
-    uniform_ring.Init(device, memory_allocator, 8 * 1024 * 1024 /* 8 MiB */, ubo_align ? ubo_align : 256);
     quad_array_index_buffer = std::make_shared<QuadArrayIndexBuffer>(device_, memory_allocator_,
                                                                      scheduler_, staging_pool_);
     quad_strip_index_buffer = std::make_shared<QuadStripIndexBuffer>(device_, memory_allocator_,

@@ -360,42 +355,6 @@ void BufferCacheRuntime::FreeDeferredStagingBuffer(StagingBufferRef& ref) {
     staging_pool.FreeDeferred(ref);
 }

-void BufferCacheRuntime::UniformRing::Init(const Device& device,
-                                           MemoryAllocator& alloc,
-                                           u64 bytes, u32 alignment) {
-    for (size_t i = 0; i < NUM_FRAMES; ++i) {
-        VkBufferCreateInfo ci{
-            .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
-            .pNext = nullptr,
-            .flags = 0,
-            .size = bytes,
-            .usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
-            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
-            .queueFamilyIndexCount = 0,
-            .pQueueFamilyIndices = nullptr,
-        };
-        buffers[i] = alloc.CreateBuffer(ci, MemoryUsage::Upload);
-        mapped[i] = buffers[i].Mapped().data();
-    }
-    size = bytes;
-    align = alignment ? alignment : 256;
-    head = 0;
-    current_frame = 0;
-}
-
-std::span<u8> BufferCacheRuntime::UniformRing::Alloc(u32 bytes, u32& out_offset) {
-    const u64 aligned = Common::AlignUp(head, static_cast<u64>(align));
-    u64 end = aligned + bytes;
-    if (end > size) {
-        return {}; // Fallback to staging pool
-    }
-    out_offset = static_cast<u32>(aligned);
-    head = end;
-    return {mapped[current_frame] + out_offset, bytes};
-}
-
 u64 BufferCacheRuntime::GetDeviceLocalMemory() const {
     return device.GetDeviceLocalMemory();
 }

@@ -416,7 +375,6 @@ void BufferCacheRuntime::TickFrame(Common::SlotVector<Buffer>& slot_buffers) noe
     for (auto it = slot_buffers.begin(); it != slot_buffers.end(); it++) {
         it->ResetUsageTracking();
     }
-    uniform_ring.BeginFrame();
 }

 void BufferCacheRuntime::Finish() {
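For reference, the arithmetic of the removed UniformRing::Alloc bump allocator, with illustrative numbers:

// head = 300, align = 256 -> aligned = AlignUp(300, 256) = 512
// bytes = 128             -> end = 512 + 128 = 640 <= size, so out_offset = 512, head = 640
// A request whose end would exceed 'size' returned an empty span, and
// BindMappedUniformBuffer fell back to the staging pool (see the header below).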

View file

@@ -1,6 +1,3 @@
-// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
-// SPDX-License-Identifier: GPL-3.0-or-later
-
 // SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

@@ -127,15 +124,8 @@ public:
     void BindTransformFeedbackBuffers(VideoCommon::HostBindings<Buffer>& bindings);

-    std::span<u8> BindMappedUniformBuffer([[maybe_unused]] size_t /*stage*/,
-                                          [[maybe_unused]] u32 /*binding_index*/,
-                                          u32 size) {
-        u32 offset = 0;
-        if (auto span = uniform_ring.Alloc(size, offset); !span.empty()) {
-            BindBuffer(*uniform_ring.buffers[uniform_ring.current_frame], offset, size);
-            return span;
-        }
-        // Fallback for giant requests
+    std::span<u8> BindMappedUniformBuffer([[maybe_unused]] size_t stage,
+                                          [[maybe_unused]] u32 binding_index, u32 size) {
         const StagingBufferRef ref = staging_pool.Request(size, MemoryUsage::Upload);
         BindBuffer(ref.buffer, static_cast<u32>(ref.offset), size);
         return ref.mapped_span;

@@ -163,24 +153,6 @@ private:
     void ReserveNullBuffer();
     vk::Buffer CreateNullBuffer();

-    struct UniformRing {
-        static constexpr size_t NUM_FRAMES = 3;
-        std::array<vk::Buffer, NUM_FRAMES> buffers{};
-        std::array<u8*, NUM_FRAMES> mapped{};
-        u64 size = 0;
-        u64 head = 0;
-        u32 align = 256;
-        size_t current_frame = 0;
-
-        void Init(const Device& device, MemoryAllocator& alloc, u64 bytes, u32 alignment);
-
-        void BeginFrame() {
-            current_frame = (current_frame + 1) % NUM_FRAMES;
-            head = 0;
-        }
-
-        std::span<u8> Alloc(u32 bytes, u32& out_offset);
-    };
-    UniformRing uniform_ring;
-
     const Device& device;
     MemoryAllocator& memory_allocator;
     Scheduler& scheduler;

View file

@@ -25,35 +25,48 @@ namespace {
 using namespace Common::Literals;

-// Maximum potential alignment of a Vulkan buffer
-constexpr VkDeviceSize MAX_ALIGNMENT = 256;
+// Minimum alignment we want to enforce for the streaming ring
+constexpr VkDeviceSize MIN_STREAM_ALIGNMENT = 256;
 // Stream buffer size in bytes
 constexpr VkDeviceSize MAX_STREAM_BUFFER_SIZE = 128_MiB;

-size_t GetStreamBufferSize(const Device& device) {
+size_t GetStreamBufferSize(const Device& device, VkDeviceSize alignment) {
     VkDeviceSize size{0};
     if (device.HasDebuggingToolAttached()) {
-        ForEachDeviceLocalHostVisibleHeap(device, [&size](size_t index, VkMemoryHeap& heap) {
+        bool found_heap = false;
+        ForEachDeviceLocalHostVisibleHeap(device, [&size, &found_heap](size_t /*index*/, VkMemoryHeap& heap) {
             size = (std::max)(size, heap.size);
+            found_heap = true;
         });
-        // If rebar is not supported, cut the max heap size to 40%. This will allow 2 captures to be
-        // loaded at the same time in RenderDoc. If rebar is supported, this shouldn't be an issue
-        // as the heap will be much larger.
-        if (size <= 256_MiB) {
+        // If no suitable heap was found fall back to the default cap to avoid creating a
+        // zero-sized stream buffer.
+        if (!found_heap) {
+            size = MAX_STREAM_BUFFER_SIZE;
+        } else if (size <= 256_MiB) {
+            // If rebar is not supported, cut the max heap size to 40%. This will allow 2 captures
+            // to be loaded at the same time in RenderDoc. If rebar is supported, this shouldn't be
+            // an issue as the heap will be much larger.
             size = size * 40 / 100;
         }
     } else {
         size = MAX_STREAM_BUFFER_SIZE;
     }
-    return (std::min)(Common::AlignUp(size, MAX_ALIGNMENT), MAX_STREAM_BUFFER_SIZE);
+
+    // Clamp to the configured maximum, align up for safety, and ensure a sane minimum so
+    // region_size (stream_buffer_size / NUM_SYNCS) never becomes zero.
+    const VkDeviceSize aligned =
+        (std::min)(Common::AlignUp(size, alignment), MAX_STREAM_BUFFER_SIZE);
+    const VkDeviceSize min_size = alignment * StagingBufferPool::NUM_SYNCS;
+    return static_cast<size_t>((std::max)(aligned, min_size));
 }
 } // Anonymous namespace

 StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_,
                                      Scheduler& scheduler_)
     : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
-      stream_buffer_size{GetStreamBufferSize(device)}, region_size{stream_buffer_size /
-                                                                   StagingBufferPool::NUM_SYNCS} {
+      stream_alignment{std::max<VkDeviceSize>(device_.GetUniformBufferAlignment(),
+                                              MIN_STREAM_ALIGNMENT)},
+      stream_buffer_size{GetStreamBufferSize(device_, stream_alignment)},
+      region_size{stream_buffer_size / StagingBufferPool::NUM_SYNCS} {
     VkBufferCreateInfo stream_ci = {
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,

@@ -106,31 +119,54 @@ void StagingBufferPool::TickFrame() {
 }

 StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
-    if (AreRegionsActive(Region(free_iterator) + 1,
-                         (std::min)(Region(iterator + size) + 1, NUM_SYNCS))) {
+    const size_t alignment = static_cast<size_t>(stream_alignment);
+    const size_t aligned_size = Common::AlignUp(size, alignment);
+    const bool wraps = iterator + size >= stream_buffer_size;
+    const size_t new_iterator =
+        wraps ? aligned_size : Common::AlignUp(iterator + size, alignment);
+    const size_t begin_region = wraps ? 0 : Region(iterator);
+    const size_t last_byte = new_iterator == 0 ? 0 : new_iterator - 1;
+    const size_t end_region = (std::min)(Region(last_byte) + 1, NUM_SYNCS);
+    const size_t guard_begin = (std::min)(Region(free_iterator) + 1, NUM_SYNCS);
+
+    if (!wraps) {
+        if (guard_begin < end_region && AreRegionsActive(guard_begin, end_region)) {
+            // Avoid waiting for the previous usages to be free
+            return GetStagingBuffer(size, MemoryUsage::Upload);
+        }
+    } else if (guard_begin < NUM_SYNCS && AreRegionsActive(guard_begin, NUM_SYNCS)) {
         // Avoid waiting for the previous usages to be free
         return GetStagingBuffer(size, MemoryUsage::Upload);
     }
+
     const u64 current_tick = scheduler.CurrentTick();
     std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + Region(iterator),
               current_tick);
     used_iterator = iterator;
-    free_iterator = (std::max)(free_iterator, iterator + size);

-    if (iterator + size >= stream_buffer_size) {
+    if (wraps) {
         std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + NUM_SYNCS,
                   current_tick);
         used_iterator = 0;
         iterator = 0;
-        free_iterator = size;
+        free_iterator = aligned_size;
+        const size_t head_last_byte = aligned_size == 0 ? 0 : aligned_size - 1;
+        const size_t head_end_region = (std::min)(Region(head_last_byte) + 1, NUM_SYNCS);
-        if (AreRegionsActive(0, Region(size) + 1)) {
+        if (AreRegionsActive(0, head_end_region)) {
             // Avoid waiting for the previous usages to be free
             return GetStagingBuffer(size, MemoryUsage::Upload);
         }
     }
-    const size_t offset = iterator;
-    iterator = Common::AlignUp(iterator + size, MAX_ALIGNMENT);
+
+    std::fill(sync_ticks.begin() + begin_region, sync_ticks.begin() + end_region, current_tick);
+
+    const size_t offset = wraps ? 0 : iterator;
+    iterator = new_iterator;
+    if (!wraps) {
+        free_iterator = (std::max)(free_iterator, offset + aligned_size);
+    }
+
     return StagingBufferRef{
         .buffer = *stream_buffer,
         .offset = static_cast<VkDeviceSize>(offset),
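A worked pass through the rewritten region bookkeeping (all numbers illustrative; NUM_SYNCS and region_size are pool constants):

// Assume stream_buffer_size = 128 MiB, NUM_SYNCS = 16 (region_size = 8 MiB), alignment = 256.
// State: iterator = free_iterator = 10 MiB. Request: size = 3 MiB.
//   aligned_size = AlignUp(3 MiB, 256)          = 3 MiB
//   wraps        = (10 MiB + 3 MiB >= 128 MiB)  = false
//   new_iterator = AlignUp(13 MiB, 256)         = 13 MiB
//   begin_region = Region(10 MiB) = 1,  end_region = Region(13 MiB - 1) + 1 = 2
//   guard_begin  = Region(10 MiB) + 1 = 2
// guard_begin (2) is not < end_region (2), so no in-flight region is touched and the
// request is served at offset 10 MiB instead of falling back to the staging pool.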

View file

@@ -1,3 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
 // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later

@@ -102,6 +105,7 @@ private:
     MemoryAllocator& memory_allocator;
     Scheduler& scheduler;

+    VkDeviceSize stream_alignment;
     vk::Buffer stream_buffer;
     std::span<u8> stream_pointer;
     VkDeviceSize stream_buffer_size;

View file

@@ -393,8 +393,16 @@ target_link_libraries(yuzu PRIVATE common core input_common frontend_common netw
 target_link_libraries(yuzu PRIVATE Boost::headers glad Qt6::Widgets)
 target_link_libraries(yuzu PRIVATE ${PLATFORM_LIBRARIES} Threads::Threads)

+if (NOT WIN32)
+    target_include_directories(yuzu PRIVATE ${Qt6Gui_PRIVATE_INCLUDE_DIRS})
+endif()
+
 if (UNIX AND NOT APPLE)
     target_link_libraries(yuzu PRIVATE Qt6::DBus)
+    if (TARGET Qt6::GuiPrivate)
+        target_link_libraries(yuzu PRIVATE Qt6::GuiPrivate)
+    endif()
 endif()

 target_compile_definitions(yuzu PRIVATE

View file

@@ -83,7 +83,8 @@ void ConfigureDebug::SetConfiguration() {
 #ifdef YUZU_USE_QT_WEB_ENGINE
     ui->disable_web_applet->setChecked(UISettings::values.disable_web_applet.GetValue());
 #else
-    ui->disable_web_applet->setVisible(false);
+    ui->disable_web_applet->setEnabled(false);
+    ui->disable_web_applet->setText(tr("Web applet not compiled"));
 #endif
 }