Compare commits


1 commit

Author SHA1 Message Date
6e3a943e74
[core] use memcpy instead of hand rolling aligned cases
Hand-rolling memcpy like this is always frowned upon because the compiler has more insight into what's going on (plus the code resolves to a worse version of itself in assembly). This removes some branches that are just straight-up redundant. It may save time, especially on systems without fastmem enabled.

Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-10-01 08:04:34 +00:00
7 changed files with 44 additions and 175 deletions
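The gist of the commit, as a minimal standalone sketch (illustrative only, not Eden's actual helpers): the hand-rolled fast path branches on alignment before dereferencing through a cast, while a fixed-size `std::memcpy` carries the same information to the compiler without the branch.

```cpp
#include <cstdint>
#include <cstring>

// Hand-rolled pattern (what the commit removes): branch on alignment,
// then dereference through a cast on the aligned path.
std::uint32_t load32_hand_rolled(const std::uint8_t* p) {
    if ((reinterpret_cast<std::uintptr_t>(p) & 3) == 0) {
        return *reinterpret_cast<const std::uint32_t*>(p);
    }
    std::uint32_t v;
    std::memcpy(&v, p, sizeof(v));
    return v;
}

// memcpy pattern (what the commit adopts): sizeof(v) is a compile-time
// constant, so compilers lower this to a single load on x86-64 and AArch64;
// the alignment branch above is redundant work.
std::uint32_t load32_memcpy(const std::uint8_t* p) {
    std::uint32_t v;
    std::memcpy(&v, p, sizeof(v));
    return v;
}
```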

View file

@@ -224,7 +224,7 @@ set(YUZU_TZDB_PATH "" CACHE STRING "Path to a pre-downloaded timezone database")
cmake_dependent_option(YUZU_USE_FASTER_LD "Check if a faster linker is available" ON "LINUX" OFF)
cmake_dependent_option(YUZU_USE_BUNDLED_MOLTENVK "Download bundled MoltenVK lib" ON "APPLE" OFF)
cmake_dependent_option(YUZU_APPLE_USE_BUNDLED_MONTENVK "Download bundled MoltenVK lib" ON "APPLE" OFF)
option(YUZU_DISABLE_LLVM "Disable LLVM (useful for CI)" OFF)

View file

@@ -31,7 +31,7 @@ Notes:
* Currently, build fails without this
- `YUZU_USE_FASTER_LD` (ON) Check if a faster linker is available
* Only available on UNIX
- `YUZU_USE_BUNDLED_MOLTENVK` (ON, macOS only) Download bundled MoltenVK lib)
- `YUZU_APPLE_USE_BUNDLED_MONTENVK` (ON, macOS only) Download bundled MoltenVK lib)
- `YUZU_TZDB_PATH` (string) Path to a pre-downloaded timezone database (useful for nixOS)
- `ENABLE_OPENSSL` (ON for Linux and *BSD) Enable OpenSSL backend for the ssl service
* Always enabled if the web service is enabled

View file

@@ -18,7 +18,7 @@
#define TITLE_BAR_FORMAT_RUNNING "@TITLE_BAR_FORMAT_RUNNING@"
#define IS_DEV_BUILD @IS_DEV_BUILD@
#define COMPILER_ID "@CXX_COMPILER@"
#define BUILD_AUTO_UPDATE_WEBSITE "@BUILD_AUTO_UPDATE_WEBSITE@"
#define BUILD_AUTO_UPDATE_WEBISTE "@BUILD_AUTO_UPDATE_WEBISTE@"
#define BUILD_AUTO_UPDATE_API "@BUILD_AUTO_UPDATE_API@"
#define BUILD_AUTO_UPDATE_REPO "@BUILD_AUTO_UPDATE_REPO@"
@@ -37,7 +37,7 @@ constexpr const char g_title_bar_format_running[] = TITLE_BAR_FORMAT_RUNNING;
constexpr const char g_compiler_id[] = COMPILER_ID;
constexpr const bool g_is_dev_build = IS_DEV_BUILD;
constexpr const char g_build_auto_update_website[] = BUILD_AUTO_UPDATE_WEBSITE;
constexpr const char g_build_auto_update_website[] = BUILD_AUTO_UPDATE_WEBISTE;
constexpr const char g_build_auto_update_api[] = BUILD_AUTO_UPDATE_API;
constexpr const char g_build_auto_update_repo[] = BUILD_AUTO_UPDATE_REPO;

View file

@@ -10,6 +10,7 @@
#include <mutex>
#include <span>
#include <thread>
#include <type_traits>
#include <vector>
#include "common/assert.h"
@@ -681,22 +682,17 @@ struct Memory::Impl {
}
}
[[nodiscard]] u8* GetPointerImpl(u64 vaddr, auto on_unmapped, auto on_rasterizer) const {
template<typename F, typename G>
[[nodiscard]] u8* GetPointerImpl(u64 vaddr, F&& on_unmapped, G&& on_rasterizer) const {
// AARCH64 masks the upper 16 bit of all memory accesses
vaddr = vaddr & 0xffffffffffffULL;
if (!AddressSpaceContains(*current_page_table, vaddr, 1)) [[unlikely]] {
on_unmapped();
return nullptr;
} else {
vaddr &= 0xffffffffffffULL;
if (AddressSpaceContains(*current_page_table, vaddr, 1)) [[likely]] {
// Avoid adding any extra logic to this fast-path block
const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) [[likely]] {
return reinterpret_cast<u8*>(pointer + vaddr);
} else {
switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
case Common::PageType::Unmapped:
on_unmapped();
return nullptr;
case Common::PageType::Memory:
ASSERT_MSG(false, "Mapped memory page without a pointer @ 0x{:016X}", vaddr);
return nullptr;
@@ -707,11 +703,18 @@ struct Memory::Impl {
on_rasterizer();
return host_ptr;
}
case Common::PageType::Unmapped: [[unlikely]] {
on_unmapped();
return nullptr;
}
default:
UNREACHABLE();
}
return nullptr;
}
} else {
on_unmapped();
return nullptr;
}
}
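The signature change in the hunk above is mechanical: the `auto`-typed by-value callables become named template parameters taken by forwarding reference, so the lambdas are never copied. A minimal illustration of the two spellings, using hypothetical dispatch helpers rather than Eden's page-table logic:

```cpp
#include <utility>

// Abbreviated C++20 form (the old signature): each `auto` parameter is an
// invented template parameter, and the callables are taken by value.
int dispatch_abbreviated(auto on_hit, auto on_miss, bool hit) {
    return hit ? on_hit() : on_miss();
}

// Explicit form (the new signature): named template parameters taken by
// forwarding reference, so the callables can be forwarded without a copy.
template <typename F, typename G>
int dispatch_explicit(F&& on_hit, G&& on_miss, bool hit) {
    return hit ? std::forward<F>(on_hit)() : std::forward<G>(on_miss)();
}

// Usage: dispatch_explicit([] { return 1; }, [] { return 0; }, true);
```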
@@ -729,172 +732,38 @@ struct Memory::Impl {
GetInteger(vaddr), []() {}, []() {});
}
/**
* Reads a particular data type out of memory at the given virtual address.
*
* @param vaddr The virtual address to read the data type from.
*
* @tparam T The data type to read out of memory. This type *must* be
* trivially copyable, otherwise the behavior of this function
* is undefined.
*
* @returns The instance of T read from the specified virtual address.
*/
/// @brief Reads a particular data type out of memory at the given virtual address.
/// @param vaddr The virtual address to read the data type from.
/// @tparam T The data type to read out of memory.
/// @returns The instance of T read from the specified virtual address.
template <typename T>
T Read(Common::ProcessAddress vaddr) {
// Fast path for aligned reads of common sizes
inline T Read(Common::ProcessAddress vaddr) requires(std::is_trivially_copyable_v<T>) noexcept {
const u64 addr = GetInteger(vaddr);
if constexpr (std::is_same_v<T, u8> || std::is_same_v<T, s8>) {
// 8-bit reads are always aligned
const u8* const ptr = GetPointerImpl(
addr,
[addr]() {
LOG_ERROR(HW_Memory, "Unmapped Read8 @ 0x{:016X}", addr);
},
[&]() { HandleRasterizerDownload(addr, sizeof(T)); });
if (ptr) {
return static_cast<T>(*ptr);
}
return 0;
} else if constexpr (std::is_same_v<T, u16_le> || std::is_same_v<T, s16_le>) {
// Check alignment for 16-bit reads
if ((addr & 1) == 0) {
const u8* const ptr = GetPointerImpl(
addr,
[addr]() {
LOG_ERROR(HW_Memory, "Unmapped Read16 @ 0x{:016X}", addr);
},
[&]() { HandleRasterizerDownload(addr, sizeof(T)); });
if (ptr) {
return static_cast<T>(*reinterpret_cast<const u16*>(ptr));
}
}
} else if constexpr (std::is_same_v<T, u32_le> || std::is_same_v<T, s32_le>) {
// Check alignment for 32-bit reads
if ((addr & 3) == 0) {
const u8* const ptr = GetPointerImpl(
addr,
[addr]() {
LOG_ERROR(HW_Memory, "Unmapped Read32 @ 0x{:016X}", addr);
},
[&]() { HandleRasterizerDownload(addr, sizeof(T)); });
if (ptr) {
return static_cast<T>(*reinterpret_cast<const u32*>(ptr));
}
}
} else if constexpr (std::is_same_v<T, u64_le> || std::is_same_v<T, s64_le>) {
// Check alignment for 64-bit reads
if ((addr & 7) == 0) {
const u8* const ptr = GetPointerImpl(
addr,
[addr]() {
LOG_ERROR(HW_Memory, "Unmapped Read64 @ 0x{:016X}", addr);
},
[&]() { HandleRasterizerDownload(addr, sizeof(T)); });
if (ptr) {
return static_cast<T>(*reinterpret_cast<const u64*>(ptr));
}
}
}
// Fall back to the general case for other types or unaligned access
T result = 0;
const u8* const ptr = GetPointerImpl(
addr,
[addr]() {
LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, addr);
},
[&]() { HandleRasterizerDownload(addr, sizeof(T)); });
if (ptr) {
if (auto const ptr = GetPointerImpl(addr, [addr]() {
LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, addr);
}, [&]() {
HandleRasterizerDownload(addr, sizeof(T));
}); ptr) [[likely]] {
// It may be tempting to rewrite this particular section to use "reinterpret_cast";
// afterall, it's trivially copyable so surely it can be copied ov- Alignment.
// Remember, alignment. memcpy() will deal with all the alignment extremely fast.
T result{};
std::memcpy(&result, ptr, sizeof(T));
return result;
}
return result;
return T{};
}
/**
* Writes a particular data type to memory at the given virtual address.
*
* @param vaddr The virtual address to write the data type to.
*
* @tparam T The data type to write to memory. This type *must* be
* trivially copyable, otherwise the behavior of this function
* is undefined.
*/
/// @brief Writes a particular data type to memory at the given virtual address.
/// @param vaddr The virtual address to write the data type to.
/// @tparam T The data type to write to memory.
template <typename T>
void Write(Common::ProcessAddress vaddr, const T data) {
// Fast path for aligned writes of common sizes
inline void Write(Common::ProcessAddress vaddr, const T data) requires(std::is_trivially_copyable_v<T>) noexcept {
const u64 addr = GetInteger(vaddr);
if constexpr (std::is_same_v<T, u8> || std::is_same_v<T, s8>) {
// 8-bit writes are always aligned
u8* const ptr = GetPointerImpl(
addr,
[addr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write8 @ 0x{:016X} = 0x{:02X}", addr,
static_cast<u8>(data));
},
[&]() { HandleRasterizerWrite(addr, sizeof(T)); });
if (ptr) {
*ptr = static_cast<u8>(data);
}
return;
} else if constexpr (std::is_same_v<T, u16_le> || std::is_same_v<T, s16_le>) {
// Check alignment for 16-bit writes
if ((addr & 1) == 0) {
u8* const ptr = GetPointerImpl(
addr,
[addr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write16 @ 0x{:016X} = 0x{:04X}", addr,
static_cast<u16>(data));
},
[&]() { HandleRasterizerWrite(addr, sizeof(T)); });
if (ptr) {
*reinterpret_cast<u16*>(ptr) = static_cast<u16>(data);
return;
}
}
} else if constexpr (std::is_same_v<T, u32_le> || std::is_same_v<T, s32_le>) {
// Check alignment for 32-bit writes
if ((addr & 3) == 0) {
u8* const ptr = GetPointerImpl(
addr,
[addr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write32 @ 0x{:016X} = 0x{:08X}", addr,
static_cast<u32>(data));
},
[&]() { HandleRasterizerWrite(addr, sizeof(T)); });
if (ptr) {
*reinterpret_cast<u32*>(ptr) = static_cast<u32>(data);
return;
}
}
} else if constexpr (std::is_same_v<T, u64_le> || std::is_same_v<T, s64_le>) {
// Check alignment for 64-bit writes
if ((addr & 7) == 0) {
u8* const ptr = GetPointerImpl(
addr,
[addr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write64 @ 0x{:016X} = 0x{:016X}", addr,
static_cast<u64>(data));
},
[&]() { HandleRasterizerWrite(addr, sizeof(T)); });
if (ptr) {
*reinterpret_cast<u64*>(ptr) = static_cast<u64>(data);
return;
}
}
}
// Fall back to the general case for other types or unaligned access
u8* const ptr = GetPointerImpl(
addr,
[addr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8,
addr, static_cast<u64>(data));
},
[&]() { HandleRasterizerWrite(addr, sizeof(T)); });
if (ptr) {
if (auto const ptr = GetPointerImpl(addr, [addr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8, addr, u64(data));
}, [&]() { HandleRasterizerWrite(addr, sizeof(T)); }); ptr) [[likely]]
std::memcpy(ptr, &data, sizeof(T));
}
}
template <typename T>

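The in-diff comment ("Remember, alignment") is the crux of the Read/Write rewrite: dereferencing a cast pointer that is not suitably aligned for `T` is undefined behavior, and the cast itself invites strict-aliasing violations, whereas `std::memcpy` into a local is always valid for trivially copyable types and optimizes to the same single load when alignment holds. A hedged sketch of the safe patterns (names are illustrative, not from the commit):

```cpp
#include <bit>
#include <cstdint>
#include <cstring>

// memcpy into a local is well-defined for any trivially copyable T, even
// when `buffer` is misaligned; with a constant size it still compiles to
// one load on targets that allow unaligned access.
std::uint64_t read_u64(const unsigned char* buffer) {
    std::uint64_t value;
    std::memcpy(&value, buffer, sizeof(value));
    return value;
}

// For pure value reinterpretation, C++20 std::bit_cast says the same thing
// without the UB that *reinterpret_cast<float*>(&bits) would invite.
float bits_to_float(std::uint32_t bits) {
    return std::bit_cast<float>(bits);
}
```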
View file

@@ -366,7 +366,7 @@ if (APPLE)
set_target_properties(yuzu PROPERTIES MACOSX_BUNDLE TRUE)
set_target_properties(yuzu PROPERTIES MACOSX_BUNDLE_INFO_PLIST ${CMAKE_CURRENT_SOURCE_DIR}/Info.plist)
if (YUZU_USE_BUNDLED_MOLTENVK)
if (YUZU_APPLE_USE_BUNDLED_MONTENVK)
set(MOLTENVK_PLATFORM "macOS")
set(MOLTENVK_VERSION "v1.3.0")
download_moltenvk(${MOLTENVK_PLATFORM} ${MOLTENVK_VERSION})

View file

@@ -4204,8 +4204,8 @@ void GMainWindow::OnEmulatorUpdateAvailable() {
}
#endif
void GMainWindow::UpdateWindowTitle(std::string_view title_name, std::string_view title_version, std::string_view gpu_vendor) {
static const std::string build_id = std::string{Common::g_build_id};
void GMainWindow::UpdateWindowTitle(std::string_view title_name, std::string_view title_version,
std::string_view gpu_vendor) {
static const std::string yuzu_title = fmt::format("{} | {} | {}",
std::string{Common::g_build_name},
std::string{Common::g_build_version},

View file

@@ -58,7 +58,7 @@ std::optional<std::string> UpdateChecker::GetResponse(std::string url, std::string
std::optional<std::string> UpdateChecker::GetLatestRelease(bool include_prereleases)
{
const auto update_check_url = std::string{Common::g_build_auto_update_api};
constexpr auto update_check_url = std::string{Common::g_build_auto_update_api};
std::string update_check_path = fmt::format("/repos/{}", std::string{Common::g_build_auto_update_repo});
try {
if (include_prereleases) { // This can return either a prerelease or a stable release,