Compare commits
20 commits compared: 51a770dd19 ... 7208dcaba1

Commits: 7208dcaba1, 1e1419a4bd, 42db873d09, 622ecba2b7, 5adb432c24, bd2431b875, 624b322f18, e150aa2106, 34294f2cce, cefd23bacc, bd1d7738ee, 631c7272c5, e28b0d2590, 6fcfe7f4f3, e60fd4b68b, 10c76568b8, 8dba6a2cb4, 4b5a8e0621, 39e27bc954, 21c77bdcac

30 changed files with 684 additions and 560 deletions
@@ -1,6 +1,6 @@
AppRun
eden.desktop
org.eden_emu.eden.desktop
dev.eden_emu.eden.desktop
shared/bin/eden
shared/lib/lib.path
shared/lib/ld-linux-x86-64.so.2
@@ -59,15 +59,15 @@ VERSION="$(echo "$EDEN_TAG")"
mkdir -p ./AppDir
cd ./AppDir

cp ../dist/org.eden_emu.eden.desktop .
cp ../dist/org.eden_emu.eden.svg .
cp ../dist/dev.eden_emu.eden.desktop .
cp ../dist/dev.eden_emu.eden.svg .

ln -sf ./org.eden_emu.eden.svg ./.DirIcon
ln -sf ./dev.eden_emu.eden.svg ./.DirIcon

UPINFO='gh-releases-zsync|eden-emulator|Releases|latest|*.AppImage.zsync'

if [ "$DEVEL" = 'true' ]; then
sed -i 's|Name=Eden|Name=Eden Nightly|' ./org.eden_emu.eden.desktop
sed -i 's|Name=Eden|Name=Eden Nightly|' ./dev.eden_emu.eden.desktop
UPINFO="$(echo "$UPINFO" | sed 's|Releases|nightly|')"
fi
@@ -6,7 +6,7 @@
which png2icns || [ which yay && yay libicns ] || exit
which magick || exit

export EDEN_SVG_ICO="dist/org.eden_emu.eden.svg"
export EDEN_SVG_ICO="dist/dev.eden_emu.eden.svg"
svgo --multipass $EDEN_SVG_ICO

magick -density 256x256 -background transparent $EDEN_SVG_ICO \
@@ -858,14 +858,14 @@ endif()
# https://specifications.freedesktop.org/shared-mime-info-spec/shared-mime-info-spec-latest.html
# https://www.freedesktop.org/software/appstream/docs/
if(ENABLE_QT AND UNIX AND NOT APPLE)
install(FILES "dist/org.eden_emu.eden.desktop"
install(FILES "dist/dev.eden_emu.eden.desktop"
DESTINATION "share/applications")
install(FILES "dist/org.eden_emu.eden.svg"
install(FILES "dist/dev.eden_emu.eden.svg"
DESTINATION "share/icons/hicolor/scalable/apps")

# TODO: these files need to be updated.
install(FILES "dist/org.eden_emu.eden.xml"
install(FILES "dist/dev.eden_emu.eden.xml"
DESTINATION "share/mime/packages")
install(FILES "dist/org.eden_emu.eden.metainfo.xml"
install(FILES "dist/dev.eden_emu.eden.metainfo.xml"
DESTINATION "share/metainfo")
endif()
@@ -10,7 +10,7 @@ Type=Application
Name=Eden
GenericName=Switch Emulator
Comment=Nintendo Switch video game console emulator
Icon=org.eden_emu.eden
Icon=dev.eden_emu.eden
TryExec=eden
Exec=eden %f
Categories=Game;Emulator;Qt;
(image) Before: 9.2 KiB -> After: 9.2 KiB
externals/CMakeLists.txt (vendored): 4 changes
@@ -147,6 +147,10 @@ add_subdirectory(nx_tzdb)
# VMA
AddJsonPackage(vulkan-memory-allocator)

if (VulkanMemoryAllocator_ADDED AND MSVC)
target_compile_options(VulkanMemoryAllocator INTERFACE /wd4189)
endif()

if (NOT TARGET LLVM::Demangle)
add_library(demangle demangle/ItaniumDemangle.cpp)
target_include_directories(demangle PUBLIC ./demangle)
externals/ffmpeg/CMakeLists.txt (vendored): 28 changes
@@ -63,20 +63,22 @@ if (NOT WIN32 AND NOT ANDROID)
set(FFmpeg_HWACCEL_INCLUDE_DIRS)
set(FFmpeg_HWACCEL_LDFLAGS)

# In Solaris needs explicit linking for ffmpeg which links to /lib/amd64/libX11.so
if(PLATFORM_SUN)
list(APPEND FFmpeg_HWACCEL_LIBRARIES
X11
"/usr/lib/xorg/amd64/libdrm.so")
else()
pkg_check_modules(LIBDRM libdrm REQUIRED)
list(APPEND FFmpeg_HWACCEL_LIBRARIES
${LIBDRM_LIBRARIES})
list(APPEND FFmpeg_HWACCEL_INCLUDE_DIRS
${LIBDRM_INCLUDE_DIRS})
if (NOT APPLE)
# In Solaris needs explicit linking for ffmpeg which links to /lib/amd64/libX11.so
if(PLATFORM_SUN)
list(APPEND FFmpeg_HWACCEL_LIBRARIES
X11
"/usr/lib/xorg/amd64/libdrm.so")
else()
pkg_check_modules(LIBDRM libdrm REQUIRED)
list(APPEND FFmpeg_HWACCEL_LIBRARIES
${LIBDRM_LIBRARIES})
list(APPEND FFmpeg_HWACCEL_INCLUDE_DIRS
${LIBDRM_INCLUDE_DIRS})
endif()
list(APPEND FFmpeg_HWACCEL_FLAGS
--enable-libdrm)
endif()
list(APPEND FFmpeg_HWACCEL_FLAGS
--enable-libdrm)

if(LIBVA_FOUND)
find_package(X11 REQUIRED)
@@ -38,6 +38,7 @@ import org.yuzu.yuzu_emu.model.DriverViewModel
import org.yuzu.yuzu_emu.model.GamesViewModel
import org.yuzu.yuzu_emu.model.HomeViewModel
import org.yuzu.yuzu_emu.model.InstallResult
import android.os.Build
import org.yuzu.yuzu_emu.model.TaskState
import org.yuzu.yuzu_emu.model.TaskViewModel
import org.yuzu.yuzu_emu.utils.*
@@ -47,6 +48,7 @@ import java.io.BufferedOutputStream
import java.util.zip.ZipEntry
import java.util.zip.ZipInputStream
import androidx.core.content.edit
import kotlin.text.compareTo

class MainActivity : AppCompatActivity(), ThemeProvider {
private lateinit var binding: ActivityMainBinding
@@ -110,6 +112,19 @@ class MainActivity : AppCompatActivity(), ThemeProvider {

binding = ActivityMainBinding.inflate(layoutInflater)

// Since Android 15, google automatically forces "games" to be 60 hrz
// This ensures the display's max refresh rate is actually used
display?.let {
val supportedModes = it.supportedModes
val maxRefreshRate = supportedModes.maxByOrNull { mode -> mode.refreshRate }

if (maxRefreshRate != null) {
val layoutParams = window.attributes
layoutParams.preferredDisplayModeId = maxRefreshRate.modeId
window.attributes = layoutParams
}
}

setContentView(binding.root)

checkAndRequestBluetoothPermissions()
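The inserted block above asks the Display for its supported modes, picks the one with the highest refresh rate, and sets it as the window's preferred mode so Android 15's 60 Hz default for games is overridden. The selection itself is just a max-by-refresh-rate scan; here is a minimal standalone C++ sketch of that logic with a hypothetical DisplayMode struct (the real types are Android's Display.Mode and WindowManager.LayoutParams, which this does not reproduce):

#include <algorithm>
#include <iostream>
#include <optional>
#include <vector>

// Hypothetical stand-in for android.view.Display.Mode.
struct DisplayMode {
    int mode_id;
    float refresh_rate;
};

// Return the id of the mode with the highest refresh rate, if any modes exist.
static std::optional<int> PreferredModeId(const std::vector<DisplayMode>& modes) {
    const auto it = std::max_element(modes.begin(), modes.end(),
                                     [](const DisplayMode& a, const DisplayMode& b) {
                                         return a.refresh_rate < b.refresh_rate;
                                     });
    if (it == modes.end()) {
        return std::nullopt;
    }
    return it->mode_id;
}

int main() {
    const std::vector<DisplayMode> modes{{1, 60.0f}, {2, 90.0f}, {3, 120.0f}};
    if (const auto id = PreferredModeId(modes)) {
        std::cout << "preferredDisplayModeId = " << *id << '\n'; // prints 3
    }
}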
@@ -124,11 +124,16 @@ object CustomSettingsHandler {

// Check for driver requirements if activity and driverViewModel are provided
if (activity != null && driverViewModel != null) {
val driverPath = extractDriverPath(customSettings)
if (driverPath != null) {
Log.info("[CustomSettingsHandler] Custom settings specify driver: $driverPath")
val rawDriverPath = extractDriverPath(customSettings)
if (rawDriverPath != null) {
// Normalize to local storage path (we only store drivers under driverStoragePath)
val driverFilename = rawDriverPath.substringAfterLast('/')
.substringAfterLast('\\')
val localDriverPath = "${GpuDriverHelper.driverStoragePath}$driverFilename"
Log.info("[CustomSettingsHandler] Custom settings specify driver: $rawDriverPath (normalized: $localDriverPath)")

// Check if driver exists in the driver storage
val driverFile = File(driverPath)
val driverFile = File(localDriverPath)
if (!driverFile.exists()) {
Log.info("[CustomSettingsHandler] Driver not found locally: ${driverFile.name}")
@@ -182,7 +187,7 @@ object CustomSettingsHandler {
}

// Attempt to download and install the driver
val driverUri = DriverResolver.ensureDriverAvailable(driverPath, activity) { progress ->
val driverUri = DriverResolver.ensureDriverAvailable(driverFilename, activity) { progress ->
progressChannel.trySend(progress.toInt())
}
@@ -209,12 +214,12 @@ object CustomSettingsHandler {
return null
}

// Verify the downloaded driver
val installedFile = File(driverPath)
// Verify the downloaded driver (from normalized local path)
val installedFile = File(localDriverPath)
val metadata = GpuDriverHelper.getMetadataFromZip(installedFile)
if (metadata.name == null) {
Log.error(
"[CustomSettingsHandler] Downloaded driver is invalid: $driverPath"
"[CustomSettingsHandler] Downloaded driver is invalid: $localDriverPath"
)
Toast.makeText(
activity,
@@ -232,7 +237,7 @@ object CustomSettingsHandler {
}

// Add to driver list
driverViewModel.onDriverAdded(Pair(driverPath, metadata))
driverViewModel.onDriverAdded(Pair(localDriverPath, metadata))
Log.info(
"[CustomSettingsHandler] Successfully downloaded and installed driver: ${metadata.name}"
)
@@ -268,7 +273,7 @@ object CustomSettingsHandler {
// Driver exists, verify it's valid
val metadata = GpuDriverHelper.getMetadataFromZip(driverFile)
if (metadata.name == null) {
Log.error("[CustomSettingsHandler] Invalid driver file: $driverPath")
Log.error("[CustomSettingsHandler] Invalid driver file: $localDriverPath")
Toast.makeText(
activity,
activity.getString(
@@ -459,6 +464,8 @@ object CustomSettingsHandler {

if (inGpuDriverSection && trimmed.startsWith("driver_path=")) {
return trimmed.substringAfter("driver_path=")
.trim()
.removeSurrounding("\"", "\"")
}
}
@@ -68,6 +68,48 @@ object DriverResolver {
val filename: String
)

// Matching helpers
private val KNOWN_SUFFIXES = listOf(
".adpkg.zip",
".zip",
".7z",
".tar.gz",
".tar.xz",
".rar"
)

private fun stripKnownSuffixes(name: String): String {
var result = name
var changed: Boolean
do {
changed = false
for (s in KNOWN_SUFFIXES) {
if (result.endsWith(s, ignoreCase = true)) {
result = result.dropLast(s.length)
changed = true
}
}
} while (changed)
return result
}

private fun normalizeName(name: String): String {
val base = stripKnownSuffixes(name.lowercase())
// Remove non-alphanumerics to make substring checks resilient
return base.replace(Regex("[^a-z0-9]+"), " ").trim()
}

private fun tokenize(name: String): Set<String> =
normalizeName(name).split(Regex("\\s+")).filter { it.isNotBlank() }.toSet()

// Jaccard similarity between two sets
private fun jaccard(a: Set<String>, b: Set<String>): Double {
if (a.isEmpty() || b.isEmpty()) return 0.0
val inter = a.intersect(b).size.toDouble()
val uni = a.union(b).size.toDouble()
return if (uni == 0.0) 0.0 else inter / uni
}

/**
* Resolve a driver download URL from its filename
* @param filename The driver filename (e.g., "turnip_mrpurple-T19-toasted.adpkg.zip")
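The helpers added above strip archive suffixes, normalize names, and compare token sets with Jaccard similarity; later in this file a containment check boosts the score and a 0.6 threshold rejects weak matches. A small self-contained C++ sketch of the same scoring idea, for illustration only (it is not the Kotlin code above and omits the suffix stripping):

// Token-set (Jaccard) similarity between two driver file names - illustrative sketch.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <iterator>
#include <set>
#include <string>

// Lower-case and replace non-alphanumerics with spaces, mirroring normalizeName().
static std::string Normalize(std::string name) {
    for (char& c : name) {
        const unsigned char u = static_cast<unsigned char>(c);
        c = std::isalnum(u) ? static_cast<char>(std::tolower(u)) : ' ';
    }
    return name;
}

static std::set<std::string> Tokenize(const std::string& name) {
    std::set<std::string> tokens;
    std::string current;
    for (char c : Normalize(name) + " ") {
        if (c == ' ') {
            if (!current.empty()) tokens.insert(current);
            current.clear();
        } else {
            current.push_back(c);
        }
    }
    return tokens;
}

// Size of the intersection divided by size of the union of the two token sets.
static double Jaccard(const std::set<std::string>& a, const std::set<std::string>& b) {
    if (a.empty() || b.empty()) return 0.0;
    std::set<std::string> inter, uni;
    std::set_intersection(a.begin(), a.end(), b.begin(), b.end(), std::inserter(inter, inter.begin()));
    std::set_union(a.begin(), a.end(), b.begin(), b.end(), std::inserter(uni, uni.begin()));
    return static_cast<double>(inter.size()) / static_cast<double>(uni.size());
}

int main() {
    const std::string requested = "turnip_mrpurple-T19-toasted.adpkg.zip";
    const std::string candidate = "turnip_mrpurple_T19_toasted.zip";
    // Prints roughly 0.83, well above a 0.6 acceptance threshold.
    std::cout << Jaccard(Tokenize(requested), Tokenize(candidate)) << '\n';
}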
@@ -98,7 +140,7 @@ object DriverResolver {
async {
searchRepository(repoPath, filename)
}
}.mapNotNull { it.await() }.firstOrNull().also { resolved ->
}.firstNotNullOfOrNull { it.await() }.also { resolved ->
// Cache the result if found
resolved?.let {
urlCache[filename] = it
@@ -119,22 +161,56 @@ object DriverResolver {
releaseCache[repoPath] = it
}

// Search through all releases and artifacts
// First pass: exact name (case-insensitive) against asset filenames
val target = filename.lowercase()
for (release in releases) {
for (artifact in release.artifacts) {
if (artifact.name == filename) {
Log.info(
"[DriverResolver] Found $filename in $repoPath/${release.tagName}"
)
if (artifact.name.equals(filename, ignoreCase = true) || artifact.name.lowercase() == target) {
Log.info("[DriverResolver] Found $filename in $repoPath/${release.tagName}")
return@withContext ResolvedDriver(
downloadUrl = artifact.url.toString(),
repoPath = repoPath,
releaseTag = release.tagName,
filename = filename
filename = artifact.name
)
}
}
}

// Second pass: fuzzy match by asset filenames only
val reqNorm = normalizeName(filename)
val reqTokens = tokenize(filename)
var best: ResolvedDriver? = null
var bestScore = 0.0

for (release in releases) {
for (artifact in release.artifacts) {
val artNorm = normalizeName(artifact.name)
val artTokens = tokenize(artifact.name)

var score = jaccard(reqTokens, artTokens)
// Boost if one normalized name contains the other
if (artNorm.contains(reqNorm) || reqNorm.contains(artNorm)) {
score = maxOf(score, 0.92)
}

if (score > bestScore) {
bestScore = score
best = ResolvedDriver(
downloadUrl = artifact.url.toString(),
repoPath = repoPath,
releaseTag = release.tagName,
filename = artifact.name
)
}
}
}

// Threshold to avoid bad guesses, this worked fine in testing but might need tuning
if (best != null && bestScore >= 0.6) {
Log.info("[DriverResolver] Fuzzy matched $filename -> ${best.filename} in ${best.repoPath} (score=%.2f)".format(bestScore))
return@withContext best
}
null
} catch (e: Exception) {
Log.error("[DriverResolver] Failed to search $repoPath: ${e.message}")
@@ -296,8 +372,8 @@ object DriverResolver {
context: Context,
onProgress: ((Float) -> Unit)? = null
): Uri? {
// Extract filename from path
val filename = driverPath.substringAfterLast('/')
// Extract filename from path (support both separators)
val filename = driverPath.substringAfterLast('/').substringAfterLast('\\')

// Check if driver already exists locally
val localPath = "${GpuDriverHelper.driverStoragePath}$filename"
@@ -17,7 +17,7 @@ add_library(yuzu-android SHARED

set_property(TARGET yuzu-android PROPERTY IMPORTED_LOCATION ${FFmpeg_LIBRARY_DIR})

target_link_libraries(yuzu-android PRIVATE audio_core common core input_common frontend_common Vulkan::Headers)
target_link_libraries(yuzu-android PRIVATE audio_core common core input_common frontend_common Vulkan::Headers GPUOpen::VulkanMemoryAllocator)
target_link_libraries(yuzu-android PRIVATE android camera2ndk EGL glad jnigraphics log)
if (ARCHITECTURE_arm64)
target_link_libraries(yuzu-android PRIVATE adrenotools)
@@ -12,7 +12,7 @@
#include <windows.h>
#include "common/dynamic_library.h"

#elif defined(__linux__) || defined(__FreeBSD__) || defined(__sun__) // ^^^ Windows ^^^ vvv Linux vvv
#elif defined(__linux__) || defined(__FreeBSD__) || defined(__sun__) || defined(__APPLE__) // ^^^ Windows ^^^ vvv POSIX vvv

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
@@ -20,10 +20,18 @@
#include <boost/icl/interval_set.hpp>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/random.h>
#include <unistd.h>
#include "common/scope_exit.h"

#if defined(__linux__)
#include <sys/random.h>
#elif defined(__APPLE__)
#include <sys/types.h>
#include <sys/random.h>
#include <mach/vm_map.h>
#include <mach/mach.h>
#endif

// FreeBSD
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
@@ -32,8 +40,12 @@
#ifndef MAP_ALIGNED_SUPER
#define MAP_ALIGNED_SUPER 0
#endif
// macOS
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#endif // ^^^ Linux ^^^
#endif // ^^^ POSIX ^^^

#include <mutex>
#include <random>
@@ -372,7 +384,7 @@ private:
std::unordered_map<size_t, size_t> placeholder_host_pointers; ///< Placeholder backing offset
};

#elif defined(__linux__) || defined(__FreeBSD__) || defined(__sun__) // ^^^ Windows ^^^ vvv Linux vvv
#elif defined(__linux__) || defined(__FreeBSD__) || defined(__sun__) || defined(__APPLE__) // ^^^ Windows ^^^ vvv POSIX vvv

#ifdef ARCHITECTURE_arm64
@@ -489,6 +501,13 @@ public:
#elif defined(__FreeBSD__) && __FreeBSD__ < 13
// XXX Drop after FreeBSD 12.* reaches EOL on 2024-06-30
fd = shm_open(SHM_ANON, O_RDWR, 0600);
#elif defined(__APPLE__)
// macOS doesn't have memfd_create, use anonymous temporary file
char template_path[] = "/tmp/eden_mem_XXXXXX";
fd = mkstemp(template_path);
if (fd >= 0) {
unlink(template_path);
}
#else
fd = memfd_create("HostMemory", 0);
#endif
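The new branch above gives macOS an equivalent of memfd_create by creating a temporary file with mkstemp and immediately unlinking it, so only the file descriptor keeps the backing storage alive. A standalone POSIX sketch of that pattern, under the assumption of a Linux/macOS toolchain (this is not the emulator's HostMemory class):

// Anonymous memory file: memfd_create on Linux, mkstemp + unlink elsewhere.
#include <cstdio>
#include <cstring>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static int CreateAnonymousFd(size_t size) {
#if defined(__linux__)
    // memfd_create is declared in <sys/mman.h> on glibc 2.27+.
    int fd = memfd_create("scratch", 0);
#else
    char path[] = "/tmp/scratch_XXXXXX";
    int fd = mkstemp(path);
    if (fd >= 0) {
        unlink(path); // name vanishes from the filesystem; the fd keeps the storage alive
    }
#endif
    if (fd >= 0 && ftruncate(fd, static_cast<off_t>(size)) != 0) {
        close(fd);
        return -1;
    }
    return fd;
}

int main() {
    const size_t size = 1 << 20;
    const int fd = CreateAnonymousFd(size);
    if (fd < 0) return 1;
    void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) return 1;
    std::memcpy(p, "hello", 6);
    std::printf("%s\n", static_cast<const char*>(p)); // prints "hello"
    munmap(p, size);
    close(fd);
}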
@@ -645,7 +664,7 @@ private:
FreeRegionManager free_manager{};
};

#else // ^^^ Linux ^^^ vvv Generic vvv
#else // ^^^ POSIX ^^^ vvv Generic vvv

class HostMemory::Impl {
public:
@@ -551,6 +551,8 @@ struct Values {
3,
#elif defined (ANDROID)
0,
#elif defined (__APPLE__)
0,
#else
2,
#endif
@@ -7,6 +7,7 @@

#pragma once

#include <algorithm>
#include <cstddef>
#include <span>
#include <string>
@@ -219,28 +219,55 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(IoctlAllocGpfifoEx& params, DeviceFD fd) {
return NvResult::Success;
}

NvResult nvhost_gpu::AllocateObjectContext(IoctlAllocObjCtx& params) {
LOG_DEBUG(Service_NVDRV, "called, class_num={:X}, flags={:X}, obj_id={:X}", params.class_num,
params.flags, params.obj_id);
s32_le nvhost_gpu::GetObjectContextClassNumberIndex(CtxClasses class_number) {
constexpr s32_le invalid_class_number_index = -1;
switch (class_number) {
case CtxClasses::Ctx2D: return 0;
case CtxClasses::Ctx3D: return 1;
case CtxClasses::CtxCompute: return 2;
case CtxClasses::CtxKepler: return 3;
case CtxClasses::CtxDMA: return 4;
case CtxClasses::CtxChannelGPFIFO: return 5;
default: return invalid_class_number_index;
}
}

if (!channel_state->initialized) {
NvResult nvhost_gpu::AllocateObjectContext(IoctlAllocObjCtx& params) {
LOG_DEBUG(Service_NVDRV, "called, class_num={:#X}, flags={:#X}, obj_id={:#X}", params.class_num,
params.flags, params.obj_id);

if (!channel_state || !channel_state->initialized) {
LOG_CRITICAL(Service_NVDRV, "No address space bound to allocate a object context!");
return NvResult::NotInitialized;
}

switch (static_cast<CtxClasses>(params.class_num)) {
case CtxClasses::Ctx2D:
case CtxClasses::Ctx3D:
case CtxClasses::CtxCompute:
case CtxClasses::CtxKepler:
case CtxClasses::CtxDMA:
case CtxClasses::CtxChannelGPFIFO:
ctxObj_params.push_back(params);
return NvResult::Success;
default:
LOG_ERROR(Service_NVDRV, "Invalid class number for object context: {:X}", params.class_num);
std::scoped_lock lk(channel_mutex);

if (params.flags) {
LOG_WARNING(Service_NVDRV, "non-zero flags={:#X} for class={:#X}", params.flags,
params.class_num);

constexpr u32 allowed_mask{};
params.flags = allowed_mask;
}

s32_le ctx_class_number_index =
GetObjectContextClassNumberIndex(static_cast<CtxClasses>(params.class_num));
if (ctx_class_number_index < 0) {
LOG_ERROR(Service_NVDRV, "Invalid class number for object context: {:#X}",
params.class_num);
return NvResult::BadParameter;
}

if (ctxObjs[ctx_class_number_index].has_value()) {
LOG_ERROR(Service_NVDRV, "Object context for class {:#X} already allocated on this channel",
params.class_num);
return NvResult::AlreadyAllocated;
}

ctxObjs[ctx_class_number_index] = params;

return NvResult::Success;
}

static boost::container::small_vector<Tegra::CommandHeader, 512> BuildWaitCommandList(
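The rewritten AllocateObjectContext maps each engine class to a fixed slot index, rejects unknown classes, and refuses to allocate the same class twice on a channel. The bookkeeping can be sketched on its own like this (simplified stand-ins with illustrative class IDs, not the nvservices types):

#include <array>
#include <cstdint>
#include <iostream>
#include <optional>

enum class CtxClass : std::uint32_t { Ctx2D = 0x1, Ctx3D = 0x2, Compute = 0x3 }; // illustrative IDs
enum class Result { Success, BadParameter, AlreadyAllocated };

// Fixed slot per class, -1 for anything unknown.
static int SlotIndex(CtxClass c) {
    switch (c) {
    case CtxClass::Ctx2D: return 0;
    case CtxClass::Ctx3D: return 1;
    case CtxClass::Compute: return 2;
    default: return -1;
    }
}

struct Channel {
    std::array<std::optional<CtxClass>, 3> slots{};

    Result Allocate(CtxClass c) {
        const int index = SlotIndex(c);
        if (index < 0) {
            return Result::BadParameter;       // unknown class number
        }
        if (slots[static_cast<std::size_t>(index)].has_value()) {
            return Result::AlreadyAllocated;   // one object context per class and channel
        }
        slots[static_cast<std::size_t>(index)] = c;
        return Result::Success;
    }
};

int main() {
    Channel ch;
    std::cout << static_cast<int>(ch.Allocate(CtxClass::Ctx3D)) << '\n'; // 0: Success
    std::cout << static_cast<int>(ch.Allocate(CtxClass::Ctx3D)) << '\n'; // 2: AlreadyAllocated
}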
@@ -172,7 +172,7 @@ private:
s32_le nvmap_fd{};
u64_le user_data{};
IoctlZCullBind zcull_params{};
std::vector<IoctlAllocObjCtx> ctxObj_params{};
std::array<std::optional<IoctlAllocObjCtx>, 6> ctxObjs{};
u32_le channel_priority{};
u32_le channel_timeslice{};
@@ -184,9 +184,12 @@ private:
NvResult SetChannelPriority(IoctlChannelSetPriority& params);
NvResult AllocGPFIFOEx(IoctlAllocGpfifoEx& params, DeviceFD fd);
NvResult AllocGPFIFOEx2(IoctlAllocGpfifoEx& params, DeviceFD fd);

s32_le GetObjectContextClassNumberIndex(CtxClasses class_number);
NvResult AllocateObjectContext(IoctlAllocObjCtx& params);

NvResult SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, Tegra::CommandList&& entries);

NvResult SubmitGPFIFOBase1(IoctlSubmitGpfifo& params,
std::span<Tegra::CommandListHeader> commands, bool kickoff = false);
NvResult SubmitGPFIFOBase2(IoctlSubmitGpfifo& params,
@@ -102,13 +102,16 @@ constexpr VkPipelineVertexInputStateCreateInfo PIPELINE_VERTEX_INPUT_STATE_CREAT
.vertexAttributeDescriptionCount = 0,
.pVertexAttributeDescriptions = nullptr,
};
constexpr VkPipelineInputAssemblyStateCreateInfo PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO{
.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
.primitiveRestartEnable = VK_FALSE,
};

VkPipelineInputAssemblyStateCreateInfo GetPipelineInputAssemblyStateCreateInfo(const Device& device) {
return VkPipelineInputAssemblyStateCreateInfo{
.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
.primitiveRestartEnable = device.IsMoltenVK() ? VK_TRUE : VK_FALSE,
};
}
constexpr VkPipelineViewportStateCreateInfo PIPELINE_VIEWPORT_STATE_CREATE_INFO{
.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pNext = nullptr,
@@ -802,6 +805,7 @@ VkPipeline BlitImageHelper::FindOrEmplaceColorPipeline(const BlitImagePipelineKe
.pAttachments = &blend_attachment,
.blendConstants = {0.0f, 0.0f, 0.0f, 0.0f},
};
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci = GetPipelineInputAssemblyStateCreateInfo(device);
blit_color_pipelines.push_back(device.GetLogical().CreateGraphicsPipeline({
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
@@ -809,7 +813,7 @@ VkPipeline BlitImageHelper::FindOrEmplaceColorPipeline(const BlitImagePipelineKe
.stageCount = static_cast<u32>(stages.size()),
.pStages = stages.data(),
.pVertexInputState = &PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pInputAssemblyState = &PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pInputAssemblyState = &input_assembly_ci,
.pTessellationState = nullptr,
.pViewportState = &PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pRasterizationState = &PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
@@ -833,6 +837,7 @@ VkPipeline BlitImageHelper::FindOrEmplaceDepthStencilPipeline(const BlitImagePip
}
blit_depth_stencil_keys.push_back(key);
const std::array stages = MakeStages(*full_screen_vert, *blit_depth_stencil_frag);
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci = GetPipelineInputAssemblyStateCreateInfo(device);
blit_depth_stencil_pipelines.push_back(device.GetLogical().CreateGraphicsPipeline({
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
@@ -840,7 +845,7 @@ VkPipeline BlitImageHelper::FindOrEmplaceDepthStencilPipeline(const BlitImagePip
.stageCount = static_cast<u32>(stages.size()),
.pStages = stages.data(),
.pVertexInputState = &PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pInputAssemblyState = &PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pInputAssemblyState = &input_assembly_ci,
.pTessellationState = nullptr,
.pViewportState = &PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pRasterizationState = &PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
@@ -885,6 +890,7 @@ VkPipeline BlitImageHelper::FindOrEmplaceClearColorPipeline(const BlitImagePipel
.pAttachments = &color_blend_attachment_state,
.blendConstants = {0.0f, 0.0f, 0.0f, 0.0f},
};
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci = GetPipelineInputAssemblyStateCreateInfo(device);
clear_color_pipelines.push_back(device.GetLogical().CreateGraphicsPipeline({
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
@@ -892,7 +898,7 @@ VkPipeline BlitImageHelper::FindOrEmplaceClearColorPipeline(const BlitImagePipel
.stageCount = static_cast<u32>(stages.size()),
.pStages = stages.data(),
.pVertexInputState = &PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pInputAssemblyState = &PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pInputAssemblyState = &input_assembly_ci,
.pTessellationState = nullptr,
.pViewportState = &PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pRasterizationState = &PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
@@ -940,6 +946,7 @@ VkPipeline BlitImageHelper::FindOrEmplaceClearStencilPipeline(
.minDepthBounds = 0.0f,
.maxDepthBounds = 0.0f,
};
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci = GetPipelineInputAssemblyStateCreateInfo(device);
clear_stencil_pipelines.push_back(device.GetLogical().CreateGraphicsPipeline({
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
@@ -947,7 +954,7 @@ VkPipeline BlitImageHelper::FindOrEmplaceClearStencilPipeline(
.stageCount = static_cast<u32>(stages.size()),
.pStages = stages.data(),
.pVertexInputState = &PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pInputAssemblyState = &PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pInputAssemblyState = &input_assembly_ci,
.pTessellationState = nullptr,
.pViewportState = &PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pRasterizationState = &PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
@@ -970,6 +977,7 @@ void BlitImageHelper::ConvertDepthToColorPipeline(vk::Pipeline& pipeline, VkRend
}
VkShaderModule frag_shader = *convert_float_to_depth_frag;
const std::array stages = MakeStages(*full_screen_vert, frag_shader);
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci = GetPipelineInputAssemblyStateCreateInfo(device);
pipeline = device.GetLogical().CreateGraphicsPipeline({
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
@@ -977,7 +985,7 @@ void BlitImageHelper::ConvertDepthToColorPipeline(vk::Pipeline& pipeline, VkRend
.stageCount = static_cast<u32>(stages.size()),
.pStages = stages.data(),
.pVertexInputState = &PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pInputAssemblyState = &PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pInputAssemblyState = &input_assembly_ci,
.pTessellationState = nullptr,
.pViewportState = &PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pRasterizationState = &PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
@@ -999,6 +1007,7 @@ void BlitImageHelper::ConvertColorToDepthPipeline(vk::Pipeline& pipeline, VkRend
}
VkShaderModule frag_shader = *convert_depth_to_float_frag;
const std::array stages = MakeStages(*full_screen_vert, frag_shader);
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci = GetPipelineInputAssemblyStateCreateInfo(device);
pipeline = device.GetLogical().CreateGraphicsPipeline({
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
@@ -1006,7 +1015,7 @@ void BlitImageHelper::ConvertColorToDepthPipeline(vk::Pipeline& pipeline, VkRend
.stageCount = static_cast<u32>(stages.size()),
.pStages = stages.data(),
.pVertexInputState = &PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pInputAssemblyState = &PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pInputAssemblyState = &input_assembly_ci,
.pTessellationState = nullptr,
.pViewportState = &PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pRasterizationState = &PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
@@ -1029,6 +1038,7 @@ void BlitImageHelper::ConvertPipelineEx(vk::Pipeline& pipeline, VkRenderPass ren
return;
}
const std::array stages = MakeStages(*full_screen_vert, *module);
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci = GetPipelineInputAssemblyStateCreateInfo(device);
pipeline = device.GetLogical().CreateGraphicsPipeline({
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
@@ -1036,7 +1046,7 @@ void BlitImageHelper::ConvertPipelineEx(vk::Pipeline& pipeline, VkRenderPass ren
.stageCount = static_cast<u32>(stages.size()),
.pStages = stages.data(),
.pVertexInputState = &PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pInputAssemblyState = &PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pInputAssemblyState = &input_assembly_ci,
.pTessellationState = nullptr,
.pViewportState = &PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pRasterizationState = &PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
@@ -1070,6 +1080,7 @@ void BlitImageHelper::ConvertPipeline(vk::Pipeline& pipeline, VkRenderPass rende
VkShaderModule frag_shader =
is_target_depth ? *convert_float_to_depth_frag : *convert_depth_to_float_frag;
const std::array stages = MakeStages(*full_screen_vert, frag_shader);
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci = GetPipelineInputAssemblyStateCreateInfo(device);
pipeline = device.GetLogical().CreateGraphicsPipeline({
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
@@ -1077,7 +1088,7 @@ void BlitImageHelper::ConvertPipeline(vk::Pipeline& pipeline, VkRenderPass rende
.stageCount = static_cast<u32>(stages.size()),
.pStages = stages.data(),
.pVertexInputState = &PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pInputAssemblyState = &PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pInputAssemblyState = &input_assembly_ci,
.pTessellationState = nullptr,
.pViewportState = &PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pRasterizationState = &PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
@@ -400,12 +400,12 @@ static vk::Pipeline CreateWrappedPipelineImpl(
.pVertexAttributeDescriptions = nullptr,
};

constexpr VkPipelineInputAssemblyStateCreateInfo input_assembly_ci{
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
.primitiveRestartEnable = VK_FALSE,
.primitiveRestartEnable = device.IsMoltenVK() ? VK_TRUE : VK_FALSE,
};

constexpr VkPipelineViewportStateCreateInfo viewport_state_ci{
@@ -635,14 +635,16 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
.flags = 0,
.topology = input_assembly_topology,
.primitiveRestartEnable =
dynamic.primitive_restart_enable != 0 &&
// MoltenVK/Metal always has primitive restart enabled and cannot disable it
device.IsMoltenVK() ? VK_TRUE :
(dynamic.primitive_restart_enable != 0 &&
((input_assembly_topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST &&
device.IsTopologyListPrimitiveRestartSupported()) ||
SupportsPrimitiveRestart(input_assembly_topology) ||
(input_assembly_topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST &&
device.IsPatchListPrimitiveRestartSupported()))
? VK_TRUE
: VK_FALSE,
: VK_FALSE),
};
const VkPipelineTessellationStateCreateInfo tessellation_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
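Primitive restart splits a single strip-style index stream into multiple strips wherever a sentinel index appears; MoltenVK/Metal always has it enabled, which is why the pipeline above now forces VK_TRUE on that backend. A small illustrative sketch of what the restart index does to an index stream (not renderer code):

#include <cstdint>
#include <iostream>
#include <vector>

constexpr std::uint32_t kRestartIndex = 0xFFFFFFFFu; // sentinel Vulkan uses for 32-bit indices

// Count how many separate triangle strips an index stream encodes when restart is enabled.
static std::size_t CountStrips(const std::vector<std::uint32_t>& indices) {
    std::size_t strips = 0;
    std::size_t run = 0; // indices in the current strip
    for (const std::uint32_t index : indices) {
        if (index == kRestartIndex) {
            if (run >= 3) ++strips; // a strip needs at least 3 vertices
            run = 0;
        } else {
            ++run;
        }
    }
    if (run >= 3) ++strips;
    return strips;
}

int main() {
    // Two strips separated by a restart index; with restart disabled this would be one range.
    const std::vector<std::uint32_t> indices{0, 1, 2, 3, kRestartIndex, 4, 5, 6};
    std::cout << CountStrips(indices) << '\n'; // prints 2
}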
@@ -1,3 +1,5 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -8,4 +10,4 @@
#define VMA_STATIC_VULKAN_FUNCTIONS 0
#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1

#include <vk_mem_alloc.h>
#include "vk_mem_alloc.h"
@@ -725,6 +725,11 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
dynamic_state3_enables = true;
}

if (is_mvk && Settings::values.dyna_state.GetValue() != 0) {
LOG_WARNING(Render_Vulkan, "MoltenVK detected: Forcing dynamic state to 0 to prevent black screen issues");
Settings::values.dyna_state.SetValue(0);
}

if (Settings::values.dyna_state.GetValue() == 0) {
must_emulate_scaled_formats = true;
LOG_INFO(Render_Vulkan, "Dynamic state is disabled (dyna_state = 0), forcing scaled format emulation ON");
@@ -753,18 +758,24 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
functions.vkGetInstanceProcAddr = dld.vkGetInstanceProcAddr;
functions.vkGetDeviceProcAddr = dld.vkGetDeviceProcAddr;

const VmaAllocatorCreateInfo allocator_info = {
.flags = VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT,
.physicalDevice = physical,
.device = *logical,
.preferredLargeHeapBlockSize = 0,
.pAllocationCallbacks = nullptr,
.pDeviceMemoryCallbacks = nullptr,
.pHeapSizeLimit = nullptr,
.pVulkanFunctions = &functions,
.instance = instance,
.vulkanApiVersion = VK_API_VERSION_1_1,
.pTypeExternalMemoryHandleTypes = nullptr,
VmaAllocatorCreateFlags flags = VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT;
if (extensions.memory_budget) {
flags |= VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;
}
const VmaAllocatorCreateInfo allocator_info{
.flags = flags,
.physicalDevice = physical,
.device = *logical,
.preferredLargeHeapBlockSize = is_integrated
? (64u * 1024u * 1024u)
: (256u * 1024u * 1024u),
.pAllocationCallbacks = nullptr,
.pDeviceMemoryCallbacks = nullptr,
.pHeapSizeLimit = nullptr,
.pVulkanFunctions = &functions,
.instance = instance,
.vulkanApiVersion = ApiVersion(),
.pTypeExternalMemoryHandleTypes = nullptr,
};

vk::Check(vmaCreateAllocator(&allocator_info, &allocator));
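The new allocator setup only ORs in the memory-budget flag when the VK_EXT_memory_budget extension was found, and sizes preferredLargeHeapBlockSize at 64 MiB on integrated GPUs versus 256 MiB on discrete ones. A tiny sketch of that decision, using local stand-in constants rather than the real VMA flag values (assumption: the real values come from vk_mem_alloc.h):

#include <cstdint>
#include <iostream>

// Stand-in flag bits for illustration only.
constexpr std::uint32_t kExternallySynchronized = 1u << 0;
constexpr std::uint32_t kExtMemoryBudget = 1u << 3;

struct AllocatorConfig {
    std::uint32_t flags;
    std::uint64_t preferred_block_size;
};

static AllocatorConfig ChooseAllocatorConfig(bool has_memory_budget, bool is_integrated) {
    std::uint32_t flags = kExternallySynchronized;
    if (has_memory_budget) {
        flags |= kExtMemoryBudget; // only request budget tracking when the extension exists
    }
    // Smaller blocks on integrated GPUs, which share memory with the CPU.
    const std::uint64_t block_size = is_integrated ? 64ull * 1024 * 1024 : 256ull * 1024 * 1024;
    return {flags, block_size};
}

int main() {
    const AllocatorConfig cfg = ChooseAllocatorConfig(true, false);
    std::cout << cfg.flags << ' ' << cfg.preferred_block_size << '\n'; // 9 268435456
}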
@@ -1090,8 +1101,15 @@ bool Device::GetSuitability(bool requires_swapchain) {
// Some features are mandatory. Check those.
#define CHECK_FEATURE(feature, name) \
if (!features.feature.name) { \
LOG_ERROR(Render_Vulkan, "Missing required feature {}", #name); \
suitable = false; \
if (IsMoltenVK() && (strcmp(#name, "geometryShader") == 0 || \
strcmp(#name, "logicOp") == 0 || \
strcmp(#name, "shaderCullDistance") == 0 || \
strcmp(#name, "wideLines") == 0)) { \
LOG_INFO(Render_Vulkan, "MoltenVK missing feature {} - using fallback", #name); \
} else { \
LOG_ERROR(Render_Vulkan, "Missing required feature {}", #name); \
suitable = false; \
} \
}

#define LOG_FEATURE(feature, name) \
@@ -717,6 +717,10 @@ public:
return properties.driver.driverID == VK_DRIVER_ID_NVIDIA_PROPRIETARY;
}

bool IsMoltenVK() const noexcept {
return properties.driver.driverID == VK_DRIVER_ID_MOLTENVK;
}

NvidiaArchitecture GetNvidiaArch() const noexcept {
return nvidia_arch;
}
@@ -6,7 +6,10 @@

#include <algorithm>
#include <bit>
#include <limits>
#include <optional>
#include <type_traits>
#include <utility>
#include <vector>

#include "common/alignment.h"
@ -21,379 +24,302 @@
|
|||
#include "video_core/vulkan_common/vulkan_wrapper.h"
|
||||
|
||||
namespace Vulkan {
|
||||
namespace {
|
||||
struct Range {
|
||||
u64 begin;
|
||||
u64 end;
|
||||
namespace {
|
||||
|
||||
[[nodiscard]] bool Contains(u64 iterator, u64 size) const noexcept {
|
||||
return iterator < end && begin < iterator + size;
|
||||
}
|
||||
};
|
||||
// Helpers translating MemoryUsage to flags/usage
|
||||
|
||||
[[nodiscard]] u64 AllocationChunkSize(u64 required_size) {
|
||||
static constexpr std::array sizes{
|
||||
0x1000ULL << 10, 0x1400ULL << 10, 0x1800ULL << 10, 0x1c00ULL << 10, 0x2000ULL << 10,
|
||||
0x3200ULL << 10, 0x4000ULL << 10, 0x6000ULL << 10, 0x8000ULL << 10, 0xA000ULL << 10,
|
||||
0x10000ULL << 10, 0x18000ULL << 10, 0x20000ULL << 10,
|
||||
};
|
||||
static_assert(std::is_sorted(sizes.begin(), sizes.end()));
|
||||
|
||||
const auto it = std::ranges::lower_bound(sizes, required_size);
|
||||
return it != sizes.end() ? *it : Common::AlignUp(required_size, 4ULL << 20);
|
||||
}
|
||||
|
||||
[[nodiscard]] VkMemoryPropertyFlags MemoryUsagePropertyFlags(MemoryUsage usage) {
|
||||
switch (usage) {
|
||||
case MemoryUsage::DeviceLocal:
|
||||
return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
|
||||
case MemoryUsage::Upload:
|
||||
return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
|
||||
case MemoryUsage::Download:
|
||||
return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
|
||||
VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
|
||||
case MemoryUsage::Stream:
|
||||
return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
|
||||
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
|
||||
}
|
||||
ASSERT_MSG(false, "Invalid memory usage={}", usage);
|
||||
return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
|
||||
}
|
||||
|
||||
[[nodiscard]] VkMemoryPropertyFlags MemoryUsagePreferredVmaFlags(MemoryUsage usage) {
|
||||
return usage != MemoryUsage::DeviceLocal ? VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
|
||||
: VkMemoryPropertyFlagBits{};
|
||||
}
|
||||
|
||||
[[nodiscard]] VmaAllocationCreateFlags MemoryUsageVmaFlags(MemoryUsage usage) {
|
||||
switch (usage) {
|
||||
case MemoryUsage::Upload:
|
||||
case MemoryUsage::Stream:
|
||||
return VMA_ALLOCATION_CREATE_MAPPED_BIT |
|
||||
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
|
||||
case MemoryUsage::Download:
|
||||
return VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
|
||||
case MemoryUsage::DeviceLocal:
|
||||
return {};
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
[[nodiscard]] VmaMemoryUsage MemoryUsageVma(MemoryUsage usage) {
|
||||
switch (usage) {
|
||||
case MemoryUsage::DeviceLocal:
|
||||
case MemoryUsage::Stream:
|
||||
return VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
|
||||
case MemoryUsage::Upload:
|
||||
case MemoryUsage::Download:
|
||||
return VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
|
||||
}
|
||||
return VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
|
||||
}
|
||||
|
||||
} // Anonymous namespace
|
||||
|
||||
class MemoryAllocation {
|
||||
public:
|
||||
explicit MemoryAllocation(MemoryAllocator* const allocator_, vk::DeviceMemory memory_,
|
||||
VkMemoryPropertyFlags properties, u64 allocation_size_, u32 type)
|
||||
: allocator{allocator_}, memory{std::move(memory_)}, allocation_size{allocation_size_},
|
||||
property_flags{properties}, shifted_memory_type{1U << type} {}
|
||||
|
||||
MemoryAllocation& operator=(const MemoryAllocation&) = delete;
|
||||
MemoryAllocation(const MemoryAllocation&) = delete;
|
||||
|
||||
MemoryAllocation& operator=(MemoryAllocation&&) = delete;
|
||||
MemoryAllocation(MemoryAllocation&&) = delete;
|
||||
|
||||
[[nodiscard]] std::optional<MemoryCommit> Commit(VkDeviceSize size, VkDeviceSize alignment) {
|
||||
const std::optional<u64> alloc = FindFreeRegion(size, alignment);
|
||||
if (!alloc) {
|
||||
// Signal out of memory, it'll try to do more allocations.
|
||||
return std::nullopt;
|
||||
[[maybe_unused]] VkMemoryPropertyFlags MemoryUsagePropertyFlags(MemoryUsage usage) {
|
||||
switch (usage) {
|
||||
case MemoryUsage::DeviceLocal:
|
||||
return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
|
||||
case MemoryUsage::Upload:
|
||||
return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
|
||||
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
|
||||
case MemoryUsage::Download:
|
||||
return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
|
||||
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
|
||||
VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
|
||||
case MemoryUsage::Stream:
|
||||
return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
|
||||
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
|
||||
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
|
||||
}
|
||||
ASSERT_MSG(false, "Invalid memory usage={}", usage);
|
||||
return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
|
||||
}
|
||||
const Range range{
|
||||
.begin = *alloc,
|
||||
.end = *alloc + size,
|
||||
|
||||
[[nodiscard]] VkMemoryPropertyFlags MemoryUsagePreferredVmaFlags(MemoryUsage usage) {
|
||||
return usage != MemoryUsage::DeviceLocal ? VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
|
||||
: VkMemoryPropertyFlagBits{};
|
||||
}
|
||||
|
||||
[[nodiscard]] VmaAllocationCreateFlags MemoryUsageVmaFlags(MemoryUsage usage) {
|
||||
switch (usage) {
|
||||
case MemoryUsage::Upload:
|
||||
case MemoryUsage::Stream:
|
||||
return VMA_ALLOCATION_CREATE_MAPPED_BIT |
|
||||
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
|
||||
case MemoryUsage::Download:
|
||||
return VMA_ALLOCATION_CREATE_MAPPED_BIT |
|
||||
VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
|
||||
case MemoryUsage::DeviceLocal:
|
||||
return {};
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
[[nodiscard]] VmaMemoryUsage MemoryUsageVma(MemoryUsage usage) {
|
||||
switch (usage) {
|
||||
case MemoryUsage::DeviceLocal:
|
||||
case MemoryUsage::Stream:
|
||||
return VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
|
||||
case MemoryUsage::Upload:
|
||||
case MemoryUsage::Download:
|
||||
return VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
|
||||
}
|
||||
return VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
|
||||
}
|
||||
|
||||
|
||||
// This avoids calling vkGetBufferMemoryRequirements* directly.
|
||||
template<typename T>
|
||||
static VkBuffer GetVkHandleFromBuffer(const T &buf) {
|
||||
if constexpr (requires { static_cast<VkBuffer>(buf); }) {
|
||||
return static_cast<VkBuffer>(buf);
|
||||
} else if constexpr (requires {{ buf.GetHandle() } -> std::convertible_to<VkBuffer>; }) {
|
||||
return buf.GetHandle();
|
||||
} else if constexpr (requires {{ buf.Handle() } -> std::convertible_to<VkBuffer>; }) {
|
||||
return buf.Handle();
|
||||
} else if constexpr (requires {{ buf.vk_handle() } -> std::convertible_to<VkBuffer>; }) {
|
||||
return buf.vk_handle();
|
||||
} else {
|
||||
static_assert(sizeof(T) == 0, "Cannot extract VkBuffer handle from vk::Buffer");
|
||||
return VK_NULL_HANDLE;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
//MemoryCommit is now VMA-backed
|
||||
MemoryCommit::MemoryCommit(VmaAllocator alloc, VmaAllocation a,
|
||||
const VmaAllocationInfo &info) noexcept
|
||||
: allocator{alloc}, allocation{a}, memory{info.deviceMemory},
|
||||
offset{info.offset}, size{info.size}, mapped_ptr{info.pMappedData} {}
|
||||
|
||||
MemoryCommit::~MemoryCommit() { Release(); }
|
||||
|
||||
MemoryCommit::MemoryCommit(MemoryCommit &&rhs) noexcept
|
||||
: allocator{std::exchange(rhs.allocator, nullptr)},
|
||||
allocation{std::exchange(rhs.allocation, nullptr)},
|
||||
memory{std::exchange(rhs.memory, VK_NULL_HANDLE)},
|
||||
offset{std::exchange(rhs.offset, 0)},
|
||||
size{std::exchange(rhs.size, 0)},
|
||||
mapped_ptr{std::exchange(rhs.mapped_ptr, nullptr)} {}
|
||||
|
||||
MemoryCommit &MemoryCommit::operator=(MemoryCommit &&rhs) noexcept {
|
||||
if (this != &rhs) {
|
||||
Release();
|
||||
allocator = std::exchange(rhs.allocator, nullptr);
|
||||
allocation = std::exchange(rhs.allocation, nullptr);
|
||||
memory = std::exchange(rhs.memory, VK_NULL_HANDLE);
|
||||
offset = std::exchange(rhs.offset, 0);
|
||||
size = std::exchange(rhs.size, 0);
|
||||
mapped_ptr = std::exchange(rhs.mapped_ptr, nullptr);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
std::span<u8> MemoryCommit::Map()
|
||||
{
|
||||
if (!allocation) return {};
|
||||
if (!mapped_ptr) {
|
||||
if (vmaMapMemory(allocator, allocation, &mapped_ptr) != VK_SUCCESS) return {};
|
||||
}
|
||||
const size_t n = static_cast<size_t>(std::min<VkDeviceSize>(size,
|
||||
std::numeric_limits<size_t>::max()));
|
||||
return std::span<u8>{static_cast<u8 *>(mapped_ptr), n};
|
||||
}
|
||||
|
||||
std::span<const u8> MemoryCommit::Map() const
|
||||
{
|
||||
if (!allocation) return {};
|
||||
if (!mapped_ptr) {
|
||||
void *p = nullptr;
|
||||
if (vmaMapMemory(allocator, allocation, &p) != VK_SUCCESS) return {};
|
||||
const_cast<MemoryCommit *>(this)->mapped_ptr = p;
|
||||
}
|
||||
const size_t n = static_cast<size_t>(std::min<VkDeviceSize>(size,
|
||||
std::numeric_limits<size_t>::max()));
|
||||
return std::span<const u8>{static_cast<const u8 *>(mapped_ptr), n};
|
||||
}
|
||||
|
||||
void MemoryCommit::Unmap()
|
||||
{
|
||||
if (allocation && mapped_ptr) {
|
||||
vmaUnmapMemory(allocator, allocation);
|
||||
mapped_ptr = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void MemoryCommit::Release() {
|
||||
if (allocation && allocator) {
|
||||
if (mapped_ptr) {
|
||||
vmaUnmapMemory(allocator, allocation);
|
||||
mapped_ptr = nullptr;
|
||||
}
|
||||
vmaFreeMemory(allocator, allocation);
|
||||
}
|
||||
allocation = nullptr;
|
||||
allocator = nullptr;
|
||||
memory = VK_NULL_HANDLE;
|
||||
offset = 0;
|
||||
size = 0;
|
||||
}
|
||||
|
||||
MemoryAllocator::MemoryAllocator(const Device &device_)
|
||||
: device{device_}, allocator{device.GetAllocator()},
|
||||
properties{device_.GetPhysical().GetMemoryProperties().memoryProperties},
|
||||
buffer_image_granularity{
|
||||
device_.GetPhysical().GetProperties().limits.bufferImageGranularity} {
|
||||
|
||||
// Preserve the previous "RenderDoc small heap" trimming behavior that we had in original vma minus the heap bug
|
||||
if (device.HasDebuggingToolAttached())
|
||||
{
|
||||
using namespace Common::Literals;
|
||||
ForEachDeviceLocalHostVisibleHeap(device, [this](size_t heap_idx, VkMemoryHeap &heap) {
|
||||
if (heap.size <= 256_MiB) {
|
||||
for (u32 t = 0; t < properties.memoryTypeCount; ++t) {
|
||||
if (properties.memoryTypes[t].heapIndex == heap_idx) {
|
||||
valid_memory_types &= ~(1u << t);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
MemoryAllocator::~MemoryAllocator() = default;
|
||||
|
||||
vk::Image MemoryAllocator::CreateImage(const VkImageCreateInfo &ci) const
|
||||
{
|
||||
const VmaAllocationCreateInfo alloc_ci = {
|
||||
.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT,
|
||||
.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE,
|
||||
.requiredFlags = 0,
|
||||
.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
|
||||
.memoryTypeBits = 0,
|
||||
.pool = VK_NULL_HANDLE,
|
||||
.pUserData = nullptr,
|
||||
.priority = 0.f,
|
||||
};
|
||||
commits.insert(std::ranges::upper_bound(commits, *alloc, {}, &Range::begin), range);
|
||||
return std::make_optional<MemoryCommit>(this, *memory, *alloc, *alloc + size);
|
||||
|
||||
VkImage handle{};
|
||||
VmaAllocation allocation{};
|
||||
vk::Check(vmaCreateImage(allocator, &ci, &alloc_ci, &handle, &allocation, nullptr));
|
||||
return vk::Image(handle, ci.usage, *device.GetLogical(), allocator, allocation,
|
||||
device.GetDispatchLoader());
|
||||
}
|
||||
|
||||
void Free(u64 begin) {
|
||||
const auto it = std::ranges::find(commits, begin, &Range::begin);
|
||||
ASSERT_MSG(it != commits.end(), "Invalid commit");
|
||||
commits.erase(it);
|
||||
if (commits.empty()) {
|
||||
// Do not call any code involving 'this' after this call, the object will be destroyed
|
||||
allocator->ReleaseMemory(this);
|
||||
}
|
||||
vk::Buffer
|
||||
MemoryAllocator::CreateBuffer(const VkBufferCreateInfo &ci, MemoryUsage usage) const
|
||||
{
|
||||
const VmaAllocationCreateInfo alloc_ci = {
|
||||
.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT | MemoryUsageVmaFlags(usage),
|
||||
.usage = MemoryUsageVma(usage),
|
||||
.requiredFlags = 0,
|
||||
.preferredFlags = MemoryUsagePreferredVmaFlags(usage),
|
||||
.memoryTypeBits = usage == MemoryUsage::Stream ? 0u : valid_memory_types,
|
||||
.pool = VK_NULL_HANDLE,
|
||||
.pUserData = nullptr,
|
||||
.priority = 0.f,
|
||||
};
|
||||
|
||||
VkBuffer handle{};
|
||||
VmaAllocationInfo alloc_info{};
|
||||
VmaAllocation allocation{};
|
||||
VkMemoryPropertyFlags property_flags{};
|
||||
|
||||
vk::Check(vmaCreateBuffer(allocator, &ci, &alloc_ci, &handle, &allocation, &alloc_info));
|
||||
vmaGetAllocationMemoryProperties(allocator, allocation, &property_flags);
|
||||
|
||||
u8 *data = reinterpret_cast<u8 *>(alloc_info.pMappedData);
|
||||
const std::span<u8> mapped_data = data ? std::span<u8>{data, ci.size} : std::span<u8>{};
|
||||
const bool is_coherent = (property_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
|
||||
|
||||
return vk::Buffer(handle, *device.GetLogical(), allocator, allocation, mapped_data,
|
||||
is_coherent,
|
||||
device.GetDispatchLoader());
|
||||
}
|
||||
|
||||
[[nodiscard]] std::span<u8> Map() {
|
||||
if (memory_mapped_span.empty()) {
|
||||
u8* const raw_pointer = memory.Map(0, allocation_size);
|
||||
memory_mapped_span = std::span<u8>(raw_pointer, allocation_size);
|
||||
}
|
||||
return memory_mapped_span;
|
||||
}
|
||||
MemoryCommit MemoryAllocator::Commit(const VkMemoryRequirements &reqs, MemoryUsage usage)
|
||||
{
|
||||
const auto vma_usage = MemoryUsageVma(usage);
|
||||
VmaAllocationCreateInfo ci{};
|
||||
ci.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT | MemoryUsageVmaFlags(usage);
|
||||
ci.usage = vma_usage;
|
||||
ci.memoryTypeBits = reqs.memoryTypeBits & valid_memory_types;
|
||||
ci.requiredFlags = 0;
|
||||
ci.preferredFlags = MemoryUsagePreferredVmaFlags(usage);
|
||||
|
||||
/// Returns whether this allocation is compatible with the arguments.
|
||||
[[nodiscard]] bool IsCompatible(VkMemoryPropertyFlags flags, u32 type_mask) const {
|
||||
return (flags & property_flags) == flags && (type_mask & shifted_memory_type) != 0;
|
||||
}
|
||||
VmaAllocation a{};
|
||||
VmaAllocationInfo info{};
|
||||
|
||||
VkResult res = vmaAllocateMemory(allocator, &reqs, &ci, &a, &info);
|
||||
|
||||
private:
|
||||
[[nodiscard]] static constexpr u32 ShiftType(u32 type) {
|
||||
return 1U << type;
|
||||
}
|
||||
if (res != VK_SUCCESS) {
|
||||
// Relax 1: drop budget constraint
|
||||
auto ci2 = ci;
|
||||
ci2.flags &= ~VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
|
||||
res = vmaAllocateMemory(allocator, &reqs, &ci2, &a, &info);
|
||||
|
||||
[[nodiscard]] std::optional<u64> FindFreeRegion(u64 size, u64 alignment) noexcept {
|
||||
ASSERT(std::has_single_bit(alignment));
|
||||
const u64 alignment_log2 = std::countr_zero(alignment);
|
||||
std::optional<u64> candidate;
|
||||
u64 iterator = 0;
|
||||
auto commit = commits.begin();
|
||||
while (iterator + size <= allocation_size) {
|
||||
candidate = candidate.value_or(iterator);
|
||||
if (commit == commits.end()) {
|
||||
break;
|
||||
// Relax 2: if we preferred DEVICE_LOCAL, drop that preference
|
||||
if (res != VK_SUCCESS && (ci.preferredFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {
|
||||
auto ci3 = ci2;
|
||||
ci3.preferredFlags &= ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
|
||||
res = vmaAllocateMemory(allocator, &reqs, &ci3, &a, &info);
|
||||
}
|
||||
if (commit->Contains(*candidate, size)) {
|
||||
candidate = std::nullopt;
|
||||
}
|
||||
|
||||
vk::Check(res);
|
||||
return MemoryCommit(allocator, a, info);
|
||||
}
|
||||
|
||||
MemoryCommit MemoryAllocator::Commit(const vk::Buffer &buffer, MemoryUsage usage) {
|
||||
// Allocate memory appropriate for this buffer automatically
|
||||
const auto vma_usage = MemoryUsageVma(usage);
|
||||
|
||||
VmaAllocationCreateInfo ci{};
|
||||
ci.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT | MemoryUsageVmaFlags(usage);
|
||||
ci.usage = vma_usage;
|
||||
ci.requiredFlags = 0;
|
||||
ci.preferredFlags = MemoryUsagePreferredVmaFlags(usage);
|
||||
ci.pool = VK_NULL_HANDLE;
|
||||
ci.pUserData = nullptr;
|
||||
ci.priority = 0.0f;
|
||||
|
||||
const VkBuffer raw = *buffer;
|
||||
|
||||
VmaAllocation a{};
|
||||
VmaAllocationInfo info{};
|
||||
|
||||
// Let VMA infer memory requirements from the buffer
|
||||
VkResult res = vmaAllocateMemoryForBuffer(allocator, raw, &ci, &a, &info);
|
||||
|
||||
if (res != VK_SUCCESS) {
|
||||
auto ci2 = ci;
|
||||
ci2.flags &= ~VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
|
||||
res = vmaAllocateMemoryForBuffer(allocator, raw, &ci2, &a, &info);
|
||||
|
||||
if (res != VK_SUCCESS && (ci.preferredFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {
|
||||
auto ci3 = ci2;
|
||||
ci3.preferredFlags &= ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
|
||||
res = vmaAllocateMemoryForBuffer(allocator, raw, &ci3, &a, &info);
|
||||
}
|
||||
iterator = Common::AlignUpLog2(commit->end, alignment_log2);
|
||||
++commit;
|
||||
}
|
||||
return candidate;
|
||||
|
||||
vk::Check(res);
|
||||
vk::Check(vmaBindBufferMemory2(allocator, a, 0, raw, nullptr));
|
||||
return MemoryCommit(allocator, a, info);
|
||||
}
|
||||
|
||||
MemoryAllocator* const allocator; ///< Parent memory allocation.
|
||||
const vk::DeviceMemory memory; ///< Vulkan memory allocation handler.
|
||||
const u64 allocation_size; ///< Size of this allocation.
|
||||
const VkMemoryPropertyFlags property_flags; ///< Vulkan memory property flags.
|
||||
const u32 shifted_memory_type; ///< Shifted Vulkan memory type.
|
||||
std::vector<Range> commits; ///< All commit ranges done from this allocation.
|
||||
std::span<u8> memory_mapped_span; ///< Memory mapped span. Empty if not queried before.
|
||||
};
|
||||
|
||||
MemoryCommit::MemoryCommit(MemoryAllocation* allocation_, VkDeviceMemory memory_, u64 begin_,
|
||||
u64 end_) noexcept
|
||||
: allocation{allocation_}, memory{memory_}, begin{begin_}, end{end_} {}
|
||||
|
||||
MemoryCommit::~MemoryCommit() {
|
||||
Release();
|
||||
}
|
||||
|
||||
MemoryCommit& MemoryCommit::operator=(MemoryCommit&& rhs) noexcept {
|
||||
Release();
|
||||
allocation = std::exchange(rhs.allocation, nullptr);
|
||||
memory = rhs.memory;
|
||||
begin = rhs.begin;
|
||||
end = rhs.end;
|
||||
span = std::exchange(rhs.span, std::span<u8>{});
|
||||
return *this;
|
||||
}
|
||||
|
||||
MemoryCommit::MemoryCommit(MemoryCommit&& rhs) noexcept
|
||||
: allocation{std::exchange(rhs.allocation, nullptr)}, memory{rhs.memory}, begin{rhs.begin},
|
||||
end{rhs.end}, span{std::exchange(rhs.span, std::span<u8>{})} {}
|
||||
|
||||
std::span<u8> MemoryCommit::Map() {
|
||||
if (span.empty()) {
|
||||
span = allocation->Map().subspan(begin, end - begin);
|
||||
}
|
||||
return span;
|
||||
}
|
||||
|
||||
void MemoryCommit::Release() {
|
||||
if (allocation) {
|
||||
allocation->Free(begin);
|
||||
}
|
||||
}
|
||||
|
MemoryAllocator::MemoryAllocator(const Device& device_)
    : device{device_}, allocator{device.GetAllocator()},
      properties{device_.GetPhysical().GetMemoryProperties().memoryProperties},
      buffer_image_granularity{
          device_.GetPhysical().GetProperties().limits.bufferImageGranularity} {
    // GPUs not supporting rebar may only have a region with less than 256MB host visible/device
    // local memory. In that case, opening 2 RenderDoc captures side-by-side is not possible due to
    // the heap running out of memory. With RenderDoc attached and only a small host/device region,
    // only allow the stream buffer in this memory heap.
    if (device.HasDebuggingToolAttached()) {
        using namespace Common::Literals;
        ForEachDeviceLocalHostVisibleHeap(device, [this](size_t index, VkMemoryHeap& heap) {
            if (heap.size <= 256_MiB) {
                valid_memory_types &= ~(1u << index);
            }
        });
    }
}
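The workaround above masks out memory types whose device-local, host-visible heap holds 256 MiB or less. The constructor keys the mask off the heap index delivered by `ForEachDeviceLocalHostVisibleHeap`; the sketch below expresses the same filter over individual memory types using only raw Vulkan structures, which is a slight variation (function and constant names are illustrative):

```cpp
#include <cstdint>
#include <vulkan/vulkan.h>

// Returns a bitmask of memory types that remain usable when a debugging tool is attached:
// types backed by a small DEVICE_LOCAL | HOST_VISIBLE heap (<= 256 MiB) are masked out.
uint32_t FilterSmallRebarHeaps(const VkPhysicalDeviceMemoryProperties& props) {
    constexpr VkDeviceSize kSmallHeapLimit = 256ull << 20;
    uint32_t valid_types = ~0u;
    for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
        const VkMemoryType& type = props.memoryTypes[i];
        const bool device_local = (type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0;
        const bool host_visible = (type.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
        if (device_local && host_visible &&
            props.memoryHeaps[type.heapIndex].size <= kSmallHeapLimit) {
            valid_types &= ~(1u << i);
        }
    }
    return valid_types;
}
```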

MemoryAllocator::~MemoryAllocator() = default;

vk::Image MemoryAllocator::CreateImage(const VkImageCreateInfo& ci) const {
    const VmaAllocationCreateInfo alloc_ci = {
        .flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT,
        .usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE,
        .requiredFlags = 0,
        .preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
        .memoryTypeBits = 0,
        .pool = VK_NULL_HANDLE,
        .pUserData = nullptr,
        .priority = 0.f,
    };

    VkImage handle{};
    VmaAllocation allocation{};

    vk::Check(vmaCreateImage(allocator, &ci, &alloc_ci, &handle, &allocation, nullptr));

    return vk::Image(handle, ci.usage, *device.GetLogical(), allocator, allocation,
                     device.GetDispatchLoader());
}

vk::Buffer MemoryAllocator::CreateBuffer(const VkBufferCreateInfo& ci, MemoryUsage usage) const {
    const VmaAllocationCreateInfo alloc_ci = {
        .flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT | MemoryUsageVmaFlags(usage),
        .usage = MemoryUsageVma(usage),
        .requiredFlags = 0,
        .preferredFlags = MemoryUsagePreferredVmaFlags(usage),
        .memoryTypeBits = usage == MemoryUsage::Stream ? 0u : valid_memory_types,
        .pool = VK_NULL_HANDLE,
        .pUserData = nullptr,
        .priority = 0.f,
    };

    VkBuffer handle{};
    VmaAllocationInfo alloc_info{};
    VmaAllocation allocation{};
    VkMemoryPropertyFlags property_flags{};

    vk::Check(vmaCreateBuffer(allocator, &ci, &alloc_ci, &handle, &allocation, &alloc_info));
    vmaGetAllocationMemoryProperties(allocator, allocation, &property_flags);

    u8* data = reinterpret_cast<u8*>(alloc_info.pMappedData);
    const std::span<u8> mapped_data = data ? std::span<u8>{data, ci.size} : std::span<u8>{};
    const bool is_coherent = property_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

    return vk::Buffer(handle, *device.GetLogical(), allocator, allocation, mapped_data, is_coherent,
                      device.GetDispatchLoader());
}
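`CreateBuffer` lets VMA pick the memory type and hands back a persistent mapping plus a coherency flag. A hedged sketch of the same idea with raw VMA calls, here for a host-visible staging buffer (all names are illustrative, and error handling is reduced to a bool):

```cpp
#include <cstring>
#include <vk_mem_alloc.h>

// Creates a persistently mapped staging buffer and copies `size` bytes of `src` into it.
bool CreateStagingBuffer(VmaAllocator allocator, const void* src, VkDeviceSize size,
                         VkBuffer* out_buffer, VmaAllocation* out_allocation) {
    VkBufferCreateInfo buffer_ci{};
    buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffer_ci.size = size;
    buffer_ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
    buffer_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    VmaAllocationCreateInfo alloc_ci{};
    alloc_ci.usage = VMA_MEMORY_USAGE_AUTO;
    alloc_ci.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
                     VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VmaAllocationInfo info{};
    if (vmaCreateBuffer(allocator, &buffer_ci, &alloc_ci, out_buffer, out_allocation, &info) !=
        VK_SUCCESS) {
        return false;
    }
    // pMappedData is valid because of VMA_ALLOCATION_CREATE_MAPPED_BIT.
    std::memcpy(info.pMappedData, src, static_cast<size_t>(size));

    // Mirror the coherency query done by CreateBuffer: flush if the chosen type is non-coherent.
    VkMemoryPropertyFlags mem_flags{};
    vmaGetAllocationMemoryProperties(allocator, *out_allocation, &mem_flags);
    if ((mem_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) {
        vmaFlushAllocation(allocator, *out_allocation, 0, size);
    }
    return true;
}
```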

MemoryCommit MemoryAllocator::Commit(const VkMemoryRequirements& requirements, MemoryUsage usage) {
    // Find the fastest memory flags we can afford with the current requirements
    const u32 type_mask = requirements.memoryTypeBits;
    const VkMemoryPropertyFlags usage_flags = MemoryUsagePropertyFlags(usage);
    const VkMemoryPropertyFlags flags = MemoryPropertyFlags(type_mask, usage_flags);
    if (std::optional<MemoryCommit> commit = TryCommit(requirements, flags)) {
        return std::move(*commit);
    }
    // Commit has failed, allocate more memory.
    const u64 chunk_size = AllocationChunkSize(requirements.size);
    if (!TryAllocMemory(flags, type_mask, chunk_size)) {
        // TODO(Rodrigo): Handle out of memory situations in some way like flushing to guest memory.
        throw vk::Exception(VK_ERROR_OUT_OF_DEVICE_MEMORY);
    }
    // Commit again, this time it won't fail since there's a fresh allocation above.
    // If it does, there's a bug.
    return TryCommit(requirements, flags).value();
}
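The commit path above follows a commit-or-grow scheme: try the existing chunks, allocate one more chunk sized by `AllocationChunkSize`, then the retry must succeed. A toy bump suballocator showing the same control flow, independent of Vulkan (everything here is illustrative):

```cpp
#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

// Toy bump suballocator: try existing chunks first, grow on failure, then retry.
class BumpPool {
public:
    // Returns the global offset where `size` bytes were reserved.
    uint64_t Commit(uint64_t size) {
        if (auto offset = TryCommit(size)) {
            return *offset;
        }
        chunks.push_back({next_base, 0, std::max<uint64_t>(size, kChunkSize)});
        next_base += chunks.back().capacity;
        return TryCommit(size).value(); // Must succeed; the new chunk was sized for it.
    }

private:
    struct Chunk {
        uint64_t base;
        uint64_t used;
        uint64_t capacity;
    };

    std::optional<uint64_t> TryCommit(uint64_t size) {
        for (Chunk& chunk : chunks) {
            if (chunk.capacity - chunk.used >= size) {
                const uint64_t offset = chunk.base + chunk.used;
                chunk.used += size;
                return offset;
            }
        }
        return std::nullopt;
    }

    static constexpr uint64_t kChunkSize = 64ull * 1024ull * 1024ull;
    std::vector<Chunk> chunks;
    uint64_t next_base = 0;
};
```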

bool MemoryAllocator::TryAllocMemory(VkMemoryPropertyFlags flags, u32 type_mask, u64 size) {
    const auto type_opt = FindType(flags, type_mask);
    if (!type_opt) {
        return false;
    }

    // Adreno stands firm
    const u64 aligned_size = (device.GetDriverID() == VK_DRIVER_ID_QUALCOMM_PROPRIETARY) ?
                                 Common::AlignUp(size, 4096) :
                                 size;

    vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory({
        .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
        .pNext = nullptr,
        .allocationSize = aligned_size,
        .memoryTypeIndex = *type_opt,
    });

    if (!memory) {
        return false;
    }

    allocations.push_back(
        std::make_unique<MemoryAllocation>(this, std::move(memory), flags, aligned_size, *type_opt));
    return true;
}
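The Qualcomm branch rounds the allocation size up to the next 4096-byte multiple. Assuming `Common::AlignUp` has the usual round-up semantics, the arithmetic for a power-of-two alignment looks like this:

```cpp
#include <cstdint>

// Round `value` up to the next multiple of `alignment`; `alignment` must be a power of two.
constexpr uint64_t AlignUp(uint64_t value, uint64_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}

static_assert(AlignUp(4095, 4096) == 4096);
static_assert(AlignUp(4096, 4096) == 4096);
static_assert(AlignUp(4097, 4096) == 8192);
```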

void MemoryAllocator::ReleaseMemory(MemoryAllocation* alloc) {
    const auto it = std::ranges::find(allocations, alloc, &std::unique_ptr<MemoryAllocation>::get);
    ASSERT(it != allocations.end());
    allocations.erase(it);
}

std::optional<MemoryCommit> MemoryAllocator::TryCommit(const VkMemoryRequirements& requirements,
                                                       VkMemoryPropertyFlags flags) {
    // Conservative, spec-compliant alignment for suballocation
    VkDeviceSize eff_align = requirements.alignment;
    const auto& limits = device.GetPhysical().GetProperties().limits;
    if ((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) &&
        !(flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) {
        // Non-coherent memory must be invalidated on atom boundary
        if (limits.nonCoherentAtomSize > eff_align) eff_align = limits.nonCoherentAtomSize;
    }
    // Separate buffers to avoid stalls on tilers
    if (buffer_image_granularity > eff_align) {
        eff_align = buffer_image_granularity;
    }
    eff_align = std::bit_ceil(eff_align);

    for (auto& allocation : allocations) {
        if (!allocation->IsCompatible(flags, requirements.memoryTypeBits)) {
            continue;
        }
        if (auto commit = allocation->Commit(requirements.size, eff_align)) {
            return commit;
        }
    }
    if ((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0) {
        // Look for non device local commits on failure
        return TryCommit(requirements, flags & ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    }
    return std::nullopt;
}
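The alignment widening in `TryCommit` can be read as a single pure function: start from the resource's own alignment, widen it for non-coherent host-visible memory and for `bufferImageGranularity`, then round to a power of two. A compact restatement (names are illustrative):

```cpp
#include <algorithm>
#include <bit>
#include <vulkan/vulkan.h>

// Conservative suballocation alignment derived from the requirements, device limits,
// and the memory property flags chosen for the commit.
VkDeviceSize EffectiveAlignment(const VkMemoryRequirements& reqs,
                                const VkPhysicalDeviceLimits& limits,
                                VkMemoryPropertyFlags flags) {
    VkDeviceSize align = reqs.alignment;
    const bool host_visible = (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    const bool host_coherent = (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
    if (host_visible && !host_coherent) {
        // Flush/invalidate ranges must be aligned to nonCoherentAtomSize.
        align = std::max(align, limits.nonCoherentAtomSize);
    }
    // Keep buffers and optimal-tiling images apart within the same VkDeviceMemory.
    align = std::max(align, limits.bufferImageGranularity);
    return std::bit_ceil(align);
}
```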

VkMemoryPropertyFlags MemoryAllocator::MemoryPropertyFlags(u32 type_mask,
                                                           VkMemoryPropertyFlags flags) const {
    if (FindType(flags, type_mask)) {
        // Found a memory type with those requirements
        return flags;
    }
    if ((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0) {
        // Remove host cached bit in case it's not supported
        return MemoryPropertyFlags(type_mask, flags & ~VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
    }
    if ((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0) {
        // Remove device local, if it's not supported by the requested resource
        return MemoryPropertyFlags(type_mask, flags & ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    }
    ASSERT_MSG(false, "No compatible memory types found");
    return 0;
}
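The relaxation order matters: the cached bit is dropped before the device-local bit, so a readback request degrades gracefully before it gives up on VRAM. For a concrete request the candidate flag sets are, in order (a sketch of the fallback sequence, not code from the patch):

```cpp
#include <vulkan/vulkan.h>

constexpr VkMemoryPropertyFlags kWanted = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT |
                                          VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
// 1) kWanted                                   (cached, device-local readback type)
// 2) kWanted & ~HOST_CACHED_BIT                (uncached host-visible, still device-local)
// 3) kWanted & ~HOST_CACHED_BIT & ~DEVICE_LOCAL_BIT
//    = HOST_VISIBLE only, which every Vulkan implementation provides.
```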

std::optional<u32> MemoryAllocator::FindType(VkMemoryPropertyFlags flags, u32 type_mask) const {
    for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
        const VkMemoryPropertyFlags type_flags = properties.memoryTypes[type_index].propertyFlags;
        if ((type_mask & (1U << type_index)) != 0 && (type_flags & flags) == flags) {
            // The type matches in type and in the wanted properties.
            return type_index;
        }
    }
    // Failed to find index
    return std::nullopt;
}
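Both conditions in the loop are simple bit tests: the resource's `memoryTypeBits` must allow the index, and the type must offer at least the requested properties. An isolated version with two compile-time checks (the helper is illustrative):

```cpp
#include <cstdint>
#include <vulkan/vulkan.h>

// True if memory type `index` is allowed by `type_mask` and supports all `wanted` flags.
constexpr bool TypeMatches(uint32_t index, uint32_t type_mask,
                           VkMemoryPropertyFlags type_flags, VkMemoryPropertyFlags wanted) {
    const bool allowed_by_resource = (type_mask & (1u << index)) != 0;
    const bool has_all_properties = (type_flags & wanted) == wanted;
    return allowed_by_resource && has_all_properties;
}

static_assert(TypeMatches(1, 0b10,
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
static_assert(!TypeMatches(0, 0b10, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                           VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
```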

} // namespace Vulkan


@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later


@ -6,138 +9,134 @@
#include <memory>
#include <span>
#include <vector>

#include "common/common_types.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

VK_DEFINE_HANDLE(VmaAllocator)
#include "video_core/vulkan_common/vma.h"

namespace Vulkan {

class Device;
class MemoryMap;
class MemoryAllocation;
class Device;

/// Hints and requirements for the backing memory type of a commit
enum class MemoryUsage {
    DeviceLocal, ///< Requests device local host visible buffer, falling back to device local
                 ///< memory.
    Upload,      ///< Requires a host visible memory type optimized for CPU to GPU uploads
    Download,    ///< Requires a host visible memory type optimized for GPU to CPU readbacks
    Stream,      ///< Requests device local host visible buffer, falling back host memory.
};
enum class MemoryUsage {
    DeviceLocal, ///< Requests device local host visible buffer, falling back to device local memory.
    Upload,      ///< Requires a host visible memory type optimized for CPU to GPU uploads
    Download,    ///< Requires a host visible memory type optimized for GPU to CPU readbacks
    Stream,      ///< Requests device local host visible buffer, falling back host memory.
};
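Which `MemoryUsage` to request follows from the access pattern rather than from a concrete memory type. A small illustrative mapping, assuming the enum above is in scope (the roles are examples, not taken from the patch):

```cpp
enum class BufferRole { StaticGeometry, StagingUpload, Readback, PerFrameStream };

// Illustrative role-to-usage mapping.
constexpr MemoryUsage UsageForRole(BufferRole role) {
    switch (role) {
    case BufferRole::StagingUpload:
        return MemoryUsage::Upload;      // CPU writes once, GPU reads once
    case BufferRole::Readback:
        return MemoryUsage::Download;    // GPU writes, CPU reads back
    case BufferRole::PerFrameStream:
        return MemoryUsage::Stream;      // rewritten every frame, host visible preferred
    case BufferRole::StaticGeometry:
    default:
        return MemoryUsage::DeviceLocal; // uploaded once, GPU-only afterwards
    }
}
```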

template <typename F>
void ForEachDeviceLocalHostVisibleHeap(const Device& device, F&& f) {
    auto memory_props = device.GetPhysical().GetMemoryProperties().memoryProperties;
    for (size_t i = 0; i < memory_props.memoryTypeCount; i++) {
        auto& memory_type = memory_props.memoryTypes[i];
        if ((memory_type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
            (memory_type.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
            f(memory_type.heapIndex, memory_props.memoryHeaps[memory_type.heapIndex]);
template<typename F>
void ForEachDeviceLocalHostVisibleHeap(const Device &device, F &&f) {
    auto memory_props = device.GetPhysical().GetMemoryProperties().memoryProperties;
    for (size_t i = 0; i < memory_props.memoryTypeCount; i++) {
        auto &memory_type = memory_props.memoryTypes[i];
        if ((memory_type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
            (memory_type.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
            f(memory_type.heapIndex, memory_props.memoryHeaps[memory_type.heapIndex]);
        }
    }
}
}
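A short usage sketch for the helper above: summing the size of every device-local, host-visible heap, for instance to detect a small-BAR configuration. Note the helper walks memory types, so the same heap can be reported more than once; the sketch de-duplicates by heap index (project headers are assumed to be available):

```cpp
#include <cstdint>
#include <set>

uint64_t TotalDeviceLocalHostVisibleBytes(const Vulkan::Device& device) {
    std::set<size_t> seen_heaps;
    uint64_t total = 0;
    Vulkan::ForEachDeviceLocalHostVisibleHeap(device, [&](size_t heap_index, VkMemoryHeap& heap) {
        if (seen_heaps.insert(heap_index).second) {
            total += heap.size;
        }
    });
    return total;
}
```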

/// Ownership handle of a memory commitment.
/// Points to a subregion of a memory allocation.
class MemoryCommit {
public:
    explicit MemoryCommit() noexcept = default;
    explicit MemoryCommit(MemoryAllocation* allocation_, VkDeviceMemory memory_, u64 begin_,
                          u64 end_) noexcept;
    ~MemoryCommit();
/// Ownership handle of a memory commitment (real VMA allocation).
class MemoryCommit {
public:
    MemoryCommit() noexcept = default;

    MemoryCommit& operator=(MemoryCommit&&) noexcept;
    MemoryCommit(MemoryCommit&&) noexcept;
    MemoryCommit(VmaAllocator allocator, VmaAllocation allocation,
                 const VmaAllocationInfo &info) noexcept;

    MemoryCommit& operator=(const MemoryCommit&) = delete;
    MemoryCommit(const MemoryCommit&) = delete;
    ~MemoryCommit();

    /// Returns a host visible memory map.
    /// It will map the backing allocation if it hasn't been mapped before.
    std::span<u8> Map();
    MemoryCommit(const MemoryCommit &) = delete;

    /// Returns the Vulkan memory handler.
    VkDeviceMemory Memory() const {
        return memory;
    }
    MemoryCommit &operator=(const MemoryCommit &) = delete;

    /// Returns the start position of the commit relative to the allocation.
    VkDeviceSize Offset() const {
        return static_cast<VkDeviceSize>(begin);
    }
    MemoryCommit(MemoryCommit &&) noexcept;

private:
    void Release();
    MemoryCommit &operator=(MemoryCommit &&) noexcept;

    MemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
    VkDeviceMemory memory{};        ///< Vulkan device memory handler.
    u64 begin{};                    ///< Beginning offset in bytes to where the commit exists.
    u64 end{};                      ///< Offset in bytes where the commit ends.
    std::span<u8> span;             ///< Host visible memory span. Empty if not queried before.
};
    [[nodiscard]] std::span<u8> Map();

    [[nodiscard]] std::span<const u8> Map() const;

    void Unmap();

    explicit operator bool() const noexcept { return allocation != nullptr; }

    VkDeviceMemory Memory() const noexcept { return memory; }

    VkDeviceSize Offset() const noexcept { return offset; }

    VkDeviceSize Size() const noexcept { return size; }

    VmaAllocation Allocation() const noexcept { return allocation; }

private:
    void Release();

    VmaAllocator allocator{};   ///< VMA allocator
    VmaAllocation allocation{}; ///< VMA allocation handle
    VkDeviceMemory memory{};    ///< Underlying VkDeviceMemory chosen by VMA
    VkDeviceSize offset{};      ///< Offset of this allocation inside VkDeviceMemory
    VkDeviceSize size{};        ///< Size of the allocation
    void *mapped_ptr{};         ///< Optional persistent mapped pointer
};

/// Memory allocator container.
/// Allocates and releases memory allocations on demand.
class MemoryAllocator {
    friend MemoryAllocation;
class MemoryAllocator {
public:
    /**
     * Construct memory allocator
     *
     * @param device_ Device to allocate from
     *
     * @throw vk::Exception on failure
     */
    explicit MemoryAllocator(const Device &device_);

public:
    /**
     * Construct memory allocator
     *
     * @param device_ Device to allocate from
     *
     * @throw vk::Exception on failure
     */
    explicit MemoryAllocator(const Device& device_);
    ~MemoryAllocator();
    ~MemoryAllocator();

    MemoryAllocator& operator=(const MemoryAllocator&) = delete;
    MemoryAllocator(const MemoryAllocator&) = delete;
    MemoryAllocator &operator=(const MemoryAllocator &) = delete;

    vk::Image CreateImage(const VkImageCreateInfo& ci) const;
    MemoryAllocator(const MemoryAllocator &) = delete;

    vk::Buffer CreateBuffer(const VkBufferCreateInfo& ci, MemoryUsage usage) const;
    vk::Image CreateImage(const VkImageCreateInfo &ci) const;

    /**
     * Commits a memory with the specified requirements.
     *
     * @param requirements Requirements returned from a Vulkan call.
     * @param usage Indicates how the memory will be used.
     *
     * @returns A memory commit.
     */
    MemoryCommit Commit(const VkMemoryRequirements& requirements, MemoryUsage usage);
    vk::Buffer CreateBuffer(const VkBufferCreateInfo &ci, MemoryUsage usage) const;

    /// Commits memory required by the buffer and binds it.
    MemoryCommit Commit(const vk::Buffer& buffer, MemoryUsage usage);
    /**
     * Commits a memory with the specified requirements.
     *
     * @param requirements Requirements returned from a Vulkan call.
     * @param usage Indicates how the memory will be used.
     *
     * @returns A memory commit.
     */
    MemoryCommit Commit(const VkMemoryRequirements &requirements, MemoryUsage usage);

private:
    /// Tries to allocate a chunk of memory.
    bool TryAllocMemory(VkMemoryPropertyFlags flags, u32 type_mask, u64 size);
    /// Commits memory required by the buffer and binds it (for buffers created outside VMA).
    MemoryCommit Commit(const vk::Buffer &buffer, MemoryUsage usage);

    /// Releases a chunk of memory.
    void ReleaseMemory(MemoryAllocation* alloc);
private:
    static bool IsAutoUsage(VmaMemoryUsage u) noexcept {
        switch (u) {
        case VMA_MEMORY_USAGE_AUTO:
        case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE:
        case VMA_MEMORY_USAGE_AUTO_PREFER_HOST:
            return true;
        default:
            return false;
        }
    }

    /// Tries to allocate a memory commit.
    std::optional<MemoryCommit> TryCommit(const VkMemoryRequirements& requirements,
                                          VkMemoryPropertyFlags flags);

    /// Returns the fastest compatible memory property flags from the wanted flags.
    VkMemoryPropertyFlags MemoryPropertyFlags(u32 type_mask, VkMemoryPropertyFlags flags) const;

    /// Returns index to the fastest memory type compatible with the passed requirements.
    std::optional<u32> FindType(VkMemoryPropertyFlags flags, u32 type_mask) const;

    const Device& device;                              ///< Device handle.
    VmaAllocator allocator;                            ///< Vma allocator.
    const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
    std::vector<std::unique_ptr<MemoryAllocation>> allocations; ///< Current allocations.
    VkDeviceSize buffer_image_granularity; // The granularity for adjacent offsets between buffers
                                           // and optimal images
    u32 valid_memory_types{~0u};
};
    const Device &device;                              ///< Device handle.
    VmaAllocator allocator;                            ///< VMA allocator.
    const VkPhysicalDeviceMemoryProperties properties; ///< Physical device memory properties.
    VkDeviceSize buffer_image_granularity;             ///< Adjacent buffer/image granularity
    u32 valid_memory_types{~0u};
};

} // namespace Vulkan

@ -580,6 +580,7 @@ DescriptorSets DescriptorPool::Allocate(const VkDescriptorSetAllocateInfo& ai) c
    case VK_SUCCESS:
        return DescriptorSets(std::move(sets), num, owner, handle, *dld);
    case VK_ERROR_OUT_OF_POOL_MEMORY:
    case VK_ERROR_FRAGMENTED_POOL:
        return {};
    default:
        throw Exception(result);

@ -604,6 +605,7 @@ CommandBuffers CommandPool::Allocate(std::size_t num_buffers, VkCommandBufferLev
    case VK_SUCCESS:
        return CommandBuffers(std::move(buffers), num_buffers, owner, handle, *dld);
    case VK_ERROR_OUT_OF_POOL_MEMORY:
    case VK_ERROR_FRAGMENTED_POOL:
        return {};
    default:
        throw Exception(result);

@ -60,6 +60,10 @@ void ConfigureGraphicsExtensions::Setup(const ConfigurationShared::Builder& buil
    if (setting->Id() == Settings::values.dyna_state.Id()) {
        widget->slider->setTickInterval(1);
        widget->slider->setTickPosition(QSlider::TicksAbove);
#ifdef __APPLE__
        widget->setEnabled(false);
        widget->setToolTip(tr("Extended Dynamic State is disabled on macOS due to MoltenVK compatibility issues that cause black screens."));
#endif
    }
}

@ -5757,7 +5757,7 @@ int main(int argc, char* argv[]) {

    // Fix the Wayland appId. This needs to match the name of the .desktop file without the .desktop
    // suffix.
    QGuiApplication::setDesktopFileName(QStringLiteral("org.eden_emu.eden"));
    QGuiApplication::setDesktopFileName(QStringLiteral("dev.eden_emu.eden"));
#endif

    SetHighDPIAttributes();
