Compare commits
11 commits: 55d5afad33 ... 4175fb2133

Commits: 4175fb2133, c0c1e8215a, 5fb5d7e721, cbbdfc75cc, 1ca35b7559, 06dabbadcb, 8c9cdf0d70, d623e04606, 4b558e5303, 28b8159da1, 87d42cf542

31 changed files with 450 additions and 463 deletions
@ -187,9 +187,7 @@ option(YUZU_DOWNLOAD_ANDROID_VVL "Download validation layer binary for android"

option(FORCE_DOWNLOAD_WIN_BUNDLES "Forcefully download bundled Windows dependencies (useful for CI)" OFF)

# TODO(crueter): Cleanup, each dep that has a bundled option should allow to choose between bundled, external, system
if (YUZU_USE_CPM AND ENABLE_SDL2)
    option(YUZU_USE_BUNDLED_SDL2 "Download bundled SDL2 build" "${MSVC}")
endif()
CMAKE_DEPENDENT_OPTION(YUZU_USE_BUNDLED_SDL2 "Download bundled SDL2 build" "${MSVC}" "ENABLE_SDL2" OFF)

CMAKE_DEPENDENT_OPTION(YUZU_ROOM "Enable dedicated room functionality" ON "NOT ANDROID" OFF)
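For readers unfamiliar with the pattern this hunk switches to, `cmake_dependent_option` (from CMake's standard CMakeDependentOption module) exposes an option only while its condition holds and otherwise forces a fallback value. A minimal, self-contained sketch - the option names are reused from the hunk above purely for illustration:

```cmake
# Minimal sketch of the dependent-option pattern adopted in this change.
# Uses only the standard CMakeDependentOption module shipped with CMake.
cmake_minimum_required(VERSION 3.22)
project(dependent_option_demo NONE)

include(CMakeDependentOption)

option(ENABLE_SDL2 "Enable the SDL2 frontend" ON)

# User-visible (default "${MSVC}") only while ENABLE_SDL2 is ON;
# otherwise it is forced to OFF and hidden from the cache UI.
cmake_dependent_option(YUZU_USE_BUNDLED_SDL2
    "Download bundled SDL2 build" "${MSVC}"
    "ENABLE_SDL2" OFF)

message(STATUS "YUZU_USE_BUNDLED_SDL2 = ${YUZU_USE_BUNDLED_SDL2}")
```

This is why the explicit `if(...)/option(...)/endif()` guard above becomes redundant once the dependent option carries the condition itself.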
@ -209,6 +207,8 @@ CMAKE_DEPENDENT_OPTION(USE_SYSTEM_MOLTENVK "Use the system MoltenVK lib (instead
|
|||
|
||||
set(YUZU_TZDB_PATH "" CACHE STRING "Path to a pre-downloaded timezone database")
|
||||
|
||||
option(YUZU_DISABLE_LLVM "Disable LLVM (useful for CI)" OFF)
|
||||
|
||||
set(DEFAULT_ENABLE_OPENSSL ON)
|
||||
if (ANDROID OR WIN32 OR APPLE OR PLATFORM_SUN)
|
||||
# - Windows defaults to the Schannel backend.
|
||||
|
@ -495,7 +495,10 @@ if (YUZU_USE_CPM)
|
|||
else()
|
||||
# Enforce the search mode of non-required packages for better and shorter failure messages
|
||||
find_package(fmt 8 REQUIRED)
|
||||
find_package(LLVM MODULE COMPONENTS Demangle)
|
||||
if (NOT YUZU_DISABLE_LLVM)
|
||||
find_package(LLVM MODULE COMPONENTS Demangle)
|
||||
endif()
|
||||
|
||||
find_package(nlohmann_json 3.8 REQUIRED)
|
||||
find_package(lz4 REQUIRED)
|
||||
find_package(RenderDoc MODULE)
|
||||
|
|
|
@ -598,11 +598,6 @@ function(AddCIPackage)
|
|||
|
||||
if (DEFINED ARTIFACT_DIR)
|
||||
include(${ARTIFACT_DIR}/${ARTIFACT_CMAKE}.cmake)
|
||||
|
||||
set_property(GLOBAL APPEND PROPERTY CPM_PACKAGE_NAMES ${ARTIFACT_NAME})
|
||||
set_property(GLOBAL APPEND PROPERTY CPM_PACKAGE_URLS "https://github.com/${ARTIFACT_REPO}") # TODO(crueter) other hosts?
|
||||
set_property(GLOBAL APPEND PROPERTY CPM_PACKAGE_SHAS ${ARTIFACT_VERSION})
|
||||
|
||||
set(${ARTIFACT_PACKAGE}_ADDED TRUE PARENT_SCOPE)
|
||||
else()
|
||||
find_package(${ARTIFACT_PACKAGE} ${ARTIFACT_MIN_VERSION} REQUIRED)
|
||||
|
|
|
@ -1,38 +1,35 @@
|
|||
# SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
# SPDX-FileCopyrightText: 2019 yuzu Emulator Project
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
# Gets a UTC timestamp and sets the provided variable to it
|
||||
# generate git/build information
|
||||
include(GetSCMRev)
|
||||
|
||||
function(get_timestamp _var)
|
||||
string(TIMESTAMP timestamp UTC)
|
||||
set(${_var} "${timestamp}" PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
# generate git/build information
|
||||
include(GetGitRevisionDescription)
|
||||
if(NOT GIT_REF_SPEC)
|
||||
get_git_head_revision(GIT_REF_SPEC GIT_REV)
|
||||
endif()
|
||||
if(NOT GIT_DESC)
|
||||
git_describe(GIT_DESC --always --long --dirty)
|
||||
endif()
|
||||
if (NOT GIT_BRANCH)
|
||||
git_branch_name(GIT_BRANCH)
|
||||
endif()
|
||||
get_timestamp(BUILD_DATE)
|
||||
|
||||
git_get_exact_tag(GIT_TAG --tags)
|
||||
if (GIT_TAG MATCHES "NOTFOUND")
|
||||
set(BUILD_VERSION "${GIT_DESC}")
|
||||
set(IS_DEV_BUILD true)
|
||||
else()
|
||||
set(BUILD_VERSION ${GIT_TAG})
|
||||
if (DEFINED GIT_RELEASE)
|
||||
set(BUILD_VERSION "${GIT_TAG}")
|
||||
set(GIT_REFSPEC "${GIT_RELEASE}")
|
||||
set(IS_DEV_BUILD false)
|
||||
else()
|
||||
string(SUBSTRING ${GIT_COMMIT} 0 10 BUILD_VERSION)
|
||||
set(BUILD_VERSION "${BUILD_VERSION}-${GIT_REFSPEC}")
|
||||
set(IS_DEV_BUILD true)
|
||||
endif()
|
||||
|
||||
set(GIT_DESC ${BUILD_VERSION})
|
||||
|
||||
# Generate cpp with Git revision from template
|
||||
# Also if this is a CI build, add the build name (ie: Nightly, Canary) to the scm_rev file as well
|
||||
set(REPO_NAME "Eden")
|
||||
set(BUILD_ID ${GIT_BRANCH})
|
||||
set(BUILD_ID ${GIT_REFSPEC})
|
||||
set(BUILD_FULLNAME "${REPO_NAME} ${BUILD_VERSION} ")
|
||||
|
||||
set(CXX_COMPILER "${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}")
|
||||
|
|
49
CMakeModules/GetSCMRev.cmake
Normal file
|
@ -0,0 +1,49 @@
# SPDX-FileCopyrightText: 2025 crueter
# SPDX-License-Identifier: GPL-3.0-or-later

include(GetGitRevisionDescription)

function(trim var)
    string(REGEX REPLACE "\n" "" new "${${var}}")
    set(${var} ${new} PARENT_SCOPE)
endfunction()

set(TAG_FILE ${CMAKE_SOURCE_DIR}/GIT-TAG)
set(REF_FILE ${CMAKE_SOURCE_DIR}/GIT-REFSPEC)
set(COMMIT_FILE ${CMAKE_SOURCE_DIR}/GIT-COMMIT)
set(RELEASE_FILE ${CMAKE_SOURCE_DIR}/GIT-RELEASE)

if (EXISTS ${REF_FILE} AND EXISTS ${COMMIT_FILE})
    file(READ ${REF_FILE} GIT_REFSPEC)
    file(READ ${COMMIT_FILE} GIT_COMMIT)
else()
    get_git_head_revision(GIT_REFSPEC GIT_COMMIT)
    git_branch_name(GIT_REFSPEC)
    if (GIT_REFSPEC MATCHES "NOTFOUND")
        set(GIT_REFSPEC 1.0.0)
        set(GIT_COMMIT stable)
    endif()
endif()

if (EXISTS ${TAG_FILE})
    file(READ ${TAG_FILE} GIT_TAG)
else()
    git_describe(GIT_TAG --tags --abbrev=0)
    if (GIT_TAG MATCHES "NOTFOUND")
        set(GIT_TAG "${GIT_REFSPEC}")
    endif()
endif()

if (EXISTS ${RELEASE_FILE})
    file(READ ${RELEASE_FILE} GIT_RELEASE)
    trim(GIT_RELEASE)
    message(STATUS "Git release: ${GIT_RELEASE}")
endif()

trim(GIT_REFSPEC)
trim(GIT_COMMIT)
trim(GIT_TAG)

message(STATUS "Git commit: ${GIT_COMMIT}")
message(STATUS "Git tag: ${GIT_TAG}")
message(STATUS "Git refspec: ${GIT_REFSPEC}")
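For orientation, a build script typically consumes the variables this module sets by substituting them into a header template with `configure_file`. A minimal sketch - the paths and placement are illustrative assumptions rather than this repository's exact wiring:

```cmake
# Hypothetical consumer of GetSCMRev.cmake; the paths here are illustrative.
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/CMakeModules")
include(GetSCMRev)  # sets GIT_COMMIT, GIT_REFSPEC, GIT_TAG (and GIT_RELEASE when present)

# Substitutes @GIT_COMMIT@, @GIT_REFSPEC@, ... placeholders, matching those
# used by scm_rev.h.in later in this change set.
configure_file(
    "${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.h.in"
    "${CMAKE_CURRENT_BINARY_DIR}/scm_rev.h"
    @ONLY)
```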
@ -55,6 +55,8 @@ You can also follow us on [X (Twitter)](https://x.com/edenemuofficial) for updat

If you would like to contribute, we are open to new developers and pull requests. Please ensure that your work is of a high standard and properly documented. You can also contact any of the developers on Discord or Revolt to learn more about the current state of the emulator.

See the [sign-up instructions](docs/SIGNUP.md) for information on registration.

## Building

See the [General Build Guide](docs/Build.md)
56
docs/SIGNUP.md
Normal file
|
@ -0,0 +1,56 @@
|
|||
# Signup
|
||||
|
||||
To prevent spam and reduce bandwidth usage, registration is closed, and will likely remain this way.
|
||||
|
||||
## Valid Reasons
|
||||
|
||||
First of all, you MUST have a valid reason to sign up for our Git. Valid reasons include (but are not limited to):
|
||||
|
||||
- I want to add feature XYZ...
|
||||
- I want to improve the macOS version...
|
||||
- I want to improve the Vulkan backend...
|
||||
- I want to fix bug XYZ...
|
||||
- I have experience in XYZ...
|
||||
- I can provide insight on XYZ...
|
||||
|
||||
## Invalid Reasons
|
||||
|
||||
The following are not valid reasons to sign up:
|
||||
|
||||
- I want to contribute to Eden.
|
||||
* Be at least somewhat specific!
|
||||
- I want to support Eden.
|
||||
* If you wish to support us through development, be more specific; otherwise, to support us, check out our [donations page](https://eden-emu.dev/donations).
|
||||
- I want to report issues.
|
||||
* Most of our issue tracking is handled on [GitHub](https://github.com/eden-emulator/Issue-Reports) for the time being. This is subject to change.
|
||||
- I want to play/use Eden.
|
||||
* To download and use Eden, see our [Releases page](https://github.com/eden-emulator/Releases/releases)!
|
||||
- I want to see the source code.
|
||||
* To see Eden's source code, go [here](https://git.eden-emu.dev/eden-emu/eden).
|
||||
## Other Information
|
||||
|
||||
Requests that appear suspicious, automated, OR blank will generally be automatically filtered. In cases of suspicion, or any of the invalid reasons listed above, you may receive an email back asking for clarification.
|
||||
|
||||
You MUST use the following format:
|
||||
|
||||
```
|
||||
Subject: [Eden Git] Registration Request
|
||||
Username: <Your Desired Username>
|
||||
Email: <Your Desired Email>
|
||||
I wish to sign up because... <your reason here>
|
||||
```
|
||||
|
||||
Email notifications are disabled for the time being, so you don't have to use a real email. If you wish to remain anonymous, either send a separate email asking for access to a shared anonymous account, *or* create a fake username and email.
|
||||
|
||||
## Instructions
|
||||
|
||||
If you have read everything above and affirm that you will not abuse your access, expand the summary below to reveal the email address to which you should send your request.
|
||||
|
||||
<details>
|
||||
<summary>I affirm that I have read ALL of the information above, and will not abuse my access to Eden, nor will I send unnecessary or spam requests to the following email.</summary>
|
||||
|
||||
Email [crueter@crueter.xyz](mailto:crueter@crueter.xyz) with the format above.
|
||||
|
||||
Once your request is processed, you should receive a confirmation email from crueter with your password, along with a link to a repository containing instructions on SSH access and related setup. Note that you are required to change your password. If your request is rejected, you will receive a notice saying so, possibly asking for clarification. If you do not receive a response within 48 hours, you may send another email.
|
||||
|
||||
</details>
|
|
@ -810,28 +810,26 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
|
|||
|
||||
override fun onConfigurationChanged(newConfig: Configuration) {
|
||||
super.onConfigurationChanged(newConfig)
|
||||
if (_binding == null) {
|
||||
return
|
||||
}
|
||||
val b = _binding ?: return
|
||||
|
||||
updateScreenLayout()
|
||||
val showInputOverlay = BooleanSetting.SHOW_INPUT_OVERLAY.getBoolean()
|
||||
if (emulationActivity?.isInPictureInPictureMode == true) {
|
||||
if (binding.drawerLayout.isOpen) {
|
||||
binding.drawerLayout.close()
|
||||
if (b.drawerLayout.isOpen) {
|
||||
b.drawerLayout.close()
|
||||
}
|
||||
if (showInputOverlay) {
|
||||
binding.surfaceInputOverlay.setVisible(visible = false, gone = false)
|
||||
b.surfaceInputOverlay.setVisible(visible = false, gone = false)
|
||||
}
|
||||
} else {
|
||||
binding.surfaceInputOverlay.setVisible(
|
||||
b.surfaceInputOverlay.setVisible(
|
||||
showInputOverlay && emulationViewModel.emulationStarted.value
|
||||
)
|
||||
if (!isInFoldableLayout) {
|
||||
if (newConfig.orientation == Configuration.ORIENTATION_PORTRAIT) {
|
||||
binding.surfaceInputOverlay.layout = OverlayLayout.Portrait
|
||||
b.surfaceInputOverlay.layout = OverlayLayout.Portrait
|
||||
} else {
|
||||
binding.surfaceInputOverlay.layout = OverlayLayout.Landscape
|
||||
b.surfaceInputOverlay.layout = OverlayLayout.Landscape
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -847,8 +845,9 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
|
|||
}
|
||||
|
||||
private fun updateQuickOverlayMenuEntry(isVisible: Boolean) {
|
||||
val menu = binding.inGameMenu.menu
|
||||
val item = menu.findItem(R.id.menu_quick_overlay)
|
||||
val b = _binding ?: return
|
||||
val item = b.inGameMenu.menu.findItem(R.id.menu_quick_overlay) ?: return
|
||||
|
||||
if (isVisible) {
|
||||
item.title = getString(R.string.emulation_hide_overlay)
|
||||
item.icon = ResourcesCompat.getDrawable(
|
||||
|
@ -867,8 +866,8 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
|
|||
}
|
||||
|
||||
private fun updatePauseMenuEntry(isPaused: Boolean) {
|
||||
val menu = binding.inGameMenu.menu
|
||||
val pauseItem = menu.findItem(R.id.menu_pause_emulation)
|
||||
val b = _binding ?: return
|
||||
val pauseItem = b.inGameMenu.menu.findItem(R.id.menu_pause_emulation) ?: return
|
||||
if (isPaused) {
|
||||
pauseItem.title = getString(R.string.emulation_unpause)
|
||||
pauseItem.icon = ResourcesCompat.getDrawable(
|
||||
|
@ -887,9 +886,11 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
|
|||
}
|
||||
|
||||
override fun onPause() {
|
||||
if (emulationState.isRunning && emulationActivity?.isInPictureInPictureMode != true) {
|
||||
emulationState.pause()
|
||||
updatePauseMenuEntry(true)
|
||||
if (this::emulationState.isInitialized) {
|
||||
if (emulationState.isRunning && emulationActivity?.isInPictureInPictureMode != true) {
|
||||
emulationState.pause()
|
||||
updatePauseMenuEntry(true)
|
||||
}
|
||||
}
|
||||
super.onPause()
|
||||
}
|
||||
|
@ -906,15 +907,15 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
|
|||
|
||||
override fun onResume() {
|
||||
super.onResume()
|
||||
// If the overlay is enabled, we need to update the position if changed
|
||||
val position = IntSetting.PERF_OVERLAY_POSITION.getInt()
|
||||
updateStatsPosition(position)
|
||||
val b = _binding ?: return
|
||||
updateStatsPosition(IntSetting.PERF_OVERLAY_POSITION.getInt())
|
||||
updateSocPosition(IntSetting.SOC_OVERLAY_POSITION.getInt())
|
||||
|
||||
val socPosition = IntSetting.SOC_OVERLAY_POSITION.getInt()
|
||||
updateSocPosition(socPosition)
|
||||
|
||||
binding.inGameMenu.post {
|
||||
emulationState?.isPaused?.let { updatePauseMenuEntry(it) }
|
||||
if (this::emulationState.isInitialized) {
|
||||
b.inGameMenu.post {
|
||||
if (!this::emulationState.isInitialized || _binding == null) return@post
|
||||
updatePauseMenuEntry(emulationState.isPaused)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1229,6 +1230,7 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
|
|||
}
|
||||
|
||||
private fun updateScreenLayout() {
|
||||
val b = _binding ?: return
|
||||
val verticalAlignment =
|
||||
EmulationVerticalAlignment.from(IntSetting.VERTICAL_ALIGNMENT.getInt())
|
||||
val aspectRatio = when (IntSetting.RENDERER_ASPECT_RATIO.getInt()) {
|
||||
|
@ -1240,35 +1242,37 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
|
|||
}
|
||||
when (verticalAlignment) {
|
||||
EmulationVerticalAlignment.Top -> {
|
||||
binding.surfaceEmulation.setAspectRatio(aspectRatio)
|
||||
b.surfaceEmulation.setAspectRatio(aspectRatio)
|
||||
val params = FrameLayout.LayoutParams(
|
||||
ViewGroup.LayoutParams.MATCH_PARENT,
|
||||
ViewGroup.LayoutParams.WRAP_CONTENT
|
||||
)
|
||||
params.gravity = Gravity.TOP or Gravity.CENTER_HORIZONTAL
|
||||
binding.surfaceEmulation.layoutParams = params
|
||||
b.surfaceEmulation.layoutParams = params
|
||||
}
|
||||
|
||||
EmulationVerticalAlignment.Center -> {
|
||||
binding.surfaceEmulation.setAspectRatio(null)
|
||||
binding.surfaceEmulation.updateLayoutParams {
|
||||
b.surfaceEmulation.setAspectRatio(null)
|
||||
b.surfaceEmulation.updateLayoutParams {
|
||||
width = ViewGroup.LayoutParams.MATCH_PARENT
|
||||
height = ViewGroup.LayoutParams.MATCH_PARENT
|
||||
}
|
||||
}
|
||||
|
||||
EmulationVerticalAlignment.Bottom -> {
|
||||
binding.surfaceEmulation.setAspectRatio(aspectRatio)
|
||||
b.surfaceEmulation.setAspectRatio(aspectRatio)
|
||||
val params =
|
||||
FrameLayout.LayoutParams(
|
||||
ViewGroup.LayoutParams.MATCH_PARENT,
|
||||
ViewGroup.LayoutParams.WRAP_CONTENT
|
||||
)
|
||||
params.gravity = Gravity.BOTTOM or Gravity.CENTER_HORIZONTAL
|
||||
binding.surfaceEmulation.layoutParams = params
|
||||
b.surfaceEmulation.layoutParams = params
|
||||
}
|
||||
}
|
||||
emulationState.updateSurface()
|
||||
if (this::emulationState.isInitialized) {
|
||||
emulationState.updateSurface()
|
||||
}
|
||||
emulationActivity?.buildPictureInPictureParams()
|
||||
updateOrientation()
|
||||
}
|
||||
|
|
|
@ -6,8 +6,8 @@
|
|||
|
||||
#include "common/scm_rev.h"
|
||||
|
||||
#define GIT_REV "@GIT_REV@"
|
||||
#define GIT_BRANCH "@GIT_BRANCH@"
|
||||
#define GIT_REV "@GIT_COMMIT@"
|
||||
#define GIT_BRANCH "@GIT_REFSPEC@"
|
||||
#define GIT_DESC "@GIT_DESC@"
|
||||
#define BUILD_NAME "@REPO_NAME@"
|
||||
#define BUILD_DATE "@BUILD_DATE@"
|
||||
|
@ -31,7 +31,7 @@ constexpr const char g_build_version[] = BUILD_VERSION;
|
|||
constexpr const char g_build_id[] = BUILD_ID;
|
||||
constexpr const char g_title_bar_format_idle[] = TITLE_BAR_FORMAT_IDLE;
|
||||
constexpr const char g_title_bar_format_running[] = TITLE_BAR_FORMAT_RUNNING;
|
||||
constexpr const bool g_is_dev_build = IS_DEV_BUILD;
|
||||
constexpr const char g_compiler_id[] = COMPILER_ID;
|
||||
constexpr const bool g_is_dev_build = IS_DEV_BUILD;
|
||||
|
||||
} // namespace Common
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// SPDX-FileCopyrightText: 2025 Eden Emulator Project
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
// SPDX-FileCopyrightText: 2014 Citra Emulator Project
|
||||
|
@ -19,7 +19,7 @@ extern const char g_build_id[];
|
|||
extern const char g_title_bar_format_idle[];
|
||||
extern const char g_title_bar_format_running[];
|
||||
extern const char g_shader_cache_version[];
|
||||
extern const bool g_is_dev_build;
|
||||
extern const char g_compiler_id[];
|
||||
extern const bool g_is_dev_build;
|
||||
|
||||
} // namespace Common
|
||||
|
|
|
@ -107,8 +107,6 @@ add_library(core STATIC
|
|||
file_sys/fssystem/fssystem_nca_header.cpp
|
||||
file_sys/fssystem/fssystem_nca_header.h
|
||||
file_sys/fssystem/fssystem_nca_reader.cpp
|
||||
file_sys/fssystem/fssystem_pooled_buffer.cpp
|
||||
file_sys/fssystem/fssystem_pooled_buffer.h
|
||||
file_sys/fssystem/fssystem_sparse_storage.cpp
|
||||
file_sys/fssystem/fssystem_sparse_storage.h
|
||||
file_sys/fssystem/fssystem_switch_storage.h
|
||||
|
|
|
@ -1,10 +1,12 @@
|
|||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
|
||||
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
|
||||
#include "core/file_sys/fssystem/fssystem_utility.h"
|
||||
|
||||
namespace FileSys {
|
||||
|
@ -76,13 +78,6 @@ size_t AesCtrStorage::Write(const u8* buffer, size_t size, size_t offset) {
|
|||
ASSERT(Common::IsAligned(offset, BlockSize));
|
||||
ASSERT(Common::IsAligned(size, BlockSize));
|
||||
|
||||
// Get a pooled buffer.
|
||||
PooledBuffer pooled_buffer;
|
||||
const bool use_work_buffer = true;
|
||||
if (use_work_buffer) {
|
||||
pooled_buffer.Allocate(size, BlockSize);
|
||||
}
|
||||
|
||||
// Setup the counter.
|
||||
std::array<u8, IvSize> ctr;
|
||||
std::memcpy(ctr.data(), m_iv.data(), IvSize);
|
||||
|
@ -91,25 +86,20 @@ size_t AesCtrStorage::Write(const u8* buffer, size_t size, size_t offset) {
|
|||
// Loop until all data is written.
|
||||
size_t remaining = size;
|
||||
s64 cur_offset = 0;
|
||||
|
||||
// Get a pooled buffer.
|
||||
std::vector<char> pooled_buffer(BlockSize);
|
||||
while (remaining > 0) {
|
||||
// Determine data we're writing and where.
|
||||
const size_t write_size =
|
||||
use_work_buffer ? (std::min)(pooled_buffer.GetSize(), remaining) : remaining;
|
||||
|
||||
void* write_buf;
|
||||
if (use_work_buffer) {
|
||||
write_buf = pooled_buffer.GetBuffer();
|
||||
} else {
|
||||
write_buf = const_cast<u8*>(buffer);
|
||||
}
|
||||
const size_t write_size = std::min(pooled_buffer.size(), remaining);
|
||||
u8* write_buf = reinterpret_cast<u8*>(pooled_buffer.data());
|
||||
|
||||
// Encrypt the data.
|
||||
m_cipher->SetIV(ctr);
|
||||
m_cipher->Transcode(buffer, write_size, reinterpret_cast<u8*>(write_buf),
|
||||
Core::Crypto::Op::Encrypt);
|
||||
m_cipher->Transcode(buffer, write_size, write_buf, Core::Crypto::Op::Encrypt);
|
||||
|
||||
// Write the encrypted data.
|
||||
m_base_storage->Write(reinterpret_cast<u8*>(write_buf), write_size, offset + cur_offset);
|
||||
m_base_storage->Write(write_buf, write_size, offset + cur_offset);
|
||||
|
||||
// Advance.
|
||||
cur_offset += write_size;
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/file_sys/errors.h"
|
||||
#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
|
||||
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
|
||||
#include "core/file_sys/fssystem/fssystem_utility.h"
|
||||
|
||||
namespace FileSys {
|
||||
|
@ -69,17 +70,14 @@ size_t AesXtsStorage::Read(u8* buffer, size_t size, size_t offset) const {
|
|||
|
||||
// Decrypt into a pooled buffer.
|
||||
{
|
||||
PooledBuffer tmp_buf(m_block_size, m_block_size);
|
||||
ASSERT(tmp_buf.GetSize() >= m_block_size);
|
||||
|
||||
std::memset(tmp_buf.GetBuffer(), 0, skip_size);
|
||||
std::memcpy(tmp_buf.GetBuffer() + skip_size, buffer, data_size);
|
||||
std::vector<char> tmp_buf(m_block_size, 0);
|
||||
std::memcpy(tmp_buf.data() + skip_size, buffer, data_size);
|
||||
|
||||
m_cipher->SetIV(ctr);
|
||||
m_cipher->Transcode(tmp_buf.GetBuffer(), m_block_size, tmp_buf.GetBuffer(),
|
||||
m_cipher->Transcode(tmp_buf.data(), m_block_size, tmp_buf.data(),
|
||||
Core::Crypto::Op::Decrypt);
|
||||
|
||||
std::memcpy(buffer, tmp_buf.GetBuffer() + skip_size, data_size);
|
||||
std::memcpy(buffer, tmp_buf.data() + skip_size, data_size);
|
||||
}
|
||||
|
||||
AddCounter(ctr.data(), IvSize, 1);
|
||||
|
|
|
@ -1,13 +1,14 @@
|
|||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "core/file_sys/errors.h"
|
||||
#include "core/file_sys/fssystem/fs_i_storage.h"
|
||||
#include "core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h"
|
||||
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
|
||||
|
||||
namespace FileSys {
|
||||
|
||||
|
@ -89,10 +90,11 @@ private:
|
|||
VirtualFile m_base_storage;
|
||||
s64 m_base_storage_size;
|
||||
size_t m_data_align;
|
||||
mutable std::vector<char> work_buffer;
|
||||
|
||||
public:
|
||||
explicit AlignmentMatchingStoragePooledBuffer(VirtualFile bs, size_t da)
|
||||
: m_base_storage(std::move(bs)), m_data_align(da) {
|
||||
: m_base_storage(std::move(bs)), m_data_align(da), work_buffer(da) {
|
||||
ASSERT(Common::IsPowerOfTwo(da));
|
||||
}
|
||||
|
||||
|
@ -104,16 +106,10 @@ public:
|
|||
|
||||
// Validate arguments.
|
||||
ASSERT(buffer != nullptr);
|
||||
|
||||
s64 bs_size = this->GetSize();
|
||||
ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
|
||||
|
||||
// Allocate a pooled buffer.
|
||||
PooledBuffer pooled_buffer;
|
||||
pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);
|
||||
|
||||
return AlignmentMatchingStorageImpl::Read(m_base_storage, pooled_buffer.GetBuffer(),
|
||||
pooled_buffer.GetSize(), m_data_align,
|
||||
return AlignmentMatchingStorageImpl::Read(m_base_storage, work_buffer.data(),
|
||||
work_buffer.size(), m_data_align,
|
||||
BufferAlign, offset, buffer, size);
|
||||
}
|
||||
|
||||
|
@ -125,16 +121,10 @@ public:
|
|||
|
||||
// Validate arguments.
|
||||
ASSERT(buffer != nullptr);
|
||||
|
||||
s64 bs_size = this->GetSize();
|
||||
ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
|
||||
|
||||
// Allocate a pooled buffer.
|
||||
PooledBuffer pooled_buffer;
|
||||
pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);
|
||||
|
||||
return AlignmentMatchingStorageImpl::Write(m_base_storage, pooled_buffer.GetBuffer(),
|
||||
pooled_buffer.GetSize(), m_data_align,
|
||||
return AlignmentMatchingStorageImpl::Write(m_base_storage, work_buffer.data(),
|
||||
work_buffer.size(), m_data_align,
|
||||
BufferAlign, offset, buffer, size);
|
||||
}
|
||||
|
||||
|
|
|
@ -7,7 +7,6 @@
|
|||
#include "core/file_sys/errors.h"
|
||||
#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
|
||||
#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
|
||||
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
|
||||
|
||||
namespace FileSys {
|
||||
|
||||
|
@ -465,16 +464,8 @@ Result BucketTree::Visitor::Find(s64 virtual_address) {
|
|||
}
|
||||
|
||||
Result BucketTree::Visitor::FindEntrySet(s32* out_index, s64 virtual_address, s32 node_index) {
|
||||
const auto node_size = m_tree->m_node_size;
|
||||
|
||||
PooledBuffer pool(node_size, 1);
|
||||
if (node_size <= pool.GetSize()) {
|
||||
R_RETURN(
|
||||
this->FindEntrySetWithBuffer(out_index, virtual_address, node_index, pool.GetBuffer()));
|
||||
} else {
|
||||
pool.Deallocate();
|
||||
R_RETURN(this->FindEntrySetWithoutBuffer(out_index, virtual_address, node_index));
|
||||
}
|
||||
std::vector<char> pool(m_tree->m_node_size);
|
||||
R_RETURN(FindEntrySetWithBuffer(out_index, virtual_address, node_index, pool.data()));
|
||||
}
|
||||
|
||||
Result BucketTree::Visitor::FindEntrySetWithBuffer(s32* out_index, s64 virtual_address,
|
||||
|
@ -525,15 +516,8 @@ Result BucketTree::Visitor::FindEntrySetWithoutBuffer(s32* out_index, s64 virtua
|
|||
}
|
||||
|
||||
Result BucketTree::Visitor::FindEntry(s64 virtual_address, s32 entry_set_index) {
|
||||
const auto entry_set_size = m_tree->m_node_size;
|
||||
|
||||
PooledBuffer pool(entry_set_size, 1);
|
||||
if (entry_set_size <= pool.GetSize()) {
|
||||
R_RETURN(this->FindEntryWithBuffer(virtual_address, entry_set_index, pool.GetBuffer()));
|
||||
} else {
|
||||
pool.Deallocate();
|
||||
R_RETURN(this->FindEntryWithoutBuffer(virtual_address, entry_set_index));
|
||||
}
|
||||
std::vector<char> pool(m_tree->m_node_size);
|
||||
R_RETURN(FindEntryWithBuffer(virtual_address, entry_set_index, pool.data()));
|
||||
}
|
||||
|
||||
Result BucketTree::Visitor::FindEntryWithBuffer(s64 virtual_address, s32 entry_set_index,
|
||||
|
|
|
@ -1,3 +1,6 @@
|
|||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
|
@ -6,7 +9,6 @@
|
|||
#include "core/file_sys/errors.h"
|
||||
#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
|
||||
#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
|
||||
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
|
||||
|
||||
namespace FileSys {
|
||||
|
||||
|
@ -35,23 +37,19 @@ Result BucketTree::ScanContinuousReading(ContinuousReadingInfo* out_info,
|
|||
R_UNLESS(entry.GetVirtualOffset() <= cur_offset, ResultOutOfRange);
|
||||
|
||||
// Create a pooled buffer for our scan.
|
||||
PooledBuffer pool(m_node_size, 1);
|
||||
char* buffer = nullptr;
|
||||
|
||||
std::vector<char> pool(m_node_size);
|
||||
s64 entry_storage_size = m_entry_storage->GetSize();
|
||||
|
||||
// Read the node.
|
||||
if (m_node_size <= pool.GetSize()) {
|
||||
buffer = pool.GetBuffer();
|
||||
const auto ofs = param.entry_set.index * static_cast<s64>(m_node_size);
|
||||
R_UNLESS(m_node_size + ofs <= static_cast<size_t>(entry_storage_size),
|
||||
ResultInvalidBucketTreeNodeEntryCount);
|
||||
u8* buffer = reinterpret_cast<u8*>(pool.data());
|
||||
const auto ofs = param.entry_set.index * s64(m_node_size);
|
||||
R_UNLESS(m_node_size + ofs <= size_t(entry_storage_size),
|
||||
ResultInvalidBucketTreeNodeEntryCount);
|
||||
|
||||
m_entry_storage->Read(reinterpret_cast<u8*>(buffer), m_node_size, ofs);
|
||||
}
|
||||
m_entry_storage->Read(buffer, m_node_size, ofs);
|
||||
|
||||
// Calculate extents.
|
||||
const auto end_offset = cur_offset + static_cast<s64>(param.size);
|
||||
const auto end_offset = cur_offset + s64(param.size);
|
||||
s64 phys_offset = entry.GetPhysicalOffset();
|
||||
|
||||
// Start merge tracking.
|
||||
|
@ -76,14 +74,8 @@ Result BucketTree::ScanContinuousReading(ContinuousReadingInfo* out_info,
|
|||
s64 next_entry_offset;
|
||||
|
||||
if (entry_index + 1 < entry_count) {
|
||||
if (buffer != nullptr) {
|
||||
const auto ofs = impl::GetBucketTreeEntryOffset(0, m_entry_size, entry_index + 1);
|
||||
std::memcpy(std::addressof(next_entry), buffer + ofs, m_entry_size);
|
||||
} else {
|
||||
const auto ofs = impl::GetBucketTreeEntryOffset(param.entry_set.index, m_node_size,
|
||||
m_entry_size, entry_index + 1);
|
||||
m_entry_storage->ReadObject(std::addressof(next_entry), ofs);
|
||||
}
|
||||
const auto offset = impl::GetBucketTreeEntryOffset(0, m_entry_size, entry_index + 1);
|
||||
std::memcpy(std::addressof(next_entry), buffer + offset, m_entry_size);
|
||||
|
||||
next_entry_offset = next_entry.GetVirtualOffset();
|
||||
R_UNLESS(param.offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset);
|
||||
|
|
|
@ -1,3 +1,6 @@
|
|||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
|
@ -9,8 +12,6 @@
|
|||
#include "core/file_sys/fssystem/fs_i_storage.h"
|
||||
#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
|
||||
#include "core/file_sys/fssystem/fssystem_compression_common.h"
|
||||
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
|
||||
#include "core/file_sys/vfs/vfs.h"
|
||||
|
||||
namespace FileSys {
|
||||
|
||||
|
@ -317,23 +318,11 @@ private:
|
|||
R_SUCCEED_IF(entry_count == 0);
|
||||
|
||||
// Get the remaining size in a convenient form.
|
||||
const size_t total_required_size =
|
||||
static_cast<size_t>(required_access_physical_size);
|
||||
const size_t total_required_size = size_t(required_access_physical_size);
|
||||
|
||||
// Perform the read based on whether we need to allocate a buffer.
|
||||
if (will_allocate_pooled_buffer) {
|
||||
// Allocate a pooled buffer.
|
||||
PooledBuffer pooled_buffer;
|
||||
if (pooled_buffer.GetAllocatableSizeMax() >= total_required_size) {
|
||||
pooled_buffer.Allocate(total_required_size, m_block_size_max);
|
||||
} else {
|
||||
pooled_buffer.AllocateParticularlyLarge(
|
||||
std::min<size_t>(
|
||||
total_required_size,
|
||||
PooledBuffer::GetAllocatableParticularlyLargeSizeMax()),
|
||||
m_block_size_max);
|
||||
}
|
||||
|
||||
std::vector<char> pooled_buffer(std::max(m_block_size_max, total_required_size));
|
||||
// Read each of the entries.
|
||||
for (s32 entry_idx = 0; entry_idx < entry_count; ++entry_idx) {
|
||||
// Determine the current read size.
|
||||
|
@ -342,13 +331,13 @@ private:
|
|||
if (const size_t target_entry_size =
|
||||
static_cast<size_t>(entries[entry_idx].physical_size) +
|
||||
static_cast<size_t>(entries[entry_idx].gap_from_prev);
|
||||
target_entry_size <= pooled_buffer.GetSize()) {
|
||||
target_entry_size <= pooled_buffer.size()) {
|
||||
// We'll be using the pooled buffer.
|
||||
will_use_pooled_buffer = true;
|
||||
|
||||
// Determine how much we can read.
|
||||
const size_t max_size = std::min<size_t>(
|
||||
required_access_physical_size, pooled_buffer.GetSize());
|
||||
required_access_physical_size, pooled_buffer.size());
|
||||
|
||||
size_t read_size = 0;
|
||||
for (auto n = entry_idx; n < entry_count; ++n) {
|
||||
|
@ -376,7 +365,7 @@ private:
|
|||
// Perform the read based on whether or not we'll use the pooled buffer.
|
||||
if (will_use_pooled_buffer) {
|
||||
// Read the compressed data into the pooled buffer.
|
||||
auto* const buffer = pooled_buffer.GetBuffer();
|
||||
auto* const buffer = pooled_buffer.data();
|
||||
m_data_storage->Read(reinterpret_cast<u8*>(buffer), cur_read_size,
|
||||
required_access_physical_offset);
|
||||
|
||||
|
@ -863,11 +852,9 @@ private:
|
|||
static_cast<size_t>(unaligned_range->virtual_size));
|
||||
|
||||
// Get a pooled buffer for our read.
|
||||
PooledBuffer pooled_buffer;
|
||||
pooled_buffer.Allocate(size_buffer_required, size_buffer_required);
|
||||
|
||||
std::vector<char> pooled_buffer(size_buffer_required);
|
||||
// Perform read.
|
||||
Result rc = read_impl(pooled_buffer.GetBuffer(), size_buffer_required);
|
||||
Result rc = read_impl(pooled_buffer.data(), size_buffer_required);
|
||||
if (R_FAILED(rc)) {
|
||||
R_THROW(rc);
|
||||
}
|
||||
|
@ -876,8 +863,7 @@ private:
|
|||
const size_t skip_size = cur_offset - unaligned_range->virtual_offset;
|
||||
const size_t copy_size = std::min<size_t>(
|
||||
cur_size, unaligned_range->GetEndVirtualOffset() - cur_offset);
|
||||
|
||||
std::memcpy(cur_dst, pooled_buffer.GetBuffer() + skip_size, copy_size);
|
||||
std::memcpy(cur_dst, pooled_buffer.data() + skip_size, copy_size);
|
||||
|
||||
// Advance.
|
||||
cur_dst += copy_size;
|
||||
|
|
|
@ -1,61 +0,0 @@
|
|||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
|
||||
|
||||
namespace FileSys {
|
||||
|
||||
namespace {
|
||||
|
||||
constexpr size_t HeapBlockSize = BufferPoolAlignment;
|
||||
static_assert(HeapBlockSize == 4_KiB);
|
||||
|
||||
// A heap block is 4KiB. An order is a power of two.
|
||||
// This gives blocks of the order 32KiB, 512KiB, 4MiB.
|
||||
constexpr s32 HeapOrderMax = 7;
|
||||
constexpr s32 HeapOrderMaxForLarge = HeapOrderMax + 3;
|
||||
|
||||
constexpr size_t HeapAllocatableSizeMax = HeapBlockSize * (static_cast<size_t>(1) << HeapOrderMax);
|
||||
constexpr size_t HeapAllocatableSizeMaxForLarge =
|
||||
HeapBlockSize * (static_cast<size_t>(1) << HeapOrderMaxForLarge);
|
||||
|
||||
} // namespace
|
||||
|
||||
size_t PooledBuffer::GetAllocatableSizeMaxCore(bool large) {
|
||||
return large ? HeapAllocatableSizeMaxForLarge : HeapAllocatableSizeMax;
|
||||
}
|
||||
|
||||
void PooledBuffer::AllocateCore(size_t ideal_size, size_t required_size, bool large) {
|
||||
// Ensure preconditions.
|
||||
ASSERT(m_buffer == nullptr);
|
||||
|
||||
// Check that we can allocate this size.
|
||||
ASSERT(required_size <= GetAllocatableSizeMaxCore(large));
|
||||
|
||||
const size_t target_size =
|
||||
(std::min)((std::max)(ideal_size, required_size), GetAllocatableSizeMaxCore(large));
|
||||
|
||||
// Dummy implementation for allocate.
|
||||
if (target_size > 0) {
|
||||
m_buffer =
|
||||
reinterpret_cast<char*>(::operator new(target_size, std::align_val_t{HeapBlockSize}));
|
||||
m_size = target_size;
|
||||
|
||||
// Ensure postconditions.
|
||||
ASSERT(m_buffer != nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
void PooledBuffer::Shrink(size_t ideal_size) {
|
||||
ASSERT(ideal_size <= GetAllocatableSizeMaxCore(true));
|
||||
|
||||
// Shrinking to zero means that we have no buffer.
|
||||
if (ideal_size == 0) {
|
||||
::operator delete(m_buffer, std::align_val_t{HeapBlockSize});
|
||||
m_buffer = nullptr;
|
||||
m_size = ideal_size;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace FileSys
|
|
@ -1,95 +0,0 @@
|
|||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/literals.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace FileSys {
|
||||
|
||||
using namespace Common::Literals;
|
||||
|
||||
constexpr inline size_t BufferPoolAlignment = 4_KiB;
|
||||
constexpr inline size_t BufferPoolWorkSize = 320;
|
||||
|
||||
class PooledBuffer {
|
||||
YUZU_NON_COPYABLE(PooledBuffer);
|
||||
|
||||
public:
|
||||
// Constructor/Destructor.
|
||||
constexpr PooledBuffer() : m_buffer(), m_size() {}
|
||||
|
||||
PooledBuffer(size_t ideal_size, size_t required_size) : m_buffer(), m_size() {
|
||||
this->Allocate(ideal_size, required_size);
|
||||
}
|
||||
|
||||
~PooledBuffer() {
|
||||
this->Deallocate();
|
||||
}
|
||||
|
||||
// Move and assignment.
|
||||
explicit PooledBuffer(PooledBuffer&& rhs) : m_buffer(rhs.m_buffer), m_size(rhs.m_size) {
|
||||
rhs.m_buffer = nullptr;
|
||||
rhs.m_size = 0;
|
||||
}
|
||||
|
||||
PooledBuffer& operator=(PooledBuffer&& rhs) {
|
||||
PooledBuffer(std::move(rhs)).Swap(*this);
|
||||
return *this;
|
||||
}
|
||||
|
||||
// Allocation API.
|
||||
void Allocate(size_t ideal_size, size_t required_size) {
|
||||
return this->AllocateCore(ideal_size, required_size, false);
|
||||
}
|
||||
|
||||
void AllocateParticularlyLarge(size_t ideal_size, size_t required_size) {
|
||||
return this->AllocateCore(ideal_size, required_size, true);
|
||||
}
|
||||
|
||||
void Shrink(size_t ideal_size);
|
||||
|
||||
void Deallocate() {
|
||||
// Shrink the buffer to empty.
|
||||
this->Shrink(0);
|
||||
ASSERT(m_buffer == nullptr);
|
||||
}
|
||||
|
||||
char* GetBuffer() const {
|
||||
ASSERT(m_buffer != nullptr);
|
||||
return m_buffer;
|
||||
}
|
||||
|
||||
size_t GetSize() const {
|
||||
ASSERT(m_buffer != nullptr);
|
||||
return m_size;
|
||||
}
|
||||
|
||||
public:
|
||||
static size_t GetAllocatableSizeMax() {
|
||||
return GetAllocatableSizeMaxCore(false);
|
||||
}
|
||||
static size_t GetAllocatableParticularlyLargeSizeMax() {
|
||||
return GetAllocatableSizeMaxCore(true);
|
||||
}
|
||||
|
||||
private:
|
||||
static size_t GetAllocatableSizeMaxCore(bool large);
|
||||
|
||||
private:
|
||||
void Swap(PooledBuffer& rhs) {
|
||||
std::swap(m_buffer, rhs.m_buffer);
|
||||
std::swap(m_size, rhs.m_size);
|
||||
}
|
||||
|
||||
void AllocateCore(size_t ideal_size, size_t required_size, bool large);
|
||||
|
||||
private:
|
||||
char* m_buffer;
|
||||
size_t m_size;
|
||||
};
|
||||
|
||||
} // namespace FileSys
|
|
@ -53,6 +53,19 @@ u32 HardwareComposer::ComposeLocked(f32* out_speed_scale, Display& display,
|
|||
// Set default speed limit to 100%.
|
||||
*out_speed_scale = 1.0f;
|
||||
|
||||
// If no layers are available, skip the logic.
|
||||
bool any_visible = false;
|
||||
for (auto& layer : display.stack.layers) {
|
||||
if (layer->visible) {
|
||||
any_visible = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!any_visible) {
|
||||
*out_speed_scale = 1.0f;
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Determine the number of vsync periods to wait before composing again.
|
||||
std::optional<s32> swap_interval{};
|
||||
bool has_acquired_buffer{};
|
||||
|
@ -110,7 +123,7 @@ u32 HardwareComposer::ComposeLocked(f32* out_speed_scale, Display& display,
|
|||
}
|
||||
|
||||
// If any new buffers were acquired, we can present.
|
||||
if (has_acquired_buffer) {
|
||||
if (has_acquired_buffer && !composition_stack.empty()) {
|
||||
// Sort by Z-index.
|
||||
std::stable_sort(composition_stack.begin(), composition_stack.end(),
|
||||
[&](auto& l, auto& r) { return l.z_index < r.z_index; });
|
||||
|
@ -119,6 +132,19 @@ u32 HardwareComposer::ComposeLocked(f32* out_speed_scale, Display& display,
|
|||
nvdisp.Composite(composition_stack);
|
||||
}
|
||||
|
||||
// Batch framebuffer releases, instead of one-into-one.
|
||||
std::vector<std::pair<Layer*, Framebuffer*>> to_release;
|
||||
for (auto& [layer_id, framebuffer] : m_framebuffers) {
|
||||
if (framebuffer.release_frame_number > m_frame_number || !framebuffer.is_acquired)
|
||||
continue;
|
||||
if (auto layer = display.stack.FindLayer(layer_id); layer)
|
||||
to_release.emplace_back(layer.get(), &framebuffer);
|
||||
}
|
||||
for (auto& [layer, framebuffer] : to_release) {
|
||||
layer->buffer_item_consumer->ReleaseBuffer(framebuffer->item, android::Fence::NoFence());
|
||||
framebuffer->is_acquired = false;
|
||||
}
|
||||
|
||||
// Advance by at least one frame.
|
||||
const u32 frame_advance = swap_interval.value_or(1);
|
||||
m_frame_number += frame_advance;
|
||||
|
|
|
@ -27,7 +27,7 @@ endif()
|
|||
option(DYNARMIC_FATAL_ERRORS "Errors are fatal" OFF)
|
||||
option(DYNARMIC_IGNORE_ASSERTS "Ignore asserts" OFF)
|
||||
option(DYNARMIC_TESTS_USE_UNICORN "Enable fuzzing tests against unicorn" OFF)
|
||||
option(DYNARMIC_USE_LLVM "Support disassembly of jitted x86_64 code using LLVM" OFF)
|
||||
CMAKE_DEPENDENT_OPTION(DYNARMIC_USE_LLVM "Support disassembly of jitted x86_64 code using LLVM" OFF "NOT YUZU_DISABLE_LLVM" OFF)
|
||||
|
||||
if (PLATFORM_OPENBSD)
|
||||
option(DYNARMIC_USE_PRECOMPILED_HEADERS "Use precompiled headers" OFF)
|
||||
|
|
|
@ -273,52 +273,73 @@ Exclusive OR (i.e.: XOR)
|
|||
|
||||
### Callback: {Read,Write}Memory{8,16,32,64}
|
||||
|
||||
<u8> ReadMemory8(<u32> vaddr)
|
||||
<u8> ReadMemory16(<u32> vaddr)
|
||||
<u8> ReadMemory32(<u32> vaddr)
|
||||
<u8> ReadMemory64(<u32> vaddr)
|
||||
<void> WriteMemory8(<u32> vaddr, <u8> value_to_store)
|
||||
<void> WriteMemory16(<u32> vaddr, <u16> value_to_store)
|
||||
<void> WriteMemory32(<u32> vaddr, <u32> value_to_store)
|
||||
<void> WriteMemory64(<u32> vaddr, <u64> value_to_store)
|
||||
```c++
|
||||
<u8> ReadMemory8(<u32> vaddr)
|
||||
<u8> ReadMemory16(<u32> vaddr)
|
||||
<u8> ReadMemory32(<u32> vaddr)
|
||||
<u8> ReadMemory64(<u32> vaddr)
|
||||
<void> WriteMemory8(<u32> vaddr, <u8> value_to_store)
|
||||
<void> WriteMemory16(<u32> vaddr, <u16> value_to_store)
|
||||
<void> WriteMemory32(<u32> vaddr, <u32> value_to_store)
|
||||
<void> WriteMemory64(<u32> vaddr, <u64> value_to_store)
|
||||
```
|
||||
|
||||
Memory access.
|
||||
|
||||
### Terminal: Interpret
|
||||
|
||||
SetTerm(IR::Term::Interpret{next})
|
||||
```c++
|
||||
SetTerm(IR::Term::Interpret{next})
|
||||
```
|
||||
|
||||
This terminal instruction calls the interpreter, starting at `next`.
|
||||
The interpreter must interpret exactly one instruction.
|
||||
|
||||
### Terminal: ReturnToDispatch
|
||||
|
||||
SetTerm(IR::Term::ReturnToDispatch{})
|
||||
```c++
|
||||
SetTerm(IR::Term::ReturnToDispatch{})
|
||||
```
|
||||
|
||||
This terminal instruction returns control to the dispatcher.
|
||||
The dispatcher will use the value in R15 to determine what comes next.
|
||||
|
||||
### Terminal: LinkBlock
|
||||
|
||||
SetTerm(IR::Term::LinkBlock{next})
|
||||
```c++
|
||||
SetTerm(IR::Term::LinkBlock{next})
|
||||
```
|
||||
|
||||
This terminal instruction jumps to the basic block described by `next` if we have enough
|
||||
cycles remaining. If we do not have enough cycles remaining, we return to the
|
||||
dispatcher, which will return control to the host.
|
||||
|
||||
### Terminal: LinkBlockFast
|
||||
|
||||
```c++
|
||||
SetTerm(IR::Term::LinkBlockFast{next})
|
||||
```
|
||||
|
||||
This terminal instruction jumps to the basic block described by `next` unconditionally.
|
||||
This promises guarantees that must be held at runtime - i.e. that the program won't hang.
|
||||
|
||||
### Terminal: PopRSBHint
|
||||
|
||||
SetTerm(IR::Term::PopRSBHint{})
|
||||
```c++
|
||||
SetTerm(IR::Term::PopRSBHint{})
|
||||
```
|
||||
|
||||
This terminal instruction checks the top of the Return Stack Buffer against R15.
|
||||
If RSB lookup fails, control is returned to the dispatcher.
|
||||
This is an optimization for faster function calls. A backend that doesn't support
|
||||
this optimization or doesn't have a RSB may choose to implement this exactly as
|
||||
ReturnToDispatch.
|
||||
`ReturnToDispatch`.
|
||||
|
||||
### Terminal: If
|
||||
|
||||
SetTerm(IR::Term::If{cond, term_then, term_else})
|
||||
```c++
|
||||
SetTerm(IR::Term::If{cond, term_then, term_else})
|
||||
```
|
||||
|
||||
This terminal instruction conditionally executes one terminal or another depending
|
||||
on the run-time state of the ARM flags.
|
||||
|
|
19
src/dynarmic/docs/FastMemory.md
Normal file
|
@ -0,0 +1,19 @@
# Fast memory (Fastmem)

The main way of accessing memory in JITed programs is via an invoked function, say "Read()" and "Write()". In our translator, such functions usually take a sizable amount of code space (push + call + pop), trash the i-cache (due to an indirect call), and overall make code emission more bloated.

The solution? Delegate invalid accesses to a dedicated arena, similar to a swap. The main idea behind such a mechanism is to let the OS deliver page faults from invalid accesses directly to the JIT translator, bypassing the usual call overhead; while this sacrifices i-cache coherency, it allows for smaller code size and "faster" throughput.

Many kernels, however, do not support fast signal dispatching (Solaris, OpenBSD, FreeBSD). Only Linux and Windows support relatively "fast" signal dispatching, so this feature is best suited to those platforms.





On x86_64, for example, when a page fault occurs, the CPU transmits the appropriate arguments for a page-fault handler via control registers and the stack (see `IRETQ`); the OS then transforms that into something that can be delivered to userspace.

Most modern OSes implement kernel page-table isolation, which means some system calls (the rarely used ones) incur a context switch, whereas others (the frequently used ones, handled by the small kernel portion mapped into the process address space) do not. This effect can be mitigated on systems with PCID (up to 4096 unique IDs).

Signal dispatching takes a performance hit from reloading `%cr3` - but Linux does something cleverer to avoid reloads: the vDSO takes care of the entire thing in the same address space, making dispatching about as costly as an indirect call - without the hazards of increased code size.

The main downside is the constant i-cache thrashing and pipeline hazards introduced by the vDSO signal handlers. However, on most benchmarks fastmem does perform faster than the alternative (Linux only). This approach also exploits the fact that the guest address space is emulated contiguously by using an arena - which can then potentially be backed transparently by huge pages, reducing TLB walk times.
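To make the mechanism concrete, here is a minimal, hypothetical sketch - not dynarmic's actual implementation - of a Linux fastmem arena backed by a `SIGSEGV` handler: the whole guest address space is reserved up front with `PROT_NONE`, and a faulting access is resolved inside the handler (a real JIT would instead back-patch the faulting code to call the slow `Read()`/`Write()` path).

```c++
// Hypothetical illustration of a SIGSEGV-backed fastmem arena (Linux, x86_64).
// Not dynarmic's real code; names, sizes and the recovery policy are invented.
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <signal.h>
#include <sys/mman.h>

static std::uint8_t* g_arena = nullptr;
constexpr std::size_t kGuestSpace = std::size_t{1} << 32; // 4 GiB of guest space
constexpr std::size_t kPageSize = 4096;

static void FaultHandler(int, siginfo_t* info, void*) {
    auto* const fault = static_cast<std::uint8_t*>(info->si_addr);
    if (fault >= g_arena && fault < g_arena + kGuestSpace) {
        // A real JIT would redirect to the slow Read()/Write() path here;
        // this demo simply commits the page so the access can retry.
        void* const page = reinterpret_cast<void*>(
            reinterpret_cast<std::uintptr_t>(fault) & ~(kPageSize - 1));
        mprotect(page, kPageSize, PROT_READ | PROT_WRITE);
        return; // Returning re-executes the faulting instruction.
    }
    std::abort(); // Fault outside the arena: a genuine crash.
}

int main() {
    // Reserve (but do not commit) the entire guest space as one contiguous arena.
    g_arena = static_cast<std::uint8_t*>(
        mmap(nullptr, kGuestSpace, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));

    struct sigaction sa {};
    sa.sa_flags = SA_SIGINFO;
    sa.sa_sigaction = FaultHandler;
    sigaction(SIGSEGV, &sa, nullptr);

    g_arena[0x1000] = 42;                 // Faults once; the page gets committed.
    std::printf("%d\n", g_arena[0x1000]); // Prints 42 without faulting again.
    return 0;
}
```

On Windows the same idea is usually built on vectored exception handling rather than POSIX signals.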
4 src/dynarmic/docs/Fastmem.svg (new file; image diff suppressed; 128 KiB)
4 src/dynarmic/docs/HostToGuest.svg (new file; image diff suppressed; 98 KiB)
|
@ -16,19 +16,34 @@ Note that `Use`ing a value decrements its `use_count` by one. When the `use_coun
|
|||
|
||||
The member functions on `RegAlloc` are just a combination of the above concepts.
|
||||
|
||||
The following registers are reserved for internal use and should NOT participate in register allocation:
|
||||
- `%xmm0`, `%xmm1`, `%xmm2`: Used as scratch in exclusive memory access.
|
||||
- `%rsp`: Stack pointer.
|
||||
- `%r15`: JIT pointer
|
||||
- `%r14`: Page table pointer.
|
||||
- `%r13`: Fastmem pointer.
|
||||
|
||||
The layout designates `%r15` as the JIT state pointer - while it may be tempting to turn it into a synthetic pointer, dedicating an entire register (out of the 12 available) is preferable to inlining a directly computed immediate.
|
||||
|
||||
NEVER modify `%r15`; this register must be treated as "immutable" for the entire duration of a JIT block.
|
||||
|
||||
### `Scratch`
|
||||
|
||||
Xbyak::Reg64 ScratchGpr(HostLocList desired_locations = any_gpr)
|
||||
Xbyak::Xmm ScratchXmm(HostLocList desired_locations = any_xmm)
|
||||
```c++
|
||||
Xbyak::Reg64 ScratchGpr(HostLocList desired_locations = any_gpr);
|
||||
Xbyak::Xmm ScratchXmm(HostLocList desired_locations = any_xmm);
|
||||
```
|
||||
|
||||
At runtime, allocate one of the registers in `desired_locations`. You are free to modify the register. The register is discarded at the end of the allocation scope.
|
||||
|
||||
### Pure `Use`
|
||||
|
||||
Xbyak::Reg64 UseGpr(Argument& arg);
|
||||
Xbyak::Xmm UseXmm(Argument& arg);
|
||||
OpArg UseOpArg(Argument& arg);
|
||||
void Use(Argument& arg, HostLoc host_loc);
|
||||
```c++
|
||||
Xbyak::Reg64 UseGpr(Argument& arg);
|
||||
Xbyak::Xmm UseXmm(Argument& arg);
|
||||
OpArg UseOpArg(Argument& arg);
|
||||
void Use(Argument& arg, HostLoc host_loc);
|
||||
```
|
||||
|
||||
At runtime, the value corresponding to `arg` will be placed in a register. The actual register is determined by
|
||||
which one of the above functions is called. `UseGpr` places it in an unused GPR, `UseXmm` places it
|
||||
|
@ -39,9 +54,11 @@ This register **must not** have it's value changed.
|
|||
|
||||
### `UseScratch`
|
||||
|
||||
Xbyak::Reg64 UseScratchGpr(Argument& arg);
|
||||
Xbyak::Xmm UseScratchXmm(Argument& arg);
|
||||
void UseScratch(Argument& arg, HostLoc host_loc);
|
||||
```c++
|
||||
Xbyak::Reg64 UseScratchGpr(Argument& arg);
|
||||
Xbyak::Xmm UseScratchXmm(Argument& arg);
|
||||
void UseScratch(Argument& arg, HostLoc host_loc);
|
||||
```
|
||||
|
||||
At runtime, the value corresponding to `arg` will be placed in a register. The actual register is determined by
|
||||
which one of the above functions is called. `UseScratchGpr` places it in an unused GPR, `UseScratchXmm` places it
|
||||
|
@ -55,7 +72,9 @@ You are free to modify the value in the register. The register is discarded at t
|
|||
|
||||
A `Define` is the definition of a value. This is the only time when a value may be set.
|
||||
|
||||
void DefineValue(IR::Inst* inst, const Xbyak::Reg& reg);
|
||||
```c++
|
||||
void DefineValue(IR::Inst* inst, const Xbyak::Reg& reg);
|
||||
```
|
||||
|
||||
By calling `DefineValue`, you are stating that you wish to define the value for `inst`, and you have written the
|
||||
value to the specified register `reg`.
|
||||
|
@ -64,7 +83,9 @@ value to the specified register `reg`.
|
|||
|
||||
Adding a `Define` to an existing value.
|
||||
|
||||
void DefineValue(IR::Inst* inst, Argument& arg);
|
||||
```c++
|
||||
void DefineValue(IR::Inst* inst, Argument& arg);
|
||||
```
|
||||
|
||||
You are declaring that the value for `inst` is the same as the value for `arg`. No host machine instructions are
|
||||
emitted.
|
||||
|
|
|
@ -23,15 +23,17 @@ One complication dynarmic has is that a compiled block is not uniquely identifia
|
|||
the PC alone, but bits in the FPSCR and CPSR are also relevant. We resolve this by
|
||||
computing a 64-bit `UniqueHash` that is guaranteed to uniquely identify a block.
|
||||
|
||||
u64 LocationDescriptor::UniqueHash() const {
|
||||
// This value MUST BE UNIQUE.
|
||||
// This calculation has to match up with EmitX64::EmitTerminalPopRSBHint
|
||||
u64 pc_u64 = u64(arm_pc) << 32;
|
||||
u64 fpscr_u64 = u64(fpscr.Value());
|
||||
u64 t_u64 = cpsr.T() ? 1 : 0;
|
||||
u64 e_u64 = cpsr.E() ? 2 : 0;
|
||||
return pc_u64 | fpscr_u64 | t_u64 | e_u64;
|
||||
}
|
||||
```c++
|
||||
u64 LocationDescriptor::UniqueHash() const {
|
||||
// This value MUST BE UNIQUE.
|
||||
// This calculation has to match up with EmitX64::EmitTerminalPopRSBHint
|
||||
u64 pc_u64 = u64(arm_pc) << 32;
|
||||
u64 fpscr_u64 = u64(fpscr.Value());
|
||||
u64 t_u64 = cpsr.T() ? 1 : 0;
|
||||
u64 e_u64 = cpsr.E() ? 2 : 0;
|
||||
return pc_u64 | fpscr_u64 | t_u64 | e_u64;
|
||||
}
|
||||
```
|
||||
|
||||
## Our implementation isn't actually a stack
|
||||
|
||||
|
@ -49,97 +51,107 @@ host addresses for the corresponding the compiled blocks.
|
|||
size of the real RSB in hardware (which has 3 entries). Larger RSBs than 8
|
||||
showed degraded performance.
|
||||
|
||||
struct JitState {
|
||||
// ...
|
||||
```c++
|
||||
struct JitState {
|
||||
// ...
|
||||
|
||||
static constexpr size_t RSBSize = 8; // MUST be a power of 2.
|
||||
u32 rsb_ptr = 0;
|
||||
std::array<u64, RSBSize> rsb_location_descriptors;
|
||||
std::array<u64, RSBSize> rsb_codeptrs;
|
||||
void ResetRSB();
|
||||
static constexpr size_t RSBSize = 8; // MUST be a power of 2.
|
||||
u32 rsb_ptr = 0;
|
||||
std::array<u64, RSBSize> rsb_location_descriptors;
|
||||
std::array<u64, RSBSize> rsb_codeptrs;
|
||||
void ResetRSB();
|
||||
|
||||
// ...
|
||||
};
|
||||
// ...
|
||||
};
|
||||
```
|
||||
|
||||
### RSB Push
|
||||
|
||||
We insert our prediction at the insertion point iff the RSB doesn't already
|
||||
contain a prediction with the same `UniqueHash`.
|
||||
|
||||
void EmitX64::EmitPushRSB(IR::Block&, IR::Inst* inst) {
|
||||
using namespace Xbyak::util;
|
||||
```c++
|
||||
void EmitX64::EmitPushRSB(IR::Block&, IR::Inst* inst) {
|
||||
using namespace Xbyak::util;
|
||||
|
||||
ASSERT(inst->GetArg(0).IsImmediate());
|
||||
u64 imm64 = inst->GetArg(0).GetU64();
|
||||
ASSERT(inst->GetArg(0).IsImmediate());
|
||||
u64 imm64 = inst->GetArg(0).GetU64();
|
||||
|
||||
Xbyak::Reg64 code_ptr_reg = reg_alloc.ScratchGpr({HostLoc::RCX});
|
||||
Xbyak::Reg64 loc_desc_reg = reg_alloc.ScratchGpr();
|
||||
Xbyak::Reg32 index_reg = reg_alloc.ScratchGpr().cvt32();
|
||||
u64 code_ptr = unique_hash_to_code_ptr.find(imm64) != unique_hash_to_code_ptr.end()
|
||||
? u64(unique_hash_to_code_ptr[imm64])
|
||||
: u64(code->GetReturnFromRunCodeAddress());
|
||||
Xbyak::Reg64 code_ptr_reg = reg_alloc.ScratchGpr({HostLoc::RCX});
|
||||
Xbyak::Reg64 loc_desc_reg = reg_alloc.ScratchGpr();
|
||||
Xbyak::Reg32 index_reg = reg_alloc.ScratchGpr().cvt32();
|
||||
u64 code_ptr = unique_hash_to_code_ptr.find(imm64) != unique_hash_to_code_ptr.end()
|
||||
? u64(unique_hash_to_code_ptr[imm64])
|
||||
: u64(code->GetReturnFromRunCodeAddress());
|
||||
|
||||
code->mov(index_reg, dword[code.ABI_JIT_PTR + offsetof(JitState, rsb_ptr)]);
|
||||
code->add(index_reg, 1);
|
||||
code->and_(index_reg, u32(JitState::RSBSize - 1));
|
||||
code->mov(index_reg, dword[code.ABI_JIT_PTR + offsetof(JitState, rsb_ptr)]);
|
||||
code->add(index_reg, 1);
|
||||
code->and_(index_reg, u32(JitState::RSBSize - 1));
|
||||
|
||||
code->mov(loc_desc_reg, u64(imm64));
|
||||
CodePtr patch_location = code->getCurr<CodePtr>();
|
||||
patch_unique_hash_locations[imm64].emplace_back(patch_location);
|
||||
code->mov(code_ptr_reg, u64(code_ptr)); // This line has to match up with EmitX64::Patch.
|
||||
code->EnsurePatchLocationSize(patch_location, 10);
|
||||
code->mov(loc_desc_reg, u64(imm64));
|
||||
CodePtr patch_location = code->getCurr<CodePtr>();
|
||||
patch_unique_hash_locations[imm64].emplace_back(patch_location);
|
||||
code->mov(code_ptr_reg, u64(code_ptr)); // This line has to match up with EmitX64::Patch.
|
||||
code->EnsurePatchLocationSize(patch_location, 10);
|
||||
|
||||
Xbyak::Label label;
|
||||
for (size_t i = 0; i < JitState::RSBSize; ++i) {
|
||||
code->cmp(loc_desc_reg, qword[code.ABI_JIT_PTR + offsetof(JitState, rsb_location_descriptors) + i * sizeof(u64)]);
|
||||
code->je(label, code->T_SHORT);
|
||||
}
|
||||
|
||||
code->mov(dword[code.ABI_JIT_PTR + offsetof(JitState, rsb_ptr)], index_reg);
|
||||
code->mov(qword[code.ABI_JIT_PTR + index_reg.cvt64() * 8 + offsetof(JitState, rsb_location_descriptors)], loc_desc_reg);
|
||||
code->mov(qword[code.ABI_JIT_PTR + index_reg.cvt64() * 8 + offsetof(JitState, rsb_codeptrs)], code_ptr_reg);
|
||||
code->L(label);
|
||||
Xbyak::Label label;
|
||||
for (size_t i = 0; i < JitState::RSBSize; ++i) {
|
||||
code->cmp(loc_desc_reg, qword[code.ABI_JIT_PTR + offsetof(JitState, rsb_location_descriptors) + i * sizeof(u64)]);
|
||||
code->je(label, code->T_SHORT);
|
||||
}
|
||||
|
||||
code->mov(dword[code.ABI_JIT_PTR + offsetof(JitState, rsb_ptr)], index_reg);
|
||||
code->mov(qword[code.ABI_JIT_PTR + index_reg.cvt64() * 8 + offsetof(JitState, rsb_location_descriptors)], loc_desc_reg);
|
||||
code->mov(qword[code.ABI_JIT_PTR + index_reg.cvt64() * 8 + offsetof(JitState, rsb_codeptrs)], code_ptr_reg);
|
||||
code->L(label);
|
||||
}
|
||||
```
|
||||
|
||||
In pseudocode:
|
||||
|
||||
for (i := 0 .. RSBSize-1)
|
||||
if (rsb_location_descriptors[i] == imm64)
|
||||
goto label;
|
||||
rsb_ptr++;
|
||||
rsb_ptr %= RSBSize;
|
||||
rsb_location_desciptors[rsb_ptr] = imm64; //< The UniqueHash
|
||||
rsb_codeptr[rsb_ptr] = /* codeptr corresponding to the UniqueHash */;
|
||||
label:
|
||||
```c++
|
||||
for (i := 0 .. RSBSize-1)
|
||||
if (rsb_location_descriptors[i] == imm64)
|
||||
goto label;
|
||||
rsb_ptr++;
|
||||
rsb_ptr %= RSBSize;
|
||||
rsb_location_desciptors[rsb_ptr] = imm64; //< The UniqueHash
|
||||
rsb_codeptr[rsb_ptr] = /* codeptr corresponding to the UniqueHash */;
|
||||
label:
|
||||
```
|
||||
|
||||
## RSB Pop
|
||||
|
||||
To check if a prediction is in the RSB, we linearly scan the RSB.
|
||||
|
||||
void EmitX64::EmitTerminalPopRSBHint(IR::Term::PopRSBHint, IR::LocationDescriptor initial_location) {
|
||||
using namespace Xbyak::util;
|
||||
```c++
|
||||
void EmitX64::EmitTerminalPopRSBHint(IR::Term::PopRSBHint, IR::LocationDescriptor initial_location) {
|
||||
using namespace Xbyak::util;
|
||||
|
||||
// This calculation has to match up with IREmitter::PushRSB
|
||||
code->mov(ecx, MJitStateReg(Arm::Reg::PC));
|
||||
code->shl(rcx, 32);
|
||||
code->mov(ebx, dword[code.ABI_JIT_PTR + offsetof(JitState, FPSCR_mode)]);
|
||||
code->or_(ebx, dword[code.ABI_JIT_PTR + offsetof(JitState, CPSR_et)]);
|
||||
code->or_(rbx, rcx);
|
||||
// This calculation has to match up with IREmitter::PushRSB
|
||||
code->mov(ecx, MJitStateReg(Arm::Reg::PC));
|
||||
code->shl(rcx, 32);
|
||||
code->mov(ebx, dword[code.ABI_JIT_PTR + offsetof(JitState, FPSCR_mode)]);
|
||||
code->or_(ebx, dword[code.ABI_JIT_PTR + offsetof(JitState, CPSR_et)]);
|
||||
code->or_(rbx, rcx);
|
||||
|
||||
code->mov(rax, u64(code->GetReturnFromRunCodeAddress()));
|
||||
for (size_t i = 0; i < JitState::RSBSize; ++i) {
|
||||
code->cmp(rbx, qword[code.ABI_JIT_PTR + offsetof(JitState, rsb_location_descriptors) + i * sizeof(u64)]);
|
||||
code->cmove(rax, qword[code.ABI_JIT_PTR + offsetof(JitState, rsb_codeptrs) + i * sizeof(u64)]);
|
||||
}
|
||||
|
||||
code->jmp(rax);
|
||||
code->mov(rax, u64(code->GetReturnFromRunCodeAddress()));
|
||||
for (size_t i = 0; i < JitState::RSBSize; ++i) {
|
||||
code->cmp(rbx, qword[code.ABI_JIT_PTR + offsetof(JitState, rsb_location_descriptors) + i * sizeof(u64)]);
|
||||
code->cmove(rax, qword[code.ABI_JIT_PTR + offsetof(JitState, rsb_codeptrs) + i * sizeof(u64)]);
|
||||
}
|
||||
|
||||
code->jmp(rax);
|
||||
}
|
||||
```
|
||||
|
||||
In pseudocode:
|
||||
|
||||
rbx := ComputeUniqueHash()
|
||||
rax := ReturnToDispatch
|
||||
for (i := 0 .. RSBSize-1)
|
||||
if (rbx == rsb_location_descriptors[i])
|
||||
rax = rsb_codeptrs[i]
|
||||
goto rax
|
||||
```c++
|
||||
rbx := ComputeUniqueHash()
|
||||
rax := ReturnToDispatch
|
||||
for (i := 0 .. RSBSize-1)
|
||||
if (rbx == rsb_location_descriptors[i])
|
||||
rax = rsb_codeptrs[i]
|
||||
goto rax
|
||||
```
|
||||
|
|
|
@ -40,7 +40,10 @@ endif()
|
|||
add_subdirectory(externals)
|
||||
|
||||
target_link_libraries(qt_common PRIVATE core Qt6::Core SimpleIni::SimpleIni QuaZip::QuaZip frozen::frozen)
|
||||
target_link_libraries(qt_common PRIVATE Qt6::Core)
|
||||
|
||||
if (NOT APPLE AND ENABLE_OPENGL)
|
||||
target_compile_definitions(qt_common PUBLIC HAS_OPENGL)
|
||||
endif()
|
||||
|
||||
if (NOT WIN32)
|
||||
target_include_directories(qt_common PRIVATE ${Qt6Gui_PRIVATE_INCLUDE_DIRS})
|
||||
|
|
|
@ -470,8 +470,8 @@ void PresentManager::CopyToSwapchainImpl(Frame* frame) {
|
|||
const std::array wait_semaphores = {present_semaphore, *frame->render_ready};
|
||||
|
||||
static constexpr std::array<VkPipelineStageFlags, 2> wait_stage_masks{
|
||||
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
|
||||
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
|
||||
VK_PIPELINE_STAGE_TRANSFER_BIT,
|
||||
VK_PIPELINE_STAGE_TRANSFER_BIT,
|
||||
};
|
||||
|
||||
const VkSubmitInfo submit_info{
|
||||
|
|
|
@ -480,10 +480,6 @@ if (MSVC)
|
|||
copy_yuzu_FFmpeg_deps(yuzu)
|
||||
endif()
|
||||
|
||||
if (NOT APPLE AND ENABLE_OPENGL)
|
||||
target_compile_definitions(yuzu PRIVATE HAS_OPENGL)
|
||||
endif()
|
||||
|
||||
if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
|
||||
target_link_libraries(yuzu PRIVATE dynarmic::dynarmic)
|
||||
endif()
|
||||
|
|
|
@ -1,3 +1,6 @@
|
|||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
|
@ -11,16 +14,11 @@ AboutDialog::AboutDialog(QWidget* parent)
|
|||
: QDialog(parent)
|
||||
, ui{std::make_unique<Ui::AboutDialog>()}
|
||||
{
|
||||
static const std::string description = std::string(Common::g_build_version);
|
||||
static const std::string build_id = std::string(Common::g_build_id);
|
||||
static const std::string compiler = std::string(Common::g_compiler_id);
|
||||
static const std::string description = std::string{Common::g_build_version};
|
||||
static const std::string build_id = std::string{Common::g_build_id};
|
||||
static const std::string compiler = std::string{Common::g_compiler_id};
|
||||
|
||||
std::string yuzu_build;
|
||||
if (Common::g_is_dev_build) {
|
||||
yuzu_build = fmt::format("Eden Nightly | {}-{} | {}", description, build_id, compiler);
|
||||
} else {
|
||||
yuzu_build = fmt::format("Eden | {} | {}", description, compiler);
|
||||
}
|
||||
static const std::string yuzu_build = fmt::format("Eden | {} | {}", description, compiler);
|
||||
|
||||
const auto override_build = fmt::format(fmt::runtime(
|
||||
std::string(Common::g_title_bar_format_idle)),
|
||||
|
|
|
@ -4205,16 +4205,11 @@ void GMainWindow::OnEmulatorUpdateAvailable() {
|
|||
|
||||
void GMainWindow::UpdateWindowTitle(std::string_view title_name, std::string_view title_version,
|
||||
std::string_view gpu_vendor) {
|
||||
static const std::string description = std::string(Common::g_build_version);
|
||||
static const std::string build_id = std::string(Common::g_build_id);
|
||||
static const std::string compiler = std::string(Common::g_compiler_id);
|
||||
static const std::string description = std::string{Common::g_build_version};
|
||||
static const std::string build_id = std::string{Common::g_build_id};
|
||||
static const std::string compiler = std::string{Common::g_compiler_id};
|
||||
|
||||
std::string yuzu_title;
|
||||
if (Common::g_is_dev_build) {
|
||||
yuzu_title = fmt::format("Eden Nightly | {}-{} | {}", description, build_id, compiler);
|
||||
} else {
|
||||
yuzu_title = fmt::format("Eden | {} | {}", description, compiler);
|
||||
}
|
||||
static const std::string yuzu_title = fmt::format("Eden | {} | {}", description, compiler);
|
||||
|
||||
const auto override_title =
|
||||
fmt::format(fmt::runtime(std::string(Common::g_title_bar_format_idle)), build_id);
|
||||
|
|