Compare commits

...

29 commits

Author SHA1 Message Date
c3198678fc [dynarmic] enforce higher constraints
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
0b66573353 Fix license headers
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
7b2eccc899 [dynarmic] increase cache code size
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
ff4d2e05a6 [dynarmic] inlined pool in block + slab-like for each block
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
a8b1849fee [dynarmic] Allow to skip verification pass
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
0564e5395a [dynarmic] fix exception posix handler
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
5d30093b41 [dynarmic] use better boost::visitor
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
38ae2541ce [dynarmic] regalloc use ScratchImpl that uses all instead of iterating
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
29238a4dd7 [dynarmic] unconditional branches always taken
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
71df3ce118 [dynarmic] fix hardcoded AVX512 registers, use xmm0 instead of xmm16 to align with spec
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
817c84bec1 [dynarmic] checked code alignment
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
c74d2083c9 [dynarmic] remove use of mcl reverse iterator
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
17ce6fd3a8 [dynarmic, docs] fastmem docs
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
85f87b602d [dynarmic] use ARCHITECTURE_ macros instead of MCL ones
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
7e0450027c [dynarmic] add back encoding names (for print_info)
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
25c143c08a [dynarmic] fix ASIMD execution
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
8b3137572b [dynarmic] fix tests
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
87932e5a01 [dynarmic] reduce matcher table noise and cache misses
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
a158ece419 [dynarmic] (probably makes MSVC crash) - use 128MiB code cache
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
ec16878d68 [docs] fastmem draft
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
694c1e6495 [dynarmic] fix tests_reader and tests_generator
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
7ccba40ea8 [dynarmic] reduce size: use 2 bits for LRU and 4 bits for clog2 of bit size
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
38f7722402 [dynarmic] use small vector experiment
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
1acf6df739 [dynarmic] reduce opt pass latency
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-09-25 08:01:31 +02:00
42280f34d6
[video_core] Improve asynchronous shader building description (#2568)
This improves the asynchronous shader building description.

Reviewed-on: #2568
Co-authored-by: MaranBr <maranbr@outlook.com>
Co-committed-by: MaranBr <maranbr@outlook.com>
2025-09-24 22:11:13 +02:00
2482846cf6
[core] fix msvc comp (#2567)
Signed-off-by: crueter <crueter@eden-emu.dev>
Reviewed-on: #2567
2025-09-24 21:50:18 +02:00
bf4dce8d0b
[hid_core/frontend] use shared lock for accesses on emulated controller (reduces contention in FBSD) (#2553)
Signed-off-by: lizzie <lizzie@eden-emu.dev>
Reviewed-on: #2553
Reviewed-by: crueter <crueter@eden-emu.dev>
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
Co-authored-by: lizzie <lizzie@eden-emu.dev>
Co-committed-by: lizzie <lizzie@eden-emu.dev>
2025-09-24 19:30:21 +02:00
45263ee7aa
LoadIdTokenCache stub (#2531)
Reviewed-on: #2531
Reviewed-by: crueter <crueter@eden-emu.dev>
Co-authored-by: PavelBARABANOV <pavelbarabanov94@gmail.com>
Co-committed-by: PavelBARABANOV <pavelbarabanov94@gmail.com>
2025-09-24 19:30:00 +02:00
f19bbda517
[common] remove ranges polyfill (#2546)
Signed-off-by: lizzie <lizzie@eden-emu.dev>
Reviewed-on: #2546
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
Reviewed-by: crueter <crueter@eden-emu.dev>
Co-authored-by: lizzie <lizzie@eden-emu.dev>
Co-committed-by: lizzie <lizzie@eden-emu.dev>
2025-09-24 19:29:48 +02:00
94 changed files with 537 additions and 1106 deletions

View file

@ -1,9 +1,12 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_event.h"
#include "common/assert.h"
#include "common/polyfill_ranges.h"
#include <ranges>
namespace AudioCore {

View file

@ -13,7 +13,7 @@
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/polyfill_ranges.h"
#include <ranges>
namespace AudioCore {
constexpr u32 CurrentRevision = 16;

View file

@ -8,7 +8,7 @@
#include "audio_core/adsp/apps/audio_renderer/command_list_processor.h"
#include "audio_core/renderer/command/effect/i3dl2_reverb.h"
#include "common/polyfill_ranges.h"
#include <ranges>
namespace AudioCore::Renderer {

View file

@ -9,7 +9,7 @@
#include "audio_core/adsp/apps/audio_renderer/command_list_processor.h"
#include "audio_core/renderer/command/effect/reverb.h"
#include "common/polyfill_ranges.h"
#include <ranges>
namespace AudioCore::Renderer {

View file

@ -8,7 +8,7 @@
#include "audio_core/renderer/mix/mix_context.h"
#include "audio_core/renderer/splitter/splitter_context.h"
#include "common/polyfill_ranges.h"
#include <ranges>
namespace AudioCore::Renderer {

View file

@ -1,10 +1,13 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <ranges>
#include "audio_core/renderer/voice/voice_context.h"
#include "common/polyfill_ranges.h"
#include <ranges>
namespace AudioCore::Renderer {

View file

@ -1,10 +1,13 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include "common/fs/fs_util.h"
#include "common/polyfill_ranges.h"
#include <ranges>
namespace Common::FS {

View file

@ -1,530 +0,0 @@
// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
//
// TODO: remove this file when ranges are supported by all compilation targets
//
#pragma once
#include <algorithm>
#include <utility>
#include <version>
#ifndef __cpp_lib_ranges
namespace std {
namespace ranges {
template <typename T>
concept range = requires(T& t) {
begin(t);
end(t);
};
template <typename T>
concept input_range = range<T>;
template <typename T>
concept output_range = range<T>;
template <range R>
using range_difference_t = ptrdiff_t;
//
// find, find_if, find_if_not
//
struct find_fn {
template <typename Iterator, typename T, typename Proj = std::identity>
constexpr Iterator operator()(Iterator first, Iterator last, const T& value,
Proj proj = {}) const {
for (; first != last; ++first) {
if (std::invoke(proj, *first) == value) {
return first;
}
}
return first;
}
template <ranges::input_range R, typename T, typename Proj = std::identity>
constexpr ranges::iterator_t<R> operator()(R&& r, const T& value, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), value, std::ref(proj));
}
};
struct find_if_fn {
template <typename Iterator, typename Proj = std::identity, typename Pred>
constexpr Iterator operator()(Iterator first, Iterator last, Pred pred, Proj proj = {}) const {
for (; first != last; ++first) {
if (std::invoke(pred, std::invoke(proj, *first))) {
return first;
}
}
return first;
}
template <ranges::input_range R, typename Proj = std::identity, typename Pred>
constexpr ranges::iterator_t<R> operator()(R&& r, Pred pred, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::ref(pred), std::ref(proj));
}
};
struct find_if_not_fn {
template <typename Iterator, typename Proj = std::identity, typename Pred>
constexpr Iterator operator()(Iterator first, Iterator last, Pred pred, Proj proj = {}) const {
for (; first != last; ++first) {
if (!std::invoke(pred, std::invoke(proj, *first))) {
return first;
}
}
return first;
}
template <ranges::input_range R, typename Proj = std::identity, typename Pred>
constexpr ranges::iterator_t<R> operator()(R&& r, Pred pred, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::ref(pred), std::ref(proj));
}
};
inline constexpr find_fn find;
inline constexpr find_if_fn find_if;
inline constexpr find_if_not_fn find_if_not;
//
// any_of, all_of, none_of
//
struct all_of_fn {
template <typename Iterator, typename Proj = std::identity, typename Pred>
constexpr bool operator()(Iterator first, Iterator last, Pred pred, Proj proj = {}) const {
return ranges::find_if_not(first, last, std::ref(pred), std::ref(proj)) == last;
}
template <ranges::input_range R, typename Proj = std::identity, typename Pred>
constexpr bool operator()(R&& r, Pred pred, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::ref(pred), std::ref(proj));
}
};
struct any_of_fn {
template <typename Iterator, typename Proj = std::identity, typename Pred>
constexpr bool operator()(Iterator first, Iterator last, Pred pred, Proj proj = {}) const {
return ranges::find_if(first, last, std::ref(pred), std::ref(proj)) != last;
}
template <ranges::input_range R, typename Proj = std::identity, typename Pred>
constexpr bool operator()(R&& r, Pred pred, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::ref(pred), std::ref(proj));
}
};
struct none_of_fn {
template <typename Iterator, typename Proj = std::identity, typename Pred>
constexpr bool operator()(Iterator first, Iterator last, Pred pred, Proj proj = {}) const {
return ranges::find_if(first, last, std::ref(pred), std::ref(proj)) == last;
}
template <ranges::input_range R, typename Proj = std::identity, typename Pred>
constexpr bool operator()(R&& r, Pred pred, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::ref(pred), std::ref(proj));
}
};
inline constexpr any_of_fn any_of;
inline constexpr all_of_fn all_of;
inline constexpr none_of_fn none_of;
//
// count, count_if
//
struct count_fn {
template <typename Iterator, typename T, typename Proj = std::identity>
constexpr ptrdiff_t operator()(Iterator first, Iterator last, const T& value,
Proj proj = {}) const {
ptrdiff_t counter = 0;
for (; first != last; ++first)
if (std::invoke(proj, *first) == value)
++counter;
return counter;
}
template <ranges::input_range R, typename T, typename Proj = std::identity>
constexpr ptrdiff_t operator()(R&& r, const T& value, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), value, std::ref(proj));
}
};
struct count_if_fn {
template <typename Iterator, typename Proj = std::identity, typename Pred>
constexpr ptrdiff_t operator()(Iterator first, Iterator last, Pred pred, Proj proj = {}) const {
ptrdiff_t counter = 0;
for (; first != last; ++first)
if (std::invoke(pred, std::invoke(proj, *first)))
++counter;
return counter;
}
template <ranges::input_range R, typename Proj = std::identity, typename Pred>
constexpr ptrdiff_t operator()(R&& r, Pred pred, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::ref(pred), std::ref(proj));
}
};
inline constexpr count_fn count;
inline constexpr count_if_fn count_if;
//
// transform
//
struct transform_fn {
template <typename InputIterator, typename OutputIterator, typename F,
typename Proj = std::identity>
constexpr void operator()(InputIterator first1, InputIterator last1, OutputIterator result,
F op, Proj proj = {}) const {
for (; first1 != last1; ++first1, (void)++result) {
*result = std::invoke(op, std::invoke(proj, *first1));
}
}
template <ranges::input_range R, typename OutputIterator, typename F,
typename Proj = std::identity>
constexpr void operator()(R&& r, OutputIterator result, F op, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), result, std::ref(op), std::ref(proj));
}
};
inline constexpr transform_fn transform;
//
// sort
//
struct sort_fn {
template <typename Iterator, typename Comp = ranges::less, typename Proj = std::identity>
constexpr void operator()(Iterator first, Iterator last, Comp comp = {}, Proj proj = {}) const {
if (first == last)
return;
Iterator last_iter = ranges::next(first, last);
std::sort(first, last_iter,
[&](auto& lhs, auto& rhs) { return comp(proj(lhs), proj(rhs)); });
}
template <ranges::input_range R, typename Comp = ranges::less, typename Proj = std::identity>
constexpr void operator()(R&& r, Comp comp = {}, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::move(comp), std::move(proj));
}
};
inline constexpr sort_fn sort;
//
// fill
//
struct fill_fn {
template <typename T, typename OutputIterator>
constexpr OutputIterator operator()(OutputIterator first, OutputIterator last,
const T& value) const {
while (first != last) {
*first++ = value;
}
return first;
}
template <typename T, ranges::output_range R>
constexpr ranges::iterator_t<R> operator()(R&& r, const T& value) const {
return operator()(ranges::begin(r), ranges::end(r), value);
}
};
inline constexpr fill_fn fill;
//
// for_each
//
struct for_each_fn {
template <typename Iterator, typename Proj = std::identity, typename Fun>
constexpr void operator()(Iterator first, Iterator last, Fun f, Proj proj = {}) const {
for (; first != last; ++first) {
std::invoke(f, std::invoke(proj, *first));
}
}
template <ranges::input_range R, typename Proj = std::identity, typename Fun>
constexpr void operator()(R&& r, Fun f, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::move(f), std::ref(proj));
}
};
inline constexpr for_each_fn for_each;
//
// min_element, max_element
//
struct min_element_fn {
template <typename Iterator, typename Proj = std::identity, typename Comp = ranges::less>
constexpr Iterator operator()(Iterator first, Iterator last, Comp comp = {},
Proj proj = {}) const {
if (first == last) {
return last;
}
auto smallest = first;
++first;
for (; first != last; ++first) {
if (!std::invoke(comp, std::invoke(proj, *smallest), std::invoke(proj, *first))) {
smallest = first;
}
}
return smallest;
}
template <ranges::input_range R, typename Proj = std::identity, typename Comp = ranges::less>
constexpr ranges::iterator_t<R> operator()(R&& r, Comp comp = {}, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::ref(comp), std::ref(proj));
}
};
struct max_element_fn {
template <typename Iterator, typename Proj = std::identity, typename Comp = ranges::less>
constexpr Iterator operator()(Iterator first, Iterator last, Comp comp = {},
Proj proj = {}) const {
if (first == last) {
return last;
}
auto largest = first;
++first;
for (; first != last; ++first) {
if (std::invoke(comp, std::invoke(proj, *largest), std::invoke(proj, *first))) {
largest = first;
}
}
return largest;
}
template <ranges::input_range R, typename Proj = std::identity, typename Comp = ranges::less>
constexpr ranges::iterator_t<R> operator()(R&& r, Comp comp = {}, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::ref(comp), std::ref(proj));
}
};
inline constexpr min_element_fn min_element;
inline constexpr max_element_fn max_element;
//
// replace, replace_if
//
struct replace_fn {
template <typename Iterator, typename T1, typename T2, typename Proj = std::identity>
constexpr Iterator operator()(Iterator first, Iterator last, const T1& old_value,
const T2& new_value, Proj proj = {}) const {
for (; first != last; ++first) {
if (old_value == std::invoke(proj, *first)) {
*first = new_value;
}
}
return first;
}
template <ranges::input_range R, typename T1, typename T2, typename Proj = std::identity>
constexpr ranges::iterator_t<R> operator()(R&& r, const T1& old_value, const T2& new_value,
Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), old_value, new_value, std::move(proj));
}
};
struct replace_if_fn {
template <typename Iterator, typename T, typename Proj = std::identity, typename Pred>
constexpr Iterator operator()(Iterator first, Iterator last, Pred pred, const T& new_value,
Proj proj = {}) const {
for (; first != last; ++first) {
if (!!std::invoke(pred, std::invoke(proj, *first))) {
*first = new_value;
}
}
return std::move(first);
}
template <ranges::input_range R, typename T, typename Proj = std::identity, typename Pred>
constexpr ranges::iterator_t<R> operator()(R&& r, Pred pred, const T& new_value,
Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::move(pred), new_value,
std::move(proj));
}
};
inline constexpr replace_fn replace;
inline constexpr replace_if_fn replace_if;
//
// copy, copy_if
//
struct copy_fn {
template <typename InputIterator, typename OutputIterator>
constexpr void operator()(InputIterator first, InputIterator last,
OutputIterator result) const {
for (; first != last; ++first, (void)++result) {
*result = *first;
}
}
template <ranges::input_range R, typename OutputIterator>
constexpr void operator()(R&& r, OutputIterator result) const {
return operator()(ranges::begin(r), ranges::end(r), std::move(result));
}
};
struct copy_if_fn {
template <typename InputIterator, typename OutputIterator, typename Proj = std::identity,
typename Pred>
constexpr void operator()(InputIterator first, InputIterator last, OutputIterator result,
Pred pred, Proj proj = {}) const {
for (; first != last; ++first) {
if (std::invoke(pred, std::invoke(proj, *first))) {
*result = *first;
++result;
}
}
}
template <ranges::input_range R, typename OutputIterator, typename Proj = std::identity,
typename Pred>
constexpr void operator()(R&& r, OutputIterator result, Pred pred, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::move(result), std::ref(pred),
std::ref(proj));
}
};
inline constexpr copy_fn copy;
inline constexpr copy_if_fn copy_if;
//
// generate
//
struct generate_fn {
template <typename Iterator, typename F>
constexpr Iterator operator()(Iterator first, Iterator last, F gen) const {
for (; first != last; *first = std::invoke(gen), ++first)
;
return first;
}
template <typename R, std::copy_constructible F>
requires std::invocable<F&> && ranges::output_range<R>
constexpr ranges::iterator_t<R> operator()(R&& r, F gen) const {
return operator()(ranges::begin(r), ranges::end(r), std::move(gen));
}
};
inline constexpr generate_fn generate;
//
// lower_bound, upper_bound
//
struct lower_bound_fn {
template <typename Iterator, typename T, typename Proj = std::identity,
typename Comp = ranges::less>
constexpr Iterator operator()(Iterator first, Iterator last, const T& value, Comp comp = {},
Proj proj = {}) const {
Iterator it;
std::ptrdiff_t _count, _step;
_count = std::distance(first, last);
while (_count > 0) {
it = first;
_step = _count / 2;
ranges::advance(it, _step, last);
if (comp(std::invoke(proj, *it), value)) {
first = ++it;
_count -= _step + 1;
} else {
_count = _step;
}
}
return first;
}
template <ranges::input_range R, typename T, typename Proj = std::identity,
typename Comp = ranges::less>
constexpr ranges::iterator_t<R> operator()(R&& r, const T& value, Comp comp = {},
Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), value, std::ref(comp), std::ref(proj));
}
};
struct upper_bound_fn {
template <typename Iterator, typename T, typename Proj = std::identity,
typename Comp = ranges::less>
constexpr Iterator operator()(Iterator first, Iterator last, const T& value, Comp comp = {},
Proj proj = {}) const {
Iterator it;
std::ptrdiff_t _count, _step;
_count = std::distance(first, last);
while (_count > 0) {
it = first;
_step = _count / 2;
ranges::advance(it, _step, last);
if (!comp(value, std::invoke(proj, *it))) {
first = ++it;
_count -= _step + 1;
} else {
_count = _step;
}
}
return first;
}
template <ranges::input_range R, typename T, typename Proj = std::identity,
typename Comp = ranges::less>
constexpr ranges::iterator_t<R> operator()(R&& r, const T& value, Comp comp = {},
Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), value, std::ref(comp), std::ref(proj));
}
};
inline constexpr lower_bound_fn lower_bound;
inline constexpr upper_bound_fn upper_bound;
//
// adjacent_find
//
struct adjacent_find_fn {
template <typename Iterator, typename Proj = std::identity, typename Pred = ranges::equal_to>
constexpr Iterator operator()(Iterator first, Iterator last, Pred pred = {},
Proj proj = {}) const {
if (first == last)
return first;
auto _next = ranges::next(first);
for (; _next != last; ++_next, ++first)
if (std::invoke(pred, std::invoke(proj, *first), std::invoke(proj, *_next)))
return first;
return _next;
}
template <ranges::input_range R, typename Proj = std::identity,
typename Pred = ranges::equal_to>
constexpr ranges::iterator_t<R> operator()(R&& r, Pred pred = {}, Proj proj = {}) const {
return operator()(ranges::begin(r), ranges::end(r), std::ref(pred), std::ref(proj));
}
};
inline constexpr adjacent_find_fn adjacent_find;
} // namespace ranges
} // namespace std
#endif
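
For reference, the algorithms the deleted polyfill provided map directly onto the C++20 facilities that the #include swap above relies on. A minimal standalone sketch, illustrative only and not part of the changeset:

#include <algorithm>
#include <cstdio>
#include <ranges>
#include <vector>

int main() {
    std::vector<int> v{3, 1, 4, 1, 5};
    // Same call shapes the removed polyfill emulated, now provided by the standard library.
    const auto it = std::ranges::find_if(v, [](int x) { return x > 3; });
    std::printf("first element > 3: %d\n", it != v.end() ? *it : -1);
    std::printf("number of 1s: %td\n", std::ranges::count(v, 1));
    std::ranges::sort(v);
    std::printf("smallest after sort: %d\n", v.front());
}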

View file

@ -15,7 +15,7 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/polyfill_ranges.h"
#include <ranges>
namespace Common {

View file

@ -210,12 +210,9 @@ std::shared_ptr<Dynarmic::A32::Jit> ArmDynarmic32::MakeJit(Common::PageTable* pa
config.wall_clock_cntpct = m_uses_wall_clock;
config.enable_cycle_counting = !m_uses_wall_clock;
// Code cache size
#ifdef ARCHITECTURE_arm64
// Code cache size - max in ARM is 128MiB, max in x86_64 is 2GiB
// Solaris doesn't support kPageSize >= 512MiB
config.code_cache_size = std::uint32_t(128_MiB);
#else
config.code_cache_size = std::uint32_t(512_MiB);
#endif
// Allow memory fault handling to work
if (m_system.DebuggerEnabled()) {

View file

@ -269,12 +269,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
config.wall_clock_cntpct = m_uses_wall_clock;
config.enable_cycle_counting = !m_uses_wall_clock;
// Code cache size
#ifdef ARCHITECTURE_arm64
// Code cache size - max in ARM is 128MiB, max in x86_64 is 2GiB
// Solaris doesn't support kPageSize >= 512MiB
config.code_cache_size = std::uint32_t(128_MiB);
#else
config.code_cache_size = std::uint32_t(512_MiB);
#endif
// Allow memory fault handling to work
if (m_system.DebuggerEnabled()) {
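
Both hunks above hard-code config.code_cache_size to 128_MiB, which still fits in a std::uint32_t. The project supplies its own byte-size literals; the definition below is only a hypothetical stand-in to show the arithmetic:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the project's _MiB byte-size literal.
constexpr unsigned long long operator""_MiB(unsigned long long n) {
    return n * 1024ull * 1024ull;
}

int main() {
    constexpr auto cache_size = std::uint32_t(128_MiB); // 134217728 bytes
    std::printf("code cache size: %u bytes\n", cache_size);
}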

View file

@ -10,7 +10,7 @@
#include <utility>
#include "common/logging/log.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "core/crypto/aes_util.h"
#include "core/crypto/ctr_encryption_layer.h"
#include "core/crypto/key_manager.h"

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -10,7 +13,7 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"

View file

@ -11,7 +11,7 @@
#include "common/fs/file.h"
#include "common/fs/path_util.h"
#include "common/logging/log.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "common/stb.h"
#include "common/string_util.h"
#include "common/swap.h"
@ -647,7 +647,8 @@ public:
{0, &IManagerForApplication::CheckAvailability, "CheckAvailability"},
{1, &IManagerForApplication::GetAccountId, "GetAccountId"},
{2, &IManagerForApplication::EnsureIdTokenCacheAsync, "EnsureIdTokenCacheAsync"},
{3, &IManagerForApplication::LoadIdTokenCache, "LoadIdTokenCache"},
{3, &IManagerForApplication::LoadIdTokenCacheDeprecated, "LoadIdTokenCache"},
{4, &IManagerForApplication::LoadIdTokenCache, "LoadIdTokenCache"},
{130, &IManagerForApplication::GetNintendoAccountUserResourceCacheForApplication, "GetNintendoAccountUserResourceCacheForApplication"},
{136, &IManagerForApplication::GetNintendoAccountUserResourceCacheForApplication, "GetNintendoAccountUserResourceCache"}, // 19.0.0+
{150, nullptr, "CreateAuthorizationRequest"},
@ -683,12 +684,25 @@ private:
rb.PushIpcInterface(ensure_token_id);
}
void LoadIdTokenCache(HLERequestContext& ctx) {
void LoadIdTokenCacheDeprecated(HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
ensure_token_id->LoadIdTokenCache(ctx);
}
void LoadIdTokenCache(HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
std::vector<u8> token_data(0x100);
std::fill(token_data.begin(), token_data.end(), u8(0));
ctx.WriteBuffer(token_data, 0);
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(ResultSuccess);
rb.Push(static_cast<u32>(token_data.size()));
}
void GetNintendoAccountUserResourceCacheForApplication(HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");

View file

@ -12,7 +12,7 @@
#include "common/fs/file.h"
#include "common/fs/fs.h"
#include "common/fs/path_util.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "common/settings.h"
#include "common/string_util.h"
#include "core/hle/service/acc/profile_manager.h"

View file

@ -9,7 +9,7 @@
#include "common/bit_cast.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "common/settings.h"
#include "common/string_util.h"
#include "core/internal_network/emu_net_state.h"

View file

@ -18,11 +18,7 @@ endif()
# Dynarmic project options
option(DYNARMIC_ENABLE_CPU_FEATURE_DETECTION "Turning this off causes dynarmic to assume the host CPU doesn't support anything later than SSE3" ON)
if (PLATFORM_OPENBSD)
option(DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT "Enables support for systems that require W^X" ON)
else()
option(DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT "Enables support for systems that require W^X" OFF)
endif()
option(DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT "Enables support for systems that require W^X" ${PLATFORM_OPENBSD})
option(DYNARMIC_FATAL_ERRORS "Errors are fatal" OFF)
option(DYNARMIC_IGNORE_ASSERTS "Ignore asserts" OFF)

View file

@ -58,14 +58,11 @@ add_library(dynarmic
common/lut_from_list.h
common/math_util.cpp
common/math_util.h
common/memory_pool.cpp
common/memory_pool.h
common/safe_ops.h
common/spin_lock.h
common/string_util.h
common/u128.cpp
common/u128.h
common/variant_util.h
frontend/A32/a32_types.cpp
frontend/A32/a32_types.h
frontend/A64/a64_types.cpp
@ -80,7 +77,6 @@ add_library(dynarmic
ir/basic_block.cpp
ir/basic_block.h
ir/cond.h
ir/ir_emitter.cpp
ir/ir_emitter.h
ir/location_descriptor.cpp
ir/location_descriptor.h

View file

@ -15,15 +15,15 @@
#include <mcl/macro/architecture.hpp>
#include "dynarmic/common/common_types.h"
#if defined(MCL_ARCHITECTURE_X86_64)
#if defined(ARCHITECTURE_x86_64)
namespace Dynarmic::Backend::X64 {
class BlockOfCode;
} // namespace Dynarmic::Backend::X64
#elif defined(MCL_ARCHITECTURE_ARM64)
#elif defined(ARCHITECTURE_arm64)
namespace oaknut {
class CodeBlock;
} // namespace oaknut
#elif defined(MCL_ARCHITECTURE_RISCV)
#elif defined(ARCHITECTURE_riscv64)
namespace Dynarmic::Backend::RV64 {
class CodeBlock;
} // namespace Dynarmic::Backend::RV64
@ -33,16 +33,16 @@ class CodeBlock;
namespace Dynarmic::Backend {
#if defined(MCL_ARCHITECTURE_X86_64)
#if defined(ARCHITECTURE_x86_64)
struct FakeCall {
u64 call_rip;
u64 ret_rip;
};
#elif defined(MCL_ARCHITECTURE_ARM64)
#elif defined(ARCHITECTURE_arm64)
struct FakeCall {
u64 call_pc;
};
#elif defined(MCL_ARCHITECTURE_RISCV)
#elif defined(ARCHITECTURE_riscv64)
struct FakeCall {
};
#else
@ -54,11 +54,11 @@ public:
ExceptionHandler();
~ExceptionHandler();
#if defined(MCL_ARCHITECTURE_X86_64)
#if defined(ARCHITECTURE_x86_64)
void Register(X64::BlockOfCode& code);
#elif defined(MCL_ARCHITECTURE_ARM64)
#elif defined(ARCHITECTURE_arm64)
void Register(oaknut::CodeBlock& mem, std::size_t mem_size);
#elif defined(MCL_ARCHITECTURE_RISCV)
#elif defined(ARCHITECTURE_riscv64)
void Register(RV64::CodeBlock& mem, std::size_t mem_size);
#else
# error "Invalid architecture"

View file

@ -28,19 +28,7 @@ A32AddressSpace::A32AddressSpace(const A32::UserConfig& conf)
IR::Block A32AddressSpace::GenerateIR(IR::LocationDescriptor descriptor) const {
IR::Block ir_block = A32::Translate(A32::LocationDescriptor{descriptor}, conf.callbacks, {conf.arch_version, conf.define_unpredictable_behaviour, conf.hook_hint_instructions});
Optimization::PolyfillPass(ir_block, {});
if (conf.HasOptimization(OptimizationFlag::GetSetElimination)) {
Optimization::A32GetSetElimination(ir_block, {.convert_nzc_to_nz = true});
Optimization::DeadCodeElimination(ir_block);
}
if (conf.HasOptimization(OptimizationFlag::ConstProp)) {
Optimization::A32ConstantMemoryReads(ir_block, conf.callbacks);
Optimization::ConstantPropagation(ir_block);
Optimization::DeadCodeElimination(ir_block);
}
Optimization::VerificationPass(ir_block);
Optimization::Optimize(ir_block, conf, {});
return ir_block;
}

View file

@ -28,7 +28,6 @@
#include "dynarmic/backend/x64/nzcv_util.h"
#include "dynarmic/backend/x64/perf_map.h"
#include "dynarmic/backend/x64/stack_layout.h"
#include "dynarmic/common/variant_util.h"
#include "dynarmic/frontend/A32/a32_location_descriptor.h"
#include "dynarmic/frontend/A32/a32_types.h"
#include "dynarmic/interface/A32/coprocessor.h"

View file

@ -122,9 +122,9 @@ A64EmitX64::BlockDescriptor A64EmitX64::Emit(IR::Block& block) noexcept {
auto const opcode = inst.GetOpcode();
// Call the relevant Emit* member function.
switch (opcode) {
#define OPCODE(name, type, ...) [[likely]] case IR::Opcode::name: goto opcode_branch;
#define OPCODE(name, type, ...) case IR::Opcode::name: goto opcode_branch;
#define A32OPC(name, type, ...)
#define A64OPC(name, type, ...) [[likely]] case IR::Opcode::A64##name: goto a64_branch;
#define A64OPC(name, type, ...) case IR::Opcode::A64##name: goto a64_branch;
#include "dynarmic/ir/opcodes.inc"
#undef OPCODE
#undef A32OPC
@ -764,7 +764,7 @@ void A64EmitX64::EmitPatchMovRcx(CodePtr target_code_ptr) {
target_code_ptr = code.GetReturnFromRunCodeAddress();
}
const CodePtr patch_location = code.getCurr();
code.mov(code.rcx, reinterpret_cast<u64>(target_code_ptr));
code.mov(code.rcx, u64(target_code_ptr));
code.EnsurePatchLocationSize(patch_location, 10);
}

View file

@ -80,16 +80,16 @@ public:
};
// TODO: Check code alignment
const CodePtr current_code_ptr = [this] {
const CodePtr aligned_code_ptr = CodePtr((uintptr_t(GetCurrentBlock()) + 15) & ~uintptr_t(15));
const CodePtr current_code_ptr = [this, aligned_code_ptr] {
// RSB optimization
const u32 new_rsb_ptr = (jit_state.rsb_ptr - 1) & A64JitState::RSBPtrMask;
if (jit_state.GetUniqueHash() == jit_state.rsb_location_descriptors[new_rsb_ptr]) {
jit_state.rsb_ptr = new_rsb_ptr;
return reinterpret_cast<CodePtr>(jit_state.rsb_codeptrs[new_rsb_ptr]);
return CodePtr(jit_state.rsb_codeptrs[new_rsb_ptr]);
}
return GetCurrentBlock();
return aligned_code_ptr;
//return GetCurrentBlock();
}();
const HaltReason hr = block_of_code.RunCode(&jit_state, current_code_ptr);

View file

@ -10,7 +10,6 @@
#include <algorithm>
#include <mcl/iterator/reverse.hpp>
#include "dynarmic/common/common_types.h"
#include <xbyak/xbyak.h>
@ -76,7 +75,8 @@ void ABI_PopRegistersAndAdjustStack(BlockOfCode& code, const size_t frame_size,
const FrameInfo frame_info = CalculateFrameInfo(num_gprs, num_xmms, frame_size);
size_t xmm_offset = frame_info.xmm_offset + (num_xmms * XMM_SIZE);
for (auto const xmm : mcl::iterator::reverse(regs)) {
for (auto it = regs.rbegin(); it != regs.rend(); ++it) {
auto const xmm = *it;
if (HostLocIsXMM(xmm)) {
xmm_offset -= XMM_SIZE;
if (code.HasHostFeature(HostFeature::AVX)) {
@ -88,9 +88,11 @@ void ABI_PopRegistersAndAdjustStack(BlockOfCode& code, const size_t frame_size,
}
if (frame_info.stack_subtraction != 0)
code.add(rsp, u32(frame_info.stack_subtraction));
for (auto const gpr : mcl::iterator::reverse(regs))
for (auto it = regs.rbegin(); it != regs.rend(); ++it) {
auto const gpr = *it;
if (HostLocIsGPR(gpr))
code.pop(HostLocToReg64(gpr));
}
}
void ABI_PushCalleeSaveRegistersAndAdjustStack(BlockOfCode& code, const std::size_t frame_size) {

View file

@ -364,8 +364,7 @@ void BlockOfCode::GenRunCode(std::function<void(BlockOfCode&)> rcp) {
cmp(dword[ABI_JIT_PTR + jsi.offsetof_halt_reason], 0);
jne(return_to_caller_mxcsr_already_exited, T_NEAR);
lock();
or_(dword[ABI_JIT_PTR + jsi.offsetof_halt_reason], static_cast<u32>(HaltReason::Step));
lock(); or_(dword[ABI_JIT_PTR + jsi.offsetof_halt_reason], static_cast<u32>(HaltReason::Step));
SwitchMxcsrOnEntry();
jmp(ABI_PARAM2);
@ -415,7 +414,6 @@ void BlockOfCode::GenRunCode(std::function<void(BlockOfCode&)> rcp) {
}
xor_(eax, eax);
lock();
xchg(dword[ABI_JIT_PTR + jsi.offsetof_halt_reason], eax);
ABI_PopCalleeSaveRegistersAndAdjustStack(*this, sizeof(StackLayout));

View file

@ -11,6 +11,7 @@
#include <iterator>
#include "dynarmic/common/assert.h"
#include <boost/variant/detail/apply_visitor_binary.hpp>
#include <mcl/bit/bit_field.hpp>
#include <mcl/scope_exit.hpp>
#include "dynarmic/common/common_types.h"
@ -21,7 +22,6 @@
#include "dynarmic/backend/x64/perf_map.h"
#include "dynarmic/backend/x64/stack_layout.h"
#include "dynarmic/backend/x64/verbose_debugging_output.h"
#include "dynarmic/common/variant_util.h"
#include "dynarmic/ir/basic_block.h"
#include "dynarmic/ir/microinstruction.h"
#include "dynarmic/ir/opcodes.h"
@ -347,14 +347,14 @@ EmitX64::BlockDescriptor EmitX64::RegisterBlock(const IR::LocationDescriptor& de
}
void EmitX64::EmitTerminal(IR::Terminal terminal, IR::LocationDescriptor initial_location, bool is_single_step) {
Common::VisitVariant<void>(terminal, [this, initial_location, is_single_step](auto x) {
boost::apply_visitor([this, initial_location, is_single_step](auto x) {
using T = std::decay_t<decltype(x)>;
if constexpr (!std::is_same_v<T, IR::Term::Invalid>) {
this->EmitTerminalImpl(x, initial_location, is_single_step);
} else {
ASSERT_MSG(false, "Invalid terminal");
}
});
}, terminal);
}
void EmitX64::Patch(const IR::LocationDescriptor& target_desc, CodePtr target_code_ptr) {

View file

@ -92,13 +92,10 @@ void ForceDenormalsToZero(BlockOfCode& code, std::initializer_list<Xbyak::Xmm> t
FpFixup::Norm_Src,
FpFixup::Norm_Src,
FpFixup::Norm_Src);
const Xbyak::Xmm tmp = xmm16;
const Xbyak::Xmm tmp = xmm0;
FCODE(vmovap)(tmp, code.BConst<fsize>(xword, denormal_to_zero));
for (const Xbyak::Xmm& xmm : to_daz) {
for (const Xbyak::Xmm& xmm : to_daz)
FCODE(vfixupimms)(xmm, xmm, tmp, u8(0));
}
return;
}

View file

@ -273,34 +273,31 @@ void AxxEmitX64::EmitExclusiveWriteMemory(AxxEmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const bool ordered = IsOrdered(args[3].GetImmediateAccType());
if constexpr (bitsize != 128) {
ctx.reg_alloc.HostCall(inst, {}, args[1], args[2]);
} else {
if constexpr (bitsize == 128) {
ctx.reg_alloc.Use(args[1], ABI_PARAM2);
ctx.reg_alloc.Use(args[2], HostLoc::XMM1);
ctx.reg_alloc.EndOfAllocScope();
ctx.reg_alloc.HostCall(inst);
} else {
ctx.reg_alloc.HostCall(inst, {}, args[1], args[2]);
}
const Xbyak::Reg64 tmp = ctx.reg_alloc.ScratchGpr();
Xbyak::Label end;
code.mov(code.ABI_RETURN, u32(1));
code.cmp(code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)], u8(0));
code.movzx(tmp.cvt32(), code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)]);
code.test(tmp.cvt8(), tmp.cvt8());
code.je(end);
code.mov(code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)], u8(0));
code.mov(code.ABI_PARAM1, reinterpret_cast<u64>(&conf));
code.xor_(tmp.cvt32(), tmp.cvt32());
code.xchg(tmp.cvt8(), code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)]);
code.mov(code.ABI_PARAM1, u64(&conf));
if constexpr (bitsize != 128) {
using T = mcl::unsigned_integer_of_size<bitsize>;
code.CallLambda(
[](AxxUserConfig& conf, Axx::VAddr vaddr, T value) -> u32 {
return conf.global_monitor->DoExclusiveOperation<T>(conf.processor_id, vaddr,
[&](T expected) -> bool {
return (conf.callbacks->*callback)(vaddr, value, expected);
})
? 0
: 1;
});
code.CallLambda([](AxxUserConfig& conf, Axx::VAddr vaddr, T value) -> u32 {
return conf.global_monitor->DoExclusiveOperation<T>(conf.processor_id, vaddr, [&](T expected) -> bool {
return (conf.callbacks->*callback)(vaddr, value, expected);
}) ? 0 : 1;
});
if (ordered) {
code.mfence();
}
@ -308,15 +305,11 @@ void AxxEmitX64::EmitExclusiveWriteMemory(AxxEmitContext& ctx, IR::Inst* inst) {
ctx.reg_alloc.AllocStackSpace(16 + ABI_SHADOW_SPACE);
code.lea(code.ABI_PARAM3, ptr[rsp + ABI_SHADOW_SPACE]);
code.movaps(xword[code.ABI_PARAM3], xmm1);
code.CallLambda(
[](AxxUserConfig& conf, Axx::VAddr vaddr, Vector& value) -> u32 {
return conf.global_monitor->DoExclusiveOperation<Vector>(conf.processor_id, vaddr,
[&](Vector expected) -> bool {
return (conf.callbacks->*callback)(vaddr, value, expected);
})
? 0
: 1;
});
code.CallLambda([](AxxUserConfig& conf, Axx::VAddr vaddr, Vector& value) -> u32 {
return conf.global_monitor->DoExclusiveOperation<Vector>(conf.processor_id, vaddr, [&](Vector expected) -> bool {
return (conf.callbacks->*callback)(vaddr, value, expected);
}) ? 0 : 1;
});
if (ordered) {
code.mfence();
}
@ -437,10 +430,11 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
SharedLabel end = GenSharedLabel();
code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
code.mov(status, u32(1));
code.cmp(code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)], u8(0));
code.movzx(tmp.cvt32(), code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)]);
code.test(tmp.cvt8(), tmp.cvt8());
code.je(*end, code.T_NEAR);
code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
code.cmp(qword[tmp], vaddr);
code.jne(*end, code.T_NEAR);
@ -474,30 +468,29 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
const auto location = code.getCurr();
if constexpr (bitsize == 128) {
switch (bitsize) {
case 8:
code.lock();
code.cmpxchg(code.byte[dest_ptr], value.cvt8());
break;
case 16:
code.lock();
code.cmpxchg(word[dest_ptr], value.cvt16());
break;
case 32:
code.lock();
code.cmpxchg(dword[dest_ptr], value.cvt32());
break;
case 64:
code.lock();
code.cmpxchg(qword[dest_ptr], value.cvt64());
break;
case 128:
code.lock();
code.cmpxchg16b(ptr[dest_ptr]);
} else {
switch (bitsize) {
case 8:
code.lock();
code.cmpxchg(code.byte[dest_ptr], value.cvt8());
break;
case 16:
code.lock();
code.cmpxchg(word[dest_ptr], value.cvt16());
break;
case 32:
code.lock();
code.cmpxchg(dword[dest_ptr], value.cvt32());
break;
case 64:
code.lock();
code.cmpxchg(qword[dest_ptr], value.cvt64());
break;
default:
UNREACHABLE();
}
break;
default:
UNREACHABLE();
}
code.setnz(status.cvt8());
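
The movzx/test sequence above checks exclusive_state, and the later xchg with a zeroed register clears it while reading the old value in a single atomic step (memory-operand xchg is implicitly locked). A standalone sketch of the same idea using std::atomic, illustrative only and not part of the changeset:

#include <atomic>
#include <cstdint>
#include <cstdio>

int main() {
    std::atomic<std::uint8_t> exclusive_state{1};
    // Read the old value and clear the flag in one atomic operation,
    // analogous to the implicitly locked xchg in the emitted code.
    const std::uint8_t was_set = exclusive_state.exchange(0);
    std::printf("was_set=%u, now=%u\n", unsigned(was_set), unsigned(exclusive_state.load()));
}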

View file

@ -609,8 +609,8 @@ void EmitX64::EmitVectorArithmeticVShift16(EmitContext& ctx, IR::Inst* inst) {
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm left_shift = ctx.reg_alloc.UseScratchXmm(args[1]);
const Xbyak::Xmm right_shift = xmm16;
const Xbyak::Xmm tmp = xmm17;
const Xbyak::Xmm right_shift = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
code.vmovdqa32(tmp, code.Const(xword, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF));
code.vpxord(right_shift, right_shift, right_shift);
@ -674,8 +674,8 @@ void EmitX64::EmitVectorArithmeticVShift64(EmitContext& ctx, IR::Inst* inst) {
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm left_shift = ctx.reg_alloc.UseScratchXmm(args[1]);
const Xbyak::Xmm right_shift = xmm16;
const Xbyak::Xmm tmp = xmm17;
const Xbyak::Xmm right_shift = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
code.vmovdqa32(tmp, code.Const(xword, 0x00000000000000FF, 0x00000000000000FF));
code.vpxorq(right_shift, right_shift, right_shift);
@ -1955,8 +1955,8 @@ void EmitX64::EmitVectorLogicalVShift16(EmitContext& ctx, IR::Inst* inst) {
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm left_shift = ctx.reg_alloc.UseScratchXmm(args[1]);
const Xbyak::Xmm right_shift = xmm16;
const Xbyak::Xmm tmp = xmm17;
const Xbyak::Xmm right_shift = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
code.vmovdqa32(tmp, code.Const(xword, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF));
code.vpxord(right_shift, right_shift, right_shift);
@ -2737,7 +2737,7 @@ void EmitX64::EmitVectorPairedAddSignedWiden32(EmitContext& ctx, IR::Inst* inst)
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
if (code.HasHostFeature(HostFeature::AVX512_Ortho)) {
const Xbyak::Xmm c = xmm16;
const Xbyak::Xmm c = ctx.reg_alloc.ScratchXmm();
code.vpsraq(c, a, 32);
code.vpsllq(a, a, 32);
code.vpsraq(a, a, 32);
@ -5461,7 +5461,7 @@ void EmitX64::EmitVectorTableLookup128(EmitContext& ctx, IR::Inst* inst) {
if (code.HasHostFeature(HostFeature::AVX512_Ortho | HostFeature::AVX512BW)) {
const Xbyak::Xmm indicies = ctx.reg_alloc.UseXmm(args[2]);
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm masked = xmm16;
const Xbyak::Xmm masked = ctx.reg_alloc.ScratchXmm();
code.vpandd(masked, indicies, code.Const(xword_b, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0));

View file

@ -9,6 +9,7 @@
#include "dynarmic/backend/x64/reg_alloc.h"
#include <algorithm>
#include <limits>
#include <numeric>
#include <utility>
@ -118,7 +119,7 @@ void HostLocInfo::AddValue(IR::Inst* inst) noexcept {
values.push_back(inst);
ASSERT(size_t(total_uses) + inst->UseCount() < (std::numeric_limits<uint16_t>::max)());
total_uses += inst->UseCount();
max_bit_width = std::max<uint8_t>(max_bit_width, GetBitWidth(inst->GetType()));
max_bit_width = std::max<uint8_t>(max_bit_width, std::countr_zero(GetBitWidth(inst->GetType())));
}
void HostLocInfo::EmitVerboseDebuggingOutput(BlockOfCode* code, size_t host_loc_index) const noexcept {
@ -152,19 +153,19 @@ bool Argument::GetImmediateU1() const noexcept {
u8 Argument::GetImmediateU8() const noexcept {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm < 0x100);
ASSERT(imm <= u64(std::numeric_limits<u8>::max()));
return u8(imm);
}
u16 Argument::GetImmediateU16() const noexcept {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm < 0x10000);
ASSERT(imm <= u64(std::numeric_limits<u16>::max()));
return u16(imm);
}
u32 Argument::GetImmediateU32() const noexcept {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm < 0x100000000);
ASSERT(imm <= u64(std::numeric_limits<u32>::max()));
return u32(imm);
}
@ -366,10 +367,20 @@ void RegAlloc::HostCall(IR::Inst* result_def,
if (result_def) {
DefineValueImpl(result_def, ABI_RETURN);
}
for (size_t i = 0; i < args.size(); i++) {
if (args[i]) {
UseScratch(*args[i], args_hostloc[i]);
} else {
ScratchGpr(args_hostloc[i]); // TODO: Force spill
}
}
// Must match with with ScratchImpl
for (auto const gpr : other_caller_save) {
MoveOutOfTheWay(gpr);
LocInfo(gpr).WriteLock();
}
for (size_t i = 0; i < args.size(); i++) {
if (args[i] && !args[i]->get().IsVoid()) {
UseScratch(*args[i], args_hostloc[i]);
// LLVM puts the burden of zero-extension of 8 and 16 bit values on the caller instead of the callee
const Xbyak::Reg64 reg = HostLocToReg64(args_hostloc[i]);
switch (args[i]->get().GetType()) {
@ -389,14 +400,6 @@ void RegAlloc::HostCall(IR::Inst* result_def,
}
}
}
for (size_t i = 0; i < args.size(); i++)
if (!args[i]) {
// TODO: Force spill
ScratchGpr(args_hostloc[i]);
}
for (auto const caller_saved : other_caller_save)
ScratchImpl({caller_saved});
}
void RegAlloc::AllocStackSpace(const size_t stack_space) noexcept {
@ -559,13 +562,12 @@ void RegAlloc::SpillRegister(HostLoc loc) noexcept {
}
HostLoc RegAlloc::FindFreeSpill(bool is_xmm) const noexcept {
#if 0
// TODO(lizzie): Ok, Windows hates XMM spills, this means less perf for windows
// but it's fine anyways. We can find other ways to cheat it later - but which?!?!
// we should NOT save xmm each block entering... MAYBE xbyak has a bug on start/end?
// TODO(lizzie): This needs to be investigated further later.
// Do not spill XMM into other XMM silly
if (!is_xmm) {
/*if (!is_xmm) {
// TODO(lizzie): Using lower (xmm0 and such) registers results in issues/crashes - INVESTIGATE WHY
// Intel recommends to spill GPR onto XMM registers IF POSSIBLE
// TODO(lizzie): Issues on DBZ, theory: Scratch XMM not properly restored after a function call?
@ -573,8 +575,9 @@ HostLoc RegAlloc::FindFreeSpill(bool is_xmm) const noexcept {
for (size_t i = size_t(HostLoc::XMM15); i >= size_t(HostLoc::XMM3); --i)
if (const auto loc = HostLoc(i); LocInfo(loc).IsEmpty())
return loc;
}
#endif
}*/
// TODO: Doing this would mean saving XMM on each call... need to benchmark the benefits
// of spilling on XMM versus the potential cost of using XMM registers.....
// Otherwise go to stack spilling
for (size_t i = size_t(HostLoc::FirstSpill); i < hostloc_info.size(); ++i)
if (const auto loc = HostLoc(i); LocInfo(loc).IsEmpty())

View file

@ -12,6 +12,7 @@
#include <functional>
#include <optional>
#include "boost/container/small_vector.hpp"
#include "dynarmic/common/common_types.h"
#include <xbyak/xbyak.h>
#include <boost/container/static_vector.hpp>
@ -77,13 +78,13 @@ public:
return std::find(values.begin(), values.end(), inst) != values.end();
}
inline size_t GetMaxBitWidth() const noexcept {
return max_bit_width;
return 1 << max_bit_width;
}
void AddValue(IR::Inst* inst) noexcept;
void EmitVerboseDebuggingOutput(BlockOfCode* code, size_t host_loc_index) const noexcept;
private:
//non trivial
std::vector<IR::Inst*> values; //24
boost::container::small_vector<IR::Inst*, 3> values; //24
// Block state
uint16_t total_uses = 0; //8
//sometimes zeroed
@ -93,10 +94,10 @@ private:
uint16_t is_being_used_count = 0; //8
uint16_t current_references = 0; //8
// Value state
uint8_t max_bit_width = 0; //Valid values: 1,2,4,8,16,32,128
uint8_t max_bit_width : 4 = 0; //Valid values: log2(1,2,4,8,16,32,128) = (0, 1, 2, 3, 4, 5, 6)
uint8_t lru_counter : 2 = 0; //1
bool is_scratch : 1 = false; //1
bool is_set_last_use : 1 = false; //1
alignas(16) uint8_t lru_counter = 0; //1
friend class RegAlloc;
};
static_assert(sizeof(HostLocInfo) == 64);
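
The 4-bit field above stores the clog2 of the bit width, and GetMaxBitWidth() shifts it back, matching the std::countr_zero change in reg_alloc.cpp. A standalone round-trip sketch, illustrative only:

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
    const std::uint8_t width = 32;                                     // e.g. a 32-bit value
    const std::uint8_t packed = std::uint8_t(std::countr_zero(width)); // 5, fits in 4 bits
    const std::uint8_t unpacked = std::uint8_t(1u << packed);          // back to 32
    std::printf("width=%u packed=%u unpacked=%u\n",
                unsigned(width), unsigned(packed), unsigned(unpacked));
}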

View file

@ -1,13 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "dynarmic/common/memory_pool.h"
#include <cstdlib>
namespace Dynarmic::Common {
} // namespace Dynarmic::Common

View file

@ -1,61 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstddef>
#include <vector>
namespace Dynarmic::Common {
/// @tparam object_size Byte-size of objects to construct
/// @tparam slab_size Number of objects to have per slab
template<size_t object_size, size_t slab_size>
class Pool {
public:
inline Pool() noexcept {
AllocateNewSlab();
}
inline ~Pool() noexcept {
std::free(current_slab);
for (char* slab : slabs) {
std::free(slab);
}
}
Pool(const Pool&) = delete;
Pool(Pool&&) = delete;
Pool& operator=(const Pool&) = delete;
Pool& operator=(Pool&&) = delete;
/// @brief Returns a pointer to an `object_size`-bytes block of memory.
[[nodiscard]] void* Alloc() noexcept {
if (remaining == 0) {
slabs.push_back(current_slab);
AllocateNewSlab();
}
void* ret = static_cast<void*>(current_ptr);
current_ptr += object_size;
remaining--;
return ret;
}
private:
/// @brief Allocates a completely new memory slab.
/// Used when an entirely new slab is needed
/// due the current one running out of usable space.
void AllocateNewSlab() noexcept {
current_slab = static_cast<char*>(std::malloc(object_size * slab_size));
current_ptr = current_slab;
remaining = slab_size;
}
std::vector<char*> slabs;
char* current_slab = nullptr;
char* current_ptr = nullptr;
size_t remaining = 0;
};
} // namespace Dynarmic::Common

View file

@ -1,29 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <boost/variant.hpp>
namespace Dynarmic::Common {
namespace detail {
template<typename ReturnT, typename Lambda>
struct VariantVisitor : boost::static_visitor<ReturnT>
, Lambda {
VariantVisitor(Lambda&& lambda)
: Lambda(std::move(lambda)) {}
using Lambda::operator();
};
} // namespace detail
template<typename ReturnT, typename Variant, typename Lambda>
inline ReturnT VisitVariant(Variant&& variant, Lambda&& lambda) {
return boost::apply_visitor(detail::VariantVisitor<ReturnT, Lambda>(std::move(lambda)), variant);
}
} // namespace Dynarmic::Common
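
With the helper above removed, boost::apply_visitor is called directly with a generic lambda, as in the emit_x64.cpp hunk earlier. A standalone sketch of that usage, illustrative only and assuming Boost.Variant is available:

#include <boost/variant.hpp>
#include <cstdio>

int main() {
    boost::variant<int, double> v = 3.5;
    // A generic lambda works directly as the visitor; no static_visitor wrapper is needed.
    boost::apply_visitor([](auto x) { std::printf("value: %f\n", double(x)); }, v);
}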

View file

@ -9,12 +9,9 @@
#pragma once
#include <string>
#include <utility>
#include <fmt/format.h>
#include "dynarmic/common/assert.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/interface/A32/coprocessor_util.h"
#include "dynarmic/ir/cond.h"
@ -89,24 +86,17 @@ constexpr bool IsQuadExtReg(ExtReg reg) {
inline size_t RegNumber(Reg reg) {
ASSERT(reg != Reg::INVALID_REG);
return static_cast<size_t>(reg);
return size_t(reg);
}
inline size_t RegNumber(ExtReg reg) {
if (IsSingleExtReg(reg)) {
return static_cast<size_t>(reg) - static_cast<size_t>(ExtReg::S0);
return size_t(reg) - size_t(ExtReg::S0);
} else if (IsDoubleExtReg(reg)) {
return size_t(reg) - size_t(ExtReg::D0);
}
if (IsDoubleExtReg(reg)) {
return static_cast<size_t>(reg) - static_cast<size_t>(ExtReg::D0);
}
if (IsQuadExtReg(reg)) {
return static_cast<size_t>(reg) - static_cast<size_t>(ExtReg::Q0);
}
ASSERT_MSG(false, "Invalid extended register");
return 0;
ASSERT(IsQuadExtReg(reg));
return size_t(reg) - size_t(ExtReg::Q0);
}
inline Reg operator+(Reg reg, size_t number) {

View file

@ -30,13 +30,13 @@ template<typename Visitor>
using ArmDecodeTable = std::array<std::vector<ArmMatcher<Visitor>>, 0x1000>;
namespace detail {
inline size_t ToFastLookupIndexArm(u32 instruction) {
inline size_t ToFastLookupIndexArm(u32 instruction) noexcept {
return ((instruction >> 4) & 0x00F) | ((instruction >> 16) & 0xFF0);
}
} // namespace detail
template<typename V>
constexpr ArmDecodeTable<V> GetArmDecodeTable() {
constexpr ArmDecodeTable<V> GetArmDecodeTable() noexcept {
std::vector<ArmMatcher<V>> list = {
#define INST(fn, name, bitstring) DYNARMIC_DECODER_GET_MATCHER(ArmMatcher, fn, name, Decoder::detail::StringToArray<32>(bitstring)),
#include "./arm.inc"
@ -62,15 +62,27 @@ constexpr ArmDecodeTable<V> GetArmDecodeTable() {
}
template<typename V>
std::optional<std::reference_wrapper<const ArmMatcher<V>>> DecodeArm(u32 instruction) {
std::optional<std::reference_wrapper<const ArmMatcher<V>>> DecodeArm(u32 instruction) noexcept {
alignas(64) static const auto table = GetArmDecodeTable<V>();
const auto matches_instruction = [instruction](const auto& matcher) {
return matcher.Matches(instruction);
};
const auto& subtable = table[detail::ToFastLookupIndexArm(instruction)];
auto iter = std::find_if(subtable.begin(), subtable.end(), matches_instruction);
return iter != subtable.end() ? std::optional<std::reference_wrapper<const ArmMatcher<V>>>(*iter) : std::nullopt;
}
template<typename V>
std::optional<std::string_view> GetNameARM(u32 inst) noexcept {
std::vector<std::pair<std::string_view, ArmMatcher<V>>> list = {
#define INST(fn, name, bitstring) { name, DYNARMIC_DECODER_GET_MATCHER(ArmMatcher, fn, name, Decoder::detail::StringToArray<32>(bitstring)) },
#include "./arm.inc"
#undef INST
};
auto const iter = std::find_if(list.cbegin(), list.cend(), [inst](auto const& m) {
return m.second.Matches(inst);
});
return iter != list.cend() ? std::optional{iter->first} : std::nullopt;
}
} // namespace Dynarmic::A32

View file

@ -26,15 +26,12 @@ template<typename Visitor>
using ASIMDMatcher = Decoder::Matcher<Visitor, u32>;
template<typename V>
std::vector<ASIMDMatcher<V>> GetASIMDDecodeTable() {
std::vector<ASIMDMatcher<V>> table = {
#define INST(fn, name, bitstring) DYNARMIC_DECODER_GET_MATCHER(ASIMDMatcher, fn, name, Decoder::detail::StringToArray<32>(bitstring)),
std::vector<ASIMDMatcher<V>> GetASIMDDecodeTable() noexcept {
std::vector<std::pair<const char*, ASIMDMatcher<V>>> table = {
#define INST(fn, name, bitstring) { name, DYNARMIC_DECODER_GET_MATCHER(ASIMDMatcher, fn, name, Decoder::detail::StringToArray<32>(bitstring)) },
#include "./asimd.inc"
#undef INST
};
// Exceptions to the rule of thumb.
const std::set<std::string> comes_first{
"VBIC, VMOV, VMVN, VORR (immediate)",
@ -53,29 +50,43 @@ std::vector<ASIMDMatcher<V>> GetASIMDDecodeTable() {
"VQDMULH (scalar)",
"VQRDMULH (scalar)",
};
const auto sort_begin = std::stable_partition(table.begin(), table.end(), [&](const auto& matcher) {
return comes_first.count(matcher.GetName()) > 0;
const auto sort_begin = std::stable_partition(table.begin(), table.end(), [&](const auto& e) {
return comes_first.count(e.first) > 0;
});
const auto sort_end = std::stable_partition(table.begin(), table.end(), [&](const auto& matcher) {
return comes_last.count(matcher.GetName()) == 0;
const auto sort_end = std::stable_partition(table.begin(), table.end(), [&](const auto& e) {
return comes_last.count(e.first) == 0;
});
// If a matcher has more bits in its mask it is more specific, so it should come first.
std::stable_sort(sort_begin, sort_end, [](const auto& matcher1, const auto& matcher2) {
return mcl::bit::count_ones(matcher1.GetMask()) > mcl::bit::count_ones(matcher2.GetMask());
std::stable_sort(sort_begin, sort_end, [](const auto& a, const auto& b) {
return mcl::bit::count_ones(a.second.GetMask()) > mcl::bit::count_ones(b.second.GetMask());
});
return table;
std::vector<ASIMDMatcher<V>> final_table;
std::transform(table.cbegin(), table.cend(), std::back_inserter(final_table), [](auto const& e) {
return e.second;
});
return final_table;
}
template<typename V>
std::optional<std::reference_wrapper<const ASIMDMatcher<V>>> DecodeASIMD(u32 instruction) {
static const auto table = GetASIMDDecodeTable<V>();
const auto matches_instruction = [instruction](const auto& matcher) { return matcher.Matches(instruction); };
auto iter = std::find_if(table.begin(), table.end(), matches_instruction);
std::optional<std::reference_wrapper<const ASIMDMatcher<V>>> DecodeASIMD(u32 instruction) noexcept {
alignas(64) static const auto table = GetASIMDDecodeTable<V>();
auto iter = std::find_if(table.begin(), table.end(), [instruction](const auto& matcher) {
return matcher.Matches(instruction);
});
return iter != table.end() ? std::optional<std::reference_wrapper<const ASIMDMatcher<V>>>(*iter) : std::nullopt;
}
template<typename V>
std::optional<std::string_view> GetNameASIMD(u32 inst) noexcept {
std::vector<std::pair<std::string_view, ASIMDMatcher<V>>> list = {
#define INST(fn, name, bitstring) { name, DYNARMIC_DECODER_GET_MATCHER(ASIMDMatcher, fn, name, Decoder::detail::StringToArray<32>(bitstring)) },
#include "./asimd.inc"
#undef INST
};
auto const iter = std::find_if(list.cbegin(), list.cend(), [inst](auto const& m) {
return m.second.Matches(inst);
});
return iter != list.cend() ? std::optional{iter->first} : std::nullopt;
}
} // namespace Dynarmic::A32

View file

@ -25,18 +25,28 @@ using Thumb16Matcher = Decoder::Matcher<Visitor, u16>;
template<typename V>
std::optional<std::reference_wrapper<const Thumb16Matcher<V>>> DecodeThumb16(u16 instruction) {
static const std::vector<Thumb16Matcher<V>> table = {
alignas(64) static const std::vector<Thumb16Matcher<V>> table = {
#define INST(fn, name, bitstring) DYNARMIC_DECODER_GET_MATCHER(Thumb16Matcher, fn, name, Decoder::detail::StringToArray<16>(bitstring)),
#include "./thumb16.inc"
#undef INST
};
const auto matches_instruction = [instruction](const auto& matcher) { return matcher.Matches(instruction); };
auto iter = std::find_if(table.begin(), table.end(), matches_instruction);
auto iter = std::find_if(table.begin(), table.end(), [instruction](const auto& matcher) {
return matcher.Matches(instruction);
});
return iter != table.end() ? std::optional<std::reference_wrapper<const Thumb16Matcher<V>>>(*iter) : std::nullopt;
}
template<typename V>
std::optional<std::string_view> GetNameThumb16(u32 inst) noexcept {
std::vector<std::pair<std::string_view, Thumb16Matcher<V>>> list = {
#define INST(fn, name, bitstring) { name, DYNARMIC_DECODER_GET_MATCHER(Thumb16Matcher, fn, name, Decoder::detail::StringToArray<16>(bitstring)) },
#include "./thumb16.inc"
#undef INST
};
auto const iter = std::find_if(list.cbegin(), list.cend(), [inst](auto const& m) {
return m.second.Matches(inst);
});
return iter != list.cend() ? std::optional{iter->first} : std::nullopt;
}
} // namespace Dynarmic::A32
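
The Thumb16 decoder above shows the lookup pattern shared by these decode functions: build the matcher table once into a cache-line-aligned function-local static, then scan it linearly with std::find_if. A simplified sketch of that shape, with a hypothetical Matcher type and made-up encodings:

#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

struct Matcher {
    std::uint16_t mask;
    std::uint16_t expected;
    bool Matches(std::uint16_t inst) const noexcept { return (inst & mask) == expected; }
};

std::optional<Matcher> DecodeSketch(std::uint16_t inst) {
    // Built exactly once on first call; alignas(64) puts the table object on its own cache line.
    alignas(64) static const std::vector<Matcher> table = {
        {0xF800, 0x2000}, // hypothetical encodings
        {0xF800, 0x3000},
    };
    const auto it = std::find_if(table.begin(), table.end(),
                                 [inst](const Matcher& m) { return m.Matches(inst); });
    return it != table.end() ? std::optional<Matcher>(*it) : std::nullopt;
}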

View file

@ -24,18 +24,28 @@ using Thumb32Matcher = Decoder::Matcher<Visitor, u32>;
template<typename V>
std::optional<std::reference_wrapper<const Thumb32Matcher<V>>> DecodeThumb32(u32 instruction) {
static const std::vector<Thumb32Matcher<V>> table = {
alignas(64) static const std::vector<Thumb32Matcher<V>> table = {
#define INST(fn, name, bitstring) DYNARMIC_DECODER_GET_MATCHER(Thumb32Matcher, fn, name, Decoder::detail::StringToArray<32>(bitstring)),
#include "./thumb32.inc"
#undef INST
};
const auto matches_instruction = [instruction](const auto& matcher) { return matcher.Matches(instruction); };
auto iter = std::find_if(table.begin(), table.end(), matches_instruction);
auto iter = std::find_if(table.begin(), table.end(), [instruction](const auto& matcher) {
return matcher.Matches(instruction);
});
return iter != table.end() ? std::optional<std::reference_wrapper<const Thumb32Matcher<V>>>(*iter) : std::nullopt;
}
template<typename V>
std::optional<std::string_view> GetNameThumb32(u32 inst) noexcept {
std::vector<std::pair<std::string_view, Thumb32Matcher<V>>> list = {
#define INST(fn, name, bitstring) { name, DYNARMIC_DECODER_GET_MATCHER(Thumb32Matcher, fn, name, Decoder::detail::StringToArray<32>(bitstring)) },
#include "./thumb32.inc"
#undef INST
};
auto const iter = std::find_if(list.cbegin(), list.cend(), [inst](auto const& m) {
return m.second.Matches(inst);
});
return iter != list.cend() ? std::optional{iter->first} : std::nullopt;
}
} // namespace Dynarmic::A32

View file

@ -26,36 +26,42 @@ using VFPMatcher = Decoder::Matcher<Visitor, u32>;
template<typename V>
std::optional<std::reference_wrapper<const VFPMatcher<V>>> DecodeVFP(u32 instruction) {
using Table = std::vector<VFPMatcher<V>>;
static const struct Tables {
alignas(64) static const struct Tables {
Table unconditional;
Table conditional;
} tables = [] {
} tables = []() {
Table list = {
#define INST(fn, name, bitstring) DYNARMIC_DECODER_GET_MATCHER(VFPMatcher, fn, name, Decoder::detail::StringToArray<32>(bitstring)),
#include "./vfp.inc"
#undef INST
};
const auto division = std::stable_partition(list.begin(), list.end(), [&](const auto& matcher) {
auto const it = std::stable_partition(list.begin(), list.end(), [&](const auto& matcher) {
return (matcher.GetMask() & 0xF0000000) == 0xF0000000;
});
return Tables{
Table{list.begin(), division},
Table{division, list.end()},
Table{list.begin(), it},
Table{it, list.end()},
};
}();
const bool is_unconditional = (instruction & 0xF0000000) == 0xF0000000;
const Table& table = is_unconditional ? tables.unconditional : tables.conditional;
const auto matches_instruction = [instruction](const auto& matcher) { return matcher.Matches(instruction); };
auto iter = std::find_if(table.begin(), table.end(), matches_instruction);
auto iter = std::find_if(table.begin(), table.end(), [instruction](const auto& matcher) {
return matcher.Matches(instruction);
});
return iter != table.end() ? std::optional<std::reference_wrapper<const VFPMatcher<V>>>(*iter) : std::nullopt;
}
template<typename V>
std::optional<std::string_view> GetNameVFP(u32 inst) noexcept {
std::vector<std::pair<std::string_view, VFPMatcher<V>>> list = {
#define INST(fn, name, bitstring) { name, DYNARMIC_DECODER_GET_MATCHER(VFPMatcher, fn, name, Decoder::detail::StringToArray<32>(bitstring)) },
#include "./vfp.inc"
#undef INST
};
auto const iter = std::find_if(list.cbegin(), list.cend(), [inst](auto const& m) {
return m.second.Matches(inst);
});
return iter != list.cend() ? std::optional{iter->first} : std::nullopt;
}
} // namespace Dynarmic::A32
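
The VFP table above is split once into unconditional and conditional sub-tables by partitioning on whether a matcher's mask pins the condition nibble. A hedged sketch of that split with a simplified Matcher type:

#include <algorithm>
#include <cstdint>
#include <vector>

struct Matcher {
    std::uint32_t mask;
    std::uint32_t expected;
};

struct Tables {
    std::vector<Matcher> unconditional;
    std::vector<Matcher> conditional;
};

Tables SplitByCondition(std::vector<Matcher> list) {
    // Matchers whose mask covers the whole condition nibble go into the unconditional table,
    // mirroring the (mask & 0xF0000000) == 0xF0000000 partition above.
    const auto it = std::stable_partition(list.begin(), list.end(), [](const Matcher& m) {
        return (m.mask & 0xF0000000u) == 0xF0000000u;
    });
    return Tables{
        std::vector<Matcher>(list.begin(), it),
        std::vector<Matcher>(it, list.end()),
    };
}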

View file

@ -97,7 +97,7 @@ u32 ConvertASIMDInstruction(u32 thumb_instruction) {
return 0xF7F0A000; // UDF
}
bool MaybeVFPOrASIMDInstruction(u32 thumb_instruction) {
inline bool MaybeVFPOrASIMDInstruction(u32 thumb_instruction) noexcept {
return (thumb_instruction & 0xEC000000) == 0xEC000000 || (thumb_instruction & 0xFF100000) == 0xF9000000;
}

View file

@ -37,34 +37,31 @@ inline size_t ToFastLookupIndex(u32 instruction) {
template<typename V>
constexpr DecodeTable<V> GetDecodeTable() {
std::vector<Matcher<V>> list = {
#define INST(fn, name, bitstring) DYNARMIC_DECODER_GET_MATCHER(Matcher, fn, name, Decoder::detail::StringToArray<32>(bitstring)),
std::vector<std::pair<const char*, Matcher<V>>> list = {
#define INST(fn, name, bitstring) { name, DYNARMIC_DECODER_GET_MATCHER(Matcher, fn, name, Decoder::detail::StringToArray<32>(bitstring)) },
#include "./a64.inc"
#undef INST
};
// If a matcher has more bits in its mask it is more specific, so it should come first.
std::stable_sort(list.begin(), list.end(), [](const auto& matcher1, const auto& matcher2) {
std::stable_sort(list.begin(), list.end(), [](const auto& a, const auto& b) {
// If a matcher has more bits in its mask it is more specific, so it should come first.
return mcl::bit::count_ones(matcher1.GetMask()) > mcl::bit::count_ones(matcher2.GetMask());
return mcl::bit::count_ones(a.second.GetMask()) > mcl::bit::count_ones(b.second.GetMask());
});
// Exceptions to the above rule of thumb.
std::stable_partition(list.begin(), list.end(), [&](const auto& matcher) {
std::stable_partition(list.begin(), list.end(), [&](const auto& e) {
return std::set<std::string>{
"MOVI, MVNI, ORR, BIC (vector, immediate)",
"FMOV (vector, immediate)",
"Unallocated SIMD modified immediate",
}.count(matcher.GetName()) > 0;
}.count(e.first) > 0;
});
DecodeTable<V> table{};
for (size_t i = 0; i < table.size(); ++i) {
for (auto matcher : list) {
const auto expect = detail::ToFastLookupIndex(matcher.GetExpected());
const auto mask = detail::ToFastLookupIndex(matcher.GetMask());
for (auto const& e : list) {
const auto expect = detail::ToFastLookupIndex(e.second.GetExpected());
const auto mask = detail::ToFastLookupIndex(e.second.GetMask());
if ((i & mask) == expect) {
table[i].push_back(matcher);
table[i].push_back(e.second);
}
}
}
@ -74,12 +71,24 @@ constexpr DecodeTable<V> GetDecodeTable() {
template<typename V>
std::optional<std::reference_wrapper<const Matcher<V>>> Decode(u32 instruction) {
alignas(64) static const auto table = GetDecodeTable<V>();
const auto matches_instruction = [instruction](const auto& matcher) {
return matcher.Matches(instruction);
};
const auto& subtable = table[detail::ToFastLookupIndex(instruction)];
auto iter = std::find_if(subtable.begin(), subtable.end(), matches_instruction);
auto iter = std::find_if(subtable.begin(), subtable.end(), [instruction](const auto& matcher) {
return matcher.Matches(instruction);
});
return iter != subtable.end() ? std::optional<std::reference_wrapper<const Matcher<V>>>(*iter) : std::nullopt;
}
template<typename V>
std::optional<std::string_view> GetName(u32 inst) noexcept {
std::vector<std::pair<std::string_view, Matcher<V>>> list = {
#define INST(fn, name, bitstring) { name, DYNARMIC_DECODER_GET_MATCHER(Matcher, fn, name, Decoder::detail::StringToArray<32>(bitstring)) },
#include "./a64.inc"
#undef INST
};
auto const iter = std::find_if(list.cbegin(), list.cend(), [inst](auto const& m) {
return m.second.Matches(inst);
});
return iter != list.cend() ? std::optional{iter->first} : std::nullopt;
}
} // namespace Dynarmic::A64
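
The A64 path above avoids a full linear scan by bucketing matchers: a handful of instruction bits pick a bucket, and a bucket only holds matchers whose mask/expected bits are consistent with that index. A sketch of the idea with hypothetical bit choices and a simplified Matcher:

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

struct Matcher {
    std::uint32_t mask;
    std::uint32_t expected;
    bool Matches(std::uint32_t inst) const noexcept { return (inst & mask) == expected; }
};

// Hypothetical bucket key: the top six instruction bits.
constexpr std::size_t ToIndex(std::uint32_t inst) noexcept { return (inst >> 26) & 0x3F; }

using Table = std::array<std::vector<Matcher>, 64>;

Table BuildTable(const std::vector<Matcher>& list) {
    Table table{};
    for (std::size_t i = 0; i < table.size(); ++i) {
        for (const Matcher& m : list) {
            // Bucket i holds m only if i is consistent with m's mask/expected key bits.
            if ((i & ToIndex(m.mask)) == ToIndex(m.expected)) {
                table[i].push_back(m);
            }
        }
    }
    return table;
}

std::optional<Matcher> Decode(const Table& table, std::uint32_t inst) {
    const auto& bucket = table[ToIndex(inst)];
    const auto it = std::find_if(bucket.begin(), bucket.end(),
                                 [inst](const Matcher& m) { return m.Matches(inst); });
    return it != bucket.end() ? std::optional<Matcher>(*it) : std::nullopt;
}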

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
@ -20,9 +23,12 @@ bool TranslatorVisitor::B_cond(Imm<19> imm19, Cond cond) {
bool TranslatorVisitor::B_uncond(Imm<26> imm26) {
const s64 offset = concatenate(imm26, Imm<2>{0}).SignExtend<s64>();
const u64 target = ir.PC() + offset;
//ir.SetTerm(IR::Term::LinkBlockFast{ir.current_location->SetPC(target)});
ir.SetTerm(IR::Term::LinkBlock{ir.current_location->SetPC(target)});
// Pattern to halt execution (B .)
if (target == ir.PC()) {
ir.SetTerm(IR::Term::LinkBlock{ir.current_location->SetPC(target)});
return false;
}
ir.SetTerm(IR::Term::LinkBlockFast{ir.current_location->SetPC(target)});
return false;
}
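
A small sketch of the arithmetic behind the halt-pattern check above: the 26-bit immediate is appended with two zero bits and sign-extended, so an offset of zero means the branch targets its own PC (`B .`), which is the case that uses the plain LinkBlock terminal instead of LinkBlockFast. The helper below is illustrative only:

#include <cstdint>

// Sign-extend imm26:'00' (a 28-bit quantity) to a signed byte offset.
constexpr std::int64_t BranchOffset(std::uint32_t imm26) noexcept {
    const std::uint64_t raw = static_cast<std::uint64_t>(imm26 & 0x03FFFFFFu) << 2;
    const std::uint64_t sign_bit = std::uint64_t{1} << 27;
    return static_cast<std::int64_t>((raw ^ sign_bit) - sign_bit);
}

static_assert(BranchOffset(0x0000000) == 0);   // "B ." branches to itself: target == PC
static_assert(BranchOffset(0x0000001) == 4);   // one instruction forward
static_assert(BranchOffset(0x3FFFFFF) == -4);  // one instruction backward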

View file

@ -70,11 +70,9 @@ struct detail {
return std::make_tuple(mask, expect);
}
/**
* Generates the masks and shifts for each argument.
* A '-' in a bitstring indicates that we don't care about that value.
* An argument is specified by a continuous string of the same character.
*/
/// @brief Generates the masks and shifts for each argument.
/// A '-' in a bitstring indicates that we don't care about that value.
/// An argument is specified by a continuous string of the same character.
template<size_t N>
static consteval auto GetArgInfo(std::array<char, opcode_bitsize> bitstring) {
std::array<opcode_type, N> masks = {};
@ -98,7 +96,6 @@ struct detail {
if constexpr (N > 0) {
const size_t bit_position = opcode_bitsize - i - 1;
if (arg_index >= N)
throw std::out_of_range("Unexpected field");
@ -109,20 +106,16 @@ struct detail {
}
}
}
#if !defined(DYNARMIC_IGNORE_ASSERTS) && !defined(__ANDROID__)
// Avoids an MSVC ICE and an Android NDK issue.

ASSERT(std::all_of(masks.begin(), masks.end(), [](auto m) { return m != 0; }));
#endif
return std::make_tuple(masks, shifts);
}
/**
* This struct's Make member function generates a lambda which decodes an instruction based on
* the provided arg_masks and arg_shifts. The Visitor member function to call is provided as a
* template argument.
*/
/// @brief This struct's Make member function generates a lambda which decodes an instruction
/// based on the provided arg_masks and arg_shifts. The Visitor member function to call is
/// provided as a template argument.
template<typename FnT>
struct VisitorCaller;
@ -130,36 +123,36 @@ struct detail {
# pragma warning(push)
# pragma warning(disable : 4800) // forcing value to bool 'true' or 'false' (performance warning)
#endif
template<typename Visitor, typename... Args, typename CallRetT>
struct VisitorCaller<CallRetT (Visitor::*)(Args...)> {
template<typename V, typename... Args, typename ReturnType>
struct VisitorCaller<ReturnType (V::*)(Args...)> {
template<size_t... iota>
static auto Make(std::integer_sequence<size_t, iota...>,
CallRetT (Visitor::*const fn)(Args...),
static constexpr auto Make(std::integer_sequence<size_t, iota...>,
ReturnType (V::*const fn)(Args...),
const std::array<opcode_type, sizeof...(iota)> arg_masks,
const std::array<size_t, sizeof...(iota)> arg_shifts) {
static_assert(std::is_same_v<visitor_type, Visitor>, "Member function is not from Matcher's Visitor");
return [fn, arg_masks, arg_shifts](Visitor& v, opcode_type instruction) {
static_assert(std::is_same_v<visitor_type, V>, "Member function is not from Matcher's Visitor");
return [fn, arg_masks, arg_shifts](V& v, opcode_type instruction) {
(void)instruction;
(void)arg_masks;
(void)arg_shifts;
return (v.*fn)(static_cast<Args>((instruction & arg_masks[iota]) >> arg_shifts[iota])...);
return (v.*fn)(Args((instruction & arg_masks[iota]) >> arg_shifts[iota])...);
};
}
};
template<typename Visitor, typename... Args, typename CallRetT>
struct VisitorCaller<CallRetT (Visitor::*)(Args...) const> {
template<typename V, typename... Args, typename ReturnType>
struct VisitorCaller<ReturnType (V::*)(Args...) const> {
template<size_t... iota>
static auto Make(std::integer_sequence<size_t, iota...>,
CallRetT (Visitor::*const fn)(Args...) const,
static constexpr auto Make(std::integer_sequence<size_t, iota...>,
ReturnType (V::*const fn)(Args...) const,
const std::array<opcode_type, sizeof...(iota)> arg_masks,
const std::array<size_t, sizeof...(iota)> arg_shifts) {
static_assert(std::is_same_v<visitor_type, const Visitor>, "Member function is not from Matcher's Visitor");
return [fn, arg_masks, arg_shifts](const Visitor& v, opcode_type instruction) {
static_assert(std::is_same_v<visitor_type, const V>, "Member function is not from Matcher's Visitor");
return [fn, arg_masks, arg_shifts](const V& v, opcode_type instruction) {
(void)instruction;
(void)arg_masks;
(void)arg_shifts;
return (v.*fn)(static_cast<Args>((instruction & arg_masks[iota]) >> arg_shifts[iota])...);
return (v.*fn)(Args((instruction & arg_masks[iota]) >> arg_shifts[iota])...);
};
}
};
@ -167,27 +160,21 @@ struct detail {
# pragma warning(pop)
#endif
/**
* Creates a matcher that can match and parse instructions based on bitstring.
* See also: GetMaskAndExpect and GetArgInfo for format of bitstring.
*/
template<auto bitstring, typename FnT>
static auto GetMatcher(FnT fn, const char* const name) {
constexpr size_t args_count = mcl::parameter_count_v<FnT>;
/// @brief Creates a matcher that can match and parse instructions based on bitstring.
/// See also: GetMaskAndExpect and GetArgInfo for format of bitstring.
template<auto bitstring, typename F>
static constexpr auto GetMatcher(F fn) {
constexpr size_t args_count = mcl::parameter_count_v<F>;
constexpr auto mask = std::get<0>(GetMaskAndExpect(bitstring));
constexpr auto expect = std::get<1>(GetMaskAndExpect(bitstring));
constexpr auto arg_masks = std::get<0>(GetArgInfo<args_count>(bitstring));
constexpr auto arg_shifts = std::get<1>(GetArgInfo<args_count>(bitstring));
using Iota = std::make_index_sequence<args_count>;
const auto proxy_fn = VisitorCaller<FnT>::Make(Iota(), fn, arg_masks, arg_shifts);
return MatcherT(name, mask, expect, proxy_fn);
const auto proxy_fn = VisitorCaller<F>::Make(std::make_index_sequence<args_count>(), fn, arg_masks, arg_shifts);
return MatcherT(mask, expect, proxy_fn);
}
};
#define DYNARMIC_DECODER_GET_MATCHER(MatcherT, fn, name, bitstring) Decoder::detail::detail<MatcherT<V>>::template GetMatcher<bitstring>(&V::fn, name)
#define DYNARMIC_DECODER_GET_MATCHER(MatcherT, fn, name, bitstring) Decoder::detail::detail<MatcherT<V>>::template GetMatcher<bitstring>(&V::fn)
} // namespace detail
} // namespace Dynarmic::Decoder
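
The doc comments above describe the bitstring convention: '1' and '0' are fixed bits that feed the mask and expected value, '-' is a don't-care, and runs of a letter mark argument fields. A self-contained sketch of the mask/expected computation under that convention, simplified to 16-bit opcodes:

#include <array>
#include <cstddef>
#include <cstdint>
#include <utility>

// Compute (mask, expected) from a 16-character bitstring such as "01000110----xxxx".
constexpr std::pair<std::uint16_t, std::uint16_t> MaskAndExpect(const std::array<char, 16>& bits) {
    std::uint16_t mask = 0, expect = 0;
    for (std::size_t i = 0; i < bits.size(); ++i) {
        const std::uint16_t bit = std::uint16_t(1) << (bits.size() - i - 1);
        if (bits[i] == '1') { mask |= bit; expect |= bit; }
        else if (bits[i] == '0') { mask |= bit; }
        // any other character ('-' or an argument letter) is not part of the fixed pattern
    }
    return {mask, expect};
}

constexpr std::array<char, 16> kExample = {'0','1','0','0','0','1','1','0','-','-','-','-','x','x','x','x'};
static_assert(MaskAndExpect(kExample).first == 0b1111'1111'0000'0000);
static_assert(MaskAndExpect(kExample).second == 0b0100'0110'0000'0000);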

View file

@ -14,16 +14,12 @@
namespace Dynarmic::Decoder {
/**
* Generic instruction handling construct.
*
* @tparam Visitor An arbitrary visitor type that will be passed through
* to the function being handled. This type must be the
* type of the first parameter in a handler function.
*
* @tparam OpcodeType Type representing an opcode. This must be the
* type of the second parameter in a handler function.
*/
/// Generic instruction handling construct.
/// @tparam Visitor An arbitrary visitor type that will be passed through
/// to the function being handled. This type must be the
/// type of the first parameter in a handler function.
/// @tparam OpcodeType Type representing an opcode. This must be the
/// type of the second parameter in a handler function.
template<typename Visitor, typename OpcodeType>
class Matcher {
public:
@ -31,46 +27,35 @@ public:
using visitor_type = Visitor;
using handler_return_type = typename Visitor::instruction_return_type;
using handler_function = std::function<handler_return_type(Visitor&, opcode_type)>;
Matcher(const char* const name, opcode_type mask, opcode_type expected, handler_function func)
: name{name}, mask{mask}, expected{expected}, fn{std::move(func)} {}
/// Gets the name of this type of instruction.
const char* GetName() const {
return name;
}
Matcher(opcode_type mask, opcode_type expected, handler_function func)
: mask{mask}, expected{expected}, fn{std::move(func)} {}
/// Gets the mask for this instruction.
opcode_type GetMask() const {
inline opcode_type GetMask() const noexcept {
return mask;
}
/// Gets the expected value after masking for this instruction.
opcode_type GetExpected() const {
inline opcode_type GetExpected() const noexcept {
return expected;
}
/**
* Tests to see if the given instruction is the instruction this matcher represents.
* @param instruction The instruction to test
* @returns true if the given instruction matches.
*/
bool Matches(opcode_type instruction) const {
/// Tests to see if the given instruction is the instruction this matcher represents.
/// @param instruction The instruction to test
/// @returns true if the given instruction matches.
inline bool Matches(opcode_type instruction) const noexcept {
return (instruction & mask) == expected;
}
/**
* Calls the corresponding instruction handler on visitor for this type of instruction.
* @param v The visitor to use
* @param instruction The instruction to decode.
*/
handler_return_type call(Visitor& v, opcode_type instruction) const {
/// Calls the corresponding instruction handler on visitor for this type of instruction.
/// @param v The visitor to use
/// @param instruction The instruction to decode.
inline handler_return_type call(Visitor& v, opcode_type instruction) const noexcept {
ASSERT(Matches(instruction));
return fn(v, instruction);
}
private:
const char* name;
opcode_type mask;
opcode_type expected;
handler_function fn;
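
A minimal usage sketch of a matcher of this shape: construct it with a mask, an expected value and a handler, gate with Matches, then dispatch with call. The visitor and the Thumb encoding below are hypothetical:

#include <cassert>
#include <cstdint>
#include <functional>

struct Visitor {
    using instruction_return_type = bool;
    bool thumb16_MOV_imm(std::uint16_t imm8) { return imm8 != 0; } // hypothetical handler
};

struct MiniMatcher {
    using handler_function = std::function<bool(Visitor&, std::uint16_t)>;
    std::uint16_t mask, expected;
    handler_function fn;

    bool Matches(std::uint16_t inst) const noexcept { return (inst & mask) == expected; }
    bool call(Visitor& v, std::uint16_t inst) const {
        assert(Matches(inst));
        return fn(v, inst);
    }
};

int main() {
    // Assumed encoding: MOV <Rd>, #imm8 (T1) has the fixed pattern 00100 in its top bits.
    MiniMatcher mov{0xF800, 0x2000, [](Visitor& v, std::uint16_t inst) {
        return v.thumb16_MOV_imm(inst & 0x00FF);
    }};
    Visitor v;
    return mov.Matches(0x2042) && mov.call(v, 0x2042) ? 0 : 1;
}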

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
@ -34,6 +37,8 @@ enum class OptimizationFlag : std::uint32_t {
MiscIROpt = 0x00000020,
/// Optimize for code speed rather than for code size (this serves well for tight loops)
CodeSpeed = 0x00000040,
/// Disable verification passes
DisableVerification = 0x00000080,
/// This is an UNSAFE optimization that reduces accuracy of fused multiply-add operations.
/// This unfuses fused instructions to improve performance on host CPUs without FMA support.
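
A self-contained illustration of how a flag such as DisableVerification is meant to be consumed: the configuration carries a bitmask and a HasOptimization-style test checks one bit. The enum and config type below are hypothetical mirrors, not the real dynarmic API:

#include <cstdint>

enum class OptFlag : std::uint32_t {
    BlockLinking        = 0x00000001,
    CodeSpeed           = 0x00000040,
    DisableVerification = 0x00000080, // mirrors the flag added above
};

struct Config {
    std::uint32_t optimizations = 0;
    bool HasOptimization(OptFlag f) const noexcept {
        return (optimizations & static_cast<std::uint32_t>(f)) != 0;
    }
};

int main() {
    Config conf;
    conf.optimizations |= static_cast<std::uint32_t>(OptFlag::DisableVerification);
    // Mirrors the guard in the optimizer: skip the verification pass when the flag is set.
    const bool run_verification = !conf.HasOptimization(OptFlag::DisableVerification);
    return run_verification ? 1 : 0;
}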

View file

@ -15,8 +15,6 @@
#include <fmt/format.h>
#include "dynarmic/common/assert.h"
#include "dynarmic/common/memory_pool.h"
#include "dynarmic/frontend/A32/a32_types.h"
#include "dynarmic/frontend/A64/a64_types.h"
#include "dynarmic/ir/cond.h"
@ -27,8 +25,7 @@ namespace Dynarmic::IR {
Block::Block(const LocationDescriptor& location)
: location{location},
end_location{location},
cond{Cond::AL},
instruction_alloc_pool{std::make_unique<std::remove_reference_t<decltype(*instruction_alloc_pool)>>()}
cond{Cond::AL}
{
}
@ -40,7 +37,21 @@ Block::Block(const LocationDescriptor& location)
/// @param args A sequence of Value instances used as arguments for the instruction.
/// @returns Iterator to the newly created instruction.
Block::iterator Block::PrependNewInst(iterator insertion_point, Opcode opcode, std::initializer_list<Value> args) noexcept {
IR::Inst* inst = new (instruction_alloc_pool->Alloc()) IR::Inst(opcode);
// First try the "inline" buffer; otherwise fall back to a slower slab-like allocation scheme.
// The purpose is to avoid many calls to new/delete, which go through malloc and ultimately mmap;
// pooling sidesteps that. The inline buffer exists because many small blocks with only a few
// instructions are created (partly due to subpar optimisations in other passes), and branch-heavy
// code benefits hugely from the locality of these cheaper allocations.
IR::Inst* inst;
if (inlined_inst.size() < inlined_inst.max_size()) {
inst = &inlined_inst[inlined_inst.size()];
inlined_inst.emplace_back(opcode);
} else {
if (pooled_inst.empty() || pooled_inst.back().size() == pooled_inst.back().max_size())
pooled_inst.emplace_back();
inst = &pooled_inst.back()[pooled_inst.back().size()];
pooled_inst.back().emplace_back(opcode);
}
DEBUG_ASSERT(args.size() == inst->NumArgs());
std::for_each(args.begin(), args.end(), [&inst, index = size_t(0)](const auto& arg) mutable {
inst->SetArg(index, arg);
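
A standalone sketch of the two-tier allocation described in the comment above: a fixed-capacity inline buffer absorbs small blocks, and overflow goes to slabs that never relocate their elements. The boost containers match the ones used in the change; the Inst payload is a stand-in:

#include <boost/container/stable_vector.hpp>
#include <boost/container/static_vector.hpp>
#include <cstdint>

struct Inst {
    std::uint32_t opcode;
    explicit Inst(std::uint32_t op) : opcode{op} {}
};

class InstStore {
public:
    Inst* Emplace(std::uint32_t opcode) {
        // Fast path: the inline buffer lives inside this object, so no heap call is made.
        if (inlined.size() < inlined.capacity()) {
            inlined.emplace_back(opcode);
            return &inlined.back();
        }
        // Slow path: append to the last slab, opening a new slab when the current one is full.
        if (slabs.empty() || slabs.back().size() == slabs.back().capacity()) {
            slabs.emplace_back();
        }
        slabs.back().emplace_back(opcode);
        return &slabs.back().back();
    }

private:
    boost::container::static_vector<Inst, 14> inlined;
    // stable_vector keeps slab addresses stable, and each fixed-capacity slab never reallocates.
    boost::container::stable_vector<boost::container::static_vector<Inst, 32>> slabs;
};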

View file

@ -13,6 +13,9 @@
#include <optional>
#include <string>
#include <boost/container/container_fwd.hpp>
#include <boost/container/static_vector.hpp>
#include <boost/container/stable_vector.hpp>
#include <mcl/container/intrusive_list.hpp>
#include "dynarmic/common/common_types.h"
@ -21,7 +24,6 @@
#include "dynarmic/ir/terminal.h"
#include "dynarmic/ir/value.h"
#include "dynarmic/ir/dense_list.h"
#include "dynarmic/common/memory_pool.h"
namespace Dynarmic::IR {
@ -164,8 +166,12 @@ public:
return cycle_count;
}
private:
/// "Hot cache" for small blocks so we don't call global allocator
boost::container::static_vector<Inst, 14> inlined_inst;
/// List of instructions in this block.
instruction_list_type instructions;
/// "Long/far" memory pool
boost::container::stable_vector<boost::container::static_vector<Inst, 32>> pooled_inst;
/// Block to execute next if `cond` did not pass.
std::optional<LocationDescriptor> cond_failed = {};
/// Description of the starting location of this block
@ -174,8 +180,6 @@ private:
LocationDescriptor end_location;
/// Conditional to pass in order to execute this block
Cond cond;
/// Memory pool for instruction list
std::unique_ptr<Common::Pool<sizeof(Inst), 2097152UL / sizeof(Inst)>> instruction_alloc_pool;
/// Terminal instruction of this block.
Terminal terminal = Term::Invalid{};
/// Number of cycles this block takes to execute if the conditional fails.
@ -183,6 +187,7 @@ private:
/// Number of cycles this block takes to execute.
size_t cycle_count = 0;
};
static_assert(sizeof(Block) == 2048);
/// Returns a string representation of the contents of block. Intended for debugging.
std::string DumpBlock(const IR::Block& block) noexcept;

View file

@ -1,21 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "dynarmic/ir/ir_emitter.h"
#include <vector>
#include "dynarmic/common/assert.h"
#include <mcl/bit_cast.hpp>
#include "dynarmic/ir/opcodes.h"
namespace Dynarmic::IR {
} // namespace Dynarmic::IR

View file

@ -1491,9 +1491,9 @@ void Optimize(IR::Block& block, const A32::UserConfig& conf, const Optimization:
Optimization::DeadCodeElimination(block);
}
Optimization::IdentityRemovalPass(block);
//if (!conf.HasOptimization(OptimizationFlag::DisableVerification)) {
if (!conf.HasOptimization(OptimizationFlag::DisableVerification)) {
Optimization::VerificationPass(block);
//}
}
}
void Optimize(IR::Block& block, const A64::UserConfig& conf, const Optimization::PolyfillOptions& polyfill_options) {
@ -1511,9 +1511,9 @@ void Optimize(IR::Block& block, const A64::UserConfig& conf, const Optimization:
if (conf.HasOptimization(OptimizationFlag::MiscIROpt)) [[likely]] {
Optimization::A64MergeInterpretBlocksPass(block, conf.callbacks);
}
//if (!conf.HasOptimization(OptimizationFlag::DisableVerification)) {
if (!conf.HasOptimization(OptimizationFlag::DisableVerification)) {
Optimization::VerificationPass(block);
//}
}
}
} // namespace Dynarmic::Optimization

View file

@ -24,6 +24,7 @@
#include "../rand_int.h"
#include "../unicorn_emu/a32_unicorn.h"
#include "./testenv.h"
#include "../native/testenv.h"
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/llvm_disassemble.h"
@ -46,7 +47,7 @@ using namespace Dynarmic;
template<typename Fn>
bool AnyLocationDescriptorForTerminalHas(IR::Terminal terminal, Fn fn) {
return Common::VisitVariant<bool>(terminal, [&](auto t) -> bool {
return boost::apply_visitor([&](auto t) -> bool {
using T = std::decay_t<decltype(t)>;
if constexpr (std::is_same_v<T, IR::Term::Invalid>) {
return false;
@ -72,7 +73,7 @@ bool AnyLocationDescriptorForTerminalHas(IR::Terminal terminal, Fn fn) {
ASSERT_MSG(false, "Invalid terminal type");
return false;
}
});
}, terminal);
}
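
The test helper above now visits the terminal variant with boost::apply_visitor and a generic lambda. The same shape expressed with the standard library, as an illustration rather than the project's code:

#include <cstdint>
#include <type_traits>
#include <variant>

struct Invalid {};
struct LinkBlock { std::uint64_t target; };
struct ReturnToDispatch {};

using Terminal = std::variant<Invalid, LinkBlock, ReturnToDispatch>;

bool TargetsAddress(const Terminal& terminal, std::uint64_t address) {
    return std::visit([address](const auto& t) -> bool {
        using T = std::decay_t<decltype(t)>;
        if constexpr (std::is_same_v<T, LinkBlock>) {
            return t.target == address;
        } else {
            return false; // the other alternatives carry no location
        }
    }, terminal);
}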
bool ShouldTestInst(u32 instruction, u32 pc, bool is_thumb, bool is_last_inst, A32::ITState it_state = {}) {

View file

@ -22,6 +22,7 @@
#include "../rand_int.h"
#include "../unicorn_emu/a32_unicorn.h"
#include "./testenv.h"
#include "../native/testenv.h"
#include "dynarmic/frontend/A32/FPSCR.h"
#include "dynarmic/frontend/A32/PSR.h"
#include "dynarmic/frontend/A32/a32_location_descriptor.h"

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
@ -6,6 +9,7 @@
#include <catch2/catch_test_macros.hpp>
#include "./testenv.h"
#include "../native/testenv.h"
#include "dynarmic/frontend/A32/a32_location_descriptor.h"
#include "dynarmic/interface/A32/a32.h"

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
* Copyright (c) 2022 MerryMage
* SPDX-License-Identifier: 0BSD
@ -8,6 +11,7 @@
#include <catch2/catch_test_macros.hpp>
#include "./testenv.h"
#include "../native/testenv.h"
#include "dynarmic/frontend/A32/a32_location_descriptor.h"
#include "dynarmic/interface/A32/a32.h"
#include "dynarmic/interface/A32/coprocessor.h"

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
* Copyright (c) 2022 MerryMage
* SPDX-License-Identifier: 0BSD
@ -8,6 +11,7 @@
#include <catch2/catch_test_macros.hpp>
#include "./testenv.h"
#include "../native/testenv.h"
using namespace Dynarmic;

View file

@ -10,6 +10,7 @@
#include "dynarmic/common/common_types.h"
#include "./testenv.h"
#include "../native/testenv.h"
#include "dynarmic/interface/A32/a32.h"
static Dynarmic::A32::UserConfig GetUserConfig(ThumbTestEnv* testenv) {

View file

@ -17,7 +17,6 @@
#include "dynarmic/common/assert.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/interface/A32/a32.h"
#include "../native/testenv.h"
template<typename InstructionType_, u32 infinite_loop_u32>
class A32TestEnv : public Dynarmic::A32::UserCallbacks {

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
@ -7,6 +10,7 @@
#include <oaknut/oaknut.hpp>
#include "./testenv.h"
#include "../native/testenv.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/interface/exclusive_monitor.h"

View file

@ -12,6 +12,7 @@
#include "dynarmic/common/common_types.h"
#include "./testenv.h"
#include "../native/testenv.h"
using namespace Dynarmic;

View file

@ -19,6 +19,7 @@
#include "../rand_int.h"
#include "../unicorn_emu/a64_unicorn.h"
#include "./testenv.h"
#include "../native/testenv.h"
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/llvm_disassemble.h"

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
@ -6,6 +9,7 @@
#include <catch2/catch_test_macros.hpp>
#include "./testenv.h"
#include "../native/testenv.h"
#include "dynarmic/interface/A64/a64.h"
TEST_CASE("misaligned load/store do not use page_table when detect_misaligned_access_via_page_table is set", "[a64]") {

View file

@ -5,6 +5,7 @@
#include <oaknut/oaknut.hpp>
#include "./testenv.h"
#include "../native/testenv.h"
#include "dynarmic/interface/A64/a64.h"
using namespace Dynarmic;

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
@ -6,6 +9,7 @@
#include <catch2/catch_test_macros.hpp>
#include "./testenv.h"
#include "../native/testenv.h"
#include "dynarmic/interface/A64/a64.h"
using namespace Dynarmic;

View file

@ -12,7 +12,6 @@
#include "dynarmic/common/assert.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/interface/A64/a64.h"
#include "../native/testenv.h"
using Vector = Dynarmic::A64::Vector;

View file

@ -36,22 +36,12 @@ TEST_CASE("ASIMD Decoder: Ensure table order correctness", "[decode][a32][.]") {
const auto is_decode_error = [&get_ir](const A32::ASIMDMatcher<A32::TranslatorVisitor>& matcher, u32 instruction) {
const auto block = get_ir(matcher, instruction);
for (const auto& ir_inst : block) {
if (ir_inst.GetOpcode() == IR::Opcode::A32ExceptionRaised) {
if (static_cast<A32::Exception>(ir_inst.GetArg(1).GetU64()) == A32::Exception::DecodeError) {
return true;
}
}
}
return false;
return std::find_if(block.cbegin(), block.cend(), [](auto const& e) {
return e.GetOpcode() == IR::Opcode::A32ExceptionRaised && A32::Exception(e.GetArg(1).GetU64()) == A32::Exception::DecodeError;
}) != block.cend();
};
for (auto iter = table.cbegin(); iter != table.cend(); ++iter) {
if (std::strncmp(iter->GetName(), "UNALLOCATED", 11) == 0) {
continue;
}
const u32 expect = iter->GetExpected();
const u32 mask = iter->GetMask();
u32 x = 0;
@ -59,15 +49,17 @@ TEST_CASE("ASIMD Decoder: Ensure table order correctness", "[decode][a32][.]") {
const u32 instruction = expect | x;
const bool iserr = is_decode_error(*iter, instruction);
const auto alternative = std::find_if(table.cbegin(), iter, [instruction](const auto& m) { return m.Matches(instruction); });
const auto alternative = std::find_if(table.cbegin(), iter, [instruction](const auto& m) {
return m.Matches(instruction);
});
const bool altiserr = is_decode_error(*alternative, instruction);
INFO("Instruction: " << std::hex << std::setfill('0') << std::setw(8) << instruction);
INFO("Expect: " << std::hex << std::setfill('0') << std::setw(8) << expect);
INFO("Fill: " << std::hex << std::setfill('0') << std::setw(8) << x);
INFO("Name: " << iter->GetName());
INFO("Name: " << *A32::GetNameASIMD<A32::TranslatorVisitor>(instruction));
INFO("iserr: " << iserr);
INFO("alternative: " << alternative->GetName());
//INFO("alternative: " << alternative->GetName());
INFO("altiserr: " << altiserr);
REQUIRE(((!iserr && alternative == iter) || (iserr && alternative != iter && !altiserr)));
@ -75,4 +67,4 @@ TEST_CASE("ASIMD Decoder: Ensure table order correctness", "[decode][a32][.]") {
x = ((x | mask) + 1) & ~mask;
} while (x != 0);
}
}
}

View file

@ -6,6 +6,7 @@
#include <immintrin.h>
#include "../A64/testenv.h"
#include "../native/testenv.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/interface/exclusive_monitor.h"

View file

@ -32,27 +32,26 @@
#include "dynarmic/frontend/A64/translate/a64_translate.h"
#include "dynarmic/frontend/A64/translate/impl/impl.h"
#include "dynarmic/interface/A32/a32.h"
#include "dynarmic/interface/A32/config.h"
#include "dynarmic/interface/A32/disassembler.h"
#include "dynarmic/ir/basic_block.h"
#include "dynarmic/ir/opt_passes.h"
using namespace Dynarmic;
const char* GetNameOfA32Instruction(u32 instruction) {
if (auto vfp_decoder = A32::DecodeVFP<A32::TranslatorVisitor>(instruction)) {
return vfp_decoder->get().GetName();
} else if (auto asimd_decoder = A32::DecodeASIMD<A32::TranslatorVisitor>(instruction)) {
return asimd_decoder->get().GetName();
} else if (auto decoder = A32::DecodeArm<A32::TranslatorVisitor>(instruction)) {
return decoder->get().GetName();
}
std::string_view GetNameOfA32Instruction(u32 instruction) {
if (auto const vfp_decoder = A32::DecodeVFP<A32::TranslatorVisitor>(instruction))
return *A32::GetNameVFP<A32::TranslatorVisitor>(instruction);
else if (auto const asimd_decoder = A32::DecodeASIMD<A32::TranslatorVisitor>(instruction))
return *A32::GetNameASIMD<A32::TranslatorVisitor>(instruction);
else if (auto const decoder = A32::DecodeArm<A32::TranslatorVisitor>(instruction))
return *A32::GetNameARM<A32::TranslatorVisitor>(instruction);
return "<null>";
}
const char* GetNameOfA64Instruction(u32 instruction) {
if (auto decoder = A64::Decode<A64::TranslatorVisitor>(instruction)) {
return decoder->get().GetName();
}
std::string_view GetNameOfA64Instruction(u32 instruction) {
if (auto const decoder = A64::Decode<A64::TranslatorVisitor>(instruction))
return *A64::GetName<A64::TranslatorVisitor>(instruction);
return "<null>";
}
@ -66,7 +65,7 @@ void PrintA32Instruction(u32 instruction) {
fmt::print("should_continue: {}\n\n", should_continue);
fmt::print("IR:\n");
fmt::print("{}\n", IR::DumpBlock(ir_block));
Optimization::Optimize(ir_block, conf, {});
Optimization::Optimize(ir_block, A32::UserConfig{}, {});
fmt::print("Optimized IR:\n");
fmt::print("{}\n", IR::DumpBlock(ir_block));
}
@ -81,7 +80,7 @@ void PrintA64Instruction(u32 instruction) {
fmt::print("should_continue: {}\n\n", should_continue);
fmt::print("IR:\n");
fmt::print("{}\n", IR::DumpBlock(ir_block));
Optimization::Optimize(ir_block, conf, {});
Optimization::Optimize(ir_block, A64::UserConfig{}, {});
fmt::print("Optimized IR:\n");
fmt::print("{}\n", IR::DumpBlock(ir_block));
}
@ -99,7 +98,7 @@ void PrintThumbInstruction(u32 instruction) {
fmt::print("should_continue: {}\n\n", should_continue);
fmt::print("IR:\n");
fmt::print("{}\n", IR::DumpBlock(ir_block));
Optimization::Optimize(ir_block, conf, {});
Optimization::Optimize(ir_block, A32::UserConfig{}, {});
fmt::print("Optimized IR:\n");
fmt::print("{}\n", IR::DumpBlock(ir_block));
}

View file

@ -8,7 +8,7 @@
#include <chrono>
#include <common/scope_exit.h>
#include "common/polyfill_ranges.h"
#include <ranges>
#include "common/thread.h"
#include "hid_core/frontend/emulated_controller.h"
#include "hid_core/frontend/input_converter.h"
@ -577,7 +577,7 @@ void EmulatedController::UnloadInput() {
}
void EmulatedController::EnableConfiguration() {
std::scoped_lock lock{connect_mutex, npad_mutex};
std::unique_lock lock1{connect_mutex}, lock2{npad_mutex};
is_configuring = true;
tmp_is_connected = is_connected;
tmp_npad_type = npad_type;
@ -614,19 +614,19 @@ void EmulatedController::DisableConfiguration() {
}
void EmulatedController::EnableSystemButtons() {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
system_buttons_enabled = true;
}
void EmulatedController::DisableSystemButtons() {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
system_buttons_enabled = false;
controller.home_button_state.raw = 0;
controller.capture_button_state.raw = 0;
}
void EmulatedController::ResetSystemButtons() {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
controller.home_button_state.home.Assign(false);
controller.capture_button_state.capture.Assign(false);
}
@ -937,7 +937,7 @@ void EmulatedController::SetStick(const Common::Input::CallbackStatus& callback,
auto trigger_guard = SCOPE_GUARD {
TriggerOnChange(ControllerTriggerType::Stick, !is_configuring);
};
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
const auto stick_value = TransformToStick(callback);
// Only read stick values that have the same uuid or are over the threshold to avoid flapping
@ -994,7 +994,7 @@ void EmulatedController::SetTrigger(const Common::Input::CallbackStatus& callbac
auto trigger_guard = SCOPE_GUARD {
TriggerOnChange(ControllerTriggerType::Trigger, !is_configuring);
};
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
const auto trigger_value = TransformToTrigger(callback);
// Only read trigger values that have the same uuid or are pressed once
@ -1042,7 +1042,7 @@ void EmulatedController::SetMotion(const Common::Input::CallbackStatus& callback
SCOPE_EXIT {
TriggerOnChange(ControllerTriggerType::Motion, !is_configuring);
};
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
auto& raw_status = controller.motion_values[index].raw_status;
auto& emulated = controller.motion_values[index].emulated;
@ -1078,7 +1078,7 @@ void EmulatedController::SetColors(const Common::Input::CallbackStatus& callback
auto trigger_guard = SCOPE_GUARD {
TriggerOnChange(ControllerTriggerType::Color, !is_configuring);
};
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
controller.color_values[index] = TransformToColor(callback);
if (is_configuring) {
@ -1129,7 +1129,7 @@ void EmulatedController::SetBattery(const Common::Input::CallbackStatus& callbac
SCOPE_EXIT {
TriggerOnChange(ControllerTriggerType::Battery, !is_configuring);
};
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
controller.battery_values[index] = TransformToBattery(callback);
if (is_configuring) {
@ -1194,7 +1194,7 @@ void EmulatedController::SetCamera(const Common::Input::CallbackStatus& callback
SCOPE_EXIT {
TriggerOnChange(ControllerTriggerType::IrSensor, !is_configuring);
};
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
controller.camera_values = TransformToCamera(callback);
if (is_configuring) {
@ -1211,7 +1211,7 @@ void EmulatedController::SetRingAnalog(const Common::Input::CallbackStatus& call
SCOPE_EXIT {
TriggerOnChange(ControllerTriggerType::RingController, !is_configuring);
};
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
const auto force_value = TransformToStick(callback);
controller.ring_analog_value = force_value.x;
@ -1227,7 +1227,7 @@ void EmulatedController::SetNfc(const Common::Input::CallbackStatus& callback) {
SCOPE_EXIT {
TriggerOnChange(ControllerTriggerType::Nfc, !is_configuring);
};
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
controller.nfc_values = TransformToNfc(callback);
if (is_configuring) {
@ -1662,7 +1662,7 @@ void EmulatedController::SetSupportedNpadStyleTag(NpadStyleTag supported_styles)
}
bool EmulatedController::IsControllerFullkey(bool use_temporary_value) const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
const auto type = is_configuring && use_temporary_value ? tmp_npad_type : npad_type;
switch (type) {
case NpadStyleIndex::Fullkey:
@ -1678,7 +1678,7 @@ bool EmulatedController::IsControllerFullkey(bool use_temporary_value) const {
}
bool EmulatedController::IsControllerSupported(bool use_temporary_value) const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
const auto type = is_configuring && use_temporary_value ? tmp_npad_type : npad_type;
switch (type) {
case NpadStyleIndex::Fullkey:
@ -1718,7 +1718,7 @@ void EmulatedController::Connect(bool use_temporary_value) {
auto trigger_guard = SCOPE_GUARD {
TriggerOnChange(ControllerTriggerType::Connected, !is_configuring);
};
std::scoped_lock lock{connect_mutex, mutex};
std::unique_lock lock1{connect_mutex}, lock2{mutex};
if (is_configuring) {
tmp_is_connected = true;
return;
@ -1735,7 +1735,7 @@ void EmulatedController::Disconnect() {
auto trigger_guard = SCOPE_GUARD {
TriggerOnChange(ControllerTriggerType::Disconnected, !is_configuring);
};
std::scoped_lock lock{connect_mutex, mutex};
std::unique_lock lock1{connect_mutex}, lock2{mutex};
if (is_configuring) {
tmp_is_connected = false;
return;
@ -1749,23 +1749,21 @@ void EmulatedController::Disconnect() {
}
bool EmulatedController::IsConnected(bool get_temporary_value) const {
std::scoped_lock lock{connect_mutex};
if (get_temporary_value && is_configuring) {
std::shared_lock lock{connect_mutex};
if (get_temporary_value && is_configuring)
return tmp_is_connected;
}
return is_connected;
}
NpadIdType EmulatedController::GetNpadIdType() const {
std::scoped_lock lock{mutex};
std::shared_lock lock{mutex};
return npad_id_type;
}
NpadStyleIndex EmulatedController::GetNpadStyleIndex(bool get_temporary_value) const {
std::scoped_lock lock{npad_mutex};
if (get_temporary_value && is_configuring) {
std::shared_lock lock{npad_mutex};
if (get_temporary_value && is_configuring)
return tmp_npad_type;
}
return npad_type;
}
@ -1773,7 +1771,7 @@ void EmulatedController::SetNpadStyleIndex(NpadStyleIndex npad_type_) {
auto trigger_guard = SCOPE_GUARD {
TriggerOnChange(ControllerTriggerType::Type, !is_configuring);
};
std::scoped_lock lock{mutex, npad_mutex};
std::unique_lock lock1{mutex}, lock2{npad_mutex};
if (is_configuring) {
if (tmp_npad_type == npad_type_) {
@ -1819,37 +1817,37 @@ LedPattern EmulatedController::GetLedPattern() const {
}
ButtonValues EmulatedController::GetButtonsValues() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
return controller.button_values;
}
SticksValues EmulatedController::GetSticksValues() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
return controller.stick_values;
}
TriggerValues EmulatedController::GetTriggersValues() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
return controller.trigger_values;
}
ControllerMotionValues EmulatedController::GetMotionValues() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
return controller.motion_values;
}
ColorValues EmulatedController::GetColorsValues() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
return controller.color_values;
}
BatteryValues EmulatedController::GetBatteryValues() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
return controller.battery_values;
}
CameraValues EmulatedController::GetCameraValues() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
return controller.camera_values;
}
@ -1858,7 +1856,7 @@ RingAnalogValue EmulatedController::GetRingSensorValues() const {
}
HomeButtonState EmulatedController::GetHomeButtons() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
if (is_configuring) {
return {};
}
@ -1866,7 +1864,7 @@ HomeButtonState EmulatedController::GetHomeButtons() const {
}
CaptureButtonState EmulatedController::GetCaptureButtons() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
if (is_configuring) {
return {};
}
@ -1874,7 +1872,7 @@ CaptureButtonState EmulatedController::GetCaptureButtons() const {
}
NpadButtonState EmulatedController::GetNpadButtons() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
if (is_configuring) {
return {};
}
@ -1882,7 +1880,7 @@ NpadButtonState EmulatedController::GetNpadButtons() const {
}
DebugPadButton EmulatedController::GetDebugPadButtons() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
if (is_configuring) {
return {};
}
@ -1890,7 +1888,7 @@ DebugPadButton EmulatedController::GetDebugPadButtons() const {
}
AnalogSticks EmulatedController::GetSticks() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
if (is_configuring) {
return {};
@ -1900,7 +1898,7 @@ AnalogSticks EmulatedController::GetSticks() const {
}
NpadGcTriggerState EmulatedController::GetTriggers() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
if (is_configuring) {
return {};
}
@ -1913,17 +1911,17 @@ MotionState EmulatedController::GetMotions() const {
}
ControllerColors EmulatedController::GetColors() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
return controller.colors_state;
}
BatteryLevelState EmulatedController::GetBattery() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
return controller.battery_state;
}
const CameraState& EmulatedController::GetCamera() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
return controller.camera_state;
}
@ -1932,7 +1930,7 @@ RingSensorForce EmulatedController::GetRingSensorForce() const {
}
const NfcState& EmulatedController::GetNfc() const {
std::scoped_lock lock{mutex};
std::unique_lock lock{mutex};
return controller.nfc_state;
}
@ -1946,7 +1944,7 @@ NpadColor EmulatedController::GetNpadColor(u32 color) {
}
void EmulatedController::TriggerOnChange(ControllerTriggerType type, bool is_npad_service_update) {
std::scoped_lock lock{callback_mutex};
std::unique_lock lock{callback_mutex};
for (const auto& poller_pair : callback_list) {
const ControllerUpdateCallback& poller = poller_pair.second;
if (!is_npad_service_update && poller.is_npad_service) {
@ -1959,13 +1957,13 @@ void EmulatedController::TriggerOnChange(ControllerTriggerType type, bool is_npa
}
int EmulatedController::SetCallback(ControllerUpdateCallback update_callback) {
std::scoped_lock lock{callback_mutex};
std::unique_lock lock{callback_mutex};
callback_list.insert_or_assign(last_callback_key, std::move(update_callback));
return last_callback_key++;
}
void EmulatedController::DeleteCallback(int key) {
std::scoped_lock lock{callback_mutex};
std::unique_lock lock{callback_mutex};
const auto& iterator = callback_list.find(key);
if (iterator == callback_list.end()) {
LOG_ERROR(Input, "Tried to delete non-existent callback {}", key);
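
One guard in the diff above only consumes stick values that come from the same device uuid or exceed a threshold, to avoid flapping between two bound devices. A tiny sketch of that rule, with hypothetical field names and threshold:

#include <cmath>
#include <string>

struct StickStatus {
    std::string uuid; // identifier of the physical device that produced the sample
    float x = 0.0f;
    float y = 0.0f;
};

// Accept the new sample only if it comes from the currently active device,
// or if it is far enough from centre that it should win the binding anyway.
bool ShouldAcceptStick(const StickStatus& current, const StickStatus& incoming,
                       float threshold = 0.5f) {
    if (incoming.uuid == current.uuid) {
        return true;
    }
    return std::hypot(incoming.x, incoming.y) > threshold;
}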

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -7,6 +10,7 @@
#include <functional>
#include <memory>
#include <mutex>
#include <shared_mutex>
#include <unordered_map>
#include <vector>
@ -626,10 +630,10 @@ private:
StickDevices virtual_stick_devices;
ControllerMotionDevices virtual_motion_devices;
mutable std::mutex mutex;
mutable std::mutex callback_mutex;
mutable std::mutex npad_mutex;
mutable std::mutex connect_mutex;
mutable std::shared_mutex mutex;
mutable std::shared_mutex callback_mutex;
mutable std::shared_mutex npad_mutex;
mutable std::shared_mutex connect_mutex;
std::unordered_map<int, ControllerUpdateCallback> callback_list;
int last_callback_key = 0;
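
The header change above switches these mutexes to std::shared_mutex, so read-only paths such as IsConnected can take shared (reader) locks while mutating paths keep exclusive ones. A generic sketch of the pattern, independent of the controller API:

#include <mutex>
#include <shared_mutex>

class Counter {
public:
    int Get() const {
        std::shared_lock lock{mutex}; // many readers may hold this concurrently
        return value;
    }
    void Set(int v) {
        std::unique_lock lock{mutex}; // writers get exclusive access
        value = v;
    }

private:
    mutable std::shared_mutex mutex;
    int value = 0;
};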

View file

@ -1,10 +1,13 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <fmt/ranges.h>
#include "common/param_package.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "common/polyfill_thread.h"
#include "common/settings.h"
#include "common/thread.h"

View file

@ -296,9 +296,7 @@ std::unique_ptr<TranslationMap> InitializeTranslations(QObject* parent)
INSERT(Settings,
use_asynchronous_shaders,
tr("Use asynchronous shader building (Hack)"),
tr("Enables asynchronous shader compilation, which may reduce shader stutter.\nThis "
"feature "
"is experimental."));
tr("Enables asynchronous shader compilation, which may reduce shader stutter."));
INSERT(Settings, use_fast_gpu_time, QString(), QString());
INSERT(Settings,
fast_gpu_time,

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -8,7 +11,7 @@
#include <fmt/ranges.h>
#include "common/polyfill_ranges.h"
#include <ranges>
#include "shader_recompiler/frontend/ir/type.h"
namespace Shader::IR {

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -9,7 +12,7 @@
#include <fmt/ranges.h>
#include "common/polyfill_ranges.h"
#include <ranges>
#include "shader_recompiler/exception.h"
#include "shader_recompiler/frontend/maxwell/control_flow.h"
#include "shader_recompiler/frontend/maxwell/decode.h"

View file

@ -9,7 +9,7 @@
#include <memory>
#include "common/common_types.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "shader_recompiler/exception.h"
#include "shader_recompiler/frontend/maxwell/decode.h"
#include "shader_recompiler/frontend/maxwell/opcodes.h"

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -12,7 +15,7 @@
#include <boost/intrusive/list.hpp>
#include "common/polyfill_ranges.h"
#include <ranges>
#include "shader_recompiler/environment.h"
#include "shader_recompiler/frontend/ir/basic_block.h"
#include "shader_recompiler/frontend/ir/ir_emitter.h"

View file

@ -7,7 +7,7 @@
#include "common/algorithm.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "common/settings.h"
#include "core/core.h"
#include "video_core/engines/maxwell_3d.h"

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -14,7 +17,7 @@
#include "common/literals.h"
#include "common/logging/log.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "common/settings.h"
#include "shader_recompiler/stage.h"
#include "video_core/renderer_opengl/gl_device.h"

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -7,7 +10,7 @@
#include "common/bit_cast.h"
#include "common/cityhash.h"
#include "common/common_types.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "video_core/engines/draw_manager.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_state_tracker.h"

View file

@ -1,10 +1,13 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <list>
#include "common/assert.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "video_core/renderer_vulkan/present/smaa.h"
#include "video_core/renderer_vulkan/present/util.h"

View file

@ -5,7 +5,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "video_core/renderer_vulkan/present/util.h"
namespace Vulkan {

View file

@ -15,7 +15,7 @@
#include <fmt/ranges.h>
#include "common/logging/log.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/core_timing.h"

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -7,7 +10,7 @@
#include <vector>
#include "common/common_types.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_resource_pool.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"

View file

@ -896,11 +896,6 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
.pName = "main",
.pSpecializationInfo = nullptr,
});
/*
if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(stage_ci.stage)) {
stage_ci.pNext = &subgroup_size_ci;
}
*/
}
VkPipelineCreateFlags flags{};
if (device.IsKhrPipelineExecutablePropertiesEnabled() && Settings::values.renderer_debug.GetValue()) {

View file

@ -6,7 +6,7 @@
#include <thread>
#include "common/polyfill_ranges.h"
#include <ranges>
#include "common/settings.h"
#include "video_core/renderer_vulkan/vk_master_semaphore.h"
#include "video_core/vulkan_common/vulkan_device.h"

View file

@ -10,7 +10,7 @@
#include <vector>
#include "common/logging/log.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "common/settings.h"
#include "core/core.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -12,7 +15,7 @@
#include <vector>
#include "common/common_types.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "video_core/control/channel_state_cache.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/rasterizer_interface.h"

View file

@ -18,7 +18,7 @@
#include "common/fs/fs.h"
#include "common/fs/path_util.h"
#include "common/logging/log.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "shader_recompiler/environment.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/memory_manager.h"

View file

@ -7,7 +7,7 @@
#include <algorithm>
#include <string>
#include "common/polyfill_ranges.h"
#include <ranges>
#include "video_core/texture_cache/formatter.h"
#include "video_core/texture_cache/image_base.h"
#include "video_core/texture_cache/image_info.h"

View file

@ -22,7 +22,7 @@
#include "common/hash.h"
#include "common/literals.h"
#include "common/lru_cache.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "common/scratch_buffer.h"
#include "common/slot_vector.h"
#include "common/thread_worker.h"

View file

@ -18,7 +18,7 @@
#include "common/alignment.h"
#include "common/common_types.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "video_core/textures/astc.h"
#include "video_core/textures/workers.h"

View file

@ -10,7 +10,7 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "shader_recompiler/shader_info.h"
#include "video_core/transform_feedback.h"

View file

@ -15,7 +15,7 @@
#include "common/assert.h"
#include "common/literals.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "common/settings.h"
#include "video_core/vulkan_common/nsight_aftermath_tracker.h"
#include "video_core/vulkan_common/vma.h"

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -9,7 +12,7 @@
#include "common/common_types.h"
#include "common/dynamic_library.h"
#include "common/logging/log.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "core/frontend/emu_window.h"
#include "video_core/vulkan_common/vulkan_instance.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

View file

@ -17,7 +17,7 @@
#include "common/common_types.h"
#include "common/literals.h"
#include "common/logging/log.h"
#include "common/polyfill_ranges.h"
#include <ranges>
#include "video_core/vulkan_common/vma.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"