[cmake] enable clang-cl and WoA builds #348

Merged
crueter merged 31 commits from liz-clang-cl-cmake into master 2025-09-09 20:47:51 +02:00
245 changed files with 624 additions and 604 deletions
Showing only changes of commit 58b4ace06e

View file

@ -33,17 +33,25 @@ if (PLATFORM_SUN)
endif()
endif()
# clang-cl prints literally 10000+ warnings without this
if (MSVC AND CXX_CLANG)
add_compile_options(
-Wno-unused-command-line-argument
-Wno-unsafe-buffer-usage
-Wno-unused-value
-Wno-extra-semi-stmt
-Wno-sign-conversion
-Wno-reserved-identifier
-Wno-deprecated-declarations
# clang-cl prints literally 10000+ warnings without this
$<$<COMPILE_LANGUAGE:C,CXX>:-Wno-unused-command-line-argument>
$<$<COMPILE_LANGUAGE:C,CXX>:-Wno-unsafe-buffer-usage>
$<$<COMPILE_LANGUAGE:C,CXX>:-Wno-unused-value>
$<$<COMPILE_LANGUAGE:C,CXX>:-Wno-extra-semi-stmt>
$<$<COMPILE_LANGUAGE:C,CXX>:-Wno-sign-conversion>
$<$<COMPILE_LANGUAGE:C,CXX>:-Wno-reserved-identifier>
$<$<COMPILE_LANGUAGE:C,CXX>:-Wno-deprecated-declarations>
# Required CPU features
$<$<COMPILE_LANGUAGE:C,CXX>:-msse4.1>
$<$<COMPILE_LANGUAGE:C,CXX>:-mcx16>
)
if(CMAKE_ASM_MASM_COMPILER)
set(CMAKE_ASM_MASM_FLAGS "/nologo" CACHE STRING "Flags for MASM assembler" FORCE)
endif()
endif()
set(CPM_SOURCE_CACHE ${CMAKE_SOURCE_DIR}/.cache/cpm)
@ -367,13 +375,10 @@ if (YUZU_USE_CPM)
if (Boost_ADDED)
if (MSVC OR ANDROID)
add_compile_definitions(YUZU_BOOST_v1)
else()
message(WARNING "Using bundled Boost on a system other than Windows (MSVC) or Android is not recommended. You are strongly encouraged to install Boost through your system's package manager.")
endif()
if (NOT MSVC)
if (NOT MSVC OR CXX_CLANG)
# boost sucks
# Solaris (and probably other NIXes) need explicit pthread definition
if (PLATFORM_SUN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthreads")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthreads")

View file

@ -139,8 +139,16 @@ add_subdirectory(nx_tzdb)
# VMA
AddJsonPackage(vulkan-memory-allocator)
if (VulkanMemoryAllocator_ADDED AND MSVC)
target_compile_options(VulkanMemoryAllocator INTERFACE /wd4189)
if (VulkanMemoryAllocator_ADDED)
if (CXX_CLANG)
target_compile_options(VulkanMemoryAllocator INTERFACE
-Wno-unused-variable
)
elseif(MSVC)
target_compile_options(VulkanMemoryAllocator INTERFACE
/wd4189
)
endif()
endif()
if (NOT TARGET LLVM::Demangle)

View file

@ -18,7 +18,7 @@ set_property(DIRECTORY APPEND PROPERTY
COMPILE_DEFINITIONS $<$<CONFIG:Debug>:_DEBUG> $<$<NOT:$<CONFIG:Debug>>:NDEBUG>)
# Set compilation flags
if (MSVC)
if (MSVC AND NOT CXX_CLANG)
set(CMAKE_CONFIGURATION_TYPES Debug Release CACHE STRING "" FORCE)
# Silence "deprecation" warnings
@ -137,14 +137,19 @@ else()
-Wno-missing-field-initializers
)
if (CMAKE_CXX_COMPILER_ID MATCHES Clang OR CMAKE_CXX_COMPILER_ID MATCHES IntelLLVM) # Clang or AppleClang
if (CMAKE_CXX_COMPILER_ID MATCHES Clang OR CMAKE_CXX_COMPILER_ID MATCHES IntelLLVM) # Clang or AppleClang but NOT clang-cl
if (NOT MSVC)
add_compile_options(
-Werror=shadow-uncaptured-local
-Werror=implicit-fallthrough
-Werror=type-limits
)
endif()
add_compile_options(
-Wno-braced-scalar-init
-Wno-unused-private-field
-Wno-nullability-completeness
-Werror=shadow-uncaptured-local
-Werror=implicit-fallthrough
-Werror=type-limits
)
endif()
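
Nearly every C++ hunk that follows wraps std::min, std::max, and std::numeric_limits<T>::min/max in an extra set of parentheses, e.g. (std::min)(a, b). This is presumably to keep the code building when a Windows header that defines function-like min/max macros (i.e. <windows.h> without NOMINMAX) is in scope: a function-like macro only expands when its name is immediately followed by '(', so the parenthesized form always names the real function. A minimal sketch of the effect, using simplified stand-in macros rather than a real <windows.h> include:

#include <algorithm>
#include <cstdio>

// Simplified stand-ins for the macros <windows.h> defines when NOMINMAX is not set.
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

int main() {
    int a = 3, b = 7;
    // int bad = std::min(a, b);   // 'min' is followed by '(', so the macro expands and
                                   // the result 'std::(((a) < (b)) ? ...)' does not compile
    int good = (std::min)(a, b);   // ')' follows 'min', so no expansion; calls the real function
    std::printf("%d\n", good);     // prints 3
    return 0;
}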

View file

@ -193,7 +193,7 @@ void AudioRenderer::Main(std::stop_token stop_token) {
}
}
max_time = std::min(command_buffer.time_limit, max_time);
max_time = (std::min)(command_buffer.time_limit, max_time);
command_list_processor.SetProcessTimeMax(max_time);
if (index == 0) {

View file

@ -73,9 +73,9 @@ constexpr s32 HighestVoicePriority = 0;
constexpr u32 BufferAlignment = 0x40;
constexpr u32 WorkbufferAlignment = 0x1000;
constexpr s32 FinalMixId = 0;
constexpr s32 InvalidDistanceFromFinalMix = std::numeric_limits<s32>::min();
constexpr s32 InvalidDistanceFromFinalMix = (std::numeric_limits<s32>::min)();
constexpr s32 UnusedSplitterId = -1;
constexpr s32 UnusedMixId = std::numeric_limits<s32>::max();
constexpr s32 UnusedMixId = (std::numeric_limits<s32>::max)();
constexpr u32 InvalidNodeId = 0xF0000000;
constexpr s32 InvalidProcessOrder = -1;
constexpr u32 MaxBiquadFilters = 2;

View file

@ -51,7 +51,7 @@ public:
*/
void RegisterBuffers(boost::container::static_vector<AudioBuffer, N>& out_buffers) {
std::scoped_lock l{lock};
const s32 to_register{std::min(std::min(appended_count, BufferAppendLimit),
const s32 to_register{(std::min)((std::min)(appended_count, BufferAppendLimit),
BufferAppendLimit - registered_count)};
for (s32 i = 0; i < to_register; i++) {
@ -175,7 +175,7 @@ public:
}
size_t buffers_to_flush{
std::min(static_cast<u32>(registered_count + appended_count), max_buffers)};
(std::min)(static_cast<u32>(registered_count + appended_count), max_buffers)};
if (buffers_to_flush == 0) {
return 0;
}

View file

@ -45,7 +45,7 @@ u32 AudioDevice::ListAudioDeviceName(std::span<AudioDeviceName> out_buffer) cons
names = device_names;
}
const u32 out_count{static_cast<u32>(std::min(out_buffer.size(), names.size()))};
const u32 out_count{static_cast<u32>((std::min)(out_buffer.size(), names.size()))};
for (u32 i = 0; i < out_count; i++) {
out_buffer[i] = names[i];
}
@ -53,7 +53,7 @@ u32 AudioDevice::ListAudioDeviceName(std::span<AudioDeviceName> out_buffer) cons
}
u32 AudioDevice::ListAudioOutputDeviceName(std::span<AudioDeviceName> out_buffer) const {
const u32 out_count{static_cast<u32>(std::min(out_buffer.size(), output_device_names.size()))};
const u32 out_count{static_cast<u32>((std::min)(out_buffer.size(), output_device_names.size()))};
for (u32 i = 0; i < out_count; i++) {
out_buffer[i] = output_device_names[i];

View file

@ -43,7 +43,7 @@ void BehaviorInfo::AppendError(const ErrorInfo& error) {
}
void BehaviorInfo::CopyErrorInfo(std::span<ErrorInfo> out_errors, u32& out_count) const {
out_count = std::min(error_count, MaxErrors);
out_count = (std::min)(error_count, MaxErrors);
for (size_t i = 0; i < MaxErrors; i++) {
if (i < out_count) {

View file

@ -464,7 +464,7 @@ void CommandBuffer::GenerateDeviceSinkCommand(const s32 node_id, const s16 buffe
s16 max_input{0};
for (u32 i = 0; i < parameter.input_count; i++) {
cmd.inputs[i] = buffer_offset + parameter.inputs[i];
max_input = std::max(max_input, cmd.inputs[i]);
max_input = (std::max)(max_input, cmd.inputs[i]);
}
if (state.upsampler_info != nullptr) {

View file

@ -56,11 +56,11 @@ public:
// Voices
u64 voice_size{0};
if (behavior.IsWaveBufferVer2Supported()) {
voice_size = std::max(std::max(sizeof(AdpcmDataSourceVersion2Command),
voice_size = (std::max)((std::max)(sizeof(AdpcmDataSourceVersion2Command),
sizeof(PcmInt16DataSourceVersion2Command)),
sizeof(PcmFloatDataSourceVersion2Command));
} else {
voice_size = std::max(std::max(sizeof(AdpcmDataSourceVersion1Command),
voice_size = (std::max)((std::max)(sizeof(AdpcmDataSourceVersion1Command),
sizeof(PcmInt16DataSourceVersion1Command)),
sizeof(PcmFloatDataSourceVersion1Command));
}
@ -82,7 +82,7 @@ public:
// Sinks
size +=
params.sinks * std::max(sizeof(DeviceSinkCommand), sizeof(CircularBufferSinkCommand));
params.sinks * (std::max)(sizeof(DeviceSinkCommand), sizeof(CircularBufferSinkCommand));
// Performance
size += (params.effects + params.voices + params.sinks + params.sub_mixes + 1 +

View file

@ -29,8 +29,8 @@ constexpr std::array<u8, 3> PitchBySrcQuality = {4, 8, 4};
template <typename T>
static u32 DecodePcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
const DecodeArg& req) {
constexpr s32 min{std::numeric_limits<s16>::min()};
constexpr s32 max{std::numeric_limits<s16>::max()};
constexpr s32 min{(std::numeric_limits<s16>::min)()};
constexpr s32 max{(std::numeric_limits<s16>::max)()};
if (req.buffer == 0 || req.buffer_size == 0) {
return 0;
@ -41,7 +41,7 @@ static u32 DecodePcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
}
auto samples_to_decode{
std::min(req.samples_to_read, req.end_offset - req.start_offset - req.offset)};
(std::min)(req.samples_to_read, req.end_offset - req.start_offset - req.offset)};
u32 channel_count{static_cast<u32>(req.channel_count)};
switch (req.channel_count) {
@ -55,7 +55,7 @@ static u32 DecodePcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
if constexpr (std::is_floating_point_v<T>) {
for (u32 i = 0; i < samples_to_decode; i++) {
auto sample{static_cast<s32>(samples[i * channel_count + req.target_channel] *
std::numeric_limits<s16>::max())};
(std::numeric_limits<s16>::max)())};
out_buffer[i] = static_cast<s16>(std::clamp(sample, min, max));
}
} else {
@ -79,7 +79,7 @@ static u32 DecodePcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
if constexpr (std::is_floating_point_v<T>) {
for (u32 i = 0; i < samples_to_decode; i++) {
auto sample{static_cast<s32>(samples[i * channel_count + req.target_channel] *
std::numeric_limits<s16>::max())};
(std::numeric_limits<s16>::max)())};
out_buffer[i] = static_cast<s16>(std::clamp(sample, min, max));
}
} else {
@ -125,7 +125,7 @@ static u32 DecodeAdpcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
}
auto start_pos{req.start_offset + req.offset};
auto samples_to_process{std::min(req.end_offset - start_pos, req.samples_to_read)};
auto samples_to_process{(std::min)(req.end_offset - start_pos, req.samples_to_read)};
if (samples_to_process == 0) {
return 0;
}
@ -139,7 +139,7 @@ static u32 DecodeAdpcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
position_in_frame += 2;
}
const auto size{std::max((samples_to_process / 8U) * SamplesPerFrame, 8U)};
const auto size{(std::max)((samples_to_process / 8U) * SamplesPerFrame, 8U)};
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> wavebuffer(
memory, req.buffer + position_in_frame / 2, size);
@ -260,7 +260,7 @@ void DecodeFromWaveBuffers(Core::Memory::Memory& memory, const DecodeFromWaveBuf
auto max_remaining_sample_count{
((Common::FixedPoint<17, 15>(TempBufferSize) - fraction) / sample_rate_ratio)
.to_uint_floor()};
max_remaining_sample_count = std::min(max_remaining_sample_count, remaining_sample_count);
max_remaining_sample_count = (std::min)(max_remaining_sample_count, remaining_sample_count);
auto wavebuffers_consumed{voice_state.wave_buffers_consumed};
auto wavebuffer_index{voice_state.wave_buffer_index};
@ -273,7 +273,7 @@ void DecodeFromWaveBuffers(Core::Memory::Memory& memory, const DecodeFromWaveBuf
std::array<s16, TempBufferSize> temp_buffer{};
while (remaining_sample_count > 0) {
const auto samples_to_write{std::min(remaining_sample_count, max_remaining_sample_count)};
const auto samples_to_write{(std::min)(remaining_sample_count, max_remaining_sample_count)};
const auto samples_to_read{
(fraction + samples_to_write * sample_rate_ratio).to_uint_floor()};

View file

@ -86,7 +86,7 @@ static u32 WriteAuxBufferDsp(Core::Memory::Memory& memory, CpuAddr send_info_,
u32 write_count{write_count_};
u32 read_pos{0};
while (write_count > 0) {
u32 to_write{std::min(count_max - target_write_offset, write_count)};
u32 to_write{(std::min)(count_max - target_write_offset, write_count)};
if (to_write > 0) {
const auto write_addr = send_buffer + target_write_offset * sizeof(s32);
memory.WriteBlockUnsafe(write_addr, &input[read_pos], to_write * sizeof(s32));
@ -157,7 +157,7 @@ static u32 ReadAuxBufferDsp(Core::Memory::Memory& memory, CpuAddr return_info_,
u32 read_count{read_count_};
u32 write_pos{0};
while (read_count > 0) {
u32 to_read{std::min(count_max - target_read_offset, read_count)};
u32 to_read{(std::min)(count_max - target_read_offset, read_count)};
if (to_read > 0) {
const auto read_addr = return_buffer + target_read_offset * sizeof(s32);
memory.ReadBlockUnsafe(read_addr, &output[write_pos], to_read * sizeof(s32));

View file

@ -20,8 +20,8 @@ namespace AudioCore::Renderer {
void ApplyBiquadFilterFloat(std::span<s32> output, std::span<const s32> input,
std::array<s16, 3>& b_, std::array<s16, 2>& a_,
VoiceState::BiquadFilterState& state, const u32 sample_count) {
constexpr f64 min{std::numeric_limits<s32>::min()};
constexpr f64 max{std::numeric_limits<s32>::max()};
constexpr f64 min{(std::numeric_limits<s32>::min)()};
constexpr f64 max{(std::numeric_limits<s32>::max)()};
std::array<f64, 3> b{Common::FixedPoint<50, 14>::from_base(b_[0]).to_double(),
Common::FixedPoint<50, 14>::from_base(b_[1]).to_double(),
Common::FixedPoint<50, 14>::from_base(b_[2]).to_double()};
@ -61,8 +61,8 @@ void ApplyBiquadFilterFloat(std::span<s32> output, std::span<const s32> input,
static void ApplyBiquadFilterInt(std::span<s32> output, std::span<const s32> input,
std::array<s16, 3>& b, std::array<s16, 2>& a,
VoiceState::BiquadFilterState& state, const u32 sample_count) {
constexpr s64 min{std::numeric_limits<s32>::min()};
constexpr s64 max{std::numeric_limits<s32>::max()};
constexpr s64 min{(std::numeric_limits<s32>::min)()};
constexpr s64 max{(std::numeric_limits<s32>::max)()};
for (u32 i = 0; i < sample_count; i++) {
const s64 in_sample{input[i]};

View file

@ -79,7 +79,7 @@ static u32 WriteAuxBufferDsp(Core::Memory::Memory& memory, const CpuAddr send_in
u32 write_count{write_count_};
u32 write_pos{0};
while (write_count > 0) {
u32 to_write{std::min(count_max - target_write_offset, write_count)};
u32 to_write{(std::min)(count_max - target_write_offset, write_count)};
if (to_write > 0) {
memory.WriteBlockUnsafe(send_buffer + target_write_offset * sizeof(s32),

View file

@ -76,9 +76,9 @@ static void UpdateI3dl2ReverbEffectParameter(const I3dl2ReverbInfo::ParameterVer
state.dry_gain = params.dry_gain;
Common::FixedPoint<50, 14> early_gain{
std::min(params.room_gain + params.reflection_gain, 5000.0f) / 2000.0f};
(std::min)(params.room_gain + params.reflection_gain, 5000.0f) / 2000.0f};
state.early_gain = pow_10(early_gain.to_float());
Common::FixedPoint<50, 14> late_gain{std::min(params.room_gain + params.reverb_gain, 5000.0f) /
Common::FixedPoint<50, 14> late_gain{(std::min)(params.room_gain + params.reverb_gain, 5000.0f) /
2000.0f};
state.late_gain = pow_10(late_gain.to_float());
@ -94,7 +94,7 @@ static void UpdateI3dl2ReverbEffectParameter(const I3dl2ReverbInfo::ParameterVer
const Common::FixedPoint<50, 14> c{
std::sqrt(std::pow(b.to_float(), 2.0f) + (std::pow(a.to_float(), 2.0f) * -4.0f))};
state.lowpass_1 = std::min(((b - c) / (a * 2.0f)).to_float(), 0.99723f);
state.lowpass_1 = (std::min)(((b - c) / (a * 2.0f)).to_float(), 0.99723f);
state.lowpass_2 = 1.0f - state.lowpass_1;
}

View file

@ -50,8 +50,8 @@ static void ApplyLightLimiterEffect(const LightLimiterInfo::ParameterVersion2& p
std::span<std::span<const s32>> inputs,
std::span<std::span<s32>> outputs, const u32 sample_count,
LightLimiterInfo::StatisticsInternal* statistics) {
constexpr s64 min{std::numeric_limits<s32>::min()};
constexpr s64 max{std::numeric_limits<s32>::max()};
constexpr s64 min{(std::numeric_limits<s32>::min)()};
constexpr s64 max{(std::numeric_limits<s32>::max)()};
const auto recip_estimate = [](f64 a) -> f64 {
s32 q, s;
@ -117,9 +117,9 @@ static void ApplyLightLimiterEffect(const LightLimiterInfo::ParameterVersion2& p
if (statistics) {
statistics->channel_max_sample[channel] =
std::max(statistics->channel_max_sample[channel], abs_sample.to_float());
(std::max)(statistics->channel_max_sample[channel], abs_sample.to_float());
statistics->channel_compression_gain_min[channel] =
std::min(statistics->channel_compression_gain_min[channel],
(std::min)(statistics->channel_compression_gain_min[channel],
state.compression_gain[channel].to_float());
}
}

View file

@ -94,7 +94,7 @@ static void UpdateReverbEffectParameter(const ReverbInfo::ParameterVersion2& par
for (u32 i = 0; i < ReverbInfo::MaxDelayTaps; i++) {
auto early_delay{
((pre_delay_time + EarlyDelayTimes[params.early_mode][i]) * sample_rate).to_int()};
early_delay = std::min(early_delay, state.pre_delay_line.sample_count_max);
early_delay = (std::min)(early_delay, state.pre_delay_line.sample_count_max);
state.early_delay_times[i] = early_delay + 1;
state.early_gains[i] = Common::FixedPoint<50, 14>::from_base(params.early_gain) *
EarlyDelayGains[params.early_mode][i];
@ -107,7 +107,7 @@ static void UpdateReverbEffectParameter(const ReverbInfo::ParameterVersion2& par
auto pre_time{
((pre_delay_time + EarlyDelayTimes[params.early_mode][10]) * sample_rate).to_int()};
state.pre_delay_time = std::min(pre_time, state.pre_delay_line.sample_count_max);
state.pre_delay_time = (std::min)(pre_time, state.pre_delay_line.sample_count_max);
if (!unk_initialized) {
unk_value = cos((1280.0f / sample_rate).to_float());
@ -117,13 +117,13 @@ static void UpdateReverbEffectParameter(const ReverbInfo::ParameterVersion2& par
for (u32 i = 0; i < ReverbInfo::MaxDelayLines; i++) {
const auto fdn_delay{(FdnDelayTimes[params.late_mode][i] * sample_rate).to_int()};
state.fdn_delay_lines[i].sample_count =
std::min(fdn_delay, state.fdn_delay_lines[i].sample_count_max);
(std::min)(fdn_delay, state.fdn_delay_lines[i].sample_count_max);
state.fdn_delay_lines[i].buffer_end =
&state.fdn_delay_lines[i].buffer[state.fdn_delay_lines[i].sample_count - 1];
const auto decay_delay{(DecayDelayTimes[params.late_mode][i] * sample_rate).to_int()};
state.decay_delay_lines[i].sample_count =
std::min(decay_delay, state.decay_delay_lines[i].sample_count_max);
(std::min)(decay_delay, state.decay_delay_lines[i].sample_count_max);
state.decay_delay_lines[i].buffer_end =
&state.decay_delay_lines[i].buffer[state.decay_delay_lines[i].sample_count - 1];

View file

@ -43,7 +43,7 @@ void DepopForMixBuffersCommand::Dump(
}
void DepopForMixBuffersCommand::Process(const AudioRenderer::CommandListProcessor& processor) {
auto end_index{std::min(processor.buffer_count, input + count)};
auto end_index{(std::min)(processor.buffer_count, input + count)};
std::span<s32> depop_buff{reinterpret_cast<s32*>(depop_buffer), end_index};
for (u32 index = input; index < end_index; index++) {

View file

@ -215,7 +215,7 @@ auto UpsampleCommand::Dump([[maybe_unused]] const AudioRenderer::CommandListProc
void UpsampleCommand::Process(const AudioRenderer::CommandListProcessor& processor) {
const auto info{reinterpret_cast<UpsamplerInfo*>(upsampler_info)};
const auto input_count{std::min(info->input_count, buffer_count)};
const auto input_count{(std::min)(info->input_count, buffer_count)};
const std::span<const s16> inputs_{reinterpret_cast<const s16*>(inputs), input_count};
for (u32 i = 0; i < input_count; i++) {

View file

@ -21,8 +21,8 @@ void CircularBufferSinkCommand::Dump(
}
void CircularBufferSinkCommand::Process(const AudioRenderer::CommandListProcessor& processor) {
constexpr s32 min{std::numeric_limits<s16>::min()};
constexpr s32 max{std::numeric_limits<s16>::max()};
constexpr s32 min{(std::numeric_limits<s16>::min)()};
constexpr s32 max{(std::numeric_limits<s16>::max)()};
std::array<s16, TargetSampleCount * MaxChannels> output{};
for (u32 channel = 0; channel < input_count; channel++) {

View file

@ -20,8 +20,8 @@ void DeviceSinkCommand::Dump([[maybe_unused]] const AudioRenderer::CommandListPr
}
void DeviceSinkCommand::Process(const AudioRenderer::CommandListProcessor& processor) {
constexpr s32 min = std::numeric_limits<s16>::min();
constexpr s32 max = std::numeric_limits<s16>::max();
constexpr s32 min = (std::numeric_limits<s16>::min)();
constexpr s32 max = (std::numeric_limits<s16>::max)();
auto stream{processor.GetOutputSinkStream()};
stream->SetSystemChannels(input_count);

View file

@ -126,7 +126,7 @@ bool MixContext::TSortInfo(const SplitterContext& splitter_context) {
}
auto sorted_results{node_states.GetSortedResuls()};
const auto result_size{std::min(count, static_cast<s32>(sorted_results.second))};
const auto result_size{(std::min)(count, static_cast<s32>(sorted_results.second))};
for (s32 i = 0; i < result_size; i++) {
sorted_mix_infos[i] = &mix_infos[sorted_results.first[i]];
}

View file

@ -168,9 +168,9 @@ protected:
/// Node id for this sink
u32 node_id{};
/// State buffer for this sink
std::array<u8, std::max(sizeof(DeviceState), sizeof(CircularBufferState))> state{};
std::array<u8, (std::max)(sizeof(DeviceState), sizeof(CircularBufferState))> state{};
/// Parameter buffer for this sink
std::array<u8, std::max(sizeof(DeviceInParameter), sizeof(CircularBufferInParameter))>
std::array<u8, (std::max)(sizeof(DeviceInParameter), sizeof(CircularBufferInParameter))>
parameter{};
};

View file

@ -170,7 +170,7 @@ void SplitterContext::RecomposeDestination(SplitterInfo& out_info,
auto dest_count{info_header->destination_count};
if (!splitter_bug_fixed) {
dest_count = std::min(dest_count, GetDestCountPerInfoForCompat());
dest_count = (std::min)(dest_count, GetDestCountPerInfoForCompat());
}
if (dest_count == 0) {

View file

@ -718,7 +718,7 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
const auto estimated_time{start_estimated_time - end_estimated_time};
const auto time_limit{static_cast<u32>(std::max(dsp_time_limit + estimated_time, 0.0f))};
const auto time_limit{static_cast<u32>((std::max)(dsp_time_limit + estimated_time, 0.0f))};
num_voices_dropped =
DropVoices(command_buffer, static_cast<u32>(start_estimated_time), time_limit);
}

View file

@ -73,7 +73,7 @@ public:
minimum_latency = TargetSampleCount * 2;
}
minimum_latency = std::max(minimum_latency, TargetSampleCount * 2);
minimum_latency = (std::max)(minimum_latency, TargetSampleCount * 2);
LOG_INFO(Service_Audio,
"Opening cubeb stream {} type {} with: rate {} channels {} (system channels {}) "
@ -372,7 +372,7 @@ u32 GetCubebLatency() {
LOG_CRITICAL(Audio_Sink, "Error getting minimum latency, error: {}", latency_error);
latency = TargetSampleCount * 2;
}
latency = std::max(latency, TargetSampleCount * 2);
latency = (std::max)(latency, TargetSampleCount * 2);
cubeb_destroy(ctx);
return latency;
}
@ -426,7 +426,7 @@ bool IsCubebSuitable() {
LOG_ERROR(Audio_Sink, "Cubeb could not get min latency, it is not suitable.");
return false;
}
latency = std::max(latency, TargetSampleCount * 2);
latency = (std::max)(latency, TargetSampleCount * 2);
// Test opening a device with standard parameters
cubeb_devid output_device{0};

View file

@ -31,8 +31,8 @@ void SinkStream::AppendBuffer(SinkBuffer& buffer, std::span<s16> samples) {
return;
}
constexpr s32 min{std::numeric_limits<s16>::min()};
constexpr s32 max{std::numeric_limits<s16>::max()};
constexpr s32 min{(std::numeric_limits<s16>::min)()};
constexpr s32 max{(std::numeric_limits<s16>::max)()};
auto yuzu_volume{Settings::Volume()};
if (yuzu_volume > 1.0f) {
@ -123,8 +123,8 @@ void SinkStream::AppendBuffer(SinkBuffer& buffer, std::span<s16> samples) {
}
std::vector<s16> SinkStream::ReleaseBuffer(u64 num_samples) {
constexpr s32 min = std::numeric_limits<s16>::min();
constexpr s32 max = std::numeric_limits<s16>::max();
constexpr s32 min = (std::numeric_limits<s16>::min)();
constexpr s32 max = (std::numeric_limits<s16>::max)();
auto samples{samples_buffer.Pop(num_samples)};

View file

@ -27,8 +27,8 @@ public:
// If we are, join with them, ensuring we stay in bounds.
if (it != m_free_regions.end()) {
start_address = std::min(start_address, it->lower());
end_address = std::max(end_address, it->upper());
start_address = (std::min)(start_address, it->lower());
end_address = (std::max)(end_address, it->upper());
}
// Free the relevant region.

View file

@ -484,9 +484,9 @@ std::string GetParentPath(std::string_view path) {
std::size_t name_index;
if (name_bck_index == std::string_view::npos || name_fwd_index == std::string_view::npos) {
name_index = std::min(name_bck_index, name_fwd_index);
name_index = (std::min)(name_bck_index, name_fwd_index);
} else {
name_index = std::max(name_bck_index, name_fwd_index);
name_index = (std::max)(name_bck_index, name_fwd_index);
}
return std::string(path.substr(0, name_index));
@ -506,7 +506,7 @@ std::string_view GetPathWithoutTop(std::string_view path) {
const auto name_bck_index = path.find('\\');
const auto name_fwd_index = path.find('/');
return path.substr(std::min(name_bck_index, name_fwd_index) + 1);
return path.substr((std::min)(name_bck_index, name_fwd_index) + 1);
}
} // namespace Common::FS

View file

@ -144,8 +144,7 @@ void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission p
}
// Clamp to end.
next = std::min(next, end);
next = (std::min)(next, end);
// Reprotect, if we need to.
if (should_protect) {
m_buffer.Protect(cur, next - cur, perm);
@ -211,8 +210,8 @@ void HeapTracker::RebuildSeparateHeapAddressSpace() {
// Despite being worse in theory, this has proven to be better in practice than more
// regularly dumping a smaller amount, because it significantly reduces average case
// lock contention.
const size_t desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2;
const size_t evict_count = m_resident_map_count - desired_count;
std::size_t const desired_count = (std::min)(m_resident_map_count, m_max_resident_map_count) / 2;
std::size_t const evict_count = m_resident_map_count - desired_count;
auto it = m_resident_mappings.begin();
for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {

View file

@ -199,8 +199,8 @@ public:
std::scoped_lock lock{placeholder_mutex};
auto [it, end] = placeholders.equal_range({virtual_offset, virtual_end});
while (it != end) {
const size_t offset = std::max(it->lower(), virtual_offset);
const size_t protect_length = std::min(it->upper(), virtual_end) - offset;
const size_t offset = (std::max)(it->lower(), virtual_offset);
const size_t protect_length = (std::min)(it->upper(), virtual_end) - offset;
DWORD old_flags{};
if (!VirtualProtect(virtual_base + offset, protect_length, new_flags, &old_flags)) {
LOG_CRITICAL(HW_Memory, "Failed to change virtual memory protect rules");
@ -266,8 +266,8 @@ private:
}
const size_t placeholder_begin = it->lower();
const size_t placeholder_end = it->upper();
const size_t unmap_begin = std::max(virtual_offset, placeholder_begin);
const size_t unmap_end = std::min(virtual_offset + length, placeholder_end);
const size_t unmap_begin = (std::max)(virtual_offset, placeholder_begin);
const size_t unmap_end = (std::min)(virtual_offset + length, placeholder_end);
ASSERT(unmap_begin >= placeholder_begin && unmap_begin < placeholder_end);
ASSERT(unmap_end <= placeholder_end && unmap_end > placeholder_begin);
@ -655,8 +655,8 @@ private:
*virtual_offset = 0;
*length = 0;
} else {
*virtual_offset = std::max(intended_start, address_space_start);
*length = std::min(intended_end, address_space_end) - *virtual_offset;
*virtual_offset = (std::max)(intended_start, address_space_start);
*length = (std::min)(intended_end, address_space_end) - *virtual_offset;
}
}

View file

@ -18,7 +18,7 @@ constexpr const char* TrimSourcePath(std::string_view source) {
const auto rfind = [source](const std::string_view match) {
return source.rfind(match) == source.npos ? 0 : (source.rfind(match) + match.size());
};
auto idx = std::max({rfind("src/"), rfind("src\\"), rfind("../"), rfind("..\\")});
auto idx = (std::max)({rfind("src/"), rfind("src\\"), rfind("../"), rfind("..\\")});
return source.data() + idx;
}

View file

@ -85,10 +85,10 @@ struct Rectangle {
}
[[nodiscard]] constexpr bool Intersect(const Rectangle<T>& with, Rectangle<T>* result) const {
result->left = std::max(left, with.left);
result->top = std::max(top, with.top);
result->right = std::min(right, with.right);
result->bottom = std::min(bottom, with.bottom);
result->left = (std::max)(left, with.left);
result->top = (std::max)(top, with.top);
result->right = (std::min)(right, with.right);
result->bottom = (std::min)(bottom, with.bottom);
return !result->IsEmpty();
}
};

View file

@ -25,9 +25,9 @@ template <typename T>
inline bool CanAddWithoutOverflow(T lhs, T rhs) {
#ifdef _MSC_VER
if (lhs >= 0 && rhs >= 0) {
return WrappingAdd(lhs, rhs) >= std::max(lhs, rhs);
return WrappingAdd(lhs, rhs) >= (std::max)(lhs, rhs);
} else if (lhs < 0 && rhs < 0) {
return WrappingAdd(lhs, rhs) <= std::min(lhs, rhs);
return WrappingAdd(lhs, rhs) <= (std::min)(lhs, rhs);
} else {
return true;
}
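
As an aside on the surrounding CanAddWithoutOverflow helper (pre-existing code, only the min/max calls are touched by this change): for two non-negative operands, a wrap-around add overflowed exactly when the wrapped result is smaller than the larger operand, and symmetrically for two negative operands. A small self-contained illustration; WrappingAdd32 below is a hypothetical stand-in for the WrappingAdd helper used above:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in: two's-complement wrap-around addition of 32-bit ints.
static std::int32_t WrappingAdd32(std::int32_t a, std::int32_t b) {
    return static_cast<std::int32_t>(static_cast<std::uint32_t>(a) +
                                     static_cast<std::uint32_t>(b));
}

int main() {
    const std::int32_t a = 2'000'000'000;
    const std::int32_t b = 2'000'000'000;
    const std::int32_t wrapped = WrappingAdd32(a, b); // -294967296 after wrap-around
    // Both operands are non-negative, so the true sum fits only if the wrapped
    // result is still >= the larger operand; here it is not, so the add would overflow.
    const bool would_overflow = wrapped < (std::max)(a, b);
    std::printf("wrapped = %d, overflow = %s\n", wrapped, would_overflow ? "yes" : "no");
    return 0;
}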

View file

@ -18,7 +18,7 @@ private:
public:
explicit RangeMap(ValueT null_value_) : null_value{null_value_} {
container.emplace(std::numeric_limits<KeyT>::min(), null_value);
container.emplace((std::numeric_limits<KeyT>::min)(), null_value);
};
~RangeMap() = default;
@ -66,7 +66,7 @@ private:
}
const auto it_end = std::next(it);
if (it_end == container.end()) {
return std::numeric_limits<KeyT>::max() - address;
return (std::numeric_limits<KeyT>::max)() - address;
}
return it_end->first - address;
}

View file

@ -274,7 +274,7 @@ void OverlapRangeSet<AddressType>::Subtract(AddressType base_address, size_t siz
template <typename AddressType>
void OverlapRangeSet<AddressType>::DeleteAll(AddressType base_address, size_t size) {
m_impl->template Subtract<false>(base_address, size, std::numeric_limits<s32>::max(),
m_impl->template Subtract<false>(base_address, size, (std::numeric_limits<s32>::max)(),
[](AddressType, AddressType) {});
}

View file

@ -29,7 +29,7 @@ class RingBuffer {
// T must be safely memcpy-able and have a trivial default constructor.
static_assert(std::is_trivial_v<T>);
// Ensure capacity is sensible.
static_assert(capacity < std::numeric_limits<std::size_t>::max() / 2);
static_assert(capacity < (std::numeric_limits<std::size_t>::max)() / 2);
static_assert((capacity & (capacity - 1)) == 0, "capacity must be a power of two");
// Ensure lock-free.
static_assert(std::atomic_size_t::is_always_lock_free);
@ -43,9 +43,9 @@ public:
std::lock_guard lock(rb_mutex);
const std::size_t slots_free = capacity + read_index - write_index;
const std::size_t push_count = std::min(slot_count, slots_free);
const std::size_t push_count = (std::min)(slot_count, slots_free);
const std::size_t pos = write_index % capacity;
const std::size_t first_copy = std::min(capacity - pos, push_count);
const std::size_t first_copy = (std::min)(capacity - pos, push_count);
const std::size_t second_copy = push_count - first_copy;
const char* in = static_cast<const char*>(new_slots);
@ -69,9 +69,9 @@ public:
std::lock_guard lock(rb_mutex);
const std::size_t slots_filled = write_index - read_index;
const std::size_t pop_count = std::min(slots_filled, max_slots);
const std::size_t pop_count = (std::min)(slots_filled, max_slots);
const std::size_t pos = read_index % capacity;
const std::size_t first_copy = std::min(capacity - pos, pop_count);
const std::size_t first_copy = (std::min)(capacity - pos, pop_count);
const std::size_t second_copy = pop_count - first_copy;
char* out = static_cast<char*>(output);
@ -84,7 +84,7 @@ public:
}
std::vector<T> Pop(std::size_t max_slots = ~std::size_t(0)) {
std::vector<T> out(std::min(max_slots, capacity));
std::vector<T> out((std::min)(max_slots, capacity));
const std::size_t count = Pop(out.data(), out.size());
out.resize(count);
return out;

View file

@ -37,14 +37,14 @@ struct ResolutionScalingInfo {
if (value == 0) {
return 0;
}
return std::max((value * static_cast<s32>(up_scale)) >> static_cast<s32>(down_shift), 1);
return (std::max)((value * static_cast<s32>(up_scale)) >> static_cast<s32>(down_shift), 1);
}
u32 ScaleUp(u32 value) const {
if (value == 0U) {
return 0U;
}
return std::max((value * up_scale) >> down_shift, 1U);
return (std::max)((value * up_scale) >> down_shift, 1U);
}
};
@ -612,8 +612,8 @@ struct Values {
false, true, &custom_rtc_enabled};
SwitchableSetting<s64, true> custom_rtc_offset{linkage,
0,
std::numeric_limits<int>::min(),
std::numeric_limits<int>::max(),
(std::numeric_limits<int>::min)(),
(std::numeric_limits<int>::max)(),
"custom_rtc_offset",
Category::System,
Specialization::Countable,

View file

@ -223,7 +223,7 @@ public:
if constexpr (std::is_enum_v<Type>) {
return EnumMetadata<Type>::Index();
} else {
return std::numeric_limits<u32>::max();
return (std::numeric_limits<u32>::max)();
}
}
@ -237,14 +237,14 @@ public:
[[nodiscard]] std::string MinVal() const override final {
if constexpr (std::is_arithmetic_v<Type> && !ranged) {
return this->ToString(std::numeric_limits<Type>::min());
return this->ToString((std::numeric_limits<Type>::min)());
} else {
return this->ToString(minimum);
}
}
[[nodiscard]] std::string MaxVal() const override final {
if constexpr (std::is_arithmetic_v<Type> && !ranged) {
return this->ToString(std::numeric_limits<Type>::max());
return this->ToString((std::numeric_limits<Type>::max)());
} else {
return this->ToString(maximum);
}

View file

@ -17,7 +17,7 @@
namespace Common {
struct SlotId {
static constexpr u32 INVALID_INDEX = std::numeric_limits<u32>::max();
static constexpr u32 INVALID_INDEX = (std::numeric_limits<u32>::max)();
constexpr auto operator<=>(const SlotId&) const noexcept = default;

View file

@ -66,7 +66,7 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
const auto scheduling_type = SCHED_OTHER;
s32 max_prio = sched_get_priority_max(scheduling_type);
s32 min_prio = sched_get_priority_min(scheduling_type);
u32 level = std::max(static_cast<u32>(new_priority) + 1, 4U);
u32 level = (std::max)(static_cast<u32>(new_priority) + 1, 4U);
struct sched_param params;
if (max_prio > min_prio) {
@ -101,7 +101,7 @@ void SetCurrentThreadName(const char* name) {
#elif defined(__linux__)
// Linux limits thread names to 15 characters and will outright reject any
// attempt to set a longer name with ERANGE.
std::string truncated(name, std::min(strlen(name), static_cast<size_t>(15)));
std::string truncated(name, (std::min)(strlen(name), static_cast<size_t>(15)));
if (int e = pthread_setname_np(pthread_self(), truncated.c_str())) {
errno = e;
LOG_ERROR(Common, "Failed to set thread name to '{}': {}", truncated, GetLastErrorMsg());

View file

@ -124,7 +124,7 @@ public:
this->state.data[3] = ParamTmat;
{
const int num_init_iterations = std::max(seed_count + 1, MinimumInitIterations) - 1;
const int num_init_iterations = (std::max)(seed_count + 1, MinimumInitIterations) - 1;
GenerateInitialValuePlus(&this->state, 0, seed_count);

View file

@ -65,7 +65,7 @@ namespace Common {
#endif
#else
// This one is bit more inaccurate.
return MultiplyAndDivide64(std::numeric_limits<u64>::max(), numerator, divisor);
return MultiplyAndDivide64((std::numeric_limits<u64>::max)(), numerator, divisor);
#endif
}

View file

@ -283,9 +283,9 @@ Loader::AppLoader::Modules FindModules(Kernel::KProcess* process) {
// Ignore leading directories.
char* path_pointer = module_path.path.data();
char* path_end =
path_pointer + std::min(PathLengthMax, module_path.path_length);
path_pointer + (std::min)(PathLengthMax, module_path.path_length);
for (s32 i = 0; i < std::min(PathLengthMax, module_path.path_length) &&
for (s32 i = 0; i < (std::min)(PathLengthMax, module_path.path_length) &&
module_path.path[i] != '\0';
i++) {
if (module_path.path[i] == '/' || module_path.path[i] == '\\') {

View file

@ -185,7 +185,7 @@ struct System::Impl {
Service::PSC::Time::LocationName name{};
auto new_name = Settings::GetTimeZoneString(Settings::values.time_zone_index.GetValue());
std::memcpy(name.data(), new_name.data(), std::min(name.size(), new_name.size()));
std::memcpy(name.data(), new_name.data(), (std::min)(name.size(), new_name.size()));
timezone_service->SetDeviceLocationName(name);

View file

@ -34,8 +34,8 @@ std::size_t XTSEncryptionLayer::Read(u8* data, std::size_t length, std::size_t o
buffer.resize(XTS_SECTOR_SIZE);
cipher.XTSTranscode(buffer.data(), buffer.size(), buffer.data(), offset / XTS_SECTOR_SIZE,
XTS_SECTOR_SIZE, Op::Decrypt);
std::memcpy(data, buffer.data(), std::min(buffer.size(), length));
return std::min(buffer.size(), length);
std::memcpy(data, buffer.data(), (std::min)(buffer.size(), length));
return (std::min)(buffer.size(), length);
}
// offset does not fall on block boundary (0x4000)

View file

@ -664,7 +664,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
if (svc_mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
svc_mem_info.base_address + svc_mem_info.size - 1 !=
std::numeric_limits<u64>::max()) {
(std::numeric_limits<u64>::max)()) {
const char* state = GetMemoryStateName(svc_mem_info.state);
const char* perm = GetMemoryPermissionString(svc_mem_info);
const char l = True(svc_mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
@ -710,7 +710,7 @@ std::vector<char>::const_iterator GDBStub::CommandEnd() const {
const auto end{std::find(current_command.begin(), current_command.end(), GDB_STUB_END)};
// Require the checksum to be present
return std::min(end + 2, current_command.end());
return (std::min)(end + 2, current_command.end());
}
std::optional<std::string> GDBStub::DetachCommand() {

View file

@ -12,7 +12,7 @@ static T HexToValue(std::string_view hex) {
static_assert(std::is_trivially_copyable_v<T>);
T value{};
const auto mem{Common::HexStringToVector(hex, false)};
std::memcpy(&value, mem.data(), std::min(mem.size(), sizeof(T)));
std::memcpy(&value, mem.data(), (std::min)(mem.size(), sizeof(T)));
return value;
}

View file

@ -388,7 +388,7 @@ void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto o
while (remaining_size) {
const size_t next_pages = static_cast<std::size_t>(continuity_tracker[page_index]);
const std::size_t copy_amount =
std::min((next_pages << Memory::YUZU_PAGEBITS) - page_offset, remaining_size);
(std::min)((next_pages << Memory::YUZU_PAGEBITS) - page_offset, remaining_size);
const auto current_vaddr =
static_cast<u64>((page_index << Memory::YUZU_PAGEBITS) + page_offset);
SCOPE_EXIT{

View file

@ -683,7 +683,7 @@ public:
const auto max_mount_len =
out_mount_name_buffer_size == 0
? MountNameLengthMax + 1
: std::min(MountNameLengthMax + 1, out_mount_name_buffer_size);
: (std::min)(MountNameLengthMax + 1, out_mount_name_buffer_size);
// Parse the path until we see a drive separator
size_t mount_len = 0;

View file

@ -48,7 +48,7 @@ public:
private:
Result DoRead(s64* out_count, DirectoryEntry* out_entries, s64 max_entries) {
const u64 actual_entries =
std::min(static_cast<u64>(max_entries), entries.size() - next_entry_index);
(std::min)(static_cast<u64>(max_entries), entries.size() - next_entry_index);
const auto* begin = reinterpret_cast<u8*>(entries.data() + next_entry_index);
const auto* end = reinterpret_cast<u8*>(entries.data() + next_entry_index + actual_entries);
const auto range_size = static_cast<std::size_t>(std::distance(begin, end));

View file

@ -93,7 +93,7 @@ protected:
R_TRY(this->DoGetSize(std::addressof(file_size)));
R_UNLESS(offset <= file_size, ResultOutOfRange);
*out = static_cast<size_t>(std::min(file_size - offset, static_cast<s64>(size)));
*out = static_cast<size_t>((std::min)(file_size - offset, static_cast<s64>(size)));
R_SUCCEED();
}

View file

@ -213,7 +213,7 @@ size_t AesCtrCounterExtendedStorage::Read(u8* buffer, size_t size, size_t offset
// Determine how much is left.
const auto remaining_size = end_offset - cur_offset;
const auto cur_size = static_cast<size_t>(std::min(remaining_size, data_size));
const auto cur_size = static_cast<size_t>((std::min)(remaining_size, data_size));
ASSERT(cur_size <= size);
// If necessary, perform decryption.

View file

@ -94,7 +94,7 @@ size_t AesCtrStorage::Write(const u8* buffer, size_t size, size_t offset) {
while (remaining > 0) {
// Determine data we're writing and where.
const size_t write_size =
use_work_buffer ? std::min(pooled_buffer.GetSize(), remaining) : remaining;
use_work_buffer ? (std::min)(pooled_buffer.GetSize(), remaining) : remaining;
void* write_buf;
if (use_work_buffer) {

View file

@ -65,7 +65,7 @@ size_t AesXtsStorage::Read(u8* buffer, size_t size, size_t offset) const {
// Determine the size of the pre-data read.
const size_t skip_size =
static_cast<size_t>(offset - Common::AlignDown(offset, m_block_size));
const size_t data_size = std::min(size, m_block_size - skip_size);
const size_t data_size = (std::min)(size, m_block_size - skip_size);
// Decrypt into a pooled buffer.
{
@ -84,14 +84,14 @@ size_t AesXtsStorage::Read(u8* buffer, size_t size, size_t offset) const {
AddCounter(ctr.data(), IvSize, 1);
processed_size += data_size;
ASSERT(processed_size == std::min(size, m_block_size - skip_size));
ASSERT(processed_size == (std::min)(size, m_block_size - skip_size));
}
// Decrypt aligned chunks.
char* cur = reinterpret_cast<char*>(buffer) + processed_size;
size_t remaining = size - processed_size;
while (remaining > 0) {
const size_t cur_size = std::min(m_block_size, remaining);
const size_t cur_size = (std::min)(m_block_size, remaining);
m_cipher->SetIV(ctr);
m_cipher->Transcode(cur, cur_size, cur, Core::Crypto::Op::Decrypt);

View file

@ -104,7 +104,7 @@ size_t AlignmentMatchingStorageImpl::Read(VirtualFile base_storage, char* work_b
while (remaining_tail_size > 0) {
const auto aligned_tail_offset = Common::AlignDown(tail_offset, data_alignment);
const auto cur_size =
std::min(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
(std::min)(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
remaining_tail_size);
base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
@ -186,7 +186,7 @@ size_t AlignmentMatchingStorageImpl::Write(VirtualFile base_storage, char* work_
const auto aligned_tail_offset = Common::AlignDown(tail_offset, data_alignment);
const auto cur_size =
std::min(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
(std::min)(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
remaining_tail_size);
base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);

View file

@ -29,12 +29,12 @@ void GenerateKey(void* dst_key, size_t dst_key_size, const void* src_key, size_t
key_type == static_cast<s32>(KeyType::NcaHeaderKey2)) {
const s32 key_index = static_cast<s32>(KeyType::NcaHeaderKey2) == key_type;
const auto key = instance.GetKey(Core::Crypto::S256KeyType::Header);
std::memcpy(dst_key, key.data() + key_index * 0x10, std::min(dst_key_size, key.size() / 2));
std::memcpy(dst_key, key.data() + key_index * 0x10, (std::min)(dst_key_size, key.size() / 2));
return;
}
const s32 key_generation =
std::max(key_type / NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount, 1) - 1;
(std::max)(key_type / NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount, 1) - 1;
const s32 key_index = key_type % NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount;
Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(

View file

@ -34,7 +34,7 @@ void IntegrityVerificationStorage::Initialize(VirtualFile hs,
ASSERT(m_verification_block_size == 1ll << m_verification_block_order);
// Set upper layer block sizes.
upper_layer_verif_block_size = std::max(upper_layer_verif_block_size, HashSize);
upper_layer_verif_block_size = (std::max)(upper_layer_verif_block_size, HashSize);
m_upper_layer_verification_block_size = upper_layer_verif_block_size;
m_upper_layer_verification_block_order = ILog2(static_cast<u32>(upper_layer_verif_block_size));
ASSERT(m_upper_layer_verification_block_size == 1ll << m_upper_layer_verification_block_order);

View file

@ -9,7 +9,7 @@
namespace FileSys {
u8 NcaHeader::GetProperKeyGeneration() const {
return std::max(this->key_generation, this->key_generation_2);
return (std::max)(this->key_generation, this->key_generation_2);
}
bool NcaPatchInfo::HasIndirectTable() const {

View file

@ -34,7 +34,7 @@ void PooledBuffer::AllocateCore(size_t ideal_size, size_t required_size, bool la
ASSERT(required_size <= GetAllocatableSizeMaxCore(large));
const size_t target_size =
std::min(std::max(ideal_size, required_size), GetAllocatableSizeMaxCore(large));
(std::min)((std::max)(ideal_size, required_size), GetAllocatableSizeMaxCore(large));
// Dummy implementation for allocate.
if (target_size > 0) {

View file

@ -18,7 +18,7 @@ private:
virtual ~ZeroStorage() {}
virtual size_t GetSize() const override {
return std::numeric_limits<size_t>::max();
return (std::numeric_limits<size_t>::max)();
}
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
@ -62,7 +62,7 @@ public:
private:
void SetZeroStorage() {
return this->SetStorage(1, m_zero_storage, 0, std::numeric_limits<s64>::max());
return this->SetStorage(1, m_zero_storage, 0, (std::numeric_limits<s64>::max)());
}
private:

View file

@ -102,7 +102,7 @@ std::vector<u8> CNMT::Serialize() const {
header.type >= TitleType::Application && header.type <= TitleType::AOC;
const auto dead_zone = header.table_offset + sizeof(CNMTHeader);
std::vector<u8> out(
std::max(sizeof(CNMTHeader) + (has_opt_header ? sizeof(OptionalHeader) : 0), dead_zone) +
(std::max)(sizeof(CNMTHeader) + (has_opt_header ? sizeof(OptionalHeader) : 0), dead_zone) +
content_records.size() * sizeof(ContentRecord) + meta_records.size() * sizeof(MetaRecord));
memcpy(out.data(), &header, sizeof(CNMTHeader));

View file

@ -273,7 +273,7 @@ std::vector<NcaID> PlaceholderCache::List() const {
NcaID PlaceholderCache::Generate() {
std::random_device device;
std::mt19937 gen(device());
std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
std::uniform_int_distribution<u64> distribution(1, (std::numeric_limits<u64>::max)());
NcaID out{};

View file

@ -75,7 +75,7 @@ std::pair<EntryType, std::string> GetEntry(const RomFSTraversalContext& ctx, siz
}
std::memcpy(&entry, data + offset, sizeof(EntryType));
const size_t name_length = std::min(entry_end + entry.name_length, size) - entry_end;
const size_t name_length = (std::min)(entry_end + entry.name_length, size) - entry_end;
std::string name(reinterpret_cast<const char*>(data + entry_end), name_length);
return {entry, std::move(name)};

View file

@ -507,9 +507,9 @@ bool VfsRawCopy(const VirtualFile& src, const VirtualFile& dest, std::size_t blo
if (!dest->Resize(src->GetSize()))
return false;
std::vector<u8> temp(std::min(block_size, src->GetSize()));
std::vector<u8> temp((std::min)(block_size, src->GetSize()));
for (std::size_t i = 0; i < src->GetSize(); i += block_size) {
const auto read = std::min(block_size, src->GetSize() - i);
const auto read = (std::min)(block_size, src->GetSize() - i);
if (src->Read(temp.data(), read, i) != read) {
return false;

View file

@ -43,7 +43,7 @@ public:
}
std::size_t Read(u8* data, std::size_t length, std::size_t offset) const override {
const auto read = std::min(length, size - offset);
const auto read = (std::min)(length, size - offset);
std::fill(data, data + read, value);
return read;
}
@ -61,7 +61,7 @@ public:
}
std::vector<u8> ReadBytes(std::size_t length, std::size_t offset) const override {
const auto read = std::min(length, size - offset);
const auto read = (std::min)(length, size - offset);
return std::vector<u8>(read, value);
}

View file

@ -37,7 +37,7 @@ bool VectorVfsFile::IsReadable() const {
}
std::size_t VectorVfsFile::Read(u8* data_, std::size_t length, std::size_t offset) const {
const auto read = std::min(length, data.size() - offset);
const auto read = (std::min)(length, data.size() - offset);
std::memcpy(data_, data.data() + offset, read);
return read;
}
@ -45,7 +45,7 @@ std::size_t VectorVfsFile::Read(u8* data_, std::size_t length, std::size_t offse
std::size_t VectorVfsFile::Write(const u8* data_, std::size_t length, std::size_t offset) {
if (offset + length > data.size())
data.resize(offset + length);
const auto write = std::min(length, data.size() - offset);
const auto write = (std::min)(length, data.size() - offset);
std::memcpy(data.data() + offset, data_, write);
return write;
}

View file

@ -45,7 +45,7 @@ public:
}
std::size_t Read(u8* data_, std::size_t length, std::size_t offset) const override {
const auto read = std::min(length, size - offset);
const auto read = (std::min)(length, size - offset);
std::memcpy(data_, data.data() + offset, read);
return read;
}

View file

@ -28,11 +28,11 @@ std::pair<f32, f32> EmuWindow::MapToTouchScreen(u32 framebuffer_x, u32 framebuff
}
std::pair<u32, u32> EmuWindow::ClipToTouchScreen(u32 new_x, u32 new_y) const {
new_x = std::max(new_x, framebuffer_layout.screen.left);
new_x = std::min(new_x, framebuffer_layout.screen.right - 1);
new_x = (std::max)(new_x, framebuffer_layout.screen.left);
new_x = (std::min)(new_x, framebuffer_layout.screen.right - 1);
new_y = std::max(new_y, framebuffer_layout.screen.top);
new_y = std::min(new_y, framebuffer_layout.screen.bottom - 1);
new_y = (std::max)(new_y, framebuffer_layout.screen.top);
new_y = (std::min)(new_y, framebuffer_layout.screen.bottom - 1);
return std::make_pair(new_x, new_y);
}

View file

@ -14,7 +14,7 @@ namespace Layout {
template <class T>
static Common::Rectangle<T> MaxRectangle(Common::Rectangle<T> window_area,
float screen_aspect_ratio) {
const float scale = std::min(static_cast<float>(window_area.GetWidth()),
const float scale = (std::min)(static_cast<float>(window_area.GetWidth()),
static_cast<float>(window_area.GetHeight()) / screen_aspect_ratio);
return Common::Rectangle<T>{0, 0, static_cast<T>(std::round(scale)),
static_cast<T>(std::round(scale * screen_aspect_ratio))};

View file

@ -133,7 +133,7 @@ void SetupPoolPartitionMemoryRegions(KMemoryLayout& memory_layout) {
// Decide on starting addresses for our pools.
const u64 application_pool_start = pool_end - application_pool_size;
const u64 applet_pool_start = application_pool_start - applet_pool_size;
const u64 unsafe_system_pool_start = std::min(
const u64 unsafe_system_pool_start = (std::min)(
kernel_dram_start + CarveoutSizeMax,
Common::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment));
const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;

View file

@ -182,13 +182,13 @@ namespace {
template <typename F>
u64 GenerateUniformRange(u64 min, u64 max, F f) {
// Handle the case where the difference is too large to represent.
if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
if (max == (std::numeric_limits<u64>::max)() && min == (std::numeric_limits<u64>::min)()) {
return f();
}
// Iterate until we get a value in range.
const u64 range_size = ((max + 1) - min);
const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
const u64 effective_max = ((std::numeric_limits<u64>::max)() / range_size) * range_size;
while (true) {
if (const u64 rnd = f(); rnd < effective_max) {
return min + (rnd % range_size);
@ -201,7 +201,7 @@ u64 GenerateUniformRange(u64 min, u64 max, F f) {
u64 KSystemControl::GenerateRandomU64() {
std::random_device device;
std::mt19937 gen(device());
std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
std::uniform_int_distribution<u64> distribution(1, (std::numeric_limits<u64>::max)());
return distribution(gen);
}
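
As an aside on GenerateUniformRange above (pre-existing code, only the numeric_limits calls are touched by this change): the effective_max rejection loop avoids modulo bias when range_size does not divide 2^64 evenly. The same idea with small numbers, using a hypothetical 8-bit generator so the arithmetic is easy to follow:

#include <cstdint>
#include <cstdio>

int main() {
    // An 8-bit generator yields 256 raw values; for a range of size 6,
    // 256 % 6 == 4, so without rejection buckets 0..3 would each be hit
    // 43 times while buckets 4..5 would only be hit 42 times.
    const std::uint32_t range_size = 6;
    const std::uint32_t effective_max = (256 / range_size) * range_size; // 252
    std::uint32_t counts[6] = {};
    for (std::uint32_t raw = 0; raw < 256; ++raw) {
        if (raw < effective_max) { // reject 252..255, mirroring the loop above
            counts[raw % range_size]++;
        }
    }
    for (std::uint32_t i = 0; i < range_size; ++i) {
        std::printf("%u: %u\n", i, counts[i]); // every bucket gets exactly 42
    }
    return 0;
}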

View file

@ -110,7 +110,7 @@ public:
// Update our tracking.
m_page_bitmap.ClearBit(offset);
m_peak = std::max(m_peak, (++m_used));
m_peak = (std::max)(m_peak, (++m_used));
return GetPointer<PageBuffer>(m_aligned_address) + offset;
}
@ -131,7 +131,7 @@ public:
// Update our tracking.
m_page_bitmap.ClearRange(offset, count);
m_used += count;
m_peak = std::max(m_peak, m_used);
m_peak = (std::max)(m_peak, m_used);
return GetPointer<PageBuffer>(m_aligned_address) + offset;
}

View file

@ -179,7 +179,7 @@ private:
m_free_head_index = m_entry_infos[index].GetNextFreeIndex();
m_max_count = std::max(m_max_count, ++m_count);
m_max_count = (std::max)(m_max_count, ++m_count);
return index;
}

View file

@ -19,7 +19,7 @@ void KHardwareTimer::Initialize() {
void KHardwareTimer::Finalize() {
m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type);
m_wakeup_time = std::numeric_limits<s64>::max();
m_wakeup_time = (std::numeric_limits<s64>::max)();
m_event_type.reset();
}
@ -37,7 +37,7 @@ void KHardwareTimer::DoTask() {
// Disable the timer interrupt while we handle this.
// Not necessary due to core timing already having popped this event to call it.
// this->DisableInterrupt();
m_wakeup_time = std::numeric_limits<s64>::max();
m_wakeup_time = (std::numeric_limits<s64>::max)();
if (const s64 next_time = this->DoInterruptTaskImpl(GetTick());
0 < next_time && next_time <= m_wakeup_time) {
@ -63,7 +63,7 @@ void KHardwareTimer::EnableInterrupt(s64 wakeup_time) {
void KHardwareTimer::DisableInterrupt() {
m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type,
Core::Timing::UnscheduleEventType::NoWait);
m_wakeup_time = std::numeric_limits<s64>::max();
m_wakeup_time = (std::numeric_limits<s64>::max)();
}
s64 KHardwareTimer::GetTick() const {
@ -71,7 +71,7 @@ s64 KHardwareTimer::GetTick() const {
}
bool KHardwareTimer::GetInterruptEnabled() {
return m_wakeup_time != std::numeric_limits<s64>::max();
return m_wakeup_time != (std::numeric_limits<s64>::max)();
}
} // namespace Kernel

View file

@ -40,7 +40,7 @@ private:
private:
// Absolute time in nanoseconds
s64 m_wakeup_time{std::numeric_limits<s64>::max()};
s64 m_wakeup_time{(std::numeric_limits<s64>::max)()};
std::shared_ptr<Core::Timing::EventType> m_event_type{};
};

View file

@ -11,7 +11,7 @@ namespace Kernel {
namespace {
constexpr u64 InvalidThreadId = std::numeric_limits<u64>::max();
constexpr u64 InvalidThreadId = (std::numeric_limits<u64>::max)();
class ThreadQueueImplForKLightServerSessionRequest final : public KThreadQueue {
private:

View file

@ -19,7 +19,7 @@ private:
KLightSession* m_parent{};
KThread::WaiterList m_request_list{};
KThread* m_current_request{};
u64 m_server_thread_id{std::numeric_limits<u64>::max()};
u64 m_server_thread_id{(std::numeric_limits<u64>::max)()};
KThread* m_server_thread{};
public:

View file

@ -551,7 +551,7 @@ public:
}
m_device_disable_merge_left_count =
std::min(m_device_disable_merge_left_count, m_device_use_count);
(std::min)(m_device_disable_merge_left_count, m_device_use_count);
if (m_device_disable_merge_left_count == 0) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(

View file

@ -66,7 +66,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
this->insert(*found);
// Insert a new region for the split.
const u64 new_pair = (old_pair != std::numeric_limits<u64>::max())
const u64 new_pair = (old_pair != (std::numeric_limits<u64>::max)())
? old_pair + (address - old_address)
: old_pair;
this->insert(*AllocateRegion(m_memory_region_allocator, address, inserted_region_last,
@ -75,7 +75,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
// If we need to insert a region after the region, do so.
if (old_last != inserted_region_last) {
const u64 after_pair = (old_pair != std::numeric_limits<u64>::max())
const u64 after_pair = (old_pair != (std::numeric_limits<u64>::max)())
? old_pair + (inserted_region_end - old_address)
: old_pair;
this->insert(*AllocateRegion(m_memory_region_allocator, inserted_region_end, old_last,

View file

@ -323,7 +323,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
// Process part or all of the block.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
(std::min)(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
manager.OpenFirst(cur_address, cur_pages);
// Advance.
@ -385,7 +385,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Process part or all of the block.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
(std::min)(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
any_new = manager.ProcessOptimizedAllocation(m_system.Kernel(), cur_address,
cur_pages, fill_pattern);
@ -409,7 +409,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Track some or all of the current pages.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
(std::min)(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
manager.TrackOptimizedAllocation(m_system.Kernel(), cur_address, cur_pages);
// Advance.

View file

@ -68,7 +68,7 @@ public:
// Repeatedly open references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
const size_t cur_pages = (std::min)(num_pages, manager.GetPageOffsetToEnd(address));
{
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
@ -84,7 +84,7 @@ public:
// Repeatedly open references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
const size_t cur_pages = (std::min)(num_pages, manager.GetPageOffsetToEnd(address));
{
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
@ -100,7 +100,7 @@ public:
// Repeatedly close references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
const size_t cur_pages = (std::min)(num_pages, manager.GetPageOffsetToEnd(address));
{
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);

View file

@ -28,7 +28,7 @@ public:
: m_address(address), m_last_address(last_address), m_pair_address(pair_address),
m_attributes(attributes), m_type_id(type_id) {}
constexpr KMemoryRegion(u64 address, u64 last_address, u32 attributes, u32 type_id)
: KMemoryRegion(address, last_address, std::numeric_limits<u64>::max(), attributes,
: KMemoryRegion(address, last_address, (std::numeric_limits<u64>::max)(), attributes,
type_id) {}
~KMemoryRegion() = default;

View file

@ -83,7 +83,7 @@ public:
}
// Determine how many bits to take this round.
const auto cur_bits = std::min(num_bits, m_bits_available);
const auto cur_bits = (std::min)(num_bits, m_bits_available);
// Generate mask for our current bits.
const u64 mask = (static_cast<u64>(1) << cur_bits) - 1;

View file

@ -75,7 +75,7 @@ public:
}
static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
const size_t target_pages = std::max(num_pages, align_pages);
const size_t target_pages = (std::max)(num_pages, align_pages);
for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return static_cast<s32>(i);

View file

@ -1731,7 +1731,7 @@ void KPageTableBase::RemapPageGroup(PageLinkedList* page_list, KProcessAddress a
}
// Map whatever we can.
const size_t cur_pages = std::min(pg_pages, map_pages);
const size_t cur_pages = (std::min)(pg_pages, map_pages);
R_ASSERT(this->Operate(page_list, map_address, map_pages, pg_phys_addr, true,
map_properties, OperationType::Map, true));
@ -1929,7 +1929,7 @@ Result KPageTableBase::GetContiguousMemoryRangeWithState(
}
// Take the minimum size for our region.
size = std::min(size, contig_size);
size = (std::min)(size, contig_size);
// Check that the memory is contiguous (modulo the reference count bit).
const KMemoryState test_state_mask = state_mask | KMemoryState::FlagReferenceCounted;
@ -5297,7 +5297,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
KMemoryPermission::None, false, false,
DisableMergeAttribute::None};
const size_t cur_pages =
std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
(std::min)(KProcessAddress(info.GetEndAddress()) - cur_address,
last_unmap_address + 1 - cur_address) /
PageSize;
@ -5345,7 +5345,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
? DisableMergeAttribute::DisableHead
: DisableMergeAttribute::None};
size_t map_pages =
std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
(std::min)(KProcessAddress(info.GetEndAddress()) - cur_address,
last_address + 1 - cur_address) /
PageSize;
@ -5373,7 +5373,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
}
// Add whatever we can to the current block.
const size_t cur_pages = std::min(pg_pages, remain_pages);
const size_t cur_pages = (std::min)(pg_pages, remain_pages);
R_TRY(cur_pg.AddBlock(pg_phys_addr +
((pg_pages - cur_pages) * PageSize),
cur_pages));
@ -5535,7 +5535,7 @@ Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size)
// Determine the range to unmap.
const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
DisableMergeAttribute::None};
const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
const size_t cur_pages = (std::min)(KProcessAddress(info.GetEndAddress()) - cur_address,
last_address + 1 - cur_address) /
PageSize;
@ -5656,7 +5656,7 @@ Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t si
}
// Update our current size.
m_cur_size = std::min(m_remaining_size, m_cur_size + m_entry.block_size);
m_cur_size = (std::min)(m_remaining_size, m_cur_size + m_entry.block_size);
}
}
};

View file

@ -59,7 +59,7 @@ public:
static constexpr u64 InitialProcessIdMax = 0x50;
static constexpr u64 ProcessIdMin = InitialProcessIdMax + 1;
static constexpr u64 ProcessIdMax = std::numeric_limits<u64>::max();
static constexpr u64 ProcessIdMax = (std::numeric_limits<u64>::max)();
private:
using SharedMemoryInfoList = Common::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType;

View file

@ -111,7 +111,7 @@ bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
if (m_current_values[index] + value <= m_limit_values[index]) {
m_current_values[index] += value;
m_current_hints[index] += value;
m_peak_values[index] = std::max(m_peak_values[index], m_current_values[index]);
m_peak_values[index] = (std::max)(m_peak_values[index], m_current_values[index]);
return true;
}

View file

@ -149,7 +149,7 @@ public:
size_t GetObjectIndex(const void* obj) const {
if constexpr (SupportDynamicExpansion) {
if (!this->Contains(reinterpret_cast<uintptr_t>(obj))) {
return std::numeric_limits<size_t>::max();
return (std::numeric_limits<size_t>::max)();
}
}

View file

@ -1016,7 +1016,7 @@ void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
s32 new_priority = thread->GetBasePriority();
for (const auto& held_lock : thread->m_held_lock_info_list) {
new_priority =
std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority());
(std::min)(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority());
}
// If the priority we would inherit is not different from ours, don't do anything.

View file

@ -507,7 +507,7 @@ struct KernelCore::Impl {
constexpr size_t MiscRegionAlign = KernelAslrAlignment;
constexpr size_t MiscRegionMinimumSize = 32_MiB;
const size_t misc_region_size = Common::AlignUp(
std::max(misc_region_needed_size, MiscRegionMinimumSize), MiscRegionAlign);
(std::max)(misc_region_needed_size, MiscRegionMinimumSize), MiscRegionAlign);
ASSERT(misc_region_size > 0);
// Setup the misc region.

View file

@ -58,10 +58,10 @@ Result WaitForAddress(Core::System& system, u64 address, ArbitrationType arb_typ
if (offset_tick > 0) {
timeout = system.Kernel().HardwareTimer().GetTick() + offset_tick + 2;
if (timeout <= 0) {
timeout = std::numeric_limits<s64>::max();
timeout = (std::numeric_limits<s64>::max)();
}
} else {
timeout = std::numeric_limits<s64>::max();
timeout = (std::numeric_limits<s64>::max)();
}
} else {
timeout = timeout_ns;

View file

@ -31,10 +31,10 @@ Result WaitProcessWideKeyAtomic(Core::System& system, u64 address, u64 cv_key, u
if (offset_tick > 0) {
timeout = system.Kernel().HardwareTimer().GetTick() + offset_tick + 2;
if (timeout <= 0) {
timeout = std::numeric_limits<s64>::max();
timeout = (std::numeric_limits<s64>::max)();
}
} else {
timeout = std::numeric_limits<s64>::max();
timeout = (std::numeric_limits<s64>::max)();
}
} else {
timeout = timeout_ns;

View file

@ -61,10 +61,10 @@ Result ReplyAndReceiveImpl(KernelCore& kernel, int32_t* out_index, uintptr_t mes
if (offset_tick > 0) {
timeout = kernel.HardwareTimer().GetTick() + offset_tick + 2;
if (timeout <= 0) {
timeout = std::numeric_limits<s64>::max();
timeout = (std::numeric_limits<s64>::max)();
}
} else {
timeout = std::numeric_limits<s64>::max();
timeout = (std::numeric_limits<s64>::max)();
}
} else {
timeout = timeout_ns;

View file

@ -82,7 +82,7 @@ Result GetProcessList(Core::System& system, s32* out_num_processes, u64 out_proc
const auto num_processes = process_list.size();
const auto copy_amount =
std::min(static_cast<std::size_t>(out_process_ids_size), num_processes);
(std::min)(static_cast<std::size_t>(out_process_ids_size), num_processes);
for (std::size_t i = 0; i < copy_amount && it != process_list.end(); ++i, ++it) {
memory.Write64(out_process_ids, (*it)->GetProcessId());

View file

@ -117,10 +117,10 @@ void SleepThread(Core::System& system, s64 ns) {
if (offset_tick > 0) {
timeout = kernel.HardwareTimer().GetTick() + offset_tick + 2;
if (timeout <= 0) {
timeout = std::numeric_limits<s64>::max();
timeout = (std::numeric_limits<s64>::max)();
}
} else {
timeout = std::numeric_limits<s64>::max();
timeout = (std::numeric_limits<s64>::max)();
}
// Sleep.
@ -226,7 +226,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, u64 out_thread_
auto& memory = GetCurrentMemory(system.Kernel());
const auto& thread_list = current_process->GetThreadList();
const auto num_threads = thread_list.size();
const auto copy_amount = std::min(static_cast<std::size_t>(out_thread_ids_size), num_threads);
const auto copy_amount = (std::min)(static_cast<std::size_t>(out_thread_ids_size), num_threads);
auto list_iter = thread_list.cbegin();
for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {

View file

@ -72,7 +72,7 @@ static void SanitizeJPEGImageSize(std::vector<u8>& image) {
}
}
image.resize(std::min(image.size(), max_jpeg_image_size));
image.resize((std::min)(image.size(), max_jpeg_image_size));
}
class IManagerForSystemService final : public ServiceFramework<IManagerForSystemService> {

View file

@ -118,7 +118,7 @@ void Cabinet::DisplayCompleted(bool apply_changes, std::string_view amiibo_name)
case Service::NFP::CabinetMode::StartNicknameAndOwnerSettings: {
Service::NFP::RegisterInfoPrivate register_info{};
std::memcpy(register_info.amiibo_name.data(), amiibo_name.data(),
std::min(amiibo_name.size(), register_info.amiibo_name.size() - 1));
(std::min)(amiibo_name.size(), register_info.amiibo_name.size() - 1));
register_info.mii_store_data.BuildRandom(Mii::Age::All, Mii::Gender::All, Mii::Race::All);
register_info.mii_store_data.SetNickname({u'y', u'u', u'z', u'u'});
nfp_device->SetRegisterInfoPrivate(register_info);

View file

@ -31,7 +31,7 @@ static Core::Frontend::ControllerParameters ConvertToFrontendParameters(
npad_style_set.raw = private_arg.style_set;
return {
.min_players = std::max(s8{1}, header.player_count_min),
.min_players = (std::max)(s8{1}, header.player_count_min),
.max_players = header.player_count_max,
.keep_controllers_connected = header.enable_take_over_connection,
.enable_single_mode = header.enable_single_mode,

View file

@ -115,7 +115,7 @@ Result IApplicationAccessor::GetApplicationControlProperty(
R_TRY(system.GetARPManager().GetControlProperty(&nacp, m_applet->program_id));
std::memcpy(out_control_property.data(), nacp.data(),
std::min(out_control_property.size(), nacp.size()));
(std::min)(out_control_property.size(), nacp.size()));
R_SUCCEED();
}

Some files were not shown because too many files have changed in this diff.