Common: Move CPU macros to types.h

Stenzek 2023-10-01 13:57:25 +10:00
parent dd204d116e
commit b1bb33a566
18 changed files with 135 additions and 142 deletions

View file

@ -38,7 +38,6 @@ add_library(common
minizip_helpers.cpp
minizip_helpers.h
path.h
platform.h
progress_callback.cpp
progress_callback.h
rectangle.h

View file

@ -28,7 +28,6 @@
<ClInclude Include="memory_settings_interface.h" />
<ClInclude Include="md5_digest.h" />
<ClInclude Include="path.h" />
<ClInclude Include="platform.h" />
<ClInclude Include="progress_callback.h" />
<ClInclude Include="rectangle.h" />
<ClInclude Include="scoped_guard.h" />

View file

@ -29,7 +29,6 @@
<ClInclude Include="lru_cache.h" />
<ClInclude Include="easing.h" />
<ClInclude Include="error.h" />
<ClInclude Include="platform.h" />
<ClInclude Include="http_downloader_winhttp.h" />
<ClInclude Include="http_downloader.h" />
<ClInclude Include="path.h" />

View file

@ -1,66 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#if defined(_MSC_VER)
#if defined(_M_X64)
#define CPU_X64 1
#elif defined(_M_IX86)
#define CPU_X86 1
#elif defined(_M_ARM64)
#define CPU_AARCH64 1
#elif defined(_M_ARM)
#define CPU_AARCH32 1
#else
#error Unknown architecture.
#endif
#elif defined(__GNUC__) || defined(__clang__)
#if defined(__x86_64__)
#define CPU_X64 1
#elif defined(__i386__)
#define CPU_X86 1
#elif defined(__aarch64__)
#define CPU_AARCH64 1
#elif defined(__arm__)
#define CPU_AARCH32 1
#elif defined(__riscv) && __riscv_xlen == 64
#define CPU_RISCV64 1
#else
#error Unknown architecture.
#endif
#else
#error Unknown compiler.
#endif
#if defined(CPU_X64)
#define CPU_ARCH_STR "x64"
#elif defined(CPU_X86)
#define CPU_ARCH_STR "x86"
#elif defined(CPU_AARCH32)
#define CPU_ARCH_STR "AArch32"
#elif defined(CPU_AARCH64)
#define CPU_ARCH_STR "AArch64"
#else
#define CPU_ARCH_STR "Unknown"
#endif
#if defined(_WIN32)
#define SYSTEM_STR "Windows"
#elif defined(__ANDROID__)
#define SYSTEM_STR "Android"
#elif defined(__linux__)
#define SYSTEM_STR "Linux"
#elif defined(__FreeBSD__)
#define SYSTEM_STR "FreeBSD"
#elif defined(__APPLE__)
#define SYSTEM_STR "macOS"
#else
#define SYSTEM_STR "Unknown"
#endif

View file

@ -51,9 +51,9 @@ char (&__countof_ArraySizeHelper(T (&array)[N]))[N];
#endif
#ifdef __GNUC__
#define printflike(n,m) __attribute__((format(printf,n,m)))
#define printflike(n, m) __attribute__((format(printf, n, m)))
#else
#define printflike(n,m)
#define printflike(n, m)
#endif
// [[noreturn]] which can be used on function pointers.
@ -68,7 +68,12 @@ char (&__countof_ArraySizeHelper(T (&array)[N]))[N];
#ifdef _MSC_VER
#define ASSUME(x) __assume(x)
#else
#define ASSUME(x) do { if (!(x)) __builtin_unreachable(); } while(0)
#define ASSUME(x) \
do \
{ \
if (!(x)) \
__builtin_unreachable(); \
} while (0)
#endif
// disable warnings that show up at warning level 4
@ -88,6 +93,82 @@ using u32 = uint32_t;
using s64 = int64_t;
using u64 = uint64_t;
// Enable use of static_assert in constexpr if
template<class T>
struct dependent_false : std::false_type
{
};
template<int T>
struct dependent_int_false : std::false_type
{
};
// Architecture detection.
#if defined(_MSC_VER)
#if defined(_M_X64)
#define CPU_ARCH_X64 1
#elif defined(_M_IX86)
#define CPU_ARCH_X86 1
#elif defined(_M_ARM64)
#define CPU_ARCH_ARM64 1
#elif defined(_M_ARM)
#define CPU_ARCH_ARM32 1
#else
#error Unknown architecture.
#endif
#elif defined(__GNUC__) || defined(__clang__)
#if defined(__x86_64__)
#define CPU_ARCH_X64 1
#elif defined(__i386__)
#define CPU_ARCH_X86 1
#elif defined(__aarch64__)
#define CPU_ARCH_ARM64 1
#elif defined(__arm__)
#define CPU_ARCH_ARM32 1
#elif defined(__riscv) && __riscv_xlen == 64
#define CPU_ARCH_RISCV64 1
#else
#error Unknown architecture.
#endif
#else
#error Unknown compiler.
#endif
#if defined(CPU_ARCH_X64)
#define CPU_ARCH_STR "x64"
#elif defined(CPU_ARCH_X86)
#define CPU_ARCH_STR "x86"
#elif defined(CPU_ARCH_ARM32)
#define CPU_ARCH_STR "arm32"
#elif defined(CPU_ARCH_ARM64)
#define CPU_ARCH_STR "arm64"
#elif defined(CPU_ARCH_RISCV64)
#define CPU_ARCH_STR "riscv64"
#else
#define CPU_ARCH_STR "Unknown"
#endif
// OS detection.
#if defined(_WIN32)
#define TARGET_OS_STR "Windows"
#elif defined(__ANDROID__)
#define TARGET_OS_STR "Android"
#elif defined(__linux__)
#define TARGET_OS_STR "Linux"
#elif defined(__FreeBSD__)
#define TARGET_OS_STR "FreeBSD"
#elif defined(__APPLE__)
#define TARGET_OS_STR "macOS"
#else
#define TARGET_OS_STR "Unknown"
#endif
// Host page sizes.
#if defined(__APPLE__) && defined(__aarch64__)
static constexpr u32 HOST_PAGE_SIZE = 0x4000;
@ -99,16 +180,6 @@ static constexpr u32 HOST_PAGE_MASK = HOST_PAGE_SIZE - 1;
static constexpr u32 HOST_PAGE_SHIFT = 12;
#endif
// Enable use of static_assert in constexpr if
template<class T>
struct dependent_false : std::false_type
{
};
template<int T>
struct dependent_int_false : std::false_type
{
};
// Zero-extending helper
template<typename TReturn, typename TValue>
ALWAYS_INLINE constexpr TReturn ZeroExtend(TValue value)
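This commit renames the architecture macros (CPU_X64 → CPU_ARCH_X64, CPU_AARCH64 → CPU_ARCH_ARM64, and so on), replaces SYSTEM_STR with TARGET_OS_STR, and moves the whole block into common/types.h, deleting common/platform.h. A minimal usage sketch of the new names; the helper below is illustrative and not part of the repository:

#include "common/types.h" // the macros below moved here from "common/platform.h"
#include <cstdio>

static void PrintBuildTarget()
{
  // TARGET_OS_STR replaces SYSTEM_STR; the ARM values of CPU_ARCH_STR are now lowercase ("arm32", "arm64").
  std::printf("Running on %s (%s), host page size %u bytes\n", TARGET_OS_STR, CPU_ARCH_STR, HOST_PAGE_SIZE);

#if defined(CPU_ARCH_X64)
  // Architecture-specific paths now test CPU_ARCH_* instead of the old CPU_* names.
  std::printf("x64-specific path selected\n");
#endif
}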

View file

@ -23,7 +23,6 @@
#include "common/log.h"
#include "common/md5_digest.h"
#include "common/path.h"
#include "common/platform.h"
#include "common/scoped_guard.h"
#include "common/small_string.h"
#include "common/string_util.h"
@ -231,7 +230,7 @@ std::unique_lock<std::recursive_mutex> Achievements::GetLock()
std::string Achievements::GetUserAgent()
{
return fmt::format("DuckStation for {} ({}) {}", SYSTEM_STR, CPU_ARCH_STR, g_scm_tag_str);
return fmt::format("DuckStation for {} ({}) {}", TARGET_OS_STR, CPU_ARCH_STR, g_scm_tag_str);
}
void Achievements::ReportError(const std::string_view& sv)

View file

@ -22,7 +22,6 @@
#include "common/file_system.h"
#include "common/heap_array.h"
#include "common/log.h"
#include "common/platform.h"
#include "imgui.h"
@ -30,7 +29,7 @@
#include <vector>
Log_SetChannel(CDROM);
#if defined(CPU_X64)
#if defined(CPU_ARCH_X64)
#include <emmintrin.h>
#endif
@ -3074,7 +3073,7 @@ static s16 GetPeakVolume(const u8* raw_sector, u8 channel)
{
static constexpr u32 NUM_SAMPLES = CDImage::RAW_SECTOR_SIZE / sizeof(s16);
#if defined(CPU_X64)
#if defined(CPU_ARCH_X64)
static_assert(Common::IsAlignedPow2(NUM_SAMPLES, 8));
const u8* current_ptr = raw_sector;
__m128i v_peak = _mm_set1_epi16(0);

View file

@ -36,7 +36,7 @@ static constexpr u32 INVALIDATE_THRESHOLD_TO_DISABLE_LINKING = 10;
#define USE_STATIC_CODE_BUFFER 1
#endif
#if defined(CPU_AARCH32)
#if defined(CPU_ARCH_ARM32)
// Use a smaller code buffer size on AArch32 to have a better chance of being in range.
static constexpr u32 RECOMPILER_CODE_CACHE_SIZE = 16 * 1024 * 1024;
static constexpr u32 RECOMPILER_FAR_CODE_CACHE_SIZE = 8 * 1024 * 1024;

View file

@ -993,7 +993,7 @@ void CodeGenerator::BlockPrologue()
void CodeGenerator::BlockEpilogue()
{
#if defined(_DEBUG) && defined(CPU_X64)
#if defined(_DEBUG) && defined(CPU_ARCH_X64)
m_emit->nop();
#endif
@ -1007,7 +1007,7 @@ void CodeGenerator::BlockEpilogue()
void CodeGenerator::InstructionPrologue(const CodeBlockInstruction& cbi, TickCount cycles,
bool force_sync /* = false */)
{
#if defined(_DEBUG) && defined(CPU_X64)
#if defined(_DEBUG) && defined(CPU_ARCH_X64)
m_emit->nop();
#endif

View file

@ -198,9 +198,9 @@ struct Value
static Value FromConstantU64(u64 value) { return FromConstant(value, RegSize_64); }
static Value FromConstantPtr(const void* pointer)
{
#if defined(CPU_AARCH64) || defined(CPU_X64)
#if defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_X64)
return FromConstant(static_cast<u64>(reinterpret_cast<uintptr_t>(pointer)), RegSize_64);
#elif defined(CPU_AARCH32)
#elif defined(CPU_ARCH_ARM32)
return FromConstant(static_cast<u32>(reinterpret_cast<uintptr_t>(pointer)), RegSize_32);
#else
return FromConstant(0, RegSize_32);

View file

@ -2,10 +2,9 @@
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "common/platform.h"
#include "cpu_types.h"
#if defined(CPU_X64)
#if defined(CPU_ARCH_X64)
// We need to include windows.h before xbyak does.
#ifdef _WIN32
@ -15,13 +14,13 @@
#define XBYAK_NO_OP_NAMES 1
#include "xbyak.h"
#elif defined(CPU_AARCH32)
#elif defined(CPU_ARCH_ARM32)
#include "vixl/aarch32/constants-aarch32.h"
#include "vixl/aarch32/instructions-aarch32.h"
#include "vixl/aarch32/macro-assembler-aarch32.h"
#elif defined(CPU_AARCH64)
#elif defined(CPU_ARCH_ARM64)
#include "vixl/aarch64/constants-aarch64.h"
#include "vixl/aarch64/macro-assembler-aarch64.h"
@ -64,7 +63,7 @@ enum class Condition : u8
Zero
};
#if defined(CPU_X64)
#if defined(CPU_ARCH_X64)
using HostReg = unsigned;
using CodeEmitter = Xbyak::CodeGenerator;
@ -92,7 +91,7 @@ constexpr u32 CODE_STORAGE_ALIGNMENT = 4096;
#error Unknown ABI.
#endif
#elif defined(CPU_AARCH32)
#elif defined(CPU_ARCH_ARM32)
using HostReg = unsigned;
using CodeEmitter = vixl::aarch32::MacroAssembler;
@ -111,7 +110,7 @@ constexpr u32 MAX_FAR_HOST_BYTES_PER_INSTRUCTION = 128;
// Alignment of code storage.
constexpr u32 CODE_STORAGE_ALIGNMENT = 4096;
#elif defined(CPU_AARCH64)
#elif defined(CPU_ARCH_ARM64)
using HostReg = unsigned;
using CodeEmitter = vixl::aarch64::MacroAssembler;
@ -130,7 +129,7 @@ constexpr u32 MAX_FAR_HOST_BYTES_PER_INSTRUCTION = 128;
// Alignment of code storage.
constexpr u32 CODE_STORAGE_ALIGNMENT = 4096;
#elif defined(CPU_RISCV64)
#elif defined(CPU_ARCH_RISCV64)
using HostReg = unsigned;

View file

@ -10,15 +10,14 @@
#include "common/assert.h"
#include "common/log.h"
#include "common/make_array.h"
#include "common/platform.h"
#include <algorithm>
Log_SetChannel(GPU_SW);
#if defined(CPU_X64)
#if defined(CPU_ARCH_X64)
#include <emmintrin.h>
#elif defined(CPU_AARCH64)
#elif defined(CPU_ARCH_ARM64)
#ifdef _MSC_VER
#include <arm64_neon.h>
#else
@ -163,7 +162,7 @@ ALWAYS_INLINE void CopyOutRow16<GPUTexture::Format::RGBA5551, u16>(const u16* sr
{
u32 col = 0;
#if defined(CPU_X64)
#if defined(CPU_ARCH_X64)
const u32 aligned_width = Common::AlignDownPow2(width, 8);
for (; col < aligned_width; col += 8)
{
@ -177,7 +176,7 @@ ALWAYS_INLINE void CopyOutRow16<GPUTexture::Format::RGBA5551, u16>(const u16* sr
_mm_storeu_si128(reinterpret_cast<__m128i*>(dst_ptr), value);
dst_ptr += 8;
}
#elif defined(CPU_AARCH64)
#elif defined(CPU_ARCH_ARM64)
const u32 aligned_width = Common::AlignDownPow2(width, 8);
for (; col < aligned_width; col += 8)
{
@ -202,7 +201,7 @@ ALWAYS_INLINE void CopyOutRow16<GPUTexture::Format::RGB565, u16>(const u16* src_
{
u32 col = 0;
#if defined(CPU_X64)
#if defined(CPU_ARCH_X64)
const u32 aligned_width = Common::AlignDownPow2(width, 8);
for (; col < aligned_width; col += 8)
{
@ -217,7 +216,7 @@ ALWAYS_INLINE void CopyOutRow16<GPUTexture::Format::RGB565, u16>(const u16* src_
_mm_storeu_si128(reinterpret_cast<__m128i*>(dst_ptr), value);
dst_ptr += 8;
}
#elif defined(CPU_AARCH64)
#elif defined(CPU_ARCH_ARM64)
const u32 aligned_width = Common::AlignDownPow2(width, 8);
const uint16x8_t single_mask = vdupq_n_u16(0x1F);
for (; col < aligned_width; col += 8)

View file

@ -41,9 +41,9 @@
#include <span>
#include <unordered_map>
#if defined(CPU_X64)
#if defined(CPU_ARCH_X64)
#include <emmintrin.h>
#elif defined(CPU_AARCH64)
#elif defined(CPU_ARCH_ARM64)
#ifdef _MSC_VER
#include <arm64_neon.h>
#else
@ -66,7 +66,7 @@ static void Draw();
static std::tuple<float, float> GetMinMax(std::span<const float> values)
{
#if defined(CPU_X64)
#if defined(CPU_ARCH_X64)
__m128 vmin(_mm_loadu_ps(values.data()));
__m128 vmax(vmin);
@ -76,8 +76,8 @@ static std::tuple<float, float> GetMinMax(std::span<const float> values)
for (; i < aligned_count; i += 4)
{
const __m128 v(_mm_loadu_ps(&values[i]));
vmin = _mm_min_ps(v);
vmax = _mm_max_ps(v);
vmin = _mm_min_ps(vmin, v);
vmax = _mm_max_ps(vmax, v);
}
#ifdef _MSC_VER
@ -94,7 +94,7 @@ static std::tuple<float, float> GetMinMax(std::span<const float> values)
}
return std::tie(min, max);
#elif defined(CPU_AARCH64)
#elif defined(CPU_ARCH_ARM64)
float32x4_t vmin(vld1q_f32(values.data()));
float32x4_t vmax(vmin);
@ -104,8 +104,8 @@ static std::tuple<float, float> GetMinMax(std::span<const float> values)
for (; i < aligned_count; i += 4)
{
const float32x4_t v(vld1q_f32(&values[i]));
vmin = vminq_f32(v);
vmax = vmaxq_f32(v);
vmin = vminq_f32(vmin, v);
vmax = vmaxq_f32(vmax, v);
}
float min = vminvq_f32(vmin);
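The only functional change in this file is a bug fix: each newly loaded vector has to be folded into the running accumulators (vmin = _mm_min_ps(vmin, v)), otherwise earlier elements are discarded. A standalone sketch of the corrected SSE reduction pattern; simplified and not the exact DuckStation routine, the function name and tail handling are illustrative:

#include <emmintrin.h>
#include <algorithm>
#include <cstddef>
#include <utility>

// Sketch: running min/max over a float array, four lanes at a time (requires count >= 4).
static std::pair<float, float> MinMaxSSE(const float* values, size_t count)
{
  __m128 vmin = _mm_loadu_ps(values);
  __m128 vmax = vmin;
  size_t i = 4;
  const size_t aligned_count = count & ~size_t(3);
  for (; i < aligned_count; i += 4)
  {
    const __m128 v = _mm_loadu_ps(&values[i]);
    vmin = _mm_min_ps(vmin, v); // fold into the accumulator, not overwrite it
    vmax = _mm_max_ps(vmax, v);
  }

  // Horizontal reduction of the four lanes, then a scalar tail for the remainder.
  alignas(16) float mins[4], maxs[4];
  _mm_store_ps(mins, vmin);
  _mm_store_ps(maxs, vmax);
  float min = std::min({mins[0], mins[1], mins[2], mins[3]});
  float max = std::max({maxs[0], maxs[1], maxs[2], maxs[3]});
  for (; i < count; i++)
  {
    min = std::min(min, values[i]);
    max = std::max(max, values[i]);
  }
  return {min, max};
}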

View file

@ -5,14 +5,13 @@
#include "common/file_system.h"
#include "common/log.h"
#include "common/path.h"
#include "common/platform.h"
#include "common/string_util.h"
#include "common/timer.h"
#include "fmt/format.h"
#include "host.h"
#include "settings.h"
#include "xxhash.h"
#if defined(CPU_X86) || defined(CPU_X64)
#if defined(CPU_ARCH_X86) || defined(CPU_ARCH_X64)
#include "xxh_x86dispatch.h"
#endif
#include <cinttypes>

View file

@ -7,7 +7,6 @@
#include "common/assert.h"
#include "common/log.h"
#include "common/make_array.h"
#include "common/platform.h"
#include "common/timer.h"
#include <algorithm>
#include <cmath>
@ -23,7 +22,7 @@
#include <arm64_neon.h>
#elif defined(__aarch64__)
#include <arm_neon.h>
#elif defined(CPU_X86) || defined(CPU_X64)
#elif defined(CPU_ARCH_X86) || defined(CPU_ARCH_X64)
#include <emmintrin.h>
#endif
@ -365,7 +364,7 @@ void AudioStream::EndWrite(u32 num_frames)
static constexpr float S16_TO_FLOAT = 1.0f / 32767.0f;
static constexpr float FLOAT_TO_S16 = 32767.0f;
#if defined(CPU_AARCH64)
#if defined(CPU_ARCH_ARM64)
static void S16ChunkToFloat(const s32* src, float* dst)
{
@ -418,7 +417,7 @@ static void FloatChunkToS16(s32* dst, const float* src, uint size)
}
}
#elif defined(CPU_X86) || defined(CPU_X64)
#elif defined(CPU_ARCH_X86) || defined(CPU_ARCH_X64)
static void S16ChunkToFloat(const s32* src, float* dst)
{
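The renamed guards select between the NEON (CPU_ARCH_ARM64) and SSE (CPU_ARCH_X86/CPU_ARCH_X64) sample-format converters. For orientation, a scalar sketch of what those converters compute, using the same scale constants; the "s16 samples widened to s32" layout and the explicit count parameter are assumptions from the signatures, not confirmed by the hunk:

#include "common/types.h"

static constexpr float S16_TO_FLOAT = 1.0f / 32767.0f;
static constexpr float FLOAT_TO_S16 = 32767.0f;

// Sketch: scalar reference for the SIMD chunk converters above.
static void S16ChunkToFloatScalar(const s32* src, float* dst, u32 count)
{
  for (u32 i = 0; i < count; i++)
    dst[i] = static_cast<float>(src[i]) * S16_TO_FLOAT;
}

static void FloatChunkToS16Scalar(s32* dst, const float* src, u32 count)
{
  for (u32 i = 0; i < count; i++)
    dst[i] = static_cast<s32>(src[i] * FLOAT_TO_S16);
}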

View file

@ -11,7 +11,6 @@
#include "common/hash_combine.h"
#include "common/log.h"
#include "common/path.h"
#include "common/platform.h"
#include "common/string_util.h"
#include "fmt/format.h"
@ -448,7 +447,7 @@ bool CDImageCHD::IsPrecached() const
ALWAYS_INLINE static void CopyAndSwap(void* dst_ptr, const u8* src_ptr, u32 data_size)
{
u8* dst_ptr_byte = static_cast<u8*>(dst_ptr);
#if defined(CPU_X64) || defined(CPU_AARCH64)
#if defined(CPU_ARCH_X64) || defined(CPU_ARCH_ARM64)
const u32 num_values = data_size / 8;
for (u32 i = 0; i < num_values; i++)
{
@ -459,7 +458,7 @@ ALWAYS_INLINE static void CopyAndSwap(void* dst_ptr, const u8* src_ptr, u32 data
src_ptr += sizeof(value);
dst_ptr_byte += sizeof(value);
}
#elif defined(CPU_X86) || defined(CPU_ARM)
#elif defined(CPU_ARCH_X86) || defined(CPU_ARCH_ARM32)
const u32 num_values = data_size / 4;
for (u32 i = 0; i < num_values; i++)
{
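CopyAndSwap copies sector data in pointer-width chunks while swapping bytes, which is why the guard now keys on the renamed 64-bit (CPU_ARCH_X64/CPU_ARCH_ARM64) versus 32-bit (CPU_ARCH_X86/CPU_ARCH_ARM32) macros. A sketch of one plausible 64-bit chunk loop, assuming the intent is to byte-swap each 16-bit sample (CHD stores CD audio big-endian); the elided loop body may differ:

#include "common/types.h"
#include <cstdint>
#include <cstring>

// Sketch only: swap the bytes of each 16-bit value, eight bytes per iteration.
static void CopyAndSwap16(void* dst_ptr, const u8* src_ptr, u32 data_size)
{
  u8* dst = static_cast<u8*>(dst_ptr);
  const u32 num_values = data_size / 8;
  for (u32 i = 0; i < num_values; i++)
  {
    u64 value;
    std::memcpy(&value, src_ptr, sizeof(value));
    value = ((value >> 8) & UINT64_C(0x00FF00FF00FF00FF)) | ((value << 8) & UINT64_C(0xFF00FF00FF00FF00));
    std::memcpy(dst, &value, sizeof(value));
    src_ptr += sizeof(value);
    dst += sizeof(value);
  }
}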

View file

@ -5,7 +5,6 @@
#include "common/align.h"
#include "common/assert.h"
#include "common/log.h"
#include "common/platform.h"
#include <algorithm>
Log_SetChannel(JitCodeBuffer);
@ -209,7 +208,7 @@ void JitCodeBuffer::CommitCode(u32 length)
if (length == 0)
return;
#if defined(CPU_AARCH32) || defined(CPU_AARCH64) || defined(CPU_RISCV64)
#if defined(CPU_ARCH_ARM32) || defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)
// ARM instruction and data caches are not coherent, so we need to flush after every block.
FlushInstructionCache(m_free_code_ptr, length);
#endif
@ -224,7 +223,7 @@ void JitCodeBuffer::CommitFarCode(u32 length)
if (length == 0)
return;
#if defined(CPU_AARCH32) || defined(CPU_AARCH64) || defined(CPU_RISCV64)
#if defined(CPU_ARCH_ARM32) || defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)
// ARM instruction and data caches are not coherent, so we need to flush after every block.
FlushInstructionCache(m_free_far_code_ptr, length);
#endif
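The guard rename does not change behaviour: on ARM and RISC-V the instruction cache is not kept coherent with data writes, so freshly emitted code must be explicitly flushed before it is executed. A minimal sketch of such a flush using the GCC/Clang builtin; illustrative only, the repository's FlushInstructionCache may be implemented differently (e.g. via an OS call on Windows or Apple platforms):

#include "common/types.h"

// Sketch: flush newly written JIT code on targets without coherent I/D caches.
static void FlushNewCode(void* start, u32 length)
{
#if defined(CPU_ARCH_ARM32) || defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)
  char* const begin = static_cast<char*>(start);
  __builtin___clear_cache(begin, begin + length);
#else
  // x86 keeps instruction fetch coherent with stores; no explicit flush needed.
  static_cast<void>(start);
  static_cast<void>(length);
#endif
}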

View file

@ -3,7 +3,6 @@
#include "page_fault_handler.h"
#include "common/log.h"
#include "common/platform.h"
#include <algorithm>
#include <cstring>
#include <mutex>
@ -36,7 +35,7 @@ static std::vector<RegisteredHandler> m_handlers;
static std::mutex m_handler_lock;
static thread_local bool s_in_handler;
#if defined(CPU_AARCH32)
#if defined(CPU_ARCH_ARM32)
static bool IsStoreInstruction(const void* ptr)
{
u32 bits;
@ -46,7 +45,7 @@ static bool IsStoreInstruction(const void* ptr)
return false;
}
#elif defined(CPU_AARCH64)
#elif defined(CPU_ARCH_ARM64)
static bool IsStoreInstruction(const void* ptr)
{
u32 bits;
@ -81,7 +80,7 @@ static bool IsStoreInstruction(const void* ptr)
return false;
}
}
#elif defined(CPU_RISCV64)
#elif defined(CPU_ARCH_RISCV64)
static bool IsStoreInstruction(const void* ptr)
{
u32 bits;
@ -91,7 +90,7 @@ static bool IsStoreInstruction(const void* ptr)
}
#endif
#if defined(_WIN32) && (defined(CPU_X64) || defined(CPU_AARCH64))
#if defined(_WIN32) && (defined(CPU_ARCH_X64) || defined(CPU_ARCH_ARM64))
static PVOID s_veh_handle;
static LONG ExceptionHandler(PEXCEPTION_POINTERS exi)
@ -142,16 +141,16 @@ static void SIGSEGVHandler(int sig, siginfo_t* info, void* ctx)
#if defined(__linux__) || defined(__ANDROID__)
void* const exception_address = reinterpret_cast<void*>(info->si_addr);
#if defined(CPU_X64)
#if defined(CPU_ARCH_X64)
void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.gregs[REG_RIP]);
const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext.gregs[REG_ERR] & 2) != 0;
#elif defined(CPU_AARCH32)
#elif defined(CPU_ARCH_ARM32)
void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.arm_pc);
const bool is_write = IsStoreInstruction(exception_pc);
#elif defined(CPU_AARCH64)
#elif defined(CPU_ARCH_ARM64)
void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.pc);
const bool is_write = IsStoreInstruction(exception_pc);
#elif defined(CPU_RISCV64)
#elif defined(CPU_ARCH_RISCV64)
void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.__gregs[REG_PC]);
const bool is_write = IsStoreInstruction(exception_pc);
#else
@ -161,12 +160,12 @@ static void SIGSEGVHandler(int sig, siginfo_t* info, void* ctx)
#elif defined(__APPLE__)
#if defined(CPU_X64)
#if defined(CPU_ARCH_X64)
void* const exception_address =
reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__faultvaddr);
void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__ss.__rip);
const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__err & 2) != 0;
#elif defined(CPU_AARCH64)
#elif defined(CPU_ARCH_ARM64)
void* const exception_address = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__far);
void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__ss.__pc);
const bool is_write = IsStoreInstruction(exception_pc);
@ -178,11 +177,11 @@ static void SIGSEGVHandler(int sig, siginfo_t* info, void* ctx)
#elif defined(__FreeBSD__)
#if defined(CPU_X64)
#if defined(CPU_ARCH_X64)
void* const exception_address = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_addr);
void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_rip);
const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_err & 2) != 0;
#elif defined(CPU_AARCH64)
#elif defined(CPU_ARCH_ARM64)
void* const exception_address = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__far);
void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__ss.__pc);
const bool is_write = IsStoreInstruction(exception_pc);
@ -238,7 +237,7 @@ bool InstallHandler(const void* owner, void* start_pc, u32 code_size, Callback c
if (was_empty)
{
#if defined(_WIN32) && (defined(CPU_X64) || defined(CPU_AARCH64))
#if defined(_WIN32) && (defined(CPU_ARCH_X64) || defined(CPU_ARCH_ARM64))
s_veh_handle = AddVectoredExceptionHandler(1, ExceptionHandler);
if (!s_veh_handle)
{
@ -284,7 +283,7 @@ bool RemoveHandler(const void* owner)
if (m_handlers.empty())
{
#if defined(_WIN32) && (defined(CPU_X64) || defined(CPU_AARCH64))
#if defined(_WIN32) && (defined(CPU_ARCH_X64) || defined(CPU_ARCH_ARM64))
RemoveVectoredExceptionHandler(s_veh_handle);
s_veh_handle = nullptr;
#elif defined(USE_SIGSEGV)
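On the POSIX paths above, the handler is driven by SIGSEGV. A minimal sketch of registering a handler with the same three-argument signature via sigaction, for orientation; this is not the repository's install code:

#include <signal.h>
#include <cstring>

// Sketch: the real handler would perform the architecture-specific PC / fault-address
// extraction shown in the hunk above.
static void SegfaultHandler(int sig, siginfo_t* info, void* ctx)
{
  static_cast<void>(sig);
  static_cast<void>(info);
  static_cast<void>(ctx);
}

static bool InstallSegfaultHandler()
{
  struct sigaction sa;
  std::memset(&sa, 0, sizeof(sa));
  sa.sa_flags = SA_SIGINFO; // request siginfo_t/ucontext so the handler can inspect the fault
  sa.sa_sigaction = SegfaultHandler;
  return sigaction(SIGSEGV, &sa, nullptr) == 0;
}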