Merge pull request #1840 from stenzek/map-jit

CPU/Recompiler: Use MAP_JIT for code space on Apple Silicon
This commit is contained in:
Connor McLaughlin 2021-03-21 02:51:45 +10:00 committed by GitHub
commit a6a3590722
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
3 changed files with 63 additions and 6 deletions

View file

@ -13,6 +13,11 @@ Log_SetChannel(JitCodeBuffer);
#include <sys/mman.h> #include <sys/mman.h>
#endif #endif
#if defined(__APPLE__) && defined(__aarch64__)
// pthread_jit_write_protect_np()
#include <pthread.h>
#endif
JitCodeBuffer::JitCodeBuffer() = default; JitCodeBuffer::JitCodeBuffer() = default;
JitCodeBuffer::JitCodeBuffer(u32 size, u32 far_code_size) JitCodeBuffer::JitCodeBuffer(u32 size, u32 far_code_size)
@ -46,8 +51,13 @@ bool JitCodeBuffer::Allocate(u32 size /* = 64 * 1024 * 1024 */, u32 far_code_siz
return false; return false;
} }
#elif defined(__linux__) || defined(__ANDROID__) || defined(__APPLE__) || defined(__HAIKU__) || defined(__FreeBSD__) #elif defined(__linux__) || defined(__ANDROID__) || defined(__APPLE__) || defined(__HAIKU__) || defined(__FreeBSD__)
m_code_ptr = static_cast<u8*>( int flags = MAP_PRIVATE | MAP_ANONYMOUS;
mmap(nullptr, m_total_size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)); #if defined(__APPLE__) && defined(__aarch64__)
// MAP_JIT and toggleable write protection is required on Apple Silicon.
flags |= MAP_JIT;
#endif
m_code_ptr = static_cast<u8*>(mmap(nullptr, m_total_size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, -1, 0));
if (!m_code_ptr) if (!m_code_ptr)
{ {
Log_ErrorPrintf("mmap(RWX, %u) for internal buffer failed: %d", m_total_size, errno); Log_ErrorPrintf("mmap(RWX, %u) for internal buffer failed: %d", m_total_size, errno);
@ -195,6 +205,8 @@ void JitCodeBuffer::CommitFarCode(u32 length)
void JitCodeBuffer::Reset() void JitCodeBuffer::Reset()
{ {
WriteProtect(false);
m_free_code_ptr = m_code_ptr + m_guard_size; m_free_code_ptr = m_code_ptr + m_guard_size;
m_code_used = 0; m_code_used = 0;
std::memset(m_free_code_ptr, 0, m_code_size); std::memset(m_free_code_ptr, 0, m_code_size);
@ -207,6 +219,8 @@ void JitCodeBuffer::Reset()
std::memset(m_free_far_code_ptr, 0, m_far_code_size); std::memset(m_free_far_code_ptr, 0, m_far_code_size);
FlushInstructionCache(m_free_far_code_ptr, m_far_code_size); FlushInstructionCache(m_free_far_code_ptr, m_far_code_size);
} }
WriteProtect(true);
} }
void JitCodeBuffer::Align(u32 alignment, u8 padding_value) void JitCodeBuffer::Align(u32 alignment, u8 padding_value)
@ -231,3 +245,26 @@ void JitCodeBuffer::FlushInstructionCache(void* address, u32 size)
#error Unknown platform. #error Unknown platform.
#endif #endif
} }
#if defined(__APPLE__) && defined(__aarch64__)
/// Toggles write protection on the JIT code space (Apple Silicon W^X).
/// Pass false before writing generated code, true once writing is done.
/// No-op on hardware/kernels where pthread_jit_write_protect_np() is not required.
void JitCodeBuffer::WriteProtect(bool enabled)
{
  // Query support exactly once; C++ guarantees thread-safe initialization
  // of this local static, so the log line is emitted at most one time.
  static const bool s_needs_write_protect = []() {
    const bool needed = (pthread_jit_write_protect_supported_np() != 0);
    if (needed)
      Log_InfoPrint("pthread_jit_write_protect_np() will be used before writing to JIT space.");
    return needed;
  }();

  if (!s_needs_write_protect)
    return;

  // 1 => executable/read-only, 0 => writable for the calling thread.
  pthread_jit_write_protect_np(enabled ? 1 : 0);
}
#endif

View file

@ -29,6 +29,13 @@ public:
/// Flushes the instruction cache on the host for the specified range. /// Flushes the instruction cache on the host for the specified range.
static void FlushInstructionCache(void* address, u32 size); static void FlushInstructionCache(void* address, u32 size);
/// For Apple Silicon - Toggles write protection on the JIT space.
#if defined(__APPLE__) && defined(__aarch64__)
static void WriteProtect(bool enabled);
#else
ALWAYS_INLINE static void WriteProtect(bool enabled) {}
#endif
private: private:
u8* m_code_ptr = nullptr; u8* m_code_ptr = nullptr;
u8* m_free_code_ptr = nullptr; u8* m_free_code_ptr = nullptr;
@ -45,4 +52,3 @@ private:
u32 m_old_protection = 0; u32 m_old_protection = 0;
bool m_owns_buffer = false; bool m_owns_buffer = false;
}; };

View file

@ -286,6 +286,8 @@ void Execute()
void CompileDispatcher() void CompileDispatcher()
{ {
s_code_buffer.WriteProtect(false);
{ {
Recompiler::CodeGenerator cg(&s_code_buffer); Recompiler::CodeGenerator cg(&s_code_buffer);
s_asm_dispatcher = cg.CompileDispatcher(); s_asm_dispatcher = cg.CompileDispatcher();
@ -294,6 +296,8 @@ void CompileDispatcher()
Recompiler::CodeGenerator cg(&s_code_buffer); Recompiler::CodeGenerator cg(&s_code_buffer);
s_single_block_asm_dispatcher = cg.CompileSingleBlockDispatcher(); s_single_block_asm_dispatcher = cg.CompileSingleBlockDispatcher();
} }
s_code_buffer.WriteProtect(true);
} }
CodeBlock::HostCodePointer* GetFastMapPointer() CodeBlock::HostCodePointer* GetFastMapPointer()
@ -613,8 +617,12 @@ bool CompileBlock(CodeBlock* block)
Flush(); Flush();
} }
s_code_buffer.WriteProtect(false);
Recompiler::CodeGenerator codegen(&s_code_buffer); Recompiler::CodeGenerator codegen(&s_code_buffer);
if (!codegen.CompileBlock(block, &block->host_code, &block->host_code_size)) const bool compile_result = codegen.CompileBlock(block, &block->host_code, &block->host_code_size);
s_code_buffer.WriteProtect(true);
if (!compile_result)
{ {
Log_ErrorPrintf("Failed to compile host code for block at 0x%08X", block->key.GetPC()); Log_ErrorPrintf("Failed to compile host code for block at 0x%08X", block->key.GetPC());
return false; return false;
@ -839,7 +847,10 @@ Common::PageFaultHandler::HandlerResult MMapPageFaultHandler(void* exception_pc,
} }
// found it, do fixup // found it, do fixup
if (Recompiler::CodeGenerator::BackpatchLoadStore(lbi)) s_code_buffer.WriteProtect(false);
const bool backpatch_result = Recompiler::CodeGenerator::BackpatchLoadStore(lbi);
s_code_buffer.WriteProtect(true);
if (backpatch_result)
{ {
// remove the backpatch entry since we won't be coming back to this one // remove the backpatch entry since we won't be coming back to this one
block->loadstore_backpatch_info.erase(bpi_iter); block->loadstore_backpatch_info.erase(bpi_iter);
@ -880,7 +891,10 @@ Common::PageFaultHandler::HandlerResult LUTPageFaultHandler(void* exception_pc,
if (lbi.host_pc == exception_pc) if (lbi.host_pc == exception_pc)
{ {
// found it, do fixup // found it, do fixup
if (Recompiler::CodeGenerator::BackpatchLoadStore(lbi)) s_code_buffer.WriteProtect(false);
const bool backpatch_result = Recompiler::CodeGenerator::BackpatchLoadStore(lbi);
s_code_buffer.WriteProtect(true);
if (backpatch_result)
{ {
// remove the backpatch entry since we won't be coming back to this one // remove the backpatch entry since we won't be coming back to this one
block->loadstore_backpatch_info.erase(bpi_iter); block->loadstore_backpatch_info.erase(bpi_iter);