diff --git a/src/common/memmap.cpp b/src/common/memmap.cpp
index 1cdd91a52..dcc3dfde1 100644
--- a/src/common/memmap.cpp
+++ b/src/common/memmap.cpp
@@ -13,6 +13,14 @@
 #if defined(_WIN32)
 #include "windows_headers.h"
+#elif defined(__APPLE__)
+#ifdef __aarch64__
+#include <pthread.h> // pthread_jit_write_protect_np()
+#endif
+#include <mach/mach_init.h>
+#include <mach/mach_port.h>
+#include <mach/mach_vm.h>
+#include <mach/vm_map.h>
 #elif !defined(__ANDROID__)
 #include <cerrno>
 #include <fcntl.h>
@@ -20,11 +28,6 @@
 #include <unistd.h>
 #endif
 
-#if defined(__APPLE__) && defined(__aarch64__)
-// pthread_jit_write_protect_np()
-#include <pthread.h>
-#endif
-
 Log_SetChannel(MemoryArena);
 
 #ifdef _WIN32
@@ -278,6 +281,176 @@ bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
   return true;
 }
 
+#elif defined(__APPLE__)
+
+bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
+{
+  DebugAssertMsg((size & (HOST_PAGE_SIZE - 1)) == 0, "Size is page aligned");
+
+  const kern_return_t res = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size,
+                                            false, static_cast<vm_prot_t>(mode));
+  if (res != KERN_SUCCESS) [[unlikely]]
+  {
+    ERROR_LOG("mach_vm_protect() failed: {}", res);
+    return false;
+  }
+
+  return true;
+}
+
+std::string MemMap::GetFileMappingName(const char* prefix)
+{
+  // The name is not actually used on this platform.
+  return {};
+}
+
+void* MemMap::CreateSharedMemory(const char* name, size_t size, Error* error)
+{
+  mach_vm_size_t vm_size = size;
+  mach_port_t port;
+  const kern_return_t res = mach_make_memory_entry_64(
+    mach_task_self(), &vm_size, 0, MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port, MACH_PORT_NULL);
+  if (res != KERN_SUCCESS)
+  {
+    Error::SetStringFmt(error, "mach_make_memory_entry_64() failed: {}", res);
+    return nullptr;
+  }
+
+  return reinterpret_cast<void*>(static_cast<uintptr_t>(port));
+}
+
+void MemMap::DestroySharedMemory(void* ptr)
+{
+  mach_port_deallocate(mach_task_self(), static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(ptr)));
+}
+
+void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
+{
+  mach_vm_address_t ptr = reinterpret_cast<mach_vm_address_t>(baseaddr);
+  const kern_return_t res = mach_vm_map(mach_task_self(), &ptr, size, 0, baseaddr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE,
+                                        static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(handle)), offset, FALSE,
+                                        static_cast<vm_prot_t>(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
+  if (res != KERN_SUCCESS)
+  {
+    ERROR_LOG("mach_vm_map() failed: {}", res);
+    return nullptr;
+  }
+
+  return reinterpret_cast<void*>(ptr);
+}
+
+void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
+{
+  const kern_return_t res = mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size);
+  if (res != KERN_SUCCESS)
+    Panic("Failed to unmap shared memory");
+}
+
+SharedMemoryMappingArea::SharedMemoryMappingArea() = default;
+
+SharedMemoryMappingArea::~SharedMemoryMappingArea()
+{
+  Destroy();
+}
+
+bool SharedMemoryMappingArea::Create(size_t size)
+{
+  AssertMsg(Common::IsAlignedPow2(size, HOST_PAGE_SIZE), "Size is page aligned");
+  Destroy();
+
+  const kern_return_t res =
+    mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&m_base_ptr), size, 0, VM_FLAGS_ANYWHERE,
+                MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
+  if (res != KERN_SUCCESS)
+  {
+    ERROR_LOG("mach_vm_map() failed: {}", res);
+    return false;
+  }
+
+  m_size = size;
+  m_num_pages = size / HOST_PAGE_SIZE;
+  return true;
+}
+
+void SharedMemoryMappingArea::Destroy()
+{
+  AssertMsg(m_num_mappings == 0, "No mappings left");
+
+  if (m_base_ptr &&
+      mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(m_base_ptr), m_size) != KERN_SUCCESS)
+  {
+    Panic("Failed to release shared memory area");
+  }
+
+  m_base_ptr = nullptr;
+  m_size = 0;
+  m_num_pages = 0;
+}
+
+u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
+                                 PageProtect mode)
+{
+  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
+
+  const kern_return_t res =
+    mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
+                static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(file_handle)), file_offset, false,
+                static_cast<vm_prot_t>(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
+  if (res != KERN_SUCCESS) [[unlikely]]
+  {
+    ERROR_LOG("mach_vm_map() failed: {}", res);
+    return nullptr;
+  }
+
+  m_num_mappings++;
+  return static_cast<u8*>(map_base);
+}
+
+bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
+{
+  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
+
+  const kern_return_t res =
+    mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
+                MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
+  if (res != KERN_SUCCESS) [[unlikely]]
+  {
+    ERROR_LOG("mach_vm_map() failed: {}", res);
+    return false;
+  }
+
+  m_num_mappings--;
+  return true;
+}
+
+#ifdef __aarch64__
+
+static thread_local int s_code_write_depth = 0;
+
+void MemMap::BeginCodeWrite()
+{
+  // DEBUG_LOG("BeginCodeWrite(): {}", s_code_write_depth);
+  if ((s_code_write_depth++) == 0)
+  {
+    // DEBUG_LOG("  pthread_jit_write_protect_np(0)");
+    pthread_jit_write_protect_np(0);
+  }
+}
+
+void MemMap::EndCodeWrite()
+{
+  // DEBUG_LOG("EndCodeWrite(): {}", s_code_write_depth);
+
+  DebugAssert(s_code_write_depth > 0);
+  if ((--s_code_write_depth) == 0)
+  {
+    // DEBUG_LOG("  pthread_jit_write_protect_np(1)");
+    pthread_jit_write_protect_np(1);
+  }
+}
+
+#endif
+
 #elif !defined(__ANDROID__)
 
 bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
@@ -354,7 +527,7 @@ void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
 
 void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
 {
-  if (mmap(baseaddr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
+  if (munmap(baseaddr, size) != 0)
     Panic("Failed to unmap shared memory");
 }
 
@@ -418,31 +591,3 @@ bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
 }
 
 #endif
-
-#if defined(__APPLE__) && defined(__aarch64__)
-
-static thread_local int s_code_write_depth = 0;
-
-void MemMap::BeginCodeWrite()
-{
-  // Log_DebugFmt("BeginCodeWrite(): {}", s_code_write_depth);
-  if ((s_code_write_depth++) == 0)
-  {
-    // Log_DebugPrint("  pthread_jit_write_protect_np(0)");
-    pthread_jit_write_protect_np(0);
-  }
-}
-
-void MemMap::EndCodeWrite()
-{
-  // Log_DebugFmt("EndCodeWrite(): {}", s_code_write_depth);
-
-  DebugAssert(s_code_write_depth > 0);
-  if ((--s_code_write_depth) == 0)
-  {
-    // Log_DebugPrint("  pthread_jit_write_protect_np(1)");
-    pthread_jit_write_protect_np(1);
-  }
-}
-
-#endif
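A minimal usage sketch of the new macOS shared-memory path, not part of the patch: it assumes the MemMap declarations from memmap.h below, and passes an empty name since GetFileMappingName() shows the name is unused on this platform.

  Error error;
  void* handle = MemMap::CreateSharedMemory("", 2 * HOST_PAGE_SIZE, &error); // Mach named entry smuggled as void*
  if (handle)
  {
    void* ptr = MemMap::MapSharedMemory(handle, 0, nullptr, 2 * HOST_PAGE_SIZE, PageProtect::ReadWrite);
    if (ptr)
    {
      std::memset(ptr, 0, 2 * HOST_PAGE_SIZE); // backing pages committed on first touch
      MemMap::UnmapSharedMemory(ptr, 2 * HOST_PAGE_SIZE);
    }
    MemMap::DestroySharedMemory(handle); // mach_port_deallocate() on the named entry
  }
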
diff --git a/src/common/memmap.h b/src/common/memmap.h
index fd713b528..288f62a6e 100644
--- a/src/common/memmap.h
+++ b/src/common/memmap.h
@@ -8,7 +8,7 @@
 #include <cstdio>
 #include <string>
 
-#ifdef _WIN32
+#if defined(_WIN32)
 
 // eww :/ but better than including windows.h
 enum class PageProtect : u32
@@ -19,6 +19,20 @@ enum class PageProtect : u32
   ReadExecute = 0x20,      // PAGE_EXECUTE_READ
   ReadWriteExecute = 0x40, // PAGE_EXECUTE_READWRITE
 };
+
+#elif defined(__APPLE__)
+
+#include <mach/vm_prot.h>
+
+enum class PageProtect : u32
+{
+  NoAccess = VM_PROT_NONE,
+  ReadOnly = VM_PROT_READ,
+  ReadWrite = VM_PROT_READ | VM_PROT_WRITE,
+  ReadExecute = VM_PROT_READ | VM_PROT_EXECUTE,
+  ReadWriteExecute = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
+};
+
 #else
 
 #include <sys/mman.h>
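Because PageProtect on macOS is now defined directly in terms of vm_prot_t bits, MemProtect() can forward the enum with a plain static_cast and no translation table. A hedged illustration, not part of the patch; code_ptr stands in for a hypothetical page-aligned allocation:

  static_assert(static_cast<vm_prot_t>(PageProtect::ReadWrite) == (VM_PROT_READ | VM_PROT_WRITE));
  MemMap::MemProtect(code_ptr, HOST_PAGE_SIZE, PageProtect::ReadExecute); // passed straight to mach_vm_protect()
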
diff --git a/src/util/jit_code_buffer.cpp b/src/util/jit_code_buffer.cpp
index 0f7a5f179..a463bf813 100644
--- a/src/util/jit_code_buffer.cpp
+++ b/src/util/jit_code_buffer.cpp
@@ -17,6 +17,10 @@ Log_SetChannel(JitCodeBuffer);
 #else
 #include <errno.h>
 #include <sys/mman.h>
+#ifdef __APPLE__
+#include <mach/mach_init.h>
+#include <mach/mach_vm.h>
+#endif
 #endif
 
 JitCodeBuffer::JitCodeBuffer() = default;
 
@@ -108,6 +112,26 @@ bool JitCodeBuffer::TryAllocateAt(const void* addr)
     return false;
   }
 
+  return true;
+#elif defined(__APPLE__) && !defined(__aarch64__)
+  kern_return_t ret = mach_vm_allocate(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&addr), m_total_size,
+                                       addr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE);
+  if (ret != KERN_SUCCESS)
+  {
+    ERROR_LOG("mach_vm_allocate() returned {}", ret);
+    return false;
+  }
+
+  ret = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(addr), m_total_size, false,
+                        VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
+  if (ret != KERN_SUCCESS)
+  {
+    ERROR_LOG("mach_vm_protect() returned {}", ret);
+    mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(addr), m_total_size);
+    return false;
+  }
+
+  m_code_ptr = static_cast<u8*>(const_cast<void*>(addr));
   return true;
 #elif defined(__linux__) || defined(__ANDROID__) || defined(__APPLE__) || defined(__HAIKU__) || defined(__FreeBSD__)
   int flags = MAP_PRIVATE | MAP_ANONYMOUS;
@@ -119,14 +143,11 @@ bool JitCodeBuffer::TryAllocateAt(const void* addr)
   // FreeBSD achieves the same with MAP_FIXED and MAP_EXCL.
   if (addr)
     flags |= MAP_FIXED | MAP_EXCL;
-#elif defined(__APPLE__) && defined(__aarch64__)
-  // MAP_JIT and toggleable write protection is required on Apple Silicon.
-  flags |= MAP_JIT;
 #elif defined(__APPLE__)
-  // MAP_FIXED is needed on x86 apparently.. hopefully there's nothing mapped at this address, otherwise we'll end up
-  // clobbering it..
+  // On ARM64, we need to use MAP_JIT, which means we can't use MAP_FIXED.
   if (addr)
-    flags |= MAP_FIXED;
+    return false;
+  flags |= MAP_JIT;
 #endif
 
   m_code_ptr =
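For context on the BeginCodeWrite()/EndCodeWrite() pair that moved into the new Apple section above: MAP_JIT mappings on Apple Silicon are per-thread either writable or executable, so emitters must bracket every code write. A sketch of the expected call pattern, where EmitBlock() is a hypothetical emitter, not from this patch:

  MemMap::BeginCodeWrite(); // depth 0 -> 1: pthread_jit_write_protect_np(0), buffer becomes writable
  EmitBlock(code_buffer);   // write instructions into the MAP_JIT region
  MemMap::EndCodeWrite();   // depth 1 -> 0: pthread_jit_write_protect_np(1), buffer executable again

The thread-local depth counter makes nested Begin/End pairs from helper functions safe; only the outermost pair toggles the protection.
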
@@ -230,6 +251,11 @@ void JitCodeBuffer::Destroy()
 #if defined(_WIN32)
   if (!VirtualFree(m_code_ptr, 0, MEM_RELEASE))
     ERROR_LOG("Failed to free code pointer {}", static_cast<void*>(m_code_ptr));
+#elif defined(__APPLE__) && !defined(__aarch64__)
+  const kern_return_t res =
+    mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(m_code_ptr), m_total_size);
+  if (res != KERN_SUCCESS)
+    ERROR_LOG("mach_vm_deallocate() failed: {}", res);
 #elif defined(__linux__) || defined(__ANDROID__) || defined(__APPLE__) || defined(__HAIKU__) || defined(__FreeBSD__)
   if (munmap(m_code_ptr, m_total_size) != 0)
     ERROR_LOG("Failed to free code pointer {}", static_cast<void*>(m_code_ptr));
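A note on branch ordering in Destroy(): the new #elif has to come before the generic POSIX branch, because that branch also matches __APPLE__ and would free a mach_vm_allocate()d buffer with munmap() rather than the matching mach_vm_deallocate(). Sketched for clarity, not part of the patch:

  #if defined(__APPLE__) && !defined(__aarch64__)
    // x86 macOS: allocated via mach_vm_allocate() -> freed via mach_vm_deallocate()
  #elif defined(__linux__) || defined(__ANDROID__) || defined(__APPLE__) || defined(__HAIKU__) || defined(__FreeBSD__)
    // aarch64 macOS falls through here: allocated via mmap(MAP_JIT) -> freed via munmap()
  #endif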