Bus: 16KB page compatibility for mmap fastmem

Stenzek 2023-09-20 22:57:24 +10:00
parent 5bbb7cf906
commit 3c68543491
23 changed files with 666 additions and 687 deletions

View file

@@ -29,6 +29,8 @@ add_library(common
log.cpp
log.h
make_array.h
memmap.cpp
memmap.h
md5_digest.cpp
md5_digest.h
memory_settings_interface.cpp
@@ -70,7 +72,7 @@ if(WIN32)
thirdparty/StackWalker.h
windows_headers.h
)
target_link_libraries(common PRIVATE winhttp.lib)
target_link_libraries(common PRIVATE winhttp.lib OneCore.lib)
endif()
if(MSVC)

View file

@@ -9,7 +9,7 @@
<ItemDefinitionGroup>
<Link>
<AdditionalDependencies>%(AdditionalDependencies);winhttp.lib</AdditionalDependencies>
<AdditionalDependencies>%(AdditionalDependencies);winhttp.lib;OneCore.lib</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
</Project>

View file

@@ -24,6 +24,7 @@
<ClInclude Include="log.h" />
<ClInclude Include="lru_cache.h" />
<ClInclude Include="make_array.h" />
<ClInclude Include="memmap.h" />
<ClInclude Include="memory_settings_interface.h" />
<ClInclude Include="md5_digest.h" />
<ClInclude Include="path.h" />
@@ -55,6 +56,7 @@
<ClCompile Include="image.cpp" />
<ClCompile Include="layered_settings_interface.cpp" />
<ClCompile Include="log.cpp" />
<ClCompile Include="memmap.cpp" />
<ClCompile Include="memory_settings_interface.cpp" />
<ClCompile Include="md5_digest.cpp" />
<ClCompile Include="minizip_helpers.cpp" />

View file

@@ -43,6 +43,7 @@
<ClInclude Include="build_timestamp.h" />
<ClInclude Include="sha1_digest.h" />
<ClInclude Include="fastjmp.h" />
<ClInclude Include="memmap.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="string.cpp" />
@@ -68,6 +69,7 @@
<ClCompile Include="threading.cpp" />
<ClCompile Include="sha1_digest.cpp" />
<ClCompile Include="fastjmp.cpp" />
<ClCompile Include="memmap.cpp" />
</ItemGroup>
<ItemGroup>
<Natvis Include="bitfield.natvis" />

src/common/memmap.cpp (new file, 400 lines)
View file

@@ -0,0 +1,400 @@
// SPDX-FileCopyrightText: 2019-2023 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "memmap.h"
#include "align.h"
#include "assert.h"
#include "log.h"
#include "string_util.h"
#include "fmt/format.h"
#if defined(_WIN32)
#include "windows_headers.h"
#elif !defined(__ANDROID__)
#include <cerrno>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#endif
Log_SetChannel(MemoryArena);
#ifdef _WIN32
bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
DebugAssert((size & (HOST_PAGE_SIZE - 1)) == 0);
DWORD old_protect;
if (!VirtualProtect(baseaddr, size, static_cast<DWORD>(mode), &old_protect))
{
Log_ErrorPrintf("VirtualProtect() failed with error %u", GetLastError());
return false;
}
return true;
}
std::string MemMap::GetFileMappingName(const char* prefix)
{
const unsigned pid = GetCurrentProcessId();
return fmt::format("{}_{}", prefix, pid);
}
void* MemMap::CreateSharedMemory(const char* name, size_t size)
{
return static_cast<void*>(CreateFileMappingW(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE,
static_cast<DWORD>(size >> 32), static_cast<DWORD>(size),
StringUtil::UTF8StringToWideString(name).c_str()));
}
void MemMap::DestroySharedMemory(void* ptr)
{
CloseHandle(static_cast<HANDLE>(ptr));
}
void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
{
void* ret = MapViewOfFileEx(static_cast<HANDLE>(handle), FILE_MAP_READ | FILE_MAP_WRITE,
static_cast<DWORD>(offset >> 32), static_cast<DWORD>(offset), size, baseaddr);
if (!ret)
return nullptr;
if (mode != PageProtect::ReadWrite)
{
DWORD old_prot;
if (!VirtualProtect(ret, size, static_cast<DWORD>(mode), &old_prot))
Panic("Failed to protect memory mapping");
}
return ret;
}
void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
{
if (!UnmapViewOfFile(baseaddr))
Panic("Failed to unmap shared memory");
}
SharedMemoryMappingArea::SharedMemoryMappingArea() = default;
SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
Destroy();
}
SharedMemoryMappingArea::PlaceholderMap::iterator SharedMemoryMappingArea::FindPlaceholder(size_t offset)
{
if (m_placeholder_ranges.empty())
return m_placeholder_ranges.end();
// this will give us the first range starting at or after the offset
auto it = m_placeholder_ranges.lower_bound(offset);
if (it == m_placeholder_ranges.end())
{
// nothing starts at or after the offset; check the last range
it = (++m_placeholder_ranges.rbegin()).base();
}
// does the range we found contain the offset?
if (offset >= it->first && offset < it->second)
return it;
// otherwise try the one before
if (it == m_placeholder_ranges.begin())
return m_placeholder_ranges.end();
--it;
if (offset >= it->first && offset < it->second)
return it;
else
return m_placeholder_ranges.end();
}
bool SharedMemoryMappingArea::Create(size_t size)
{
Destroy();
AssertMsg(Common::IsAlignedPow2(size, HOST_PAGE_SIZE), "Size is page aligned");
m_base_ptr = static_cast<u8*>(VirtualAlloc2(GetCurrentProcess(), nullptr, size, MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
PAGE_NOACCESS, nullptr, 0));
if (!m_base_ptr)
return false;
m_size = size;
m_num_pages = size / HOST_PAGE_SIZE;
m_placeholder_ranges.emplace(0, size);
return true;
}
void SharedMemoryMappingArea::Destroy()
{
AssertMsg(m_num_mappings == 0, "No mappings left");
// hopefully this will be okay, and we don't need to coalesce all the placeholders...
if (m_base_ptr && !VirtualFreeEx(GetCurrentProcess(), m_base_ptr, 0, MEM_RELEASE))
Panic("Failed to release shared memory area");
m_placeholder_ranges.clear();
m_base_ptr = nullptr;
m_size = 0;
m_num_pages = 0;
m_num_mappings = 0;
}
u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
PageProtect mode)
{
DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
const size_t map_offset = static_cast<u8*>(map_base) - m_base_ptr;
DebugAssert(Common::IsAlignedPow2(map_offset, HOST_PAGE_SIZE));
DebugAssert(Common::IsAlignedPow2(map_size, HOST_PAGE_SIZE));
// should be a placeholder. unless there's some other mapping we didn't free.
PlaceholderMap::iterator phit = FindPlaceholder(map_offset);
DebugAssertMsg(phit != m_placeholder_ranges.end(), "Page we're mapping is a placeholder");
DebugAssertMsg(map_offset >= phit->first && map_offset < phit->second, "Page is in returned placeholder range");
DebugAssertMsg((map_offset + map_size) <= phit->second, "Page range is in returned placeholder range");
// do we need to split to the left? (i.e. is there a placeholder before this range)
const size_t old_ph_end = phit->second;
if (map_offset != phit->first)
{
phit->second = map_offset;
// split it (i.e. left..start and start..end are now separated)
if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(phit->first), (map_offset - phit->first),
MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
{
Panic("Failed to left split placeholder for map");
}
}
else
{
// start of the placeholder is getting used, we'll split it right below if there's anything left over
m_placeholder_ranges.erase(phit);
}
// do we need to split to the right? (i.e. is there a placeholder after this range)
if ((map_offset + map_size) != old_ph_end)
{
// split out end..ph_end
m_placeholder_ranges.emplace(map_offset + map_size, old_ph_end);
if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(map_offset), map_size,
MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
{
Panic("Failed to right split placeholder for map");
}
}
// actually do the mapping, replacing the placeholder on the range
if (!MapViewOfFile3(static_cast<HANDLE>(file_handle), GetCurrentProcess(), map_base, file_offset, map_size,
MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0))
{
Log_ErrorPrintf("MapViewOfFile3() failed: %u", GetLastError());
return nullptr;
}
if (mode != PageProtect::ReadWrite)
{
DWORD old_prot;
if (!VirtualProtect(map_base, map_size, static_cast<DWORD>(mode), &old_prot))
Panic("Failed to protect memory mapping");
}
m_num_mappings++;
return static_cast<u8*>(map_base);
}
bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
const size_t map_offset = static_cast<u8*>(map_base) - m_base_ptr;
DebugAssert(Common::IsAlignedPow2(map_offset, HOST_PAGE_SIZE));
DebugAssert(Common::IsAlignedPow2(map_size, HOST_PAGE_SIZE));
// unmap the specified range
if (!UnmapViewOfFile2(GetCurrentProcess(), map_base, MEM_PRESERVE_PLACEHOLDER))
{
Log_ErrorPrintf("UnmapViewOfFile2() failed: %u", GetLastError());
return false;
}
// can we coalesce to the left?
PlaceholderMap::iterator left_it = (map_offset > 0) ? FindPlaceholder(map_offset - 1) : m_placeholder_ranges.end();
if (left_it != m_placeholder_ranges.end())
{
// the left placeholder should end at our start
DebugAssert(map_offset == left_it->second);
left_it->second = map_offset + map_size;
// combine placeholders before and the range we're unmapping, i.e. to the left
if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(left_it->first), left_it->second - left_it->first,
MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS))
{
Panic("Failed to coalesce placeholders left for unmap");
}
}
else
{
// this is a new placeholder
left_it = m_placeholder_ranges.emplace(map_offset, map_offset + map_size).first;
}
// can we coalesce to the right?
PlaceholderMap::iterator right_it =
((map_offset + map_size) < m_size) ? FindPlaceholder(map_offset + map_size) : m_placeholder_ranges.end();
if (right_it != m_placeholder_ranges.end())
{
// should start at our end
DebugAssert(right_it->first == (map_offset + map_size));
left_it->second = right_it->second;
m_placeholder_ranges.erase(right_it);
// combine our placeholder and the next, i.e. to the right
if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(left_it->first), left_it->second - left_it->first,
MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS))
{
Panic("Failed to coalescae placeholders right for unmap");
}
}
m_num_mappings--;
return true;
}
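// Illustrative walkthrough of the bookkeeping above (offsets hypothetical):
// Create(0x10000) leaves a single placeholder covering [0, 0x10000).
// Map(handle, 0, base + 0x4000, 0x4000) splits that into [0, 0x4000) and
// [0x8000, 0x10000), then replaces the middle placeholder with the view.
// Unmap(base + 0x4000, 0x4000) converts the view back into a placeholder and
// coalesces all three ranges into [0, 0x10000) again, so the reserved
// address space is never handed back to the OS while the area is alive.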
#else
bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
DebugAssertMsg((size & (HOST_PAGE_SIZE - 1)) == 0, "Size is page aligned");
const int result = mprotect(baseaddr, size, static_cast<int>(mode));
if (result != 0)
{
Log_ErrorPrintf("mprotect() for %zu at %p failed", size, baseaddr);
return false;
}
return true;
}
std::string MemMap::GetFileMappingName(const char* prefix)
{
const unsigned pid = static_cast<unsigned>(getpid());
#if defined(__FreeBSD__)
// FreeBSD's shm_open(3) requires name to be absolute
return fmt::format("/tmp/{}_{}", prefix, pid);
#else
return fmt::format("{}_{}", prefix, pid);
#endif
}
void* MemMap::CreateSharedMemory(const char* name, size_t size)
{
const int fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
if (fd < 0)
{
Log_ErrorPrintf("shm_open failed: %d\n", errno);
return nullptr;
}
// we're not going to be opening this mapping in other processes, so remove the file
shm_unlink(name);
// ensure it's the correct size
if (ftruncate(fd, static_cast<off_t>(size)) < 0)
{
Log_ErrorPrintf("ftruncate(%zu) failed: %d\n", size, errno);
return nullptr;
}
return reinterpret_cast<void*>(static_cast<intptr_t>(fd));
}
void MemMap::DestroySharedMemory(void* ptr)
{
close(static_cast<int>(reinterpret_cast<intptr_t>(ptr)));
}
void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
{
const int flags = (baseaddr != nullptr) ? (MAP_SHARED | MAP_FIXED) : MAP_SHARED;
void* ptr = mmap(baseaddr, size, static_cast<int>(mode), flags, static_cast<int>(reinterpret_cast<intptr_t>(handle)),
static_cast<off_t>(offset));
if (ptr == MAP_FAILED)
return nullptr;
return ptr;
}
void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
{
if (mmap(baseaddr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
Panic("Failed to unmap shared memory");
}
SharedMemoryMappingArea::SharedMemoryMappingArea() = default;
SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
Destroy();
}
bool SharedMemoryMappingArea::Create(size_t size)
{
AssertMsg(Common::IsAlignedPow2(size, HOST_PAGE_SIZE), "Size is page aligned");
Destroy();
void* alloc = mmap(nullptr, size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (alloc == MAP_FAILED)
return false;
m_base_ptr = static_cast<u8*>(alloc);
m_size = size;
m_num_pages = size / HOST_PAGE_SIZE;
return true;
}
void SharedMemoryMappingArea::Destroy()
{
AssertMsg(m_num_mappings == 0, "No mappings left");
if (m_base_ptr && munmap(m_base_ptr, m_size) != 0)
Panic("Failed to release shared memory area");
m_base_ptr = nullptr;
m_size = 0;
m_num_pages = 0;
}
u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
PageProtect mode)
{
DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
void* const ptr = mmap(map_base, map_size, static_cast<int>(mode), MAP_SHARED | MAP_FIXED,
static_cast<int>(reinterpret_cast<intptr_t>(file_handle)), static_cast<off_t>(file_offset));
if (ptr == MAP_FAILED)
return nullptr;
m_num_mappings++;
return static_cast<u8*>(ptr);
}
bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
if (mmap(map_base, map_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
return false;
m_num_mappings--;
return true;
}
#endif
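For context, a minimal sketch of how the new API composes. The name, sizes, and offsets here are illustrative rather than taken from the emulator (Bus::AllocateMemory below is the real consumer), and error-path cleanup is omitted for brevity:

#include "common/memmap.h"

static bool ExampleMirroredMapping()
{
  // One shared memory object backs every view (8 MB, aligned for both
  // 4 KB and 16 KB host pages).
  void* handle = MemMap::CreateSharedMemory(MemMap::GetFileMappingName("example").c_str(), 0x800000);
  if (!handle)
    return false;

  // Reserve a contiguous arena, then map the same backing storage twice,
  // creating two mirrors of the same physical memory.
  SharedMemoryMappingArea arena;
  if (!arena.Create(0x1000000))
    return false;
  u8* first = arena.Map(handle, 0, arena.OffsetPointer(0), 0x800000, PageProtect::ReadWrite);
  u8* second = arena.Map(handle, 0, arena.OffsetPointer(0x800000), 0x800000, PageProtect::ReadWrite);
  if (!first || !second)
    return false;

  first[0] = 0x42;
  const bool mirrored = (second[0] == 0x42); // same backing page, so true

  arena.Unmap(second, 0x800000);
  arena.Unmap(first, 0x800000);
  arena.Destroy();
  MemMap::DestroySharedMemory(handle);
  return mirrored;
}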

src/common/memmap.h (new file, 78 lines)
View file

@@ -0,0 +1,78 @@
// SPDX-FileCopyrightText: 2019-2023 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "types.h"
#include <map>
#include <string>
#ifdef _WIN32
// eww :/ but better than including windows.h
enum class PageProtect : u32
{
NoAccess = 0x01, // PAGE_NOACCESS
ReadOnly = 0x02, // PAGE_READONLY
ReadWrite = 0x04, // PAGE_READWRITE
ReadExecute = 0x20, // PAGE_EXECUTE_READ
ReadWriteExecute = 0x40, // PAGE_EXECUTE_READWRITE
};
#else
#include <sys/mman.h>
enum class PageProtect : u32
{
NoAccess = PROT_NONE,
ReadOnly = PROT_READ,
ReadWrite = PROT_READ | PROT_WRITE,
ReadExecute = PROT_READ | PROT_EXEC,
ReadWriteExecute = PROT_READ | PROT_WRITE | PROT_EXEC,
};
#endif
namespace MemMap {
std::string GetFileMappingName(const char* prefix);
void* CreateSharedMemory(const char* name, size_t size);
void DestroySharedMemory(void* ptr);
void* MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode);
void UnmapSharedMemory(void* baseaddr, size_t size);
bool MemProtect(void* baseaddr, size_t size, PageProtect mode);
} // namespace MemMap
class SharedMemoryMappingArea
{
public:
SharedMemoryMappingArea();
~SharedMemoryMappingArea();
ALWAYS_INLINE size_t GetSize() const { return m_size; }
ALWAYS_INLINE size_t GetNumPages() const { return m_num_pages; }
ALWAYS_INLINE u8* BasePointer() const { return m_base_ptr; }
ALWAYS_INLINE u8* OffsetPointer(size_t offset) const { return m_base_ptr + offset; }
ALWAYS_INLINE u8* PagePointer(size_t page) const { return m_base_ptr + HOST_PAGE_SIZE * page; }
bool Create(size_t size);
void Destroy();
u8* Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size, PageProtect mode);
bool Unmap(void* map_base, size_t map_size);
private:
u8* m_base_ptr = nullptr;
size_t m_size = 0;
size_t m_num_pages = 0;
size_t m_num_mappings = 0;
#ifdef _WIN32
using PlaceholderMap = std::map<size_t, size_t>;
PlaceholderMap::iterator FindPlaceholder(size_t offset);
PlaceholderMap m_placeholder_ranges;
#endif
};

View file

@@ -88,6 +88,17 @@ using u32 = uint32_t;
using s64 = int64_t;
using u64 = uint64_t;
// Host page sizes.
#if defined(__APPLE__) && defined(__aarch64__)
static constexpr u32 HOST_PAGE_SIZE = 0x4000;
static constexpr u32 HOST_PAGE_MASK = HOST_PAGE_SIZE - 1;
static constexpr u32 HOST_PAGE_SHIFT = 14;
#else
static constexpr u32 HOST_PAGE_SIZE = 0x1000;
static constexpr u32 HOST_PAGE_MASK = HOST_PAGE_SIZE - 1;
static constexpr u32 HOST_PAGE_SHIFT = 12;
#endif
// Enable use of static_assert in constexpr if
template<class T>
struct dependent_false : std::false_type

View file

@ -141,11 +141,7 @@ elseif(${CPU_ARCH} STREQUAL "aarch32")
target_link_libraries(core PUBLIC vixl)
message("Building AArch32 recompiler")
elseif(${CPU_ARCH} STREQUAL "aarch64")
target_compile_definitions(core PUBLIC "ENABLE_RECOMPILER=1")
if (NOT APPLE)
# Disabled until we support 16K pages.
target_compile_definitions(core PUBLIC "ENABLE_MMAP_FASTMEM=1")
endif()
target_compile_definitions(core PUBLIC "ENABLE_RECOMPILER=1" "ENABLE_MMAP_FASTMEM=1")
target_sources(core PRIVATE ${RECOMPILER_SRCS}
cpu_recompiler_code_generator_aarch64.cpp
)

View file

@@ -26,6 +26,7 @@
#include "common/assert.h"
#include "common/log.h"
#include "common/make_array.h"
#include "common/memmap.h"
#include <cstdio>
#include <tuple>
@@ -99,11 +100,12 @@ union MEMCTRL
} // namespace
std::bitset<RAM_8MB_CODE_PAGE_COUNT> g_ram_code_bits{};
static u32 s_ram_code_page_count = 0;
u8* g_ram = nullptr; // 2MB RAM
u8* g_ram = nullptr;
u32 g_ram_size = 0;
u32 g_ram_mask = 0;
u8 g_bios[BIOS_SIZE]{}; // 512K BIOS ROM
u8 g_bios[BIOS_SIZE]{};
static void* s_ram_handle = nullptr;
static std::array<TickCount, 3> s_exp1_access_time = {};
static std::array<TickCount, 3> s_exp2_access_time = {};
@@ -118,14 +120,11 @@ static u32 s_ram_size_reg = 0;
static std::string s_tty_line_buffer;
static Common::MemoryArena s_memory_arena;
static CPUFastmemMode s_fastmem_mode = CPUFastmemMode::Disabled;
#ifdef ENABLE_MMAP_FASTMEM
static u8* s_fastmem_base = nullptr;
static std::vector<Common::MemoryArena::View> s_fastmem_ram_views;
static std::vector<Common::MemoryArena::View> s_fastmem_reserved_views;
static SharedMemoryMappingArea s_fastmem_arena;
static std::vector<std::pair<u8*, size_t>> s_fastmem_ram_views;
#endif
static u8** s_fastmem_lut = nullptr;
@@ -133,15 +132,11 @@ static constexpr auto s_fastmem_ram_mirrors =
make_array(0x00000000u, 0x00200000u, 0x00400000u, 0x00600000u, 0x80000000u, 0x80200000u, 0x80400000u, 0x80600000u,
0xA0000000u, 0xA0200000u, 0xA0400000u, 0xA0600000u);
static u32 FastmemAddressToLUTPageIndex(u32 address);
static void SetLUTFastmemPage(u32 address, u8* ptr, bool writable);
static void SetRAMSize(bool enable_8mb_ram);
static std::tuple<TickCount, TickCount, TickCount> CalculateMemoryTiming(MEMDELAY mem_delay, COMDELAY common_delay);
static void RecalculateMemoryTimings();
static bool AllocateMemory(bool enable_8mb_ram);
static void ReleaseMemory();
static void SetCodePageFastmemProtection(u32 page_index, bool writable);
} // namespace Bus
@@ -157,32 +152,90 @@ static void SetCodePageFastmemProtection(u32 page_index, bool writable);
#define FIXUP_WORD_WRITE_VALUE(size, offset, value) \
((size == MemoryAccessSize::Word) ? (value) : ((value) << (((offset)&3u) * 8)))
bool Bus::Initialize()
bool Bus::AllocateMemory()
{
if (!AllocateMemory(g_settings.enable_8mb_ram))
s_ram_handle = MemMap::CreateSharedMemory("duckstation_ram", RAM_8MB_SIZE);
if (!s_ram_handle)
{
Host::ReportErrorAsync("Error", "Failed to allocate memory");
return false;
}
g_ram = static_cast<u8*>(MemMap::MapSharedMemory(s_ram_handle, 0, nullptr, RAM_8MB_SIZE, PageProtect::ReadWrite));
if (!g_ram)
{
Host::ReportErrorAsync("Error", "Failed to map memory");
ReleaseMemory();
return false;
}
Log_InfoPrintf("RAM is mapped at %p.", g_ram);
#ifdef ENABLE_MMAP_FASTMEM
if (!s_fastmem_arena.Create(FASTMEM_ARENA_SIZE))
{
// TODO: maybe make this non-fatal?
Host::ReportErrorAsync("Error", "Failed to create fastmem arena");
ReleaseMemory();
return false;
}
Log_InfoPrintf("Fastmem base: %p", s_fastmem_arena.BasePointer());
#endif
return true;
}
void Bus::ReleaseMemory()
{
#ifdef ENABLE_MMAP_FASTMEM
DebugAssert(s_fastmem_ram_views.empty());
s_fastmem_arena.Destroy();
#endif
std::free(s_fastmem_lut);
s_fastmem_lut = nullptr;
if (g_ram)
{
MemMap::UnmapSharedMemory(g_ram, RAM_8MB_SIZE);
g_ram = nullptr;
}
if (s_ram_handle)
{
MemMap::DestroySharedMemory(s_ram_handle);
s_ram_handle = nullptr;
}
}
bool Bus::Initialize()
{
SetRAMSize(g_settings.enable_8mb_ram);
Reset();
return true;
}
void Bus::SetRAMSize(bool enable_8mb_ram)
{
g_ram_size = enable_8mb_ram ? RAM_8MB_SIZE : RAM_2MB_SIZE;
g_ram_mask = enable_8mb_ram ? RAM_8MB_MASK : RAM_2MB_MASK;
Exports::RAM = reinterpret_cast<uintptr_t>(g_ram);
Exports::RAM_SIZE = g_ram_size;
Exports::RAM_MASK = g_ram_mask;
}
void Bus::Shutdown()
{
std::free(s_fastmem_lut);
s_fastmem_lut = nullptr;
#ifdef ENABLE_MMAP_FASTMEM
s_fastmem_base = nullptr;
s_fastmem_ram_views.clear();
#endif
UpdateFastmemViews(CPUFastmemMode::Disabled);
CPU::g_state.fastmem_base = nullptr;
s_fastmem_mode = CPUFastmemMode::Disabled;
ReleaseMemory();
g_ram_mask = 0;
g_ram_size = 0;
Exports::RAM = 0;
Exports::RAM_SIZE = 0;
Exports::RAM_MASK = 0;
}
void Bus::Reset()
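The net effect of the refactor above is that the expensive mapping work now happens once per process rather than once per emulated machine. Condensed from the System changes further down, the call order becomes:

// Process startup (once): reserve RAM backing and the 4 GB fastmem arena.
if (!Bus::AllocateMemory())
  Panic("Failed to allocate memory for emulated bus.");
// Per-system boot: only selects the 2 MB/8 MB RAM size and resets registers.
Bus::Initialize();
// Process shutdown (once): tear down views, arena, and shared memory.
Bus::ReleaseMemory();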
@ -238,10 +291,7 @@ bool Bus::DoState(StateWrapper& sw)
if (ram_size != g_ram_size)
{
const bool using_8mb_ram = (ram_size == RAM_8MB_SIZE);
ReleaseMemory();
if (!AllocateMemory(using_8mb_ram))
return false;
SetRAMSize(using_8mb_ram);
UpdateFastmemViews(s_fastmem_mode);
CPU::UpdateFastmemBase();
}
@@ -326,64 +376,6 @@ void Bus::RecalculateMemoryTimings()
s_spu_access_time[2] + 1);
}
bool Bus::AllocateMemory(bool enable_8mb_ram)
{
if (!s_memory_arena.Create(MEMORY_ARENA_SIZE, true, false))
{
Log_ErrorPrint("Failed to create memory arena");
return false;
}
// Create the base views.
const u32 ram_size = enable_8mb_ram ? RAM_8MB_SIZE : RAM_2MB_SIZE;
const u32 ram_mask = enable_8mb_ram ? RAM_8MB_MASK : RAM_2MB_MASK;
g_ram = static_cast<u8*>(s_memory_arena.CreateViewPtr(MEMORY_ARENA_RAM_OFFSET, ram_size, true, false));
if (!g_ram)
{
Log_ErrorPrintf("Failed to create base views of memory (%u bytes RAM)", ram_size);
return false;
}
g_ram_mask = ram_mask;
g_ram_size = ram_size;
s_ram_code_page_count = enable_8mb_ram ? RAM_8MB_CODE_PAGE_COUNT : RAM_2MB_CODE_PAGE_COUNT;
Exports::RAM = reinterpret_cast<uintptr_t>(g_ram);
Exports::RAM_SIZE = g_ram_size;
Exports::RAM_MASK = g_ram_mask;
Log_InfoPrintf("RAM is %u bytes at %p", g_ram_size, g_ram);
return true;
}
void Bus::ReleaseMemory()
{
if (g_ram)
{
s_memory_arena.ReleaseViewPtr(g_ram, g_ram_size);
g_ram = nullptr;
g_ram_mask = 0;
g_ram_size = 0;
Exports::RAM = 0;
Exports::RAM_SIZE = 0;
Exports::RAM_MASK = 0;
}
s_memory_arena.Destroy();
}
ALWAYS_INLINE u32 Bus::FastmemAddressToLUTPageIndex(u32 address)
{
return address >> 12;
}
ALWAYS_INLINE_RELEASE void Bus::SetLUTFastmemPage(u32 address, u8* ptr, bool writable)
{
s_fastmem_lut[FastmemAddressToLUTPageIndex(address)] = ptr;
s_fastmem_lut[FASTMEM_LUT_NUM_PAGES + FastmemAddressToLUTPageIndex(address)] = writable ? ptr : nullptr;
}
CPUFastmemMode Bus::GetFastmemMode()
{
return s_fastmem_mode;
@@ -393,7 +385,7 @@ u8* Bus::GetFastmemBase()
{
#ifdef ENABLE_MMAP_FASTMEM
if (s_fastmem_mode == CPUFastmemMode::MMap)
return s_fastmem_base;
return s_fastmem_arena.BasePointer();
#endif
if (s_fastmem_mode == CPUFastmemMode::LUT)
return reinterpret_cast<u8*>(s_fastmem_lut);
@@ -406,115 +398,75 @@ void Bus::UpdateFastmemViews(CPUFastmemMode mode)
#ifndef ENABLE_MMAP_FASTMEM
Assert(mode != CPUFastmemMode::MMap);
#else
for (const auto& it : s_fastmem_ram_views)
s_fastmem_arena.Unmap(it.first, it.second);
s_fastmem_ram_views.clear();
s_fastmem_reserved_views.clear();
#endif
s_fastmem_mode = mode;
if (mode == CPUFastmemMode::Disabled)
{
#ifdef ENABLE_MMAP_FASTMEM
s_fastmem_base = nullptr;
#endif
std::free(s_fastmem_lut);
s_fastmem_lut = nullptr;
return;
}
#ifdef ENABLE_MMAP_FASTMEM
if (mode == CPUFastmemMode::MMap)
{
std::free(s_fastmem_lut);
s_fastmem_lut = nullptr;
if (!s_fastmem_base)
{
s_fastmem_base = static_cast<u8*>(s_memory_arena.FindBaseAddressForMapping(FASTMEM_REGION_SIZE));
if (!s_fastmem_base)
{
Log_ErrorPrint("Failed to find base address for fastmem");
return;
}
Log_InfoPrintf("Fastmem base: %p", s_fastmem_base);
}
auto MapRAM = [](u32 base_address) {
u8* map_address = s_fastmem_base + base_address;
auto view = s_memory_arena.CreateView(MEMORY_ARENA_RAM_OFFSET, g_ram_size, true, false, map_address);
if (!view)
u8* map_address = s_fastmem_arena.BasePointer() + base_address;
if (!s_fastmem_arena.Map(s_ram_handle, 0, map_address, g_ram_size, PageProtect::ReadWrite))
{
Log_ErrorPrintf("Failed to map RAM at fastmem area %p (offset 0x%08X)", map_address, g_ram_size);
return;
}
// mark all pages with code as non-writable
for (u32 i = 0; i < s_ram_code_page_count; i++)
for (u32 i = 0; i < static_cast<u32>(g_ram_code_bits.size()); i++)
{
if (g_ram_code_bits[i])
{
u8* page_address = map_address + (i * HOST_PAGE_SIZE);
if (!s_memory_arena.SetPageProtection(page_address, HOST_PAGE_SIZE, true, false, false))
if (!MemMap::MemProtect(page_address, HOST_PAGE_SIZE, PageProtect::ReadOnly))
{
Log_ErrorPrintf("Failed to write-protect code page at %p", page_address);
s_fastmem_arena.Unmap(map_address, g_ram_size);
return;
}
}
}
s_fastmem_ram_views.push_back(std::move(view.value()));
};
auto ReserveRegion = [](u32 start_address, u32 end_address_inclusive) {
// We don't reserve memory regions on Android because the app could be subject to address space size limitations.
#ifndef __ANDROID__
Assert(end_address_inclusive >= start_address);
u8* map_address = s_fastmem_base + start_address;
auto view = s_memory_arena.CreateReservedView(end_address_inclusive - start_address + 1, map_address);
if (!view)
{
Log_ErrorPrintf("Failed to map reserved region %p (size 0x%08X)", map_address,
end_address_inclusive - start_address + 1);
return;
}
s_fastmem_reserved_views.push_back(std::move(view.value()));
#endif
s_fastmem_ram_views.emplace_back(map_address, g_ram_size);
};
// KUSEG - cached
MapRAM(0x00000000);
ReserveRegion(0x00000000 + g_ram_size, 0x80000000 - 1);
// KSEG0 - cached
MapRAM(0x80000000);
ReserveRegion(0x80000000 + g_ram_size, 0xA0000000 - 1);
// KSEG1 - uncached
MapRAM(0xA0000000);
ReserveRegion(0xA0000000 + g_ram_size, 0xFFFFFFFF);
return;
}
#endif
#ifdef ENABLE_MMAP_FASTMEM
s_fastmem_base = nullptr;
#endif
if (!s_fastmem_lut)
{
s_fastmem_lut = static_cast<u8**>(std::calloc(FASTMEM_LUT_NUM_SLOTS, sizeof(u8*)));
s_fastmem_lut = static_cast<u8**>(std::malloc(sizeof(u8*) * FASTMEM_LUT_NUM_SLOTS));
Assert(s_fastmem_lut);
Log_InfoPrintf("Fastmem base (software): %p", s_fastmem_lut);
}
std::memset(s_fastmem_lut, 0, sizeof(u8*) * FASTMEM_LUT_NUM_SLOTS);
auto MapRAM = [](u32 base_address) {
for (u32 address = 0; address < g_ram_size; address += HOST_PAGE_SIZE)
u8* ram_ptr = g_ram;
for (u32 address = 0; address < g_ram_size; address += FASTMEM_LUT_PAGE_SIZE)
{
SetLUTFastmemPage(base_address + address, &g_ram[address],
!g_ram_code_bits[FastmemAddressToLUTPageIndex(address)]);
const u32 lut_index = (base_address + address) >> FASTMEM_LUT_PAGE_SHIFT;
s_fastmem_lut[lut_index] = ram_ptr;
s_fastmem_lut[FASTMEM_LUT_NUM_PAGES + lut_index] = g_ram_code_bits[address >> HOST_PAGE_SHIFT] ? nullptr : ram_ptr;
ram_ptr += FASTMEM_LUT_PAGE_SIZE;
}
};
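To make the new split between LUT granularity and host-page granularity concrete, consider a 16 KB-page host (address chosen for illustration):

// Guest address 0x80004123 (KSEG0 RAM mirror):
//   LUT slot  = 0x80004123 >> FASTMEM_LUT_PAGE_SHIFT (12) = 0x80004
//   offset    = 0x80004123 & FASTMEM_LUT_PAGE_MASK        = 0x123
// Code-page tracking stays at host-page granularity:
//   code page = (0x80004123 & g_ram_mask) >> HOST_PAGE_SHIFT (14) = 1
// so write-protecting that one 16 KB code page touches four consecutive
// write-LUT slots (FASTMEM_LUT_PAGES_PER_CODE_PAGE = 16384 / 4096 = 4).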
@@ -591,11 +543,13 @@ void Bus::SetCodePageFastmemProtection(u32 page_index, bool writable)
#ifdef ENABLE_MMAP_FASTMEM
if (s_fastmem_mode == CPUFastmemMode::MMap)
{
const PageProtect protect = writable ? PageProtect::ReadWrite : PageProtect::ReadOnly;
// unprotect fastmem pages
for (const auto& view : s_fastmem_ram_views)
for (const auto& it : s_fastmem_ram_views)
{
u8* page_address = static_cast<u8*>(view.GetBasePointer()) + (page_index * HOST_PAGE_SIZE);
if (!s_memory_arena.SetPageProtection(page_address, HOST_PAGE_SIZE, true, writable, false))
u8* page_address = it.first + (page_index * HOST_PAGE_SIZE);
if (!MemMap::MemProtect(page_address, HOST_PAGE_SIZE, protect))
{
Log_ErrorPrintf("Failed to %s code page %u (0x%08X) @ %p", writable ? "unprotect" : "protect", page_index,
page_index * static_cast<u32>(HOST_PAGE_SIZE), page_address);
@@ -609,9 +563,19 @@ void Bus::SetCodePageFastmemProtection(u32 page_index, bool writable)
if (s_fastmem_mode == CPUFastmemMode::LUT)
{
// mirrors...
const u32 ram_address = page_index * HOST_PAGE_SIZE;
const u32 code_addr = page_index << HOST_PAGE_SHIFT;
u8* code_ptr = &g_ram[code_addr];
for (u32 mirror_start : s_fastmem_ram_mirrors)
SetLUTFastmemPage(mirror_start + ram_address, &g_ram[ram_address], writable);
{
u32 ram_offset = code_addr;
u8* ram_ptr = code_ptr;
for (u32 i = 0; i < FASTMEM_LUT_PAGES_PER_CODE_PAGE; i++)
{
s_fastmem_lut[FASTMEM_LUT_NUM_PAGES + ((mirror_start + ram_offset) >> FASTMEM_LUT_PAGE_SHIFT)] = ram_ptr;
ram_offset += FASTMEM_LUT_PAGE_SIZE;
ram_ptr += FASTMEM_LUT_PAGE_SIZE;
}
}
}
}
@@ -623,11 +587,11 @@ void Bus::ClearRAMCodePageFlags()
if (s_fastmem_mode == CPUFastmemMode::MMap)
{
// unprotect fastmem pages
for (const auto& view : s_fastmem_ram_views)
for (const auto& it : s_fastmem_ram_views)
{
if (!s_memory_arena.SetPageProtection(view.GetBasePointer(), view.GetMappingSize(), true, true, false))
if (!MemMap::MemProtect(it.first, it.second, PageProtect::ReadWrite))
{
Log_ErrorPrintf("Failed to unprotect code pages for fastmem view @ %p", view.GetBasePointer());
Log_ErrorPrintf("Failed to unprotect code pages for fastmem view @ %p", it.first);
}
}
}
@@ -635,11 +599,19 @@
if (s_fastmem_mode == CPUFastmemMode::LUT)
{
for (u32 i = 0; i < s_ram_code_page_count; i++)
for (u32 i = 0; i < static_cast<u32>(g_ram_code_bits.size()); i++)
{
const u32 addr = (i * HOST_PAGE_SIZE);
const u32 code_addr = (i * HOST_PAGE_SIZE);
for (u32 mirror_start : s_fastmem_ram_mirrors)
SetLUTFastmemPage(mirror_start + addr, &g_ram[addr], true);
{
u32 ram_offset = code_addr;
for (u32 j = 0; j < FASTMEM_LUT_PAGES_PER_CODE_PAGE; j++)
{
s_fastmem_lut[FASTMEM_LUT_NUM_PAGES + ((mirror_start + ram_offset) >> FASTMEM_LUT_PAGE_SHIFT)] =
&g_ram[ram_offset];
ram_offset += FASTMEM_LUT_PAGE_SIZE;
}
}
}
}
}

View file

@@ -3,7 +3,6 @@
#pragma once
#include "common/bitfield.h"
#include "util/memory_arena.h"
#include "types.h"
#include <array>
#include <bitset>
@@ -81,29 +80,26 @@ enum : TickCount
RAM_READ_TICKS = 6
};
enum : size_t
{
// Our memory arena contains storage for RAM.
MEMORY_ARENA_SIZE = RAM_8MB_SIZE,
// Offsets within the memory arena.
MEMORY_ARENA_RAM_OFFSET = 0,
#ifdef ENABLE_MMAP_FASTMEM
// Fastmem region size is 4GB to cover the entire 32-bit address space.
FASTMEM_REGION_SIZE = UINT64_C(0x100000000),
#endif
};
enum : u32
{
RAM_2MB_CODE_PAGE_COUNT = (RAM_2MB_SIZE + (HOST_PAGE_SIZE - 1)) / HOST_PAGE_SIZE,
RAM_8MB_CODE_PAGE_COUNT = (RAM_8MB_SIZE + (HOST_PAGE_SIZE - 1)) / HOST_PAGE_SIZE,
FASTMEM_LUT_PAGE_SIZE = 4096,
FASTMEM_LUT_PAGE_MASK = FASTMEM_LUT_PAGE_SIZE - 1,
FASTMEM_LUT_PAGE_SHIFT = 12,
FASTMEM_LUT_PAGES_PER_CODE_PAGE = HOST_PAGE_SIZE / FASTMEM_LUT_PAGE_SIZE,
FASTMEM_LUT_NUM_PAGES = 0x100000, // 0x100000000 >> 12
FASTMEM_LUT_NUM_SLOTS = FASTMEM_LUT_NUM_PAGES * 2,
};
// Fastmem region size is 4GB to cover the entire 32-bit address space.
static constexpr size_t FASTMEM_ARENA_SIZE = UINT64_C(0x100000000);
bool AllocateMemory();
void ReleaseMemory();
bool Initialize();
void Shutdown();
void Reset();
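Spelled out, the relationships those constants encode (the static_asserts below are illustrative, not part of the commit):

// LUT granularity is fixed at 4 KB regardless of the host page size.
static_assert(FASTMEM_LUT_PAGE_SIZE == 4096 && FASTMEM_LUT_PAGE_SHIFT == 12);
// 4 GB of guest address space divided into 4 KB pages.
static_assert(FASTMEM_LUT_NUM_PAGES == (FASTMEM_ARENA_SIZE >> FASTMEM_LUT_PAGE_SHIFT));
// One read half and one write half.
static_assert(FASTMEM_LUT_NUM_SLOTS == FASTMEM_LUT_NUM_PAGES * 2);
// On a 16 KB-page host each code page spans four LUT slots; on 4 KB hosts, one.
static_assert(FASTMEM_LUT_PAGES_PER_CODE_PAGE == HOST_PAGE_SIZE / FASTMEM_LUT_PAGE_SIZE);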

View file

@@ -1142,7 +1142,7 @@ void ShutdownFastmem()
Common::PageFaultHandler::HandlerResult MMapPageFaultHandler(void* exception_pc, void* fault_address, bool is_write)
{
if (static_cast<u8*>(fault_address) < g_state.fastmem_base ||
(static_cast<u8*>(fault_address) - g_state.fastmem_base) >= static_cast<ptrdiff_t>(Bus::FASTMEM_REGION_SIZE))
(static_cast<u8*>(fault_address) - g_state.fastmem_base) >= static_cast<ptrdiff_t>(Bus::FASTMEM_ARENA_SIZE))
{
return Common::PageFaultHandler::HandlerResult::ExecuteNextHandler;
}

View file

@ -1209,8 +1209,8 @@ void CodeGenerator::EmitLoadGuestRAMFastmem(const Value& address, RegSize size,
address_reg = address.host_reg;
}
m_emit->lsr(GetHostReg32(RARG1), GetHostReg32(address_reg), 12);
m_emit->and_(GetHostReg32(RARG2), GetHostReg32(address_reg), HOST_PAGE_OFFSET_MASK);
m_emit->lsr(GetHostReg32(RARG1), GetHostReg32(address_reg), Bus::FASTMEM_LUT_PAGE_SHIFT);
m_emit->and_(GetHostReg32(RARG2), GetHostReg32(address_reg), Bus::FASTMEM_LUT_PAGE_MASK);
m_emit->ldr(GetHostReg32(RARG1),
a32::MemOperand(GetHostReg32(fastmem_base), GetHostReg32(RARG1), a32::LSL, 2)); // pointer load
@ -1257,8 +1257,8 @@ void CodeGenerator::EmitLoadGuestMemoryFastmem(const CodeBlockInstruction& cbi,
address_reg = address.host_reg;
}
m_emit->lsr(GetHostReg32(RARG1), GetHostReg32(address_reg), 12);
m_emit->and_(GetHostReg32(RARG2), GetHostReg32(address_reg), HOST_PAGE_OFFSET_MASK);
m_emit->lsr(GetHostReg32(RARG1), GetHostReg32(address_reg), Bus::FASTMEM_LUT_PAGE_SHIFT);
m_emit->and_(GetHostReg32(RARG2), GetHostReg32(address_reg), Bus::FASTMEM_LUT_PAGE_MASK);
m_emit->ldr(GetHostReg32(RARG1),
a32::MemOperand(GetHostReg32(fastmem_base), GetHostReg32(RARG1), a32::LSL, 2)); // pointer load
@ -1417,8 +1417,8 @@ void CodeGenerator::EmitStoreGuestMemoryFastmem(const CodeBlockInstruction& cbi,
// TODO: if this gets backpatched, these instructions are wasted
m_emit->lsr(GetHostReg32(RARG1), GetHostReg32(address_reg), 12);
m_emit->and_(GetHostReg32(RARG2), GetHostReg32(address_reg), HOST_PAGE_OFFSET_MASK);
m_emit->lsr(GetHostReg32(RARG1), GetHostReg32(address_reg), Bus::FASTMEM_LUT_PAGE_SHIFT);
m_emit->and_(GetHostReg32(RARG2), GetHostReg32(address_reg), Bus::FASTMEM_LUT_PAGE_MASK);
m_emit->ldr(GetHostReg32(RARG1),
a32::MemOperand(GetHostReg32(fastmem_base), GetHostReg32(RARG1), a32::LSL, 2)); // pointer load

View file

@ -1374,8 +1374,8 @@ void CodeGenerator::EmitLoadGuestRAMFastmem(const Value& address, RegSize size,
}
else
{
m_emit->lsr(GetHostReg32(RARG1), GetHostReg32(address_reg), 12);
m_emit->and_(GetHostReg32(RARG2), GetHostReg32(address_reg), HOST_PAGE_OFFSET_MASK);
m_emit->lsr(GetHostReg32(RARG1), GetHostReg32(address_reg), Bus::FASTMEM_LUT_PAGE_SHIFT);
m_emit->and_(GetHostReg32(RARG2), GetHostReg32(address_reg), Bus::FASTMEM_LUT_PAGE_MASK);
m_emit->ldr(GetHostReg64(RARG1), a64::MemOperand(GetFastmemBasePtrReg(), GetHostReg32(RARG1), a64::LSL, 3));
switch (size)
@ -1447,8 +1447,8 @@ void CodeGenerator::EmitLoadGuestMemoryFastmem(const CodeBlockInstruction& cbi,
}
else
{
m_emit->lsr(GetHostReg32(RARG1), GetHostReg32(address_reg), 12);
m_emit->and_(GetHostReg32(RARG2), GetHostReg32(address_reg), HOST_PAGE_OFFSET_MASK);
m_emit->lsr(GetHostReg32(RARG1), GetHostReg32(address_reg), Bus::FASTMEM_LUT_PAGE_SHIFT);
m_emit->and_(GetHostReg32(RARG2), GetHostReg32(address_reg), Bus::FASTMEM_LUT_PAGE_MASK);
m_emit->ldr(GetHostReg64(RARG1), a64::MemOperand(GetFastmemBasePtrReg(), GetHostReg32(RARG1), a64::LSL, 3));
bpi.host_pc = GetCurrentNearCodePointer();
@ -1622,8 +1622,8 @@ void CodeGenerator::EmitStoreGuestMemoryFastmem(const CodeBlockInstruction& cbi,
}
else
{
m_emit->lsr(GetHostReg32(RARG1), GetHostReg32(address_reg), 12);
m_emit->and_(GetHostReg32(RARG2), GetHostReg32(address_reg), HOST_PAGE_OFFSET_MASK);
m_emit->lsr(GetHostReg32(RARG1), GetHostReg32(address_reg), Bus::FASTMEM_LUT_PAGE_SHIFT);
m_emit->and_(GetHostReg32(RARG2), GetHostReg32(address_reg), Bus::FASTMEM_LUT_PAGE_MASK);
m_emit->add(GetHostReg64(RARG3), GetFastmemBasePtrReg(), Bus::FASTMEM_LUT_NUM_PAGES * sizeof(u32*));
m_emit->ldr(GetHostReg64(RARG1), a64::MemOperand(GetHostReg64(RARG3), GetHostReg32(RARG1), a64::LSL, 3));

View file

@ -1877,8 +1877,8 @@ void CodeGenerator::EmitLoadGuestRAMFastmem(const Value& address, RegSize size,
// TODO: We could mask the LSBs here for unaligned protection.
EmitCopyValue(RARG1, address);
m_emit->mov(GetHostReg32(RARG2), GetHostReg32(RARG1));
m_emit->shr(GetHostReg32(RARG1), 12);
m_emit->and_(GetHostReg32(RARG2), HOST_PAGE_OFFSET_MASK);
m_emit->shr(GetHostReg32(RARG1), Bus::FASTMEM_LUT_PAGE_SHIFT);
m_emit->and_(GetHostReg32(RARG2), Bus::FASTMEM_LUT_PAGE_MASK);
m_emit->mov(GetHostReg64(RARG1), m_emit->qword[GetFastmemBasePtrReg() + GetHostReg64(RARG1) * 8]);
switch (size)
@ -1985,8 +1985,8 @@ void CodeGenerator::EmitLoadGuestMemoryFastmem(const CodeBlockInstruction& cbi,
// TODO: We could mask the LSBs here for unaligned protection.
EmitCopyValue(RARG1, address);
m_emit->mov(GetHostReg32(RARG2), GetHostReg32(RARG1));
m_emit->shr(GetHostReg32(RARG1), 12);
m_emit->and_(GetHostReg32(RARG2), HOST_PAGE_OFFSET_MASK);
m_emit->shr(GetHostReg32(RARG1), Bus::FASTMEM_LUT_PAGE_SHIFT);
m_emit->and_(GetHostReg32(RARG2), Bus::FASTMEM_LUT_PAGE_MASK);
m_emit->mov(GetHostReg64(RARG1), m_emit->qword[GetFastmemBasePtrReg() + GetHostReg64(RARG1) * 8]);
bpi.host_pc = GetCurrentNearCodePointer();
@ -2248,8 +2248,8 @@ void CodeGenerator::EmitStoreGuestMemoryFastmem(const CodeBlockInstruction& cbi,
// TODO: We could mask the LSBs here for unaligned protection.
EmitCopyValue(RARG1, address);
m_emit->mov(GetHostReg32(RARG2), GetHostReg32(RARG1));
m_emit->shr(GetHostReg32(RARG1), 12);
m_emit->and_(GetHostReg32(RARG2), HOST_PAGE_OFFSET_MASK);
m_emit->shr(GetHostReg32(RARG1), Bus::FASTMEM_LUT_PAGE_SHIFT);
m_emit->and_(GetHostReg32(RARG2), Bus::FASTMEM_LUT_PAGE_MASK);
m_emit->mov(GetHostReg64(RARG1),
m_emit->qword[GetFastmemBasePtrReg() + GetHostReg64(RARG1) * 8 + (Bus::FASTMEM_LUT_NUM_PAGES * 8)]);
bpi.host_pc = GetCurrentNearCodePointer();
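All three recompiler backends now emit the same address decomposition; in plain C++, the generated fast path is roughly this (a sketch of the emitted sequence, not a function that exists in the source):

// fastmem_base points at the LUT in LUT mode (see Bus::GetFastmemBase()).
u8** const lut = reinterpret_cast<u8**>(fastmem_base);
const u32 lut_index = address >> Bus::FASTMEM_LUT_PAGE_SHIFT; // lsr/shr
const u32 page_offset = address & Bus::FASTMEM_LUT_PAGE_MASK; // and
// Loads read the first half of the LUT; stores index the second half
// (+ FASTMEM_LUT_NUM_PAGES) so write-protected pages hold nullptr and
// fault into the backpatch handler.
u8* const page = lut[lut_index];
const u32 loaded_value = *reinterpret_cast<const u32*>(page + page_offset);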

View file

@@ -436,7 +436,9 @@ struct Settings
#ifdef ENABLE_RECOMPILER
static constexpr CPUExecutionMode DEFAULT_CPU_EXECUTION_MODE = CPUExecutionMode::Recompiler;
#ifdef ENABLE_MMAP_FASTMEM
// LUT still ends up faster on Apple Silicon for now, because of 16K pages.
#if defined(ENABLE_MMAP_FASTMEM) && (!defined(__APPLE__) || !defined(__aarch64__))
static constexpr CPUFastmemMode DEFAULT_CPU_FASTMEM_MODE = CPUFastmemMode::MMap;
#else
static constexpr CPUFastmemMode DEFAULT_CPU_FASTMEM_MODE = CPUFastmemMode::LUT;

View file

@@ -242,6 +242,9 @@ static TinyString GetTimestampStringForFileName()
void System::Internal::ProcessStartup()
{
if (!Bus::AllocateMemory())
Panic("Failed to allocate memory for emulated bus.");
// This will call back to Host::LoadSettings() -> ReloadSources().
LoadSettings(false);
@@ -262,6 +265,8 @@ void System::Internal::ProcessShutdown()
Achievements::Shutdown(false);
InputManager::CloseSources();
Bus::ReleaseMemory();
}
void System::Internal::IdlePollUpdate()

View file

@@ -202,9 +202,3 @@ enum class CPUFastmemMode
LUT,
Count
};
enum : size_t
{
HOST_PAGE_SIZE = 4096,
HOST_PAGE_OFFSET_MASK = HOST_PAGE_SIZE - 1,
};

View file

@@ -566,6 +566,7 @@ int main(int argc, char* argv[])
return EXIT_FAILURE;
}
System::Internal::ProcessStartup();
RegTestHost::HookSignals();
int result = -1;
@@ -594,5 +595,6 @@
result = 0;
cleanup:
System::Internal::ProcessShutdown();
return result;
}

View file

@@ -43,8 +43,6 @@ add_library(util
iso_reader.h
jit_code_buffer.cpp
jit_code_buffer.h
memory_arena.cpp
memory_arena.h
page_fault_handler.cpp
page_fault_handler.h
platform_misc.h

View file

@@ -1,400 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "memory_arena.h"
#include "common/assert.h"
#include "common/log.h"
#include "common/string_util.h"
Log_SetChannel(Common::MemoryArena);
#if defined(_WIN32)
#include "common/windows_headers.h"
#elif defined(ANDROID)
#include <dlfcn.h>
#include <fcntl.h>
#include <linux/ashmem.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
#include <cerrno>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#endif
namespace Common {
// Borrowed from Dolphin
#ifdef ANDROID
#define ASHMEM_DEVICE "/dev/ashmem"
static int AshmemCreateFileMapping(const char* name, size_t size)
{
// ASharedMemory path - works on API >= 26 and falls through on API < 26:
// We can't call ASharedMemory_create the normal way without increasing the
// minimum version requirement to API 26, so we use dlopen/dlsym instead
static void* libandroid = dlopen("libandroid.so", RTLD_LAZY | RTLD_LOCAL);
static auto shared_memory_create =
reinterpret_cast<int (*)(const char*, size_t)>(dlsym(libandroid, "ASharedMemory_create"));
if (shared_memory_create)
return shared_memory_create(name, size);
// /dev/ashmem path - works on API < 29:
int fd, ret;
fd = open(ASHMEM_DEVICE, O_RDWR);
if (fd < 0)
return fd;
// We don't really care if we can't set the name, it is optional
ioctl(fd, ASHMEM_SET_NAME, name);
ret = ioctl(fd, ASHMEM_SET_SIZE, size);
if (ret < 0)
{
close(fd);
Log_ErrorPrintf("Ashmem returned error: 0x%08x", ret);
return ret;
}
return fd;
}
#endif
MemoryArena::MemoryArena() = default;
MemoryArena::~MemoryArena()
{
Destroy();
}
void* MemoryArena::FindBaseAddressForMapping(size_t size)
{
void* base_address;
#if defined(_WIN32)
base_address = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_READWRITE);
if (base_address)
VirtualFree(base_address, 0, MEM_RELEASE);
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
base_address = mmap(nullptr, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
if (base_address)
munmap(base_address, size);
#elif defined(__ANDROID__)
base_address = mmap(nullptr, size, PROT_NONE, MAP_ANON | MAP_SHARED, -1, 0);
if (base_address)
munmap(base_address, size);
#else
base_address = nullptr;
#endif
if (!base_address)
{
Log_ErrorPrintf("Failed to get base address for memory mapping of size %zu", size);
return nullptr;
}
return base_address;
}
bool MemoryArena::IsValid() const
{
#if defined(_WIN32)
return m_file_handle != nullptr;
#else
return m_shmem_fd >= 0;
#endif
}
static std::string GetFileMappingName()
{
#if defined(_WIN32)
const unsigned pid = GetCurrentProcessId();
#elif defined(__ANDROID__) || defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
const unsigned pid = static_cast<unsigned>(getpid());
#else
#error Unknown platform.
#endif
const std::string ret(StringUtil::StdStringFromFormat("duckstation_%u", pid));
Log_InfoPrintf("File mapping name: %s", ret.c_str());
return ret;
}
bool MemoryArena::Create(size_t size, bool writable, bool executable)
{
if (IsValid())
Destroy();
const std::string file_mapping_name(GetFileMappingName());
#if defined(_WIN32)
const DWORD protect = (writable ? (executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE) : PAGE_READONLY);
m_file_handle = CreateFileMappingA(INVALID_HANDLE_VALUE, nullptr, protect, Truncate32(size >> 32), Truncate32(size),
file_mapping_name.c_str());
if (!m_file_handle)
{
Log_ErrorPrintf("CreateFileMapping failed: %u", GetLastError());
return false;
}
m_size = size;
m_writable = writable;
m_executable = executable;
return true;
#elif defined(__ANDROID__)
m_shmem_fd = AshmemCreateFileMapping(file_mapping_name.c_str(), size);
if (m_shmem_fd < 0)
{
Log_ErrorPrintf("AshmemCreateFileMapping failed: %d %d", m_shmem_fd, errno);
return false;
}
m_size = size;
m_writable = writable;
m_executable = executable;
return true;
#elif defined(__linux__)
m_shmem_fd = shm_open(file_mapping_name.c_str(), O_CREAT | O_EXCL | (writable ? O_RDWR : O_RDONLY), 0600);
if (m_shmem_fd < 0)
{
Log_ErrorPrintf("shm_open failed: %d", errno);
return false;
}
// we're not going to be opening this mapping in other processes, so remove the file
shm_unlink(file_mapping_name.c_str());
// ensure it's the correct size
if (ftruncate64(m_shmem_fd, static_cast<off64_t>(size)) < 0)
{
Log_ErrorPrintf("ftruncate64(%zu) failed: %d", size, errno);
return false;
}
m_size = size;
m_writable = writable;
m_executable = executable;
return true;
#elif defined(__APPLE__) || defined(__FreeBSD__)
#if defined(__APPLE__)
m_shmem_fd = shm_open(file_mapping_name.c_str(), O_CREAT | O_EXCL | (writable ? O_RDWR : O_RDONLY), 0600);
#else
m_shmem_fd = shm_open(SHM_ANON, O_CREAT | O_EXCL | (writable ? O_RDWR : O_RDONLY), 0600);
#endif
if (m_shmem_fd < 0)
{
Log_ErrorPrintf("shm_open failed: %d", errno);
return false;
}
#ifdef __APPLE__
// we're not going to be opening this mapping in other processes, so remove the file
shm_unlink(file_mapping_name.c_str());
#endif
// ensure it's the correct size
if (ftruncate(m_shmem_fd, static_cast<off_t>(size)) < 0)
{
Log_ErrorPrintf("ftruncate(%zu) failed: %d", size, errno);
return false;
}
m_size = size;
m_writable = writable;
m_executable = executable;
return true;
#else
return false;
#endif
}
void MemoryArena::Destroy()
{
#if defined(_WIN32)
if (m_file_handle)
{
CloseHandle(m_file_handle);
m_file_handle = nullptr;
}
#elif defined(__linux__) || defined(__FreeBSD__)
if (m_shmem_fd > 0)
{
close(m_shmem_fd);
m_shmem_fd = -1;
}
#endif
}
std::optional<MemoryArena::View> MemoryArena::CreateView(size_t offset, size_t size, bool writable, bool executable,
void* fixed_address)
{
void* base_pointer = CreateViewPtr(offset, size, writable, executable, fixed_address);
if (!base_pointer)
return std::nullopt;
return View(this, base_pointer, offset, size, writable);
}
std::optional<MemoryArena::View> MemoryArena::CreateReservedView(size_t size, void* fixed_address /*= nullptr*/)
{
void* base_pointer = CreateReservedPtr(size, fixed_address);
if (!base_pointer)
return std::nullopt;
return View(this, base_pointer, View::RESERVED_REGION_OFFSET, size, false);
}
void* MemoryArena::CreateViewPtr(size_t offset, size_t size, bool writable, bool executable,
void* fixed_address /*= nullptr*/)
{
void* base_pointer;
#if defined(_WIN32)
const DWORD desired_access = FILE_MAP_READ | (writable ? FILE_MAP_WRITE : 0) | (executable ? FILE_MAP_EXECUTE : 0);
base_pointer =
MapViewOfFileEx(m_file_handle, desired_access, Truncate32(offset >> 32), Truncate32(offset), size, fixed_address);
if (!base_pointer)
return nullptr;
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
const int flags = (fixed_address != nullptr) ? (MAP_SHARED | MAP_FIXED) : MAP_SHARED;
const int prot = PROT_READ | (writable ? PROT_WRITE : 0) | (executable ? PROT_EXEC : 0);
base_pointer = mmap(fixed_address, size, prot, flags, m_shmem_fd, static_cast<off_t>(offset));
if (base_pointer == reinterpret_cast<void*>(-1))
return nullptr;
#else
return nullptr;
#endif
m_num_views.fetch_add(1);
return base_pointer;
}
bool MemoryArena::FlushViewPtr(void* address, size_t size)
{
#if defined(_WIN32)
return FlushViewOfFile(address, size);
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
return (msync(address, size, 0) >= 0);
#else
return false;
#endif
}
bool MemoryArena::ReleaseViewPtr(void* address, size_t size)
{
bool result;
#if defined(_WIN32)
result = static_cast<bool>(UnmapViewOfFile(address));
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
result = (munmap(address, size) >= 0);
#else
result = false;
#endif
if (!result)
{
Log_ErrorPrintf("Failed to unmap previously-created view at %p", address);
return false;
}
const size_t prev_count = m_num_views.fetch_sub(1);
Assert(prev_count > 0);
return true;
}
void* MemoryArena::CreateReservedPtr(size_t size, void* fixed_address /*= nullptr*/)
{
void* base_pointer;
#if defined(_WIN32)
base_pointer = VirtualAlloc(fixed_address, size, MEM_RESERVE, PAGE_NOACCESS);
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
const int flags =
(fixed_address != nullptr) ? (MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED) : (MAP_PRIVATE | MAP_ANONYMOUS);
base_pointer = mmap(fixed_address, size, PROT_NONE, flags, -1, 0);
if (base_pointer == reinterpret_cast<void*>(-1))
return nullptr;
#else
return nullptr;
#endif
m_num_views.fetch_add(1);
return base_pointer;
}
bool MemoryArena::ReleaseReservedPtr(void* address, size_t size)
{
bool result;
#if defined(_WIN32)
result = static_cast<bool>(VirtualFree(address, 0, MEM_RELEASE));
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
result = (munmap(address, size) >= 0);
#else
result = false;
#endif
if (!result)
{
Log_ErrorPrintf("Failed to release previously-created view at %p", address);
return false;
}
const size_t prev_count = m_num_views.fetch_sub(1);
Assert(prev_count > 0);
return true;
}
bool MemoryArena::SetPageProtection(void* address, size_t length, bool readable, bool writable, bool executable)
{
#if defined(_WIN32)
static constexpr DWORD protection_table[2][2][2] = {
{{PAGE_NOACCESS, PAGE_EXECUTE}, {PAGE_WRITECOPY, PAGE_EXECUTE_WRITECOPY}},
{{PAGE_READONLY, PAGE_EXECUTE_READ}, {PAGE_READWRITE, PAGE_EXECUTE_READWRITE}}};
DWORD old_protect;
return static_cast<bool>(
VirtualProtect(address, length, protection_table[readable][writable][executable], &old_protect));
#elif defined(__linux__) || defined(__ANDROID__) || defined(__APPLE__) || defined(__FreeBSD__)
const int prot = (readable ? PROT_READ : 0) | (writable ? PROT_WRITE : 0) | (executable ? PROT_EXEC : 0);
return (mprotect(address, length, prot) >= 0);
#else
return false;
#endif
}
MemoryArena::View::View(MemoryArena* parent, void* base_pointer, size_t arena_offset, size_t mapping_size,
bool writable)
: m_parent(parent), m_base_pointer(base_pointer), m_arena_offset(arena_offset), m_mapping_size(mapping_size),
m_writable(writable)
{
}
MemoryArena::View::View(View&& view)
: m_parent(view.m_parent), m_base_pointer(view.m_base_pointer), m_arena_offset(view.m_arena_offset),
m_mapping_size(view.m_mapping_size)
{
view.m_parent = nullptr;
view.m_base_pointer = nullptr;
view.m_arena_offset = 0;
view.m_mapping_size = 0;
}
MemoryArena::View::~View()
{
if (m_parent)
{
if (m_arena_offset != RESERVED_REGION_OFFSET)
{
if (m_writable && !m_parent->FlushViewPtr(m_base_pointer, m_mapping_size))
Panic("Failed to flush previously-created view");
if (!m_parent->ReleaseViewPtr(m_base_pointer, m_mapping_size))
Panic("Failed to unmap previously-created view");
}
else
{
if (!m_parent->ReleaseReservedPtr(m_base_pointer, m_mapping_size))
Panic("Failed to release previously-created view");
}
}
}
} // namespace Common

View file

@@ -1,77 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "common/types.h"
#include <atomic>
#include <optional>
namespace Common {
class MemoryArena
{
public:
class View
{
public:
enum : size_t
{
RESERVED_REGION_OFFSET = static_cast<size_t>(-1)
};
View(MemoryArena* parent, void* base_pointer, size_t arena_offset, size_t mapping_size, bool writable);
View(View&& view);
~View();
void* GetBasePointer() const { return m_base_pointer; }
size_t GetArenaOffset() const { return m_arena_offset; }
size_t GetMappingSize() const { return m_mapping_size; }
bool IsWritable() const { return m_writable; }
private:
MemoryArena* m_parent;
void* m_base_pointer;
size_t m_arena_offset;
size_t m_mapping_size;
bool m_writable;
};
MemoryArena();
~MemoryArena();
static void* FindBaseAddressForMapping(size_t size);
ALWAYS_INLINE size_t GetSize() const { return m_size; }
ALWAYS_INLINE bool IsWritable() const { return m_writable; }
ALWAYS_INLINE bool IsExecutable() const { return m_executable; }
bool IsValid() const;
bool Create(size_t size, bool writable, bool executable);
void Destroy();
std::optional<View> CreateView(size_t offset, size_t size, bool writable, bool executable,
void* fixed_address = nullptr);
std::optional<View> CreateReservedView(size_t size, void* fixed_address = nullptr);
void* CreateViewPtr(size_t offset, size_t size, bool writable, bool executable, void* fixed_address = nullptr);
bool FlushViewPtr(void* address, size_t size);
bool ReleaseViewPtr(void* address, size_t size);
void* CreateReservedPtr(size_t size, void* fixed_address = nullptr);
bool ReleaseReservedPtr(void* address, size_t size);
static bool SetPageProtection(void* address, size_t length, bool readable, bool writable, bool executable);
private:
#if defined(_WIN32)
void* m_file_handle = nullptr;
#elif defined(__linux__) || defined(ANDROID) || defined(__APPLE__) || defined(__FreeBSD__)
int m_shmem_fd = -1;
#endif
std::atomic_size_t m_num_views{0};
size_t m_size = 0;
bool m_writable = false;
bool m_executable = false;
};
} // namespace Common

View file

@@ -58,7 +58,6 @@
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="pbp_types.h" />
<ClInclude Include="memory_arena.h" />
<ClInclude Include="page_fault_handler.h" />
<ClInclude Include="cd_subchannel_replacement.h" />
<ClInclude Include="pch.h" />
@@ -174,7 +173,6 @@
<ClCompile Include="sdl_input_source.cpp" />
<ClCompile Include="shadergen.cpp" />
<ClCompile Include="shiftjis.cpp" />
<ClCompile Include="memory_arena.cpp" />
<ClCompile Include="page_fault_handler.cpp" />
<ClCompile Include="spirv_compiler.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>

View file

@@ -11,7 +11,6 @@
<ClInclude Include="wav_writer.h" />
<ClInclude Include="cd_image_hasher.h" />
<ClInclude Include="shiftjis.h" />
<ClInclude Include="memory_arena.h" />
<ClInclude Include="page_fault_handler.h" />
<ClInclude Include="pbp_types.h" />
<ClInclude Include="cue_parser.h" />
@@ -87,7 +86,6 @@
<ClCompile Include="cd_image_hasher.cpp" />
<ClCompile Include="cd_image_memory.cpp" />
<ClCompile Include="shiftjis.cpp" />
<ClCompile Include="memory_arena.cpp" />
<ClCompile Include="page_fault_handler.cpp" />
<ClCompile Include="cd_image_ecm.cpp" />
<ClCompile Include="cd_image_mds.cpp" />