CPU/Recompiler: Reserve whole fastmem region to avoid clashes

Connor McLaughlin 2021-04-11 12:42:51 +10:00
parent 8df0496dc1
commit 922d320523
13 changed files with 170 additions and 68 deletions
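
In short: previously only the 2 MiB RAM views were mapped into the 4 GiB fastmem arena, leaving the OS free to place unrelated allocations (a preloaded CD image, for instance) in the gaps, which forced the code cache to be reinitialized whenever the layout could have changed. This commit reserves the entire region up front (MEM_RESERVE/PAGE_NOACCESS on Windows, PROT_NONE on POSIX) and maps the RAM views at fixed addresses inside the reservation. A minimal standalone sketch of the POSIX side of the technique, using hypothetical names and anonymous memory in place of the project's shared arena:

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
  const size_t arena_size = static_cast<size_t>(1) << 32; // full 4 GiB guest space; 64-bit host assumed
  // Reserve: inaccessible and unbacked, but the kernel will not hand any part
  // of this range to malloc or other mappings while the reservation lives.
  void* base = mmap(nullptr, arena_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED)
  {
    std::perror("reserve");
    return 1;
  }

  // Commit a real view at a fixed offset inside the reservation; MAP_FIXED
  // atomically replaces the PROT_NONE pages for just that subrange.
  void* ram = mmap(static_cast<uint8_t*>(base) + 0x80000000u, 0x200000, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  if (ram == MAP_FAILED)
  {
    std::perror("commit");
    return 1;
  }

  munmap(base, arena_size); // releases the committed view and the reservation together
  return 0;
}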


@@ -213,6 +213,15 @@ std::optional<MemoryArena::View> MemoryArena::CreateView(size_t offset, size_t s
return View(this, base_pointer, offset, size, writable);
}
std::optional<MemoryArena::View> MemoryArena::CreateReservedView(size_t size, void* fixed_address /*= nullptr*/)
{
void* base_pointer = CreateReservedPtr(size, fixed_address);
if (!base_pointer)
return std::nullopt;
return View(this, base_pointer, View::RESERVED_REGION_OFFSET, size, false);
}
void* MemoryArena::CreateViewPtr(size_t offset, size_t size, bool writable, bool executable,
void* fixed_address /*= nullptr*/)
{
@@ -276,6 +285,53 @@ bool MemoryArena::ReleaseViewPtr(void* address, size_t size)
return true;
}
void* MemoryArena::CreateReservedPtr(size_t size, void* fixed_address /*= nullptr*/)
{
void* base_pointer;
#if defined(WIN32)
base_pointer = VirtualAlloc(fixed_address, size, MEM_RESERVE, PAGE_NOACCESS);
#elif defined(__linux__)
const int flags =
(fixed_address != nullptr) ? (MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED) : (MAP_PRIVATE | MAP_ANONYMOUS);
base_pointer = mmap64(fixed_address, size, PROT_NONE, flags, -1, 0);
if (base_pointer == reinterpret_cast<void*>(-1))
return nullptr;
#elif defined(__APPLE__) || defined(__FreeBSD__)
const int flags =
(fixed_address != nullptr) ? (MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED) : (MAP_PRIVATE | MAP_ANONYMOUS);
base_pointer = mmap(fixed_address, size, PROT_NONE, flags, -1, 0);
if (base_pointer == reinterpret_cast<void*>(-1))
return nullptr;
#else
return nullptr;
#endif
m_num_views.fetch_add(1);
return base_pointer;
}
bool MemoryArena::ReleaseReservedPtr(void* address, size_t size)
{
bool result;
#if defined(WIN32)
result = static_cast<bool>(VirtualFree(address, 0, MEM_RELEASE));
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
result = (munmap(address, size) >= 0);
#else
result = false;
#endif
if (!result)
{
Log_ErrorPrintf("Failed to release previously-created view at %p", address);
return false;
}
const size_t prev_count = m_num_views.fetch_sub(1);
Assert(prev_count > 0);
return true;
}
bool MemoryArena::SetPageProtection(void* address, size_t length, bool readable, bool writable, bool executable)
{
#if defined(WIN32)
@@ -315,10 +371,18 @@ MemoryArena::View::~View()
{
if (m_parent)
{
if (m_writable && !m_parent->FlushViewPtr(m_base_pointer, m_mapping_size))
Panic("Failed to flush previously-created view");
if (!m_parent->ReleaseViewPtr(m_base_pointer, m_mapping_size))
Panic("Failed to unmap previously-created view");
if (m_arena_offset != RESERVED_REGION_OFFSET)
{
if (m_writable && !m_parent->FlushViewPtr(m_base_pointer, m_mapping_size))
Panic("Failed to flush previously-created view");
if (!m_parent->ReleaseViewPtr(m_base_pointer, m_mapping_size))
Panic("Failed to unmap previously-created view");
}
else
{
if (!m_parent->ReleaseReservedPtr(m_base_pointer, m_mapping_size))
Panic("Failed to release previously-created view");
}
}
}
} // namespace Common


@@ -10,6 +10,11 @@ public:
class View
{
public:
enum : size_t
{
RESERVED_REGION_OFFSET = static_cast<size_t>(-1)
};
View(MemoryArena* parent, void* base_pointer, size_t arena_offset, size_t mapping_size, bool writable);
View(View&& view);
~View();
@@ -39,10 +44,15 @@ public:
std::optional<View> CreateView(size_t offset, size_t size, bool writable, bool executable,
void* fixed_address = nullptr);
std::optional<View> CreateReservedView(size_t size, void* fixed_address = nullptr);
void* CreateViewPtr(size_t offset, size_t size, bool writable, bool executable, void* fixed_address = nullptr);
bool FlushViewPtr(void* address, size_t size);
bool ReleaseViewPtr(void* address, size_t size);
void* CreateReservedPtr(size_t size, void* fixed_address = nullptr);
bool ReleaseReservedPtr(void* address, size_t size);
static bool SetPageProtection(void* address, size_t length, bool readable, bool writable, bool executable);
private:


@@ -95,6 +95,7 @@ static CPUFastmemMode m_fastmem_mode = CPUFastmemMode::Disabled;
#ifdef WITH_MMAP_FASTMEM
static u8* m_fastmem_base = nullptr;
static std::vector<Common::MemoryArena::View> m_fastmem_ram_views;
static std::vector<Common::MemoryArena::View> m_fastmem_reserved_views;
#endif
static u8** m_fastmem_lut = nullptr;
@@ -304,12 +305,18 @@ CPUFastmemMode GetFastmemMode()
return m_fastmem_mode;
}
void UpdateFastmemViews(CPUFastmemMode mode, bool isolate_cache)
u8* GetFastmemBase()
{
return m_fastmem_base;
}
void UpdateFastmemViews(CPUFastmemMode mode)
{
#ifndef WITH_MMAP_FASTMEM
Assert(mode != CPUFastmemMode::MMap);
#else
m_fastmem_ram_views.clear();
m_fastmem_reserved_views.clear();
#endif
m_fastmem_mode = mode;
@@ -323,8 +330,6 @@ void UpdateFastmemViews(CPUFastmemMode mode, bool isolate_cache)
return;
}
Log_DevPrintf("Remapping fastmem area, isolate cache = %s", isolate_cache ? "true" : "false");
#ifdef WITH_MMAP_FASTMEM
if (mode == CPUFastmemMode::MMap)
{
@@ -344,9 +349,9 @@ void UpdateFastmemViews(CPUFastmemMode mode, bool isolate_cache)
CPU::g_state.fastmem_base = m_fastmem_base;
}
auto MapRAM = [](u32 base_address, bool writable) {
auto MapRAM = [](u32 base_address) {
u8* map_address = m_fastmem_base + base_address;
auto view = m_memory_arena.CreateView(MEMORY_ARENA_RAM_OFFSET, RAM_SIZE, writable, false, map_address);
auto view = m_memory_arena.CreateView(MEMORY_ARENA_RAM_OFFSET, RAM_SIZE, true, false, map_address);
if (!view)
{
Log_ErrorPrintf("Failed to map RAM at fastmem area %p (offset 0x%08X)", map_address, RAM_SIZE);
@@ -370,26 +375,39 @@ void UpdateFastmemViews(CPUFastmemMode mode, bool isolate_cache)
m_fastmem_ram_views.push_back(std::move(view.value()));
};
if (!isolate_cache)
{
// KUSEG - cached
MapRAM(0x00000000, true);
// MapRAM(0x00200000, true);
// MapRAM(0x00400000, true);
// MapRAM(0x00600000, true);
auto ReserveRegion = [](u32 start_address, u32 end_address_inclusive) {
Assert(end_address_inclusive >= start_address);
u8* map_address = m_fastmem_base + start_address;
auto view = m_memory_arena.CreateReservedView(end_address_inclusive - start_address + 1, map_address);
if (!view)
{
Log_ErrorPrintf("Failed to map RAM at fastmem area %p (offset 0x%08X)", map_address, RAM_SIZE);
return;
}
// KSEG0 - cached
MapRAM(0x80000000, true);
// MapRAM(0x80200000, true);
// MapRAM(0x80400000, true);
// MapRAM(0x80600000, true);
}
m_fastmem_reserved_views.push_back(std::move(view.value()));
};
// KUSEG - cached
MapRAM(0x00000000);
// MapRAM(0x00200000);
// MapRAM(0x00400000);
// MapRAM(0x00600000);
ReserveRegion(0x00200000, 0x80000000 - 1);
// KSEG0 - cached
MapRAM(0x80000000);
// MapRAM(0x80200000);
// MapRAM(0x80400000);
// MapRAM(0x80600000);
ReserveRegion(0x80200000, 0xA0000000 - 1);
// KSEG1 - uncached
MapRAM(0xA0000000, true);
// MapRAM(0xA0200000, true);
// MapRAM(0xA0400000, true);
// MapRAM(0xA0600000, true);
MapRAM(0xA0000000);
// MapRAM(0xA0200000);
// MapRAM(0xA0400000);
// MapRAM(0xA0600000);
ReserveRegion(0xA0200000, 0xFFFFFFFF);
return;
}
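
For reference, the mmap-mode layout above now tiles the entire 32-bit guest space: a 2 MiB RAM view at the base of each of KUSEG, KSEG0 and KSEG1, with the three gaps between them reserved. The commented-out mirror mappings fall inside those reserved ranges, so mirrored accesses fault and fall back to the slow path rather than hitting foreign memory. A small self-contained check of that arithmetic, with the ranges copied from the calls above:

#include <cassert>
#include <cstdint>

int main()
{
  // Mapped views and reserved gaps, ends inclusive, in address order:
  const uint64_t spans[][2] = {
    {0x00000000, 0x001FFFFF}, // KUSEG RAM view (2 MiB)
    {0x00200000, 0x7FFFFFFF}, // ReserveRegion(0x00200000, 0x80000000 - 1)
    {0x80000000, 0x801FFFFF}, // KSEG0 RAM view
    {0x80200000, 0x9FFFFFFF}, // ReserveRegion(0x80200000, 0xA0000000 - 1)
    {0xA0000000, 0xA01FFFFF}, // KSEG1 RAM view
    {0xA0200000, 0xFFFFFFFF}, // ReserveRegion(0xA0200000, 0xFFFFFFFF)
  };
  uint64_t total = 0;
  for (const auto& span : spans)
    total += span[1] - span[0] + 1;
  assert(total == (uint64_t(1) << 32)); // no hole left for foreign allocations
  return 0;
}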
@@ -408,39 +426,31 @@ void UpdateFastmemViews(CPUFastmemMode mode, bool isolate_cache)
CPU::g_state.fastmem_base = reinterpret_cast<u8*>(m_fastmem_lut);
}
auto MapRAM = [](u32 base_address, bool readable, bool writable) {
if (readable)
auto MapRAM = [](u32 base_address) {
for (u32 address = 0; address < RAM_SIZE; address += HOST_PAGE_SIZE)
{
for (u32 address = 0; address < RAM_SIZE; address += HOST_PAGE_SIZE)
{
SetLUTFastmemPage(base_address + address, &g_ram[address],
!m_ram_code_bits[FastmemAddressToLUTPageIndex(address)]);
}
}
else
{
for (u32 address = 0; address < RAM_SIZE; address += HOST_PAGE_SIZE)
SetLUTFastmemPage(base_address + address, nullptr, false);
SetLUTFastmemPage(base_address + address, &g_ram[address],
!m_ram_code_bits[FastmemAddressToLUTPageIndex(address)]);
}
};
// KUSEG - cached
MapRAM(0x00000000, !isolate_cache, !isolate_cache);
MapRAM(0x00200000, !isolate_cache, !isolate_cache);
MapRAM(0x00400000, !isolate_cache, !isolate_cache);
MapRAM(0x00600000, !isolate_cache, !isolate_cache);
MapRAM(0x00000000);
MapRAM(0x00200000);
MapRAM(0x00400000);
MapRAM(0x00600000);
// KSEG0 - cached
MapRAM(0x80000000, !isolate_cache, !isolate_cache);
MapRAM(0x80200000, !isolate_cache, !isolate_cache);
MapRAM(0x80400000, !isolate_cache, !isolate_cache);
MapRAM(0x80600000, !isolate_cache, !isolate_cache);
MapRAM(0x80000000);
MapRAM(0x80200000);
MapRAM(0x80400000);
MapRAM(0x80600000);
// KSEG1 - uncached
MapRAM(0xA0000000, true, true);
MapRAM(0xA0200000, true, true);
MapRAM(0xA0400000, true, true);
MapRAM(0xA0600000, true, true);
MapRAM(0xA0000000);
MapRAM(0xA0200000);
MapRAM(0xA0400000);
MapRAM(0xA0600000);
}
bool CanUseFastmemForAddress(VirtualMemoryAddress address)


@@ -100,7 +100,8 @@ void Reset();
bool DoState(StateWrapper& sw);
CPUFastmemMode GetFastmemMode();
void UpdateFastmemViews(CPUFastmemMode mode, bool isolate_cache);
u8* GetFastmemBase();
void UpdateFastmemViews(CPUFastmemMode mode);
bool CanUseFastmemForAddress(VirtualMemoryAddress address);
void SetExpansionROM(std::vector<u8> data);


@@ -784,14 +784,14 @@ bool InitializeFastmem()
return false;
}
Bus::UpdateFastmemViews(mode, g_state.cop0_regs.sr.Isc);
Bus::UpdateFastmemViews(mode);
return true;
}
void ShutdownFastmem()
{
Common::PageFaultHandler::RemoveHandler(&s_host_code_map);
Bus::UpdateFastmemViews(CPUFastmemMode::Disabled, false);
Bus::UpdateFastmemViews(CPUFastmemMode::Disabled);
}
#ifdef WITH_MMAP_FASTMEM


@@ -1975,7 +1975,10 @@ bool InterpretInstructionPGXP()
void UpdateFastmemMapping()
{
Bus::UpdateFastmemViews(Bus::GetFastmemMode(), g_state.cop0_regs.sr.Isc);
if (g_state.cop0_regs.sr.Isc)
g_state.fastmem_base = nullptr;
else
g_state.fastmem_base = Bus::GetFastmemBase();
}
} // namespace Recompiler::Thunks
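
A note on the thunk above: a COP0 status write that toggles the Isc (cache isolation) bit used to trigger a full remap of the fastmem views; the new code just swaps the cached base pointer. A hedged restatement in plain C++ (State is a stand-in struct here, not the real one):

#include <cstdint>

struct State { uint8_t* fastmem_base; };

// Mirrors the logic of Recompiler::Thunks::UpdateFastmemMapping.
void UpdateFastmemBase(State& st, bool cache_isolated, uint8_t* real_base)
{
  // With the cache isolated, loads and stores must not reach real RAM; a null
  // base steers every fastmem access into the fault-and-backpatch slow path.
  st.fastmem_base = cache_isolated ? nullptr : real_base;
}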


@@ -2476,6 +2476,7 @@ bool CodeGenerator::Compile_cop0(const CodeBlockInstruction& cbi)
EmitBranchIfBitClear(old_value.host_reg, RegSize_32, 16, &skip_fastmem_update);
m_register_cache.InhibitAllocation();
EmitFunctionCall(nullptr, &Thunks::UpdateFastmemMapping, m_register_cache.GetCPUPtr());
EmitUpdateMembasePointer();
EmitBindLabel(&skip_fastmem_update);
m_register_cache.UninhibitAllocation();
}
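
The new EmitUpdateMembasePointer() call is needed because the thunk invoked just above may change g_state.fastmem_base, while the generated code keeps that base cached in a host register; without the reload, the rest of the block would keep addressing through the stale base.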


@@ -93,6 +93,7 @@ public:
void EmitStoreGuestMemoryFastmem(const CodeBlockInstruction& cbi, const Value& address, const Value& value);
void EmitStoreGuestMemorySlowmem(const CodeBlockInstruction& cbi, const Value& address, const Value& value,
bool in_far_code);
void EmitUpdateMembasePointer();
// Unconditional branch to pointer. May allocate a scratch register.
void EmitBranch(const void* address, bool allow_scratch = true);


@@ -1170,6 +1170,22 @@ Value CodeGenerator::GetFastmemStoreBase()
return val;
}
void CodeGenerator::EmitUpdateMembasePointer()
{
if (m_fastmem_load_base_in_register)
{
Value val = Value::FromHostReg(&m_register_cache, RARG4, RegSize_32);
m_emit->ldr(GetHostReg32(val), a32::MemOperand(GetCPUPtrReg(), offsetof(CPU::State, fastmem_base)));
}
if (m_fastmem_store_base_in_register)
{
Value val = Value::FromHostReg(&m_register_cache, RARG3, RegSize_32);
m_emit->ldr(GetHostReg32(val), a32::MemOperand(GetCPUPtrReg(), offsetof(CPU::State, fastmem_base)));
m_emit->add(GetHostReg32(val), GetHostReg32(val), sizeof(u32*) * Bus::FASTMEM_LUT_NUM_PAGES);
}
}
void CodeGenerator::EmitLoadGuestRAMFastmem(const Value& address, RegSize size, Value& result)
{
Value fastmem_base = GetFastmemLoadBase();
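
(The extra add of sizeof(u32*) * Bus::FASTMEM_LUT_NUM_PAGES in the store path above implies the LUT is one contiguous allocation, load-pointer table first, store-pointer table immediately after; since AArch32 caches the load and store bases in separate registers, each has to be rederived from the freshly loaded fastmem_base.)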


@@ -1728,6 +1728,11 @@ void CodeGenerator::EmitStoreGuestMemorySlowmem(const CodeBlockInstruction& cbi,
}
}
void CodeGenerator::EmitUpdateMembasePointer()
{
m_emit->Ldr(GetFastmemBasePtrReg(), a64::MemOperand(GetCPUPtrReg(), offsetof(State, fastmem_base)));
}
bool CodeGenerator::BackpatchLoadStore(const LoadStoreBackpatchInfo& lbi)
{
Log_DevPrintf("Backpatching %p (guest PC 0x%08X) to slowmem at %p", lbi.host_pc, lbi.guest_pc, lbi.host_slowmem_pc);


@@ -2313,6 +2313,11 @@ void CodeGenerator::EmitStoreGuestMemorySlowmem(const CodeBlockInstruction& cbi,
}
}
void CodeGenerator::EmitUpdateMembasePointer()
{
m_emit->mov(GetFastmemBasePtrReg(), m_emit->qword[GetCPUPtrReg() + offsetof(CPU::State, fastmem_base)]);
}
bool CodeGenerator::BackpatchLoadStore(const LoadStoreBackpatchInfo& lbi)
{
Log_DevPrintf("Backpatching %p (guest PC 0x%08X) to slowmem", lbi.host_pc, lbi.guest_pc);


@@ -801,9 +801,6 @@ void HostInterface::CheckForSettingsChanges(const Settings& old_settings)
g_settings.rewind_enable != old_settings.rewind_enable ||
g_settings.runahead_frames != old_settings.runahead_frames)
{
if (g_settings.IsUsingCodeCache())
CPU::CodeCache::Reinitialize();
g_gpu->UpdateSettings();
}


@@ -679,9 +679,6 @@ bool RecreateGPU(GPURenderer renderer, bool update_display /* = true*/)
return false;
}
// reinitialize the code cache because the address space could change
CPU::CodeCache::Reinitialize();
if (state_valid)
{
state_stream->SeekAbsolute(0);
@@ -1940,10 +1937,6 @@ bool InsertMedia(const char* path)
UpdateMemoryCards();
}
// reinitialize recompiler, because especially with preloading this might overlap the fastmem area
if (g_settings.IsUsingCodeCache())
CPU::CodeCache::Reinitialize();
ClearMemorySaveStates();
return true;
}
@@ -2069,10 +2062,6 @@ bool SwitchMediaSubImage(u32 index)
image->GetSubImageMetadata(index, "title").c_str(), index + 1u, image->GetMetadata("title").c_str());
g_cdrom.InsertMedia(std::move(image));
// reinitialize recompiler, because especially with preloading this might overlap the fastmem area
if (g_settings.IsUsingCodeCache())
CPU::CodeCache::Reinitialize();
ClearMemorySaveStates();
return true;
}
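
The deletions in these last hunks are the payoff of the up-front reservation: a preloaded CD image or a recreated GPU can no longer be allocated inside the fastmem arena, so the code cache no longer has to be reinitialized on settings changes, media insertion, or sub-image switches.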