CPU: Refactor execution mode switching

Fixes single-stepping breaking in branch delay slots with the recompiler.
Simplifies initialization.
Removes multiple sources of truth for fastmem.
Stenzek 2024-09-06 20:00:30 +10:00
parent f94a15126a
commit 70fd457cc4
8 changed files with 222 additions and 224 deletions
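
The fastmem half of the refactor replaces the single Bus::UpdateFastmemViews(CPUFastmemMode mode) entry point (and the cached s_fastmem_mode copy) with map/unmap/remap functions that always read the mode from g_settings.cpu_fastmem_mode. A minimal sketch of the resulting API, condensed from the Bus hunks below (same function names; bodies simplified):

    // Bus owns the fastmem views; the mode is read from g_settings, never cached.
    void Bus::Initialize()
    {
      SetRAMSize(g_settings.enable_8mb_ram);
      Reset();
      MapFastmemViews();             // maps according to g_settings.cpu_fastmem_mode
    }

    void Bus::Shutdown()
    {
      UnmapFastmemViews();           // unmaps the mmap arena views (see UnmapFastmemViews below)
      // ... remaining teardown unchanged ...
    }

    void Bus::RemapFastmemViews()    // called whenever the mode or the mapped RAM size changes
    {
      UnmapFastmemViews();
      MapFastmemViews();
    }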

View file

@ -143,8 +143,6 @@ static RAM_SIZE_REG s_RAM_SIZE = {};
static std::string s_tty_line_buffer;
static CPUFastmemMode s_fastmem_mode = CPUFastmemMode::Disabled;
#ifdef ENABLE_MMAP_FASTMEM
static SharedMemoryMappingArea s_fastmem_arena;
static std::vector<std::pair<u8*, size_t>> s_fastmem_ram_views;
@ -161,6 +159,8 @@ static void SetRAMSize(bool enable_8mb_ram);
static std::tuple<TickCount, TickCount, TickCount> CalculateMemoryTiming(MEMDELAY mem_delay, COMDELAY common_delay);
static void RecalculateMemoryTimings();
static void MapFastmemViews();
static void UnmapFastmemViews();
static u8* GetLUTFastmemPointer(u32 address, u8* ram_ptr);
static void SetRAMPageWritable(u32 page_index, bool writable);
@ -252,6 +252,7 @@ bool Bus::AllocateMemoryMap(bool export_shared_memory, Error* error)
VERBOSE_LOG("LUTs are mapped at {}.", static_cast<void*>(g_memory_handlers));
g_memory_handlers_isc = g_memory_handlers + MEMORY_LUT_SLOTS;
g_ram_mapped_size = RAM_8MB_SIZE;
SetHandlers();
#ifndef __ANDROID__
@ -348,7 +349,7 @@ bool Bus::ReallocateMemoryMap(bool export_shared_memory, Error* error)
if (System::IsValid())
{
CPU::CodeCache::InvalidateAllRAMBlocks();
UpdateFastmemViews(CPUFastmemMode::Disabled);
UnmapFastmemViews();
ram_backup.resize(RAM_8MB_SIZE);
std::memcpy(ram_backup.data(), g_unprotected_ram, RAM_8MB_SIZE);
@ -365,7 +366,7 @@ bool Bus::ReallocateMemoryMap(bool export_shared_memory, Error* error)
UpdateMappedRAMSize();
std::memcpy(g_unprotected_ram, ram_backup.data(), RAM_8MB_SIZE);
std::memcpy(g_bios, bios_backup.data(), BIOS_SIZE);
UpdateFastmemViews(g_settings.cpu_fastmem_mode);
MapFastmemViews();
}
return true;
@ -380,11 +381,10 @@ void Bus::CleanupMemoryMap()
#endif
}
bool Bus::Initialize()
void Bus::Initialize()
{
SetRAMSize(g_settings.enable_8mb_ram);
Reset();
return true;
MapFastmemViews();
}
void Bus::SetRAMSize(bool enable_8mb_ram)
@ -400,7 +400,7 @@ void Bus::SetRAMSize(bool enable_8mb_ram)
void Bus::Shutdown()
{
UpdateFastmemViews(CPUFastmemMode::Disabled);
UnmapFastmemViews();
CPU::g_state.fastmem_base = nullptr;
g_ram_mask = 0;
@ -439,8 +439,7 @@ bool Bus::DoState(StateWrapper& sw)
{
const bool using_8mb_ram = (ram_size == RAM_8MB_SIZE);
SetRAMSize(using_8mb_ram);
UpdateFastmemViews(s_fastmem_mode);
CPU::UpdateMemoryPointers();
RemapFastmemViews();
}
sw.Do(&g_exp1_access_time);
@ -526,18 +525,13 @@ void Bus::RecalculateMemoryTimings()
g_spu_access_time[2] + 1);
}
CPUFastmemMode Bus::GetFastmemMode()
{
return s_fastmem_mode;
}
void* Bus::GetFastmemBase(bool isc)
{
#ifdef ENABLE_MMAP_FASTMEM
if (s_fastmem_mode == CPUFastmemMode::MMap)
if (g_settings.cpu_fastmem_mode == CPUFastmemMode::MMap)
return isc ? nullptr : s_fastmem_arena.BasePointer();
#endif
if (s_fastmem_mode == CPUFastmemMode::LUT)
if (g_settings.cpu_fastmem_mode == CPUFastmemMode::LUT)
return reinterpret_cast<u8*>(s_fastmem_lut + (isc ? (FASTMEM_LUT_SIZE * sizeof(void*)) : 0));
return nullptr;
@ -548,24 +542,16 @@ u8* Bus::GetLUTFastmemPointer(u32 address, u8* ram_ptr)
return ram_ptr - address;
}
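
For context on GetLUTFastmemPointer above: each LUT slot stores the host RAM pointer pre-biased by the guest page's base address, so resolving a fastmem access takes one table load plus one add. A hedged illustration of the lookup this enables (the helper name is hypothetical; the table and constants are the ones used in this commit):

    // Illustrative only -- not code from this commit.
    // Each slot holds (host_ram_pointer - guest_page_base), so adding the full,
    // untranslated guest address yields the host pointer directly.
    u8* LUTFastmemHostPointer(u32 guest_address)
    {
      u8* const biased_base = s_fastmem_lut[guest_address >> FASTMEM_LUT_PAGE_SHIFT];
      return biased_base + guest_address;
    }

Slots that are not backed by RAM are filled with a null pointer biased the same way (see the initialization loop in MapFastmemViews below), which keeps the arithmetic uniform for every page.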
void Bus::UpdateFastmemViews(CPUFastmemMode mode)
void Bus::MapFastmemViews()
{
#ifndef ENABLE_MMAP_FASTMEM
Assert(mode != CPUFastmemMode::MMap);
#else
for (const auto& it : s_fastmem_ram_views)
s_fastmem_arena.Unmap(it.first, it.second);
s_fastmem_ram_views.clear();
#endif
Assert(s_fastmem_ram_views.empty());
s_fastmem_mode = mode;
if (mode == CPUFastmemMode::Disabled)
return;
#ifdef ENABLE_MMAP_FASTMEM
const CPUFastmemMode mode = g_settings.cpu_fastmem_mode;
if (mode == CPUFastmemMode::MMap)
{
#ifdef ENABLE_MMAP_FASTMEM
auto MapRAM = [](u32 base_address) {
// No need to check mapped RAM range here, we only ever fastmem map the first 2MB.
u8* map_address = s_fastmem_arena.BasePointer() + base_address;
if (!s_fastmem_arena.Map(s_shmem_handle, 0, map_address, g_ram_size, PageProtect::ReadWrite)) [[unlikely]]
{
@ -600,11 +586,12 @@ void Bus::UpdateFastmemViews(CPUFastmemMode mode)
// KSEG1 - uncached
MapRAM(0xA0000000);
return;
}
#else
Panic("MMap fastmem should not be selected on this platform.");
#endif
}
else if (mode == CPUFastmemMode::LUT)
{
if (!s_fastmem_lut)
{
s_fastmem_lut = static_cast<u8**>(std::malloc(sizeof(u8*) * FASTMEM_LUT_SLOTS));
@ -618,6 +605,10 @@ void Bus::UpdateFastmemViews(CPUFastmemMode mode)
s_fastmem_lut[i] = GetLUTFastmemPointer(i << FASTMEM_LUT_PAGE_SHIFT, nullptr);
auto MapRAM = [](u32 base_address) {
// Don't map RAM that isn't accessible.
if ((base_address & CPU::PHYSICAL_MEMORY_ADDRESS_MASK) >= g_ram_mapped_size)
return;
u8* ram_ptr = g_ram + (base_address & g_ram_mask);
for (u32 address = 0; address < g_ram_size; address += FASTMEM_LUT_PAGE_SIZE)
{
@ -646,11 +637,29 @@ void Bus::UpdateFastmemViews(CPUFastmemMode mode)
MapRAM(0xA0600000);
}
CPU::UpdateMemoryPointers();
}
void Bus::UnmapFastmemViews()
{
#ifdef ENABLE_MMAP_FASTMEM
for (const auto& it : s_fastmem_ram_views)
s_fastmem_arena.Unmap(it.first, it.second);
s_fastmem_ram_views.clear();
#endif
}
void Bus::RemapFastmemViews()
{
UnmapFastmemViews();
MapFastmemViews();
}
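
For reference, the places that now trigger a remap via RemapFastmemViews above, gathered from the other hunks in this commit (surrounding code omitted):

    // 1. Loading a save state with a different RAM size (Bus::DoState, earlier hunk):
    SetRAMSize(using_8mb_ram);
    RemapFastmemViews();

    // 2. The RAM_SIZE register's memory window changing (Bus::UpdateMappedRAMSize, below):
    if (prev_mapped_size != g_ram_mapped_size)
      RemapFastmemViews();

    // 3. The fastmem mode setting changing (System::CheckForSettingsChanges, later file):
    if (g_settings.cpu_fastmem_mode != old_settings.cpu_fastmem_mode)
      Bus::RemapFastmemViews();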
bool Bus::CanUseFastmemForAddress(VirtualMemoryAddress address)
{
const PhysicalMemoryAddress paddr = address & CPU::PHYSICAL_MEMORY_ADDRESS_MASK;
switch (s_fastmem_mode)
switch (g_settings.cpu_fastmem_mode)
{
#ifdef ENABLE_MMAP_FASTMEM
case CPUFastmemMode::MMap:
@ -706,7 +715,7 @@ void Bus::SetRAMPageWritable(u32 page_index, bool writable)
}
#ifdef ENABLE_MMAP_FASTMEM
if (s_fastmem_mode == CPUFastmemMode::MMap)
if (g_settings.cpu_fastmem_mode == CPUFastmemMode::MMap)
{
const PageProtect protect = writable ? PageProtect::ReadWrite : PageProtect::ReadOnly;
@ -734,7 +743,7 @@ void Bus::ClearRAMCodePageFlags()
ERROR_LOG("Failed to restore RAM protection to read-write.");
#ifdef ENABLE_MMAP_FASTMEM
if (s_fastmem_mode == CPUFastmemMode::MMap)
if (g_settings.cpu_fastmem_mode == CPUFastmemMode::MMap)
{
// unprotect fastmem pages
for (const auto& it : s_fastmem_ram_views)
@ -1041,7 +1050,8 @@ bool Bus::SideloadEXE(const std::string& path, Error* error)
}
if (okay)
{
const std::optional<DynamicHeapArray<u8>> exe_data = FileSystem::ReadBinaryFile(System::GetExeOverride().c_str(), error);
const std::optional<DynamicHeapArray<u8>> exe_data =
FileSystem::ReadBinaryFile(System::GetExeOverride().c_str(), error);
okay = (exe_data.has_value() && InjectExecutable(exe_data->cspan(), true, error));
if (!okay)
Error::AddPrefixFmt(error, "Failed to load {}: ", Path::GetFileName(path));
@ -1961,6 +1971,8 @@ void Bus::SetHandlers()
void Bus::UpdateMappedRAMSize()
{
const u32 prev_mapped_size = g_ram_mapped_size;
switch (s_RAM_SIZE.memory_window)
{
case 4: // 2MB memory + 6MB unmapped
@ -2003,6 +2015,10 @@ void Bus::UpdateMappedRAMSize()
}
break;
}
// Fastmem needs to be remapped.
if (prev_mapped_size != g_ram_mapped_size)
RemapFastmemViews();
}
#undef SET

View file

@ -127,7 +127,7 @@ bool ReallocateMemoryMap(bool export_shared_memory, Error* error);
/// Should be called when the process crashes, to avoid leaking.
void CleanupMemoryMap();
bool Initialize();
void Initialize();
void Shutdown();
void Reset();
bool DoState(StateWrapper& sw);
@ -144,9 +144,8 @@ ALWAYS_INLINE_RELEASE static FP* OffsetHandlerArray(void** handlers, MemoryAcces
(((static_cast<size_t>(size) * 2) + static_cast<size_t>(type)) * MEMORY_LUT_SIZE));
}
CPUFastmemMode GetFastmemMode();
void* GetFastmemBase(bool isc);
void UpdateFastmemViews(CPUFastmemMode mode);
void RemapFastmemViews();
bool CanUseFastmemForAddress(VirtualMemoryAddress address);
void SetExpansionROM(std::vector<u8> data);

View file

@ -102,7 +102,6 @@ static void BacklinkBlocks(u32 pc, const void* dst);
static void UnlinkBlockExits(Block* block);
static void ResetCodeBuffer();
static void ClearASMFunctions();
static void CompileASMFunctions();
static bool CompileBlock(Block* block);
static PageFaultHandler::HandlerResult HandleFastmemException(void* exception_pc, void* fault_address, bool is_write);
@ -174,7 +173,7 @@ bool CPU::CodeCache::IsUsingAnyRecompiler()
bool CPU::CodeCache::IsUsingFastmem()
{
return IsUsingAnyRecompiler() && g_settings.cpu_fastmem_mode != CPUFastmemMode::Disabled;
return g_settings.cpu_fastmem_mode != CPUFastmemMode::Disabled;
}
bool CPU::CodeCache::ProcessStartup(Error* error)
@ -214,37 +213,12 @@ void CPU::CodeCache::ProcessShutdown()
#endif
}
void CPU::CodeCache::Initialize()
{
Assert(s_blocks.empty());
if (IsUsingAnyRecompiler())
{
ResetCodeBuffer();
CompileASMFunctions();
ResetCodeLUT();
}
Bus::UpdateFastmemViews(IsUsingAnyRecompiler() ? g_settings.cpu_fastmem_mode : CPUFastmemMode::Disabled);
CPU::UpdateMemoryPointers();
}
void CPU::CodeCache::Shutdown()
{
ClearBlocks();
ClearASMFunctions();
Bus::UpdateFastmemViews(CPUFastmemMode::Disabled);
CPU::UpdateMemoryPointers();
}
void CPU::CodeCache::Reset()
{
ClearBlocks();
if (IsUsingAnyRecompiler())
{
ClearASMFunctions();
ResetCodeBuffer();
CompileASMFunctions();
ResetCodeLUT();
@ -1560,25 +1534,14 @@ const void* CPU::CodeCache::GetInterpretUncachedBlockFunction()
}
}
void CPU::CodeCache::ClearASMFunctions()
void CPU::CodeCache::CompileASMFunctions()
{
g_enter_recompiler = nullptr;
g_compile_or_revalidate_block = nullptr;
g_check_events_and_dispatch = nullptr;
g_run_events_and_dispatch = nullptr;
g_dispatcher = nullptr;
g_interpret_block = nullptr;
g_discard_and_recompile_block = nullptr;
MemMap::BeginCodeWrite();
#ifdef _DEBUG
s_total_instructions_compiled = 0;
s_total_host_instructions_emitted = 0;
#endif
}
void CPU::CodeCache::CompileASMFunctions()
{
MemMap::BeginCodeWrite();
const u32 asm_size = EmitASMFunctions(GetFreeCodePointer(), GetFreeCodeSpace());

View file

@ -22,12 +22,6 @@ bool ProcessStartup(Error* error);
/// Frees resources, call once at shutdown.
void ProcessShutdown();
/// Initializes resources for the system.
void Initialize();
/// Frees resources used by the system.
void Shutdown();
/// Runs the system.
[[noreturn]] void Execute();

View file

@ -29,7 +29,14 @@
Log_SetChannel(CPU::Core);
namespace CPU {
static bool ShouldUseInterpreter();
enum class ExecutionBreakType
{
None,
ExecuteOneInstruction,
SingleStep,
Breakpoint,
};
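
The enumerator order above is significant: the debug dispatcher pauses the system only for SingleStep and Breakpoint, while ExecuteOneInstruction leaves the interpreter without pausing so the mode-switch code can continue. A condensed sketch of that per-instruction check, simplified from the dispatcher hunk further down:

    if (s_break_type != ExecutionBreakType::None) [[unlikely]]
    {
      const ExecutionBreakType break_type = std::exchange(s_break_type, ExecutionBreakType::None);
      if (break_type >= ExecutionBreakType::SingleStep)   // SingleStep or Breakpoint
        System::PauseSystem(true);                        // ExecuteOneInstruction skips the pause
      UpdateDebugDispatcherFlag();
      ExitExecution();                                    // unwinds back to the fastjmp_set() caller
    }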
static void UpdateLoadDelay();
static void Branch(u32 target);
static void FlushLoadDelay();
@ -68,6 +75,8 @@ static void LogInstruction(u32 bits, u32 pc, bool regs);
static void HandleWriteSyscall();
static void HandlePutcSyscall();
static void HandlePutsSyscall();
static void CheckForExecutionModeChange();
[[noreturn]] static void ExecuteInterpreter();
template<PGXPMode pgxp_mode, bool debug>
@ -104,8 +113,8 @@ static constexpr u32 INVALID_BREAKPOINT_PC = UINT32_C(0xFFFFFFFF);
static std::array<std::vector<Breakpoint>, static_cast<u32>(BreakpointType::Count)> s_breakpoints;
static u32 s_breakpoint_counter = 1;
static u32 s_last_breakpoint_check_pc = INVALID_BREAKPOINT_PC;
static bool s_single_step = false;
static bool s_break_after_instruction = false;
static CPUExecutionMode s_current_execution_mode = CPUExecutionMode::Interpreter;
static ExecutionBreakType s_break_type = ExecutionBreakType::None;
} // namespace CPU
bool CPU::IsTraceEnabled()
@ -163,14 +172,14 @@ void CPU::Initialize()
// From nocash spec.
g_state.cop0_regs.PRID = UINT32_C(0x00000002);
s_current_execution_mode = g_settings.cpu_execution_mode;
g_state.using_debug_dispatcher = false;
g_state.using_interpreter = ShouldUseInterpreter();
g_state.using_interpreter = (s_current_execution_mode == CPUExecutionMode::Interpreter);
for (BreakpointList& bps : s_breakpoints)
bps.clear();
s_breakpoint_counter = 1;
s_last_breakpoint_check_pc = INVALID_BREAKPOINT_PC;
s_single_step = false;
s_break_after_instruction = false;
s_break_type = ExecutionBreakType::None;
UpdateMemoryPointers();
UpdateDebugDispatcherFlag();
@ -280,33 +289,23 @@ bool CPU::DoState(StateWrapper& sw)
sw.Do(&g_state.icache_data);
}
bool using_interpreter = g_state.using_interpreter;
sw.DoEx(&using_interpreter, 67, g_state.using_interpreter);
sw.DoEx(&g_state.using_interpreter, 67, g_state.using_interpreter);
if (sw.IsReading())
{
// Since the recompilers do not use npc/next_instruction, and the icache emulation doesn't actually fill the data,
// only the tags, if we save state with the recompiler, then load state with the interpreter, we're most likely
// going to crash. Clear both in the case that we are switching.
if (using_interpreter != g_state.using_interpreter)
{
WARNING_LOG("Current execution mode does not match save state. Resetting icache state.");
ExecutionModeChanged();
}
UpdateMemoryPointers();
// Trigger an execution mode change if the state was/wasn't using the interpreter.
s_current_execution_mode =
g_state.using_interpreter ?
CPUExecutionMode::Interpreter :
((g_settings.cpu_execution_mode == CPUExecutionMode::Interpreter) ? CPUExecutionMode::CachedInterpreter :
g_settings.cpu_execution_mode);
g_state.gte_completion_tick = 0;
UpdateMemoryPointers();
}
return !sw.HasError();
}
ALWAYS_INLINE_RELEASE bool CPU::ShouldUseInterpreter()
{
// Currently, any breakpoints require the interpreter.
return (g_settings.cpu_execution_mode == CPUExecutionMode::Interpreter || g_state.using_debug_dispatcher);
}
void CPU::SetPC(u32 new_pc)
{
DebugAssert(Common::IsAlignedPow2(new_pc, 4));
@ -1994,9 +1993,14 @@ void CPU::DispatchInterrupt()
TimingEvents::UpdateCPUDowncount();
}
CPUExecutionMode CPU::GetCurrentExecutionMode()
{
return s_current_execution_mode;
}
bool CPU::UpdateDebugDispatcherFlag()
{
const bool has_any_breakpoints = HasAnyBreakpoints() || s_single_step;
const bool has_any_breakpoints = (HasAnyBreakpoints() || s_break_type == ExecutionBreakType::SingleStep);
const auto& dcic = g_state.cop0_regs.dcic;
const bool has_cop0_breakpoints = dcic.super_master_enable_1 && dcic.super_master_enable_2 &&
@ -2010,14 +2014,75 @@ bool CPU::UpdateDebugDispatcherFlag()
DEV_LOG("{} debug dispatcher", use_debug_dispatcher ? "Now using" : "No longer using");
g_state.using_debug_dispatcher = use_debug_dispatcher;
// Switching to interpreter?
if (g_state.using_interpreter != ShouldUseInterpreter())
ExecutionModeChanged();
return true;
}
void CPU::CheckForExecutionModeChange()
{
// Currently, any breakpoints require the interpreter.
const CPUExecutionMode new_execution_mode =
(g_state.using_debug_dispatcher ? CPUExecutionMode::Interpreter : g_settings.cpu_execution_mode);
if (s_current_execution_mode == new_execution_mode) [[likely]]
{
DebugAssert(g_state.using_interpreter == (s_current_execution_mode == CPUExecutionMode::Interpreter));
return;
}
WARNING_LOG("Execution mode changed from {} to {}", Settings::GetCPUExecutionModeName(s_current_execution_mode),
Settings::GetCPUExecutionModeName(new_execution_mode));
const bool new_interpreter = (new_execution_mode == CPUExecutionMode::Interpreter);
if (g_state.using_interpreter != new_interpreter)
{
// Have to clear out the icache too, only the tags are valid in the recs.
ClearICache();
g_state.bus_error = false;
if (new_interpreter)
{
// Switching to interpreter. Set up the pipeline.
// We'll also need to fetch the next instruction to execute.
if (!SafeReadInstruction(g_state.pc, &g_state.next_instruction.bits)) [[unlikely]]
{
g_state.next_instruction.bits = 0;
ERROR_LOG("Failed to read current instruction from 0x{:08X}", g_state.pc);
}
g_state.npc = g_state.pc + sizeof(Instruction);
}
else
{
// Switching to recompiler. We can't start a rec block in a branch delay slot, so we need to execute the
// instruction if we're currently in one.
if (g_state.next_instruction_is_branch_delay_slot) [[unlikely]]
{
while (g_state.next_instruction_is_branch_delay_slot)
{
WARNING_LOG("EXECMODE: Executing instruction at 0x{:08X} because it is in a branch delay slot.", g_state.pc);
if (fastjmp_set(&s_jmp_buf) == 0)
{
s_break_type = ExecutionBreakType::ExecuteOneInstruction;
g_state.using_debug_dispatcher = true;
ExecuteInterpreter();
}
}
// Need to restart the whole process again, because the branch slot could change the debug flag.
UpdateDebugDispatcherFlag();
CheckForExecutionModeChange();
return;
}
}
}
s_current_execution_mode = new_execution_mode;
g_state.using_interpreter = new_interpreter;
// Wipe out code cache when switching modes.
if (!new_interpreter)
CPU::CodeCache::Reset();
}
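
Stated compactly, the function above maintains a single derived truth for the execution mode: anything that requires the debug dispatcher (breakpoints, single-stepping, COP0 DCIC breakpoints, and similar debug features) forces the interpreter, otherwise the configured mode is used. A hedged restatement as a hypothetical helper (not part of the diff; the fields and DebugAssert are the ones used above):

    // Illustrative only: the invariant that holds after CheckForExecutionModeChange().
    void AssertExecutionModeInvariant()
    {
      const CPUExecutionMode expected =
        g_state.using_debug_dispatcher ? CPUExecutionMode::Interpreter : g_settings.cpu_execution_mode;
      DebugAssert(s_current_execution_mode == expected);
      DebugAssert(g_state.using_interpreter == (s_current_execution_mode == CPUExecutionMode::Interpreter));
    }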
[[noreturn]] void CPU::ExitExecution()
{
// can't exit while running events without messing things up
@ -2269,20 +2334,11 @@ ALWAYS_INLINE_RELEASE bool CPU::CheckBreakpointList(BreakpointType type, Virtual
ALWAYS_INLINE_RELEASE void CPU::ExecutionBreakpointCheck()
{
if (s_single_step) [[unlikely]]
{
// single step ignores breakpoints, since it stops anyway
s_single_step = false;
s_break_after_instruction = true;
Host::ReportDebuggerMessage(fmt::format("Stepped to 0x{:08X}.", g_state.npc));
return;
}
if (s_breakpoints[static_cast<u32>(BreakpointType::Execute)].empty()) [[likely]]
return;
const u32 pc = g_state.pc;
if (pc == s_last_breakpoint_check_pc) [[unlikely]]
if (pc == s_last_breakpoint_check_pc || s_break_type == ExecutionBreakType::ExecuteOneInstruction) [[unlikely]]
{
// we don't want to trigger the same breakpoint which just paused us repeatedly.
return;
@ -2292,7 +2348,7 @@ ALWAYS_INLINE_RELEASE void CPU::ExecutionBreakpointCheck()
if (CheckBreakpointList(BreakpointType::Execute, pc)) [[unlikely]]
{
s_single_step = false;
s_break_type = ExecutionBreakType::None;
ExitExecution();
}
}
@ -2301,8 +2357,8 @@ template<MemoryAccessType type>
ALWAYS_INLINE_RELEASE void CPU::MemoryBreakpointCheck(VirtualMemoryAddress address)
{
const BreakpointType bptype = (type == MemoryAccessType::Read) ? BreakpointType::Read : BreakpointType::Write;
if (CheckBreakpointList(bptype, address))
s_break_after_instruction = true;
if (CheckBreakpointList(bptype, address)) [[unlikely]]
s_break_type = ExecutionBreakType::Breakpoint;
}
template<PGXPMode pgxp_mode, bool debug>
@ -2364,10 +2420,12 @@ template<PGXPMode pgxp_mode, bool debug>
if constexpr (debug)
{
if (s_break_after_instruction)
if (s_break_type != ExecutionBreakType::None) [[unlikely]]
{
s_break_after_instruction = false;
const ExecutionBreakType break_type = std::exchange(s_break_type, ExecutionBreakType::None);
if (break_type >= ExecutionBreakType::SingleStep)
System::PauseSystem(true);
UpdateDebugDispatcherFlag();
ExitExecution();
}
@ -2412,6 +2470,8 @@ void CPU::ExecuteInterpreter()
void CPU::Execute()
{
CheckForExecutionModeChange();
if (fastjmp_set(&s_jmp_buf) != 0)
return;
@ -2423,7 +2483,7 @@ void CPU::Execute()
void CPU::SetSingleStepFlag()
{
s_single_step = true;
s_break_type = ExecutionBreakType::SingleStep;
if (UpdateDebugDispatcherFlag())
System::InterruptExecution();
}
@ -2571,41 +2631,6 @@ void CPU::UpdateMemoryPointers()
g_state.fastmem_base = Bus::GetFastmemBase(g_state.cop0_regs.sr.Isc);
}
void CPU::ExecutionModeChanged()
{
const bool prev_interpreter = g_state.using_interpreter;
UpdateDebugDispatcherFlag();
// Clear out bus errors in case only memory exceptions are toggled on.
g_state.bus_error = false;
// Have to clear out the icache too, only the tags are valid in the recs.
ClearICache();
// Switching to interpreter?
g_state.using_interpreter = ShouldUseInterpreter();
if (g_state.using_interpreter != prev_interpreter && !prev_interpreter)
{
// Before we return, set npc to pc so that we can switch from recs to int.
// We'll also need to fetch the next instruction to execute.
if (!SafeReadInstruction(g_state.pc, &g_state.next_instruction.bits)) [[unlikely]]
{
g_state.next_instruction.bits = 0;
ERROR_LOG("Failed to read current instruction from 0x{:08X}", g_state.pc);
}
g_state.npc = g_state.pc + sizeof(Instruction);
}
// Wipe out code cache when switching back to recompiler.
if (!g_state.using_interpreter && prev_interpreter)
CPU::CodeCache::Reset();
UpdateDebugDispatcherFlag();
System::InterruptExecution();
}
template<bool add_ticks, bool icache_read, u32 word_count, bool raise_exceptions>
ALWAYS_INLINE_RELEASE bool CPU::DoInstructionRead(PhysicalMemoryAddress address, void* data)
{

View file

@ -132,9 +132,9 @@ void Shutdown();
void Reset();
bool DoState(StateWrapper& sw);
void ClearICache();
CPUExecutionMode GetCurrentExecutionMode();
bool UpdateDebugDispatcherFlag();
void UpdateMemoryPointers();
void ExecutionModeChanged();
/// Executes interpreter loop.
void Execute();

View file

@ -1832,20 +1832,13 @@ bool System::Initialize(bool force_software_renderer, Error* error)
TimingEvents::Initialize();
Bus::Initialize();
CPU::Initialize();
if (!Bus::Initialize())
{
CPU::Shutdown();
return false;
}
CPU::CodeCache::Initialize();
if (!CreateGPU(force_software_renderer ? GPURenderer::Software : g_settings.gpu_renderer, false, error))
{
Bus::Shutdown();
CPU::Shutdown();
Bus::Shutdown();
return false;
}
@ -1933,9 +1926,8 @@ void System::DestroySystem()
g_gpu.reset();
DMA::Shutdown();
CPU::PGXP::Shutdown();
CPU::CodeCache::Shutdown();
Bus::Shutdown();
CPU::Shutdown();
Bus::Shutdown();
TimingEvents::Shutdown();
ClearRunningGame();
@ -3383,6 +3375,9 @@ void System::UpdateDisplayVSync()
// Avoid flipping vsync on and off by manually throttling when vsync is on.
const GPUVSyncMode vsync_mode = GetEffectiveVSyncMode();
const bool allow_present_throttle = ShouldAllowPresentThrottle();
if (g_gpu_device->GetVSyncMode() == vsync_mode && g_gpu_device->IsPresentThrottleAllowed() == allow_present_throttle)
return;
VERBOSE_LOG("VSync: {}{}{}", vsync_modes[static_cast<size_t>(vsync_mode)],
s_syncing_to_host_with_vsync ? " (for throttling)" : "",
allow_present_throttle ? " (present throttle allowed)" : "");
@ -4246,22 +4241,18 @@ void System::CheckForSettingsChanges(const Settings& old_settings)
if (g_settings.emulation_speed != old_settings.emulation_speed)
UpdateThrottlePeriod();
if (g_settings.cpu_execution_mode != old_settings.cpu_execution_mode ||
g_settings.cpu_fastmem_mode != old_settings.cpu_fastmem_mode)
if (g_settings.cpu_execution_mode != old_settings.cpu_execution_mode)
{
Host::AddIconOSDMessage("CPUExecutionModeSwitch", ICON_FA_MICROCHIP,
fmt::format(TRANSLATE_FS("OSDMessage", "Switching to {} CPU execution mode."),
TRANSLATE_SV("CPUExecutionMode", Settings::GetCPUExecutionModeDisplayName(
g_settings.cpu_execution_mode))),
Host::OSD_INFO_DURATION);
CPU::ExecutionModeChanged();
if (old_settings.cpu_execution_mode != CPUExecutionMode::Interpreter)
CPU::CodeCache::Shutdown();
if (g_settings.cpu_execution_mode != CPUExecutionMode::Interpreter)
CPU::CodeCache::Initialize();
CPU::UpdateDebugDispatcherFlag();
InterruptExecution();
}
if (CPU::CodeCache::IsUsingAnyRecompiler() &&
if (CPU::GetCurrentExecutionMode() != CPUExecutionMode::Interpreter &&
(g_settings.cpu_recompiler_memory_exceptions != old_settings.cpu_recompiler_memory_exceptions ||
g_settings.cpu_recompiler_block_linking != old_settings.cpu_recompiler_block_linking ||
g_settings.cpu_recompiler_icache != old_settings.cpu_recompiler_icache ||
@ -4270,12 +4261,21 @@ void System::CheckForSettingsChanges(const Settings& old_settings)
Host::AddIconOSDMessage("CPUFlushAllBlocks", ICON_FA_MICROCHIP,
TRANSLATE_STR("OSDMessage", "Recompiler options changed, flushing all blocks."),
Host::OSD_INFO_DURATION);
CPU::ExecutionModeChanged();
CPU::CodeCache::Reset();
CPU::g_state.bus_error = false;
}
else if (g_settings.cpu_execution_mode == CPUExecutionMode::Interpreter &&
g_settings.bios_tty_logging != old_settings.bios_tty_logging)
{
CPU::UpdateDebugDispatcherFlag();
// TTY interception requires debug dispatcher.
if (CPU::UpdateDebugDispatcherFlag())
InterruptExecution();
}
if (g_settings.cpu_fastmem_mode != old_settings.cpu_fastmem_mode)
{
// Reallocate fastmem area, even if it's not being used.
Bus::RemapFastmemViews();
}
if (g_settings.enable_cheats != old_settings.enable_cheats)

View file

@ -711,6 +711,7 @@ public:
ALWAYS_INLINE GPUVSyncMode GetVSyncMode() const { return m_vsync_mode; }
ALWAYS_INLINE bool IsVSyncModeBlocking() const { return (m_vsync_mode == GPUVSyncMode::FIFO); }
ALWAYS_INLINE bool IsPresentThrottleAllowed() const { return m_allow_present_throttle; }
virtual void SetVSyncMode(GPUVSyncMode mode, bool allow_present_throttle) = 0;
ALWAYS_INLINE bool IsDebugDevice() const { return m_debug_device; }