Mirror of https://github.com/RetroDECK/Duckstation.git (synced 2024-11-22 13:55:38 +00:00)
CPU/CodeCache: Improve overflow handling
parent bdde3ce07b
commit 601e3586b2
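This change threads a new allow_flush parameter through LookupBlock(), RevalidateBlock() and CompileBlock(). As the hunks below show, callers that can tolerate a full cache flush (the top of the ExecuteImpl loop and FastCompileBlockFunction) pass true and keep the old behaviour: running out of code space flushes every block and recompiles. Callers that cannot (the successor-block walk in ExecuteImpl and CPU::Recompiler::Thunks::ResolveBranch) pass false, so an overflow now fails the lookup instead of wiping the cache out from under them, and LookupBlock() no longer caches a null entry in that case. The snippet below is a minimal, self-contained sketch of that control flow only; the class and every name in it are invented for illustration and are not DuckStation code.

// Illustrative sketch of the allow_flush pattern. CacheSketch, capacity_ and the
// int "blocks" are made up for this example; only the branching mirrors the diff.
#include <cstddef>
#include <cstdio>
#include <optional>
#include <unordered_map>

class CacheSketch
{
public:
  explicit CacheSketch(std::size_t capacity) : capacity_(capacity) {}

  // Look up (or "compile") the block for key. When the cache is full:
  //  - allow_flush == true  -> flush everything and compile anyway (old behaviour)
  //  - allow_flush == false -> report the overflow and fail, leaving the cache intact
  std::optional<int> Lookup(int key, bool allow_flush)
  {
    if (const auto it = blocks_.find(key); it != blocks_.end())
      return it->second;

    if (blocks_.size() >= capacity_)
    {
      if (!allow_flush)
      {
        std::fprintf(stderr, "Out of space and cannot flush while compiling %d.\n", key);
        return std::nullopt; // caller falls back (interpret the block / return to dispatcher)
      }
      std::fprintf(stderr, "Out of space, flushing all blocks.\n");
      blocks_.clear();
    }

    const int compiled = key * 2; // stand-in for real code generation
    blocks_.emplace(key, compiled);
    return compiled;
  }

private:
  std::size_t capacity_;
  std::unordered_map<int, int> blocks_;
};

int main()
{
  CacheSketch cache(1);
  cache.Lookup(1, true);  // compiles normally
  cache.Lookup(2, false); // cache full, flushing not allowed -> fails
  cache.Lookup(2, true);  // cache full, flush permitted -> flushes and compiles
  return 0;
}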
@@ -195,13 +195,13 @@ void LogCurrentState();
 static CodeBlockKey GetNextBlockKey();
 
 /// Looks up the block in the cache if it's already been compiled.
-static CodeBlock* LookupBlock(CodeBlockKey key);
+static CodeBlock* LookupBlock(CodeBlockKey key, bool allow_flush);
 
 /// Can the current block execute? This will re-validate the block if necessary.
 /// The block can also be flushed if recompilation failed, so ignore the pointer if false is returned.
-static bool RevalidateBlock(CodeBlock* block);
+static bool RevalidateBlock(CodeBlock* block, bool allow_flush);
 
-static bool CompileBlock(CodeBlock* block);
+static bool CompileBlock(CodeBlock* block, bool allow_flush);
 static void RemoveReferencesToBlock(CodeBlock* block);
 static void AddBlockToPageMap(CodeBlock* block);
 static void RemoveBlockFromPageMap(CodeBlock* block);
@@ -310,7 +310,7 @@ static void ExecuteImpl()
     next_block_key = GetNextBlockKey();
     while (g_state.pending_ticks < g_state.downcount)
     {
-      CodeBlock* block = LookupBlock(next_block_key);
+      CodeBlock* block = LookupBlock(next_block_key, true);
       if (!block)
       {
         InterpretUncachedBlock<pgxp_mode>();
@@ -346,7 +346,7 @@ static void ExecuteImpl()
       {
         // we can jump straight to it if there's no pending interrupts
         // ensure it's not a self-modifying block
-        if (!block->invalidated || RevalidateBlock(block))
+        if (!block->invalidated || RevalidateBlock(block, true))
           goto reexecute_block;
       }
       else if (!block->invalidated)
@@ -358,7 +358,7 @@ static void ExecuteImpl()
           CodeBlock* linked_block = li.block;
           if (linked_block->key.bits == next_block_key.bits)
           {
-            if (linked_block->invalidated && !RevalidateBlock(linked_block))
+            if (linked_block->invalidated && !RevalidateBlock(linked_block, true))
             {
               // CanExecuteBlock can result in a block flush, so stop iterating here.
               break;
@@ -371,7 +371,7 @@ static void ExecuteImpl()
         }
 
         // No acceptable blocks found in the successor list, try a new one.
-        CodeBlock* next_block = LookupBlock(next_block_key);
+        CodeBlock* next_block = LookupBlock(next_block_key, false);
         if (next_block)
         {
           // Link the previous block to this new block if we find a new block.
@@ -537,7 +537,7 @@ static void FallbackExistingBlockToInterpreter(CodeBlock* block)
   delete block;
 }
 
-CodeBlock* LookupBlock(CodeBlockKey key)
+CodeBlock* LookupBlock(CodeBlockKey key, bool allow_flush)
 {
   BlockMap::iterator iter = s_blocks.find(key.bits);
   if (iter != s_blocks.end())
@@ -548,7 +548,7 @@ CodeBlock* LookupBlock(CodeBlockKey key)
       return existing_block;
 
     // if compilation fails or we're forced back to the interpreter, bail out
-    if (RevalidateBlock(existing_block))
+    if (RevalidateBlock(existing_block, allow_flush))
       return existing_block;
     else
       return nullptr;
@@ -557,7 +557,7 @@ CodeBlock* LookupBlock(CodeBlockKey key)
   CodeBlock* block = new CodeBlock(key);
   block->recompile_frame_number = System::GetFrameNumber();
 
-  if (CompileBlock(block))
+  if (CompileBlock(block, allow_flush))
   {
     // add it to the page map if it's in ram
     AddBlockToPageMap(block);
@@ -574,11 +574,13 @@ CodeBlock* LookupBlock(CodeBlockKey key)
     block = nullptr;
   }
 
-  s_blocks.emplace(key.bits, block);
+  if (block || allow_flush)
+    s_blocks.emplace(key.bits, block);
+
   return block;
 }
 
-bool RevalidateBlock(CodeBlock* block)
+bool RevalidateBlock(CodeBlock* block, bool allow_flush)
 {
   for (const CodeBlockInstruction& cbi : block->instructions)
   {
@@ -634,7 +636,7 @@ recompile:
 
   block->instructions.clear();
 
-  if (!CompileBlock(block))
+  if (!CompileBlock(block, allow_flush))
   {
     Log_PerfPrintf("Failed to recompile block 0x%08X, falling back to interpreter.", block->GetPC());
     FallbackExistingBlockToInterpreter(block);
@@ -657,7 +659,7 @@ recompile:
   return true;
 }
 
-bool CompileBlock(CodeBlock* block)
+bool CompileBlock(CodeBlock* block, bool allow_flush)
 {
   u32 pc = block->GetPC();
   bool is_branch_delay_slot = false;
@@ -777,8 +779,16 @@ bool CompileBlock(CodeBlock* block)
       s_code_buffer.GetFreeFarCodeSpace() <
         (block->instructions.size() * Recompiler::MAX_FAR_HOST_BYTES_PER_INSTRUCTION))
   {
-    Log_WarningPrintf("Out of code space, flushing all blocks.");
-    Flush();
+    if (allow_flush)
+    {
+      Log_WarningPrintf("Out of code space, flushing all blocks.");
+      Flush();
+    }
+    else
+    {
+      Log_ErrorPrintf("Out of code space and cannot flush while compiling %08X.", block->GetPC());
+      return false;
+    }
   }
 
   s_code_buffer.WriteProtect(false);
|
@ -801,7 +811,7 @@ bool CompileBlock(CodeBlock* block)
|
||||||
|
|
||||||
void FastCompileBlockFunction()
|
void FastCompileBlockFunction()
|
||||||
{
|
{
|
||||||
CodeBlock* block = LookupBlock(GetNextBlockKey());
|
CodeBlock* block = LookupBlock(GetNextBlockKey(), true);
|
||||||
if (block)
|
if (block)
|
||||||
{
|
{
|
||||||
s_single_block_asm_dispatcher(block->host_code);
|
s_single_block_asm_dispatcher(block->host_code);
|
||||||
|
@@ -1218,9 +1228,9 @@ void CPU::Recompiler::Thunks::ResolveBranch(CodeBlock* block, void* host_pc, voi
   using namespace CPU::CodeCache;
 
   CodeBlockKey key = GetNextBlockKey();
-  CodeBlock* successor_block = LookupBlock(key);
-  if (!successor_block || (successor_block->invalidated && !RevalidateBlock(successor_block)) || !block->can_link ||
-      !successor_block->can_link)
+  CodeBlock* successor_block = LookupBlock(key, false);
+  if (!successor_block || (successor_block->invalidated && !RevalidateBlock(successor_block, false)) ||
+      !block->can_link || !successor_block->can_link)
   {
     // just turn it into a return to the dispatcher instead.
     s_code_buffer.WriteProtect(false);