CPU/Recompiler: Remove unused code

Connor McLaughlin 2019-12-12 20:17:52 +10:00
parent 20c7aaf74b
commit a267451614
2 changed files with 1 addition and 223 deletions


@@ -131,12 +131,6 @@ public:
void EmitPushHostReg(HostReg reg, u32 position);
void EmitPopHostReg(HostReg reg, u32 position);
// Flags copying from host.
#if defined(Y_CPU_X64)
void ReadFlagsFromHost(Value* value);
Value ReadFlagsFromHost();
#endif
// Value ops
Value AddValues(const Value& lhs, const Value& rhs, bool set_flags);
Value SubValues(const Value& lhs, const Value& rhs, bool set_flags);


@@ -1369,21 +1369,6 @@ void CodeGenerator::EmitPopHostReg(HostReg reg, u32 position)
m_emit->pop(GetHostReg64(reg));
}
void CodeGenerator::ReadFlagsFromHost(Value* value)
{
// this is a 64-bit push/pop; we ignore the upper 32 bits
DebugAssert(value->IsInHostRegister());
m_emit->pushf();
m_emit->pop(GetHostReg64(value->host_reg));
}
Value CodeGenerator::ReadFlagsFromHost()
{
Value temp = m_register_cache.AllocateScratch(RegSize_32);
ReadFlagsFromHost(&temp);
return temp;
}
void CodeGenerator::EmitLoadCPUStructField(HostReg host_reg, RegSize guest_size, u32 offset)
{
switch (guest_size)
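
For context, the removed ReadFlagsFromHost() helpers captured the host RFLAGS by emitting pushf immediately followed by a pop into the value's host register. The following is a minimal standalone sketch of that pushf/pop pattern using raw Xbyak, assuming an x86-64 host; the ReadFlagsThunk type and its use in main() are illustrative and not part of the recompiler.

#include <cstdint>
#include "xbyak/xbyak.h"

// Hedged sketch: JIT a tiny function that returns the host RFLAGS using the same
// pushf/pop pair the removed ReadFlagsFromHost() emitted.
struct ReadFlagsThunk : Xbyak::CodeGenerator
{
  ReadFlagsThunk()
  {
    pushf();   // push RFLAGS onto the stack (a 64-bit push in long mode)
    pop(rax);  // pop it into the return register; callers ignore the upper 32 bits
    ret();
  }
};

int main()
{
  ReadFlagsThunk thunk;
  const auto read_flags = thunk.getCode<std::uint64_t (*)()>();
  const std::uint32_t flags = static_cast<std::uint32_t>(read_flags());
  return (flags & 1u) ? 1 : 0; // bit 0 of EFLAGS is the carry flag
}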
@@ -1867,207 +1852,6 @@ void CodeGenerator::EmitRaiseException(Exception excode, Condition condition /*
m_register_cache.PopState();
}
#if 0
class ThunkGenerator
{
public:
template<typename DataType>
static DataType (*CompileMemoryReadFunction(JitCodeBuffer* code_buffer))(u8, u32)
{
using FunctionType = DataType (*)(u8, u32);
const auto rret = GetHostReg64(RRETURN);
const auto rcpuptr = GetHostReg64(RCPUPTR);
const auto rarg1 = GetHostReg32(RARG1);
const auto rarg2 = GetHostReg32(RARG2);
const auto rarg3 = GetHostReg32(RARG3);
const auto scratch = GetHostReg64(RARG3);
Xbyak::CodeGenerator emitter(code_buffer->GetFreeCodeSpace(), code_buffer->GetFreeCodePointer());
// ensure the function starts at a 16-byte-aligned address
emitter.align();
FunctionType ret = emitter.getCurr<FunctionType>();
// TODO: We can skip these if the base address is zero and the size is 4GB.
Xbyak::Label raise_gpf_label;
static_assert(sizeof(CPU::SegmentCache) == 16);
emitter.movzx(rarg1, rarg1.cvt8());
emitter.shl(rarg1, 4);
emitter.lea(rret, emitter.byte[rcpuptr + rarg1.cvt64() + offsetof(CPU, m_segment_cache[0])]);
// if segcache->access_mask & Read == 0
emitter.test(emitter.byte[rret + offsetof(CPU::SegmentCache, access_mask)], static_cast<u32>(AccessTypeMask::Read));
emitter.jz(raise_gpf_label);
// if offset < limit_low
emitter.cmp(rarg2, emitter.dword[rret + offsetof(CPU::SegmentCache, limit_low)]);
emitter.jb(raise_gpf_label);
// if offset + (size - 1) > limit_high
// offset += segcache->base_address
if constexpr (sizeof(DataType) > 1)
{
emitter.lea(scratch, emitter.qword[rarg2.cvt64() + (sizeof(DataType) - 1)]);
emitter.add(rarg2, emitter.dword[rret + offsetof(CPU::SegmentCache, base_address)]);
emitter.mov(rret.cvt32(), emitter.dword[rret + offsetof(CPU::SegmentCache, limit_high)]);
emitter.cmp(scratch, rret);
emitter.ja(raise_gpf_label);
}
else
{
emitter.cmp(rarg2, emitter.dword[rret + offsetof(CPU::SegmentCache, limit_high)]);
emitter.ja(raise_gpf_label);
emitter.add(rarg2, emitter.dword[rret + offsetof(CPU::SegmentCache, base_address)]);
}
// swap segment with CPU
emitter.mov(rarg1, rcpuptr);
// go ahead with the memory read
if constexpr (std::is_same_v<DataType, u8>)
{
emitter.mov(rret, reinterpret_cast<size_t>(static_cast<u8 (*)(CPU*, LinearMemoryAddress)>(&CPU::ReadMemoryByte)));
}
else if constexpr (std::is_same_v<DataType, u16>)
{
emitter.mov(rret,
reinterpret_cast<size_t>(static_cast<u16 (*)(CPU*, LinearMemoryAddress)>(&CPU::ReadMemoryWord)));
}
else
{
emitter.mov(rret,
reinterpret_cast<size_t>(static_cast<u32 (*)(CPU*, LinearMemoryAddress)>(&CPU::ReadMemoryDWord)));
}
emitter.jmp(rret);
// RAISE GPF BRANCH
emitter.L(raise_gpf_label);
// register swap since the CPU has to come first
emitter.cmp(rarg1, (Segment_SS << 4));
emitter.mov(rarg1, Interrupt_StackFault);
emitter.mov(rarg2, Interrupt_GeneralProtectionFault);
emitter.cmove(rarg2, rarg1);
emitter.xor_(rarg3, rarg3);
emitter.mov(rarg1, rcpuptr);
// cpu->RaiseException(ss ? Interrupt_StackFault : Interrupt_GeneralProtectionFault, 0)
emitter.mov(rret, reinterpret_cast<size_t>(static_cast<void (*)(CPU*, u32, u32)>(&CPU::RaiseException)));
emitter.jmp(rret);
emitter.ready();
code_buffer->CommitCode(emitter.getSize());
return ret;
}
template<typename DataType>
static void (*CompileMemoryWriteFunction(JitCodeBuffer* code_buffer))(u8, u32, DataType)
{
using FunctionType = void (*)(u8, u32, DataType);
const auto rret = GetHostReg64(RRETURN);
const auto rcpuptr = GetHostReg64(RCPUPTR);
const auto rarg1 = GetHostReg32(RARG1);
const auto rarg2 = GetHostReg32(RARG2);
const auto rarg3 = GetHostReg32(RARG3);
const auto scratch = GetHostReg64(RARG4);
Xbyak::CodeGenerator emitter(code_buffer->GetFreeCodeSpace(), code_buffer->GetFreeCodePointer());
// ensure the function starts at a 16-byte-aligned address
emitter.align();
FunctionType ret = emitter.getCurr<FunctionType>();
// TODO: We can skip these if the base address is zero and the size is 4GB.
Xbyak::Label raise_gpf_label;
static_assert(sizeof(CPU::SegmentCache) == 16);
emitter.movzx(rarg1, rarg1.cvt8());
emitter.shl(rarg1, 4);
emitter.lea(rret, emitter.byte[rcpuptr + rarg1.cvt64() + offsetof(CPU, m_segment_cache[0])]);
// if segcache->access_mask & Write == 0
emitter.test(emitter.byte[rret + offsetof(CPU::SegmentCache, access_mask)],
static_cast<u32>(AccessTypeMask::Write));
emitter.jz(raise_gpf_label);
// if offset < limit_low
emitter.cmp(rarg2, emitter.dword[rret + offsetof(CPU::SegmentCache, limit_low)]);
emitter.jb(raise_gpf_label);
// if offset + (size - 1) > limit_high
// offset += segcache->base_address
if constexpr (sizeof(DataType) > 1)
{
emitter.lea(scratch, emitter.qword[rarg2.cvt64() + (sizeof(DataType) - 1)]);
emitter.add(rarg2, emitter.dword[rret + offsetof(CPU::SegmentCache, base_address)]);
emitter.mov(rret.cvt32(), emitter.dword[rret + offsetof(CPU::SegmentCache, limit_high)]);
emitter.cmp(scratch, rret.cvt64());
emitter.ja(raise_gpf_label);
}
else
{
emitter.cmp(rarg2, emitter.dword[rret + offsetof(CPU::SegmentCache, limit_high)]);
emitter.ja(raise_gpf_label);
emitter.add(rarg2, emitter.dword[rret + offsetof(CPU::SegmentCache, base_address)]);
}
// swap segment with CPU
emitter.mov(rarg1, rcpuptr);
// go ahead with the memory write
if constexpr (std::is_same_v<DataType, u8>)
{
emitter.mov(
rret, reinterpret_cast<size_t>(static_cast<void (*)(CPU*, LinearMemoryAddress, u8)>(&CPU::WriteMemoryByte)));
}
else if constexpr (std::is_same_v<DataType, u16>)
{
emitter.mov(
rret, reinterpret_cast<size_t>(static_cast<void (*)(CPU*, LinearMemoryAddress, u16)>(&CPU::WriteMemoryWord)));
}
else
{
emitter.mov(
rret, reinterpret_cast<size_t>(static_cast<void (*)(CPU*, LinearMemoryAddress, u32)>(&CPU::WriteMemoryDWord)));
}
emitter.jmp(rret);
// RAISE GPF BRANCH
emitter.L(raise_gpf_label);
// register swap since the CPU has to come first
emitter.cmp(rarg1, (Segment_SS << 4));
emitter.mov(rarg1, Interrupt_StackFault);
emitter.mov(rarg2, Interrupt_GeneralProtectionFault);
emitter.cmove(rarg2, rarg1);
emitter.xor_(rarg3, rarg3);
emitter.mov(rarg1, rcpuptr);
// cpu->RaiseException(ss ? Interrupt_StackFault : Interrupt_GeneralProtectionFault, 0)
emitter.mov(rret, reinterpret_cast<size_t>(static_cast<void (*)(CPU*, u32, u32)>(&CPU::RaiseException)));
emitter.jmp(rret);
emitter.ready();
code_buffer->CommitCode(emitter.getSize());
return ret;
}
};
#endif
void ASMFunctions::Generate(JitCodeBuffer* code_buffer)
{
#if 0
read_memory_byte = ThunkGenerator::CompileMemoryReadFunction<u8>(code_buffer);
read_memory_word = ThunkGenerator::CompileMemoryReadFunction<u16>(code_buffer);
read_memory_dword = ThunkGenerator::CompileMemoryReadFunction<u32>(code_buffer);
write_memory_byte = ThunkGenerator::CompileMemoryWriteFunction<u8>(code_buffer);
write_memory_word = ThunkGenerator::CompileMemoryWriteFunction<u16>(code_buffer);
write_memory_dword = ThunkGenerator::CompileMemoryWriteFunction<u32>(code_buffer);
#endif
}
void ASMFunctions::Generate(JitCodeBuffer* code_buffer) {}
} // namespace CPU::Recompiler
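
For reference, the disabled ThunkGenerator above JIT-compiled per-width memory thunks that inline the segment check before tail-calling the CPU's read/write handler: test the access mask, range-check the offset against the segment limits, add the segment base to form the linear address, and otherwise raise a stack fault (for SS) or general protection fault with error code 0. Below is a hedged C++ sketch of that control flow; SegCache, TranslateRead, FaultVectorFor and the kAccess*/k*Fault constants are illustrative stand-ins, not the project's API.

#include <cstdint>

// Illustrative equivalents of the segment-cache fields the emitted code indexes.
struct SegCache
{
  std::uint32_t limit_low;
  std::uint32_t limit_high;
  std::uint32_t base_address;
  std::uint8_t access_mask;
};

enum : std::uint8_t { kAccessRead = 1, kAccessWrite = 2 };
enum : std::uint32_t { kStackFault = 12, kGeneralProtectionFault = 13 }; // x86 #SS / #GP vectors

// Mirrors the read thunk: returns true plus the translated linear address on success,
// false where the emitted code would branch to raise_gpf_label.
template<typename DataType>
bool TranslateRead(const SegCache& seg, std::uint32_t offset, std::uint32_t* linear_address)
{
  // 1. The segment must permit the access type (the write thunk tests kAccessWrite instead).
  if ((seg.access_mask & kAccessRead) == 0)
    return false;

  // 2. The whole access must lie within [limit_low, limit_high]; the multi-byte case
  //    widens to 64 bits first, matching the lea into a 64-bit scratch register above.
  if (offset < seg.limit_low)
    return false;
  if (static_cast<std::uint64_t>(offset) + (sizeof(DataType) - 1) > seg.limit_high)
    return false;

  // 3. Linear address = segment base + offset; the thunk then tail-calls
  //    CPU::ReadMemoryByte/Word/DWord with it.
  *linear_address = seg.base_address + offset;
  return true;
}

// Mirrors the raise_gpf_label path: a failed check on the stack segment raises a
// stack fault, any other segment a general protection fault, both with error code 0.
std::uint32_t FaultVectorFor(bool is_stack_segment)
{
  return is_stack_segment ? kStackFault : kGeneralProtectionFault;
}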