Dep: Update vixl to 662828c

Stenzek committed 2024-06-14 17:27:12 +10:00
parent d45e218da7
commit f0c2832d03
66 changed files with 65453 additions and 10345 deletions
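
Most of the recompiler changes below follow a single pattern: helpers that previously took the width-specific vixl::aarch64::WRegister (or XRegister) now accept the generic vixl::aarch64::Register, and the expected width is enforced with a DebugAssert on IsW()/IsX() instead of by the parameter type. A minimal sketch of that pattern, using an illustrative helper name and plain assert in place of DuckStation's DebugAssert:

// Sketch only, not DuckStation code: a helper in the style of MoveMIPSRegToReg()/EmitMov()
// from the diff, adapted to the updated vixl API.
#include <cassert>
#include "aarch64/assembler-aarch64.h"  // vixl

using namespace vixl::aarch64;

void MoveHostRegToReg(Assembler* armAsm, const Register& dst, const WRegister& src)
{
  assert(dst.IsW());                   // width is now checked at runtime, not by the type
  if (dst.GetCode() != src.GetCode())  // skip no-op moves, as the recompiler does
    armAsm->mov(dst, src);             // 32-bit register-to-register move
}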


@ -63,10 +63,10 @@ void CPU::NewRec::AArch64Compiler::Reset(CodeCache::Block* block, u8* code_buffe
armAsm = &m_emitter;
#ifdef VIXL_DEBUG
m_emitter_check = std::make_unique<vixl::CodeBufferCheckScope>(m_emitter.get(), code_buffer_space,
m_emitter_check = std::make_unique<vixl::CodeBufferCheckScope>(&m_emitter, code_buffer_space,
vixl::CodeBufferCheckScope::kDontReserveBufferSpace);
m_far_emitter_check = std::make_unique<vixl::CodeBufferCheckScope>(
m_far_emitter.get(), far_code_space, vixl::CodeBufferCheckScope::kDontReserveBufferSpace);
&m_far_emitter, far_code_space, vixl::CodeBufferCheckScope::kDontReserveBufferSpace);
#endif
// Need to wipe it out so it's correct when toggling fastmem.
@ -162,7 +162,7 @@ void CPU::NewRec::AArch64Compiler::SwitchToNearCode(bool emit_jump, vixl::aarch6
armAsm = &m_emitter;
}
void CPU::NewRec::AArch64Compiler::EmitMov(const vixl::aarch64::WRegister& dst, u32 val)
void CPU::NewRec::AArch64Compiler::EmitMov(const vixl::aarch64::Register& dst, u32 val)
{
armEmitMov(armAsm, dst, val);
}
@ -495,38 +495,39 @@ vixl::aarch64::MemOperand CPU::NewRec::AArch64Compiler::MipsPtr(Reg r) const
return PTR(&g_state.regs.r[static_cast<u32>(r)]);
}
vixl::aarch64::WRegister CPU::NewRec::AArch64Compiler::CFGetRegD(CompileFlags cf) const
vixl::aarch64::Register CPU::NewRec::AArch64Compiler::CFGetRegD(CompileFlags cf) const
{
DebugAssert(cf.valid_host_d);
return WRegister(cf.host_d);
}
vixl::aarch64::WRegister CPU::NewRec::AArch64Compiler::CFGetRegS(CompileFlags cf) const
vixl::aarch64::Register CPU::NewRec::AArch64Compiler::CFGetRegS(CompileFlags cf) const
{
DebugAssert(cf.valid_host_s);
return WRegister(cf.host_s);
}
vixl::aarch64::WRegister CPU::NewRec::AArch64Compiler::CFGetRegT(CompileFlags cf) const
vixl::aarch64::Register CPU::NewRec::AArch64Compiler::CFGetRegT(CompileFlags cf) const
{
DebugAssert(cf.valid_host_t);
return WRegister(cf.host_t);
}
vixl::aarch64::WRegister CPU::NewRec::AArch64Compiler::CFGetRegLO(CompileFlags cf) const
vixl::aarch64::Register CPU::NewRec::AArch64Compiler::CFGetRegLO(CompileFlags cf) const
{
DebugAssert(cf.valid_host_lo);
return WRegister(cf.host_lo);
}
vixl::aarch64::WRegister CPU::NewRec::AArch64Compiler::CFGetRegHI(CompileFlags cf) const
vixl::aarch64::Register CPU::NewRec::AArch64Compiler::CFGetRegHI(CompileFlags cf) const
{
DebugAssert(cf.valid_host_hi);
return WRegister(cf.host_hi);
}
void CPU::NewRec::AArch64Compiler::MoveSToReg(const vixl::aarch64::WRegister& dst, CompileFlags cf)
void CPU::NewRec::AArch64Compiler::MoveSToReg(const vixl::aarch64::Register& dst, CompileFlags cf)
{
DebugAssert(dst.IsW());
if (cf.valid_host_s)
{
if (cf.host_s != dst.GetCode())
@ -547,8 +548,9 @@ void CPU::NewRec::AArch64Compiler::MoveSToReg(const vixl::aarch64::WRegister& ds
}
}
void CPU::NewRec::AArch64Compiler::MoveTToReg(const vixl::aarch64::WRegister& dst, CompileFlags cf)
void CPU::NewRec::AArch64Compiler::MoveTToReg(const vixl::aarch64::Register& dst, CompileFlags cf)
{
DebugAssert(dst.IsW());
if (cf.valid_host_t)
{
if (cf.host_t != dst.GetCode())
@ -569,9 +571,9 @@ void CPU::NewRec::AArch64Compiler::MoveTToReg(const vixl::aarch64::WRegister& ds
}
}
void CPU::NewRec::AArch64Compiler::MoveMIPSRegToReg(const vixl::aarch64::WRegister& dst, Reg reg)
void CPU::NewRec::AArch64Compiler::MoveMIPSRegToReg(const vixl::aarch64::Register& dst, Reg reg)
{
DebugAssert(reg < Reg::count);
DebugAssert(reg < Reg::count && dst.IsW());
if (const std::optional<u32> hreg = CheckHostReg(0, Compiler::HR_TYPE_CPU_REG, reg))
armAsm->mov(dst, WRegister(hreg.value()));
else if (HasConstantReg(reg))
@ -712,8 +714,9 @@ void CPU::NewRec::AArch64Compiler::Compile_Fallback()
m_load_delay_dirty = EMULATE_LOAD_DELAYS;
}
void CPU::NewRec::AArch64Compiler::CheckBranchTarget(const vixl::aarch64::WRegister& pcreg)
void CPU::NewRec::AArch64Compiler::CheckBranchTarget(const vixl::aarch64::Register& pcreg)
{
DebugAssert(pcreg.IsW());
if (!g_settings.cpu_recompiler_memory_exceptions)
return;
@ -729,7 +732,7 @@ void CPU::NewRec::AArch64Compiler::CheckBranchTarget(const vixl::aarch64::WRegis
void CPU::NewRec::AArch64Compiler::Compile_jr(CompileFlags cf)
{
const WRegister pcreg = CFGetRegS(cf);
const Register pcreg = CFGetRegS(cf);
CheckBranchTarget(pcreg);
armAsm->str(pcreg, PTR(&g_state.pc));
@ -740,7 +743,7 @@ void CPU::NewRec::AArch64Compiler::Compile_jr(CompileFlags cf)
void CPU::NewRec::AArch64Compiler::Compile_jalr(CompileFlags cf)
{
const WRegister pcreg = CFGetRegS(cf);
const Register pcreg = CFGetRegS(cf);
if (MipsD() != Reg::zero)
SetConstantReg(MipsD(), GetBranchReturnAddress(cf));
@ -765,7 +768,7 @@ void CPU::NewRec::AArch64Compiler::Compile_bxx(CompileFlags cf, BranchCondition
DebugAssert(cond == BranchCondition::Equal || cond == BranchCondition::NotEqual || cf.MipsT() == Reg::zero);
Label taken;
const WRegister rs = CFGetRegS(cf);
const Register rs = CFGetRegS(cf);
switch (cond)
{
case BranchCondition::Equal:
@ -834,8 +837,8 @@ void CPU::NewRec::AArch64Compiler::Compile_bxx(CompileFlags cf, BranchCondition
void CPU::NewRec::AArch64Compiler::Compile_addi(CompileFlags cf, bool overflow)
{
const WRegister rs = CFGetRegS(cf);
const WRegister rt = CFGetRegT(cf);
const Register rs = CFGetRegS(cf);
const Register rt = CFGetRegT(cf);
if (const u32 imm = inst->i.imm_sext32(); imm != 0)
{
if (!overflow)
@ -882,7 +885,7 @@ void CPU::NewRec::AArch64Compiler::Compile_slti(CompileFlags cf, bool sign)
void CPU::NewRec::AArch64Compiler::Compile_andi(CompileFlags cf)
{
const WRegister rt = CFGetRegT(cf);
const Register rt = CFGetRegT(cf);
if (const u32 imm = inst->i.imm_zext32(); imm != 0)
armAsm->and_(rt, CFGetRegS(cf), armCheckLogicalConstant(imm));
else
@ -891,8 +894,8 @@ void CPU::NewRec::AArch64Compiler::Compile_andi(CompileFlags cf)
void CPU::NewRec::AArch64Compiler::Compile_ori(CompileFlags cf)
{
const WRegister rt = CFGetRegT(cf);
const WRegister rs = CFGetRegS(cf);
const Register rt = CFGetRegT(cf);
const Register rs = CFGetRegS(cf);
if (const u32 imm = inst->i.imm_zext32(); imm != 0)
armAsm->orr(rt, rs, armCheckLogicalConstant(imm));
else if (rt.GetCode() != rs.GetCode())
@ -901,8 +904,8 @@ void CPU::NewRec::AArch64Compiler::Compile_ori(CompileFlags cf)
void CPU::NewRec::AArch64Compiler::Compile_xori(CompileFlags cf)
{
const WRegister rt = CFGetRegT(cf);
const WRegister rs = CFGetRegS(cf);
const Register rt = CFGetRegT(cf);
const Register rs = CFGetRegS(cf);
if (const u32 imm = inst->i.imm_zext32(); imm != 0)
armAsm->eor(rt, rs, armCheckLogicalConstant(imm));
else if (rt.GetCode() != rs.GetCode())
@ -914,8 +917,8 @@ void CPU::NewRec::AArch64Compiler::Compile_shift(CompileFlags cf,
const vixl::aarch64::Register&,
unsigned))
{
const WRegister rd = CFGetRegD(cf);
const WRegister rt = CFGetRegT(cf);
const Register rd = CFGetRegD(cf);
const Register rt = CFGetRegT(cf);
if (inst->r.shamt > 0)
(armAsm->*op)(rd, rt, inst->r.shamt);
else if (rd.GetCode() != rt.GetCode())
@ -943,12 +946,12 @@ void CPU::NewRec::AArch64Compiler::Compile_variable_shift(
const vixl::aarch64::Register&),
void (vixl::aarch64::Assembler::*op_const)(const vixl::aarch64::Register&, const vixl::aarch64::Register&, unsigned))
{
const WRegister rd = CFGetRegD(cf);
const Register rd = CFGetRegD(cf);
AssertRegOrConstS(cf);
AssertRegOrConstT(cf);
const WRegister rt = cf.valid_host_t ? CFGetRegT(cf) : RWARG2;
const Register rt = cf.valid_host_t ? CFGetRegT(cf) : RWARG2;
if (!cf.valid_host_t)
MoveTToReg(rt, cf);
@ -982,17 +985,17 @@ void CPU::NewRec::AArch64Compiler::Compile_srav(CompileFlags cf)
void CPU::NewRec::AArch64Compiler::Compile_mult(CompileFlags cf, bool sign)
{
const WRegister rs = cf.valid_host_s ? CFGetRegS(cf) : RWARG1;
const Register rs = cf.valid_host_s ? CFGetRegS(cf) : RWARG1;
if (!cf.valid_host_s)
MoveSToReg(rs, cf);
const WRegister rt = cf.valid_host_t ? CFGetRegT(cf) : RWARG2;
const Register rt = cf.valid_host_t ? CFGetRegT(cf) : RWARG2;
if (!cf.valid_host_t)
MoveTToReg(rt, cf);
// TODO: if lo/hi gets killed, we can use a 32-bit multiply
const WRegister lo = CFGetRegLO(cf);
const WRegister hi = CFGetRegHI(cf);
const Register lo = CFGetRegLO(cf);
const Register hi = CFGetRegHI(cf);
(sign) ? armAsm->smull(lo.X(), rs, rt) : armAsm->umull(lo.X(), rs, rt);
armAsm->lsr(hi.X(), lo.X(), 32);
@ -1010,16 +1013,16 @@ void CPU::NewRec::AArch64Compiler::Compile_multu(CompileFlags cf)
void CPU::NewRec::AArch64Compiler::Compile_div(CompileFlags cf)
{
const WRegister rs = cf.valid_host_s ? CFGetRegS(cf) : RWARG1;
const Register rs = cf.valid_host_s ? CFGetRegS(cf) : RWARG1;
if (!cf.valid_host_s)
MoveSToReg(rs, cf);
const WRegister rt = cf.valid_host_t ? CFGetRegT(cf) : RWARG2;
const Register rt = cf.valid_host_t ? CFGetRegT(cf) : RWARG2;
if (!cf.valid_host_t)
MoveTToReg(rt, cf);
const WRegister rlo = CFGetRegLO(cf);
const WRegister rhi = CFGetRegHI(cf);
const Register rlo = CFGetRegLO(cf);
const Register rhi = CFGetRegHI(cf);
// TODO: This could be slightly more optimal
Label done;
@ -1055,16 +1058,16 @@ void CPU::NewRec::AArch64Compiler::Compile_div(CompileFlags cf)
void CPU::NewRec::AArch64Compiler::Compile_divu(CompileFlags cf)
{
const WRegister rs = cf.valid_host_s ? CFGetRegS(cf) : RWARG1;
const Register rs = cf.valid_host_s ? CFGetRegS(cf) : RWARG1;
if (!cf.valid_host_s)
MoveSToReg(rs, cf);
const WRegister rt = cf.valid_host_t ? CFGetRegT(cf) : RWARG2;
const Register rt = cf.valid_host_t ? CFGetRegT(cf) : RWARG2;
if (!cf.valid_host_t)
MoveTToReg(rt, cf);
const WRegister rlo = CFGetRegLO(cf);
const WRegister rhi = CFGetRegHI(cf);
const Register rlo = CFGetRegLO(cf);
const Register rhi = CFGetRegHI(cf);
Label done;
Label not_divide_by_zero;
@ -1083,8 +1086,9 @@ void CPU::NewRec::AArch64Compiler::Compile_divu(CompileFlags cf)
armAsm->bind(&done);
}
void CPU::NewRec::AArch64Compiler::TestOverflow(const vixl::aarch64::WRegister& result)
void CPU::NewRec::AArch64Compiler::TestOverflow(const vixl::aarch64::Register& result)
{
DebugAssert(result.IsW());
SwitchToFarCode(true, vs);
BackupHostState();
@ -1108,14 +1112,14 @@ void CPU::NewRec::AArch64Compiler::Compile_dst_op(CompileFlags cf,
AssertRegOrConstS(cf);
AssertRegOrConstT(cf);
const WRegister rd = CFGetRegD(cf);
const Register rd = CFGetRegD(cf);
if (cf.valid_host_s && cf.valid_host_t)
{
(armAsm->*op)(rd, CFGetRegS(cf), CFGetRegT(cf));
}
else if (commutative && (cf.const_s || cf.const_t))
{
const WRegister src = cf.const_s ? CFGetRegT(cf) : CFGetRegS(cf);
const Register src = cf.const_s ? CFGetRegT(cf) : CFGetRegS(cf);
if (const u32 cv = GetConstantRegU32(cf.const_s ? cf.MipsS() : cf.MipsT()); cv != 0)
{
(armAsm->*op)(rd, src, logical ? armCheckLogicalConstant(cv) : armCheckAddSubConstant(cv));
@ -1135,7 +1139,7 @@ void CPU::NewRec::AArch64Compiler::Compile_dst_op(CompileFlags cf,
}
else if (cf.const_t)
{
const WRegister rs = CFGetRegS(cf);
const Register rs = CFGetRegS(cf);
if (const u32 cv = GetConstantRegU32(cf.const_s ? cf.MipsS() : cf.MipsT()); cv != 0)
{
(armAsm->*op)(rd, rs, logical ? armCheckLogicalConstant(cv) : armCheckAddSubConstant(cv));
@ -1184,7 +1188,7 @@ void CPU::NewRec::AArch64Compiler::Compile_and(CompileFlags cf)
AssertRegOrConstT(cf);
// special cases - and with self -> self, and with 0 -> 0
const WRegister regd = CFGetRegD(cf);
const Register regd = CFGetRegD(cf);
if (cf.MipsS() == cf.MipsT())
{
armAsm->mov(regd, CFGetRegS(cf));
@ -1205,7 +1209,7 @@ void CPU::NewRec::AArch64Compiler::Compile_or(CompileFlags cf)
AssertRegOrConstT(cf);
// or/nor with 0 -> no effect
const WRegister regd = CFGetRegD(cf);
const Register regd = CFGetRegD(cf);
if (HasConstantRegValue(cf.MipsS(), 0) || HasConstantRegValue(cf.MipsT(), 0) || cf.MipsS() == cf.MipsT())
{
cf.const_s ? MoveTToReg(regd, cf) : MoveSToReg(regd, cf);
@ -1220,7 +1224,7 @@ void CPU::NewRec::AArch64Compiler::Compile_xor(CompileFlags cf)
AssertRegOrConstS(cf);
AssertRegOrConstT(cf);
const WRegister regd = CFGetRegD(cf);
const Register regd = CFGetRegD(cf);
if (cf.MipsS() == cf.MipsT())
{
// xor with self -> zero
@ -1276,16 +1280,16 @@ void CPU::NewRec::AArch64Compiler::Compile_slt(CompileFlags cf, bool sign)
armAsm->cset(CFGetRegD(cf), sign ? lt : lo);
}
vixl::aarch64::WRegister
vixl::aarch64::Register
CPU::NewRec::AArch64Compiler::ComputeLoadStoreAddressArg(CompileFlags cf,
const std::optional<VirtualMemoryAddress>& address,
const std::optional<const vixl::aarch64::WRegister>& reg)
const std::optional<const vixl::aarch64::Register>& reg)
{
const u32 imm = inst->i.imm_sext32();
if (cf.valid_host_s && imm == 0 && !reg.has_value())
return CFGetRegS(cf);
const WRegister dst = reg.has_value() ? reg.value() : RWARG1;
const Register dst = reg.has_value() ? reg.value() : RWARG1;
if (address.has_value())
{
EmitMov(dst, address.value());
@ -1294,7 +1298,7 @@ CPU::NewRec::AArch64Compiler::ComputeLoadStoreAddressArg(CompileFlags cf,
{
if (cf.valid_host_s)
{
if (const WRegister src = CFGetRegS(cf); src.GetCode() != dst.GetCode())
if (const Register src = CFGetRegS(cf); src.GetCode() != dst.GetCode())
armAsm->mov(dst, CFGetRegS(cf));
}
else
@ -1319,15 +1323,16 @@ CPU::NewRec::AArch64Compiler::ComputeLoadStoreAddressArg(CompileFlags cf,
}
template<typename RegAllocFn>
vixl::aarch64::WRegister CPU::NewRec::AArch64Compiler::GenerateLoad(const vixl::aarch64::WRegister& addr_reg,
MemoryAccessSize size, bool sign, bool use_fastmem,
const RegAllocFn& dst_reg_alloc)
vixl::aarch64::Register CPU::NewRec::AArch64Compiler::GenerateLoad(const vixl::aarch64::Register& addr_reg,
MemoryAccessSize size, bool sign, bool use_fastmem,
const RegAllocFn& dst_reg_alloc)
{
DebugAssert(addr_reg.IsW());
if (use_fastmem)
{
m_cycles += Bus::RAM_READ_TICKS;
const WRegister dst = dst_reg_alloc();
const Register dst = dst_reg_alloc();
if (g_settings.cpu_fastmem_mode == CPUFastmemMode::LUT)
{
@ -1410,7 +1415,7 @@ vixl::aarch64::WRegister CPU::NewRec::AArch64Compiler::GenerateLoad(const vixl::
SwitchToNearCode(false);
}
const WRegister dst_reg = dst_reg_alloc();
const Register dst_reg = dst_reg_alloc();
switch (size)
{
case MemoryAccessSize::Byte:
@ -1434,10 +1439,11 @@ vixl::aarch64::WRegister CPU::NewRec::AArch64Compiler::GenerateLoad(const vixl::
return dst_reg;
}
void CPU::NewRec::AArch64Compiler::GenerateStore(const vixl::aarch64::WRegister& addr_reg,
const vixl::aarch64::WRegister& value_reg, MemoryAccessSize size,
void CPU::NewRec::AArch64Compiler::GenerateStore(const vixl::aarch64::Register& addr_reg,
const vixl::aarch64::Register& value_reg, MemoryAccessSize size,
bool use_fastmem)
{
DebugAssert(addr_reg.IsW() && value_reg.IsW());
if (use_fastmem)
{
if (g_settings.cpu_fastmem_mode == CPUFastmemMode::LUT)
@ -1529,8 +1535,8 @@ void CPU::NewRec::AArch64Compiler::Compile_lxx(CompileFlags cf, MemoryAccessSize
g_settings.gpu_pgxp_enable ? std::optional<WRegister>(WRegister(AllocateTempHostReg(HR_CALLEE_SAVED))) :
std::optional<WRegister>();
FlushForLoadStore(address, false, use_fastmem);
const WRegister addr = ComputeLoadStoreAddressArg(cf, address, addr_reg);
const WRegister data = GenerateLoad(addr, size, sign, use_fastmem, [this, cf]() {
const Register addr = ComputeLoadStoreAddressArg(cf, address, addr_reg);
const Register data = GenerateLoad(addr, size, sign, use_fastmem, [this, cf]() -> Register {
if (cf.MipsT() == Reg::zero)
return RWRET;
@ -1556,7 +1562,7 @@ void CPU::NewRec::AArch64Compiler::Compile_lwx(CompileFlags cf, MemoryAccessSize
{
DebugAssert(size == MemoryAccessSize::Word && !sign);
const WRegister addr = WRegister(AllocateTempHostReg(HR_CALLEE_SAVED));
const Register addr = WRegister(AllocateTempHostReg(HR_CALLEE_SAVED));
FlushForLoadStore(address, false, use_fastmem);
// TODO: if address is constant, this can be simplified..
@ -1579,7 +1585,7 @@ void CPU::NewRec::AArch64Compiler::Compile_lwx(CompileFlags cf, MemoryAccessSize
// lwl/lwr from a load-delayed value takes the new value, but it itself, is load delayed, so the original value is
// never written back. NOTE: can't trust T in cf because of the flush
const Reg rt = inst->r.rt;
WRegister value;
Register value;
if (m_load_delay_register == rt)
{
const u32 existing_ld_rt = (m_load_delay_value_register == NUM_HOST_REGS) ?
@ -1654,8 +1660,8 @@ void CPU::NewRec::AArch64Compiler::Compile_lwc2(CompileFlags cf, MemoryAccessSiz
g_settings.gpu_pgxp_enable ? std::optional<WRegister>(WRegister(AllocateTempHostReg(HR_CALLEE_SAVED))) :
std::optional<WRegister>();
FlushForLoadStore(address, false, use_fastmem);
const WRegister addr = ComputeLoadStoreAddressArg(cf, address, addr_reg);
const WRegister value = GenerateLoad(addr, MemoryAccessSize::Word, false, use_fastmem, [this, action = action]() {
const Register addr = ComputeLoadStoreAddressArg(cf, address, addr_reg);
const Register value = GenerateLoad(addr, MemoryAccessSize::Word, false, use_fastmem, [this, action = action]() {
return (action == GTERegisterAccessAction::CallHandler && g_settings.gpu_pgxp_enable) ?
WRegister(AllocateTempHostReg(HR_CALLEE_SAVED)) :
RWRET;
@ -1741,8 +1747,8 @@ void CPU::NewRec::AArch64Compiler::Compile_sxx(CompileFlags cf, MemoryAccessSize
g_settings.gpu_pgxp_enable ? std::optional<WRegister>(WRegister(AllocateTempHostReg(HR_CALLEE_SAVED))) :
std::optional<WRegister>();
FlushForLoadStore(address, true, use_fastmem);
const WRegister addr = ComputeLoadStoreAddressArg(cf, address, addr_reg);
const WRegister data = cf.valid_host_t ? CFGetRegT(cf) : RWARG2;
const Register addr = ComputeLoadStoreAddressArg(cf, address, addr_reg);
const Register data = cf.valid_host_t ? CFGetRegT(cf) : RWARG2;
if (!cf.valid_host_t)
MoveTToReg(RWARG2, cf);
@ -1766,8 +1772,8 @@ void CPU::NewRec::AArch64Compiler::Compile_swx(CompileFlags cf, MemoryAccessSize
// TODO: this can take over rt's value if it's no longer needed
// NOTE: can't trust T in cf because of the alloc
const WRegister addr = WRegister(AllocateTempHostReg(HR_CALLEE_SAVED));
const WRegister value = g_settings.gpu_pgxp_enable ? WRegister(AllocateTempHostReg(HR_CALLEE_SAVED)) : RWARG2;
const Register addr = WRegister(AllocateTempHostReg(HR_CALLEE_SAVED));
const Register value = g_settings.gpu_pgxp_enable ? WRegister(AllocateTempHostReg(HR_CALLEE_SAVED)) : RWARG2;
if (g_settings.gpu_pgxp_enable)
MoveMIPSRegToReg(value, inst->r.rt);
@ -1838,10 +1844,10 @@ void CPU::NewRec::AArch64Compiler::Compile_swc2(CompileFlags cf, MemoryAccessSiz
{
const u32 index = static_cast<u32>(inst->r.rt.GetValue());
const auto [ptr, action] = GetGTERegisterPointer(index, false);
const WRegister addr = (g_settings.gpu_pgxp_enable || action == GTERegisterAccessAction::CallHandler) ?
WRegister(AllocateTempHostReg(HR_CALLEE_SAVED)) :
RWARG1;
const WRegister data = g_settings.gpu_pgxp_enable ? WRegister(AllocateTempHostReg(HR_CALLEE_SAVED)) : RWARG2;
const Register addr = (g_settings.gpu_pgxp_enable || action == GTERegisterAccessAction::CallHandler) ?
WRegister(AllocateTempHostReg(HR_CALLEE_SAVED)) :
RWARG1;
const Register data = g_settings.gpu_pgxp_enable ? WRegister(AllocateTempHostReg(HR_CALLEE_SAVED)) : RWARG2;
FlushForLoadStore(address, true, use_fastmem);
ComputeLoadStoreAddressArg(cf, address, addr);
@ -1912,10 +1918,10 @@ void CPU::NewRec::AArch64Compiler::Compile_mtc0(CompileFlags cf)
// for some registers, we need to test certain bits
const bool needs_bit_test = (reg == Cop0Reg::SR);
const WRegister new_value = RWARG1;
const WRegister old_value = RWARG2;
const WRegister changed_bits = RWARG3;
const WRegister mask_reg = RWSCRATCH;
const Register new_value = RWARG1;
const Register old_value = RWARG2;
const Register changed_bits = RWARG3;
const Register mask_reg = RWSCRATCH;
// Load old value
armAsm->ldr(old_value, PTR(ptr));
@ -1975,8 +1981,10 @@ void CPU::NewRec::AArch64Compiler::Compile_rfe(CompileFlags cf)
TestInterrupts(RWARG1);
}
void CPU::NewRec::AArch64Compiler::TestInterrupts(const vixl::aarch64::WRegister& sr)
void CPU::NewRec::AArch64Compiler::TestInterrupts(const vixl::aarch64::Register& sr)
{
DebugAssert(sr.IsW());
// if Iec == 0 then goto no_interrupt
Label no_interrupt;
armAsm->tbz(sr, 0, &no_interrupt);
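
One easy-to-miss change above: the register-allocation lambda passed to GenerateLoad() in Compile_lxx() now spells its return type as -> Register. Assuming RWRET is still a WRegister constant while CFGetRegT() now returns the generic Register, the lambda's branches no longer deduce a common type, so the explicit return type is required. A hedged sketch of the situation, with illustrative names only ('ret_reg' stands in for RWRET, 'temp' for a CFGetRegT()/AllocateTempHostReg() result):

#include "aarch64/assembler-aarch64.h"
using namespace vixl::aarch64;

Register pick_load_dst(bool discard_result, const WRegister& ret_reg, const Register& temp)
{
  auto alloc = [&]() -> Register {  // without '-> Register', the two returns would deduce
    if (discard_result)             // WRegister vs. Register and fail to agree
      return ret_reg;
    return temp;
  };
  return alloc();
}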


@ -43,7 +43,7 @@ protected:
void Compile_Fallback() override;
void CheckBranchTarget(const vixl::aarch64::WRegister& pcreg);
void CheckBranchTarget(const vixl::aarch64::Register& pcreg);
void Compile_jr(CompileFlags cf) override;
void Compile_jalr(CompileFlags cf) override;
void Compile_bxx(CompileFlags cf, BranchCondition cond) override;
@ -77,7 +77,7 @@ protected:
void Compile_multu(CompileFlags cf) override;
void Compile_div(CompileFlags cf) override;
void Compile_divu(CompileFlags cf) override;
void TestOverflow(const vixl::aarch64::WRegister& result);
void TestOverflow(const vixl::aarch64::Register& result);
void Compile_dst_op(CompileFlags cf,
void (vixl::aarch64::Assembler::*op)(const vixl::aarch64::Register&,
const vixl::aarch64::Register&,
@ -95,13 +95,13 @@ protected:
void Compile_slt(CompileFlags cf) override;
void Compile_sltu(CompileFlags cf) override;
vixl::aarch64::WRegister
vixl::aarch64::Register
ComputeLoadStoreAddressArg(CompileFlags cf, const std::optional<VirtualMemoryAddress>& address,
const std::optional<const vixl::aarch64::WRegister>& reg = std::nullopt);
const std::optional<const vixl::aarch64::Register>& reg = std::nullopt);
template<typename RegAllocFn>
vixl::aarch64::WRegister GenerateLoad(const vixl::aarch64::WRegister& addr_reg, MemoryAccessSize size, bool sign,
bool use_fastmem, const RegAllocFn& dst_reg_alloc);
void GenerateStore(const vixl::aarch64::WRegister& addr_reg, const vixl::aarch64::WRegister& value_reg,
vixl::aarch64::Register GenerateLoad(const vixl::aarch64::Register& addr_reg, MemoryAccessSize size, bool sign,
bool use_fastmem, const RegAllocFn& dst_reg_alloc);
void GenerateStore(const vixl::aarch64::Register& addr_reg, const vixl::aarch64::Register& value_reg,
MemoryAccessSize size, bool use_fastmem);
void Compile_lxx(CompileFlags cf, MemoryAccessSize size, bool sign, bool use_fastmem,
const std::optional<VirtualMemoryAddress>& address) override;
@ -116,7 +116,7 @@ protected:
void Compile_swc2(CompileFlags cf, MemoryAccessSize size, bool sign, bool use_fastmem,
const std::optional<VirtualMemoryAddress>& address) override;
void TestInterrupts(const vixl::aarch64::WRegister& sr);
void TestInterrupts(const vixl::aarch64::Register& sr);
void Compile_mtc0(CompileFlags cf) override;
void Compile_rfe(CompileFlags cf) override;
@ -128,7 +128,7 @@ protected:
Reg arg3reg = Reg::count) override;
private:
void EmitMov(const vixl::aarch64::WRegister& dst, u32 val);
void EmitMov(const vixl::aarch64::Register& dst, u32 val);
void EmitCall(const void* ptr, bool force_inline = false);
vixl::aarch64::Operand armCheckAddSubConstant(s32 val);
@ -144,15 +144,15 @@ private:
void AssertRegOrConstS(CompileFlags cf) const;
void AssertRegOrConstT(CompileFlags cf) const;
vixl::aarch64::MemOperand MipsPtr(Reg r) const;
vixl::aarch64::WRegister CFGetRegD(CompileFlags cf) const;
vixl::aarch64::WRegister CFGetRegS(CompileFlags cf) const;
vixl::aarch64::WRegister CFGetRegT(CompileFlags cf) const;
vixl::aarch64::WRegister CFGetRegLO(CompileFlags cf) const;
vixl::aarch64::WRegister CFGetRegHI(CompileFlags cf) const;
vixl::aarch64::Register CFGetRegD(CompileFlags cf) const;
vixl::aarch64::Register CFGetRegS(CompileFlags cf) const;
vixl::aarch64::Register CFGetRegT(CompileFlags cf) const;
vixl::aarch64::Register CFGetRegLO(CompileFlags cf) const;
vixl::aarch64::Register CFGetRegHI(CompileFlags cf) const;
void MoveSToReg(const vixl::aarch64::WRegister& dst, CompileFlags cf);
void MoveTToReg(const vixl::aarch64::WRegister& dst, CompileFlags cf);
void MoveMIPSRegToReg(const vixl::aarch64::WRegister& dst, Reg reg);
void MoveSToReg(const vixl::aarch64::Register& dst, CompileFlags cf);
void MoveTToReg(const vixl::aarch64::Register& dst, CompileFlags cf);
void MoveMIPSRegToReg(const vixl::aarch64::Register& dst, Reg reg);
vixl::aarch64::Assembler m_emitter;
vixl::aarch64::Assembler m_far_emitter;


@ -145,8 +145,10 @@ s64 CPU::Recompiler::armGetPCDisplacement(const void* current, const void* targe
return static_cast<s64>((reinterpret_cast<ptrdiff_t>(target) - reinterpret_cast<ptrdiff_t>(current)) >> 2);
}
void CPU::Recompiler::armMoveAddressToReg(a64::Assembler* armAsm, const a64::XRegister& reg, const void* addr)
void CPU::Recompiler::armMoveAddressToReg(a64::Assembler* armAsm, const a64::Register& reg, const void* addr)
{
DebugAssert(reg.IsX());
const void* cur = armAsm->GetCursorAddress<const void*>();
const void* current_code_ptr_page =
reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(cur) & ~static_cast<uintptr_t>(0xFFF));
@ -259,8 +261,13 @@ u8* CPU::Recompiler::armGetJumpTrampoline(const void* target)
u8* start = s_trampoline_start_ptr + offset;
a64::Assembler armAsm(start, TRAMPOLINE_AREA_SIZE - offset);
#ifdef VIXL_DEBUG
vixl::CodeBufferCheckScope armAsmCheck(&armAsm, TRAMPOLINE_AREA_SIZE - offset,
vixl::CodeBufferCheckScope::kDontReserveBufferSpace);
#endif
armMoveAddressToReg(&armAsm, RXSCRATCH, target);
armAsm.br(RXSCRATCH);
armAsm.FinalizeCode();
const u32 size = static_cast<u32>(armAsm.GetSizeOfCodeGenerated());
DebugAssert(size < 20);
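
The trampoline generator above now gets the same debug-only guard the block compiler uses: in VIXL_DEBUG builds, a vixl::CodeBufferCheckScope asserts that everything emitted while it is alive fits in the stated number of bytes. A minimal standalone sketch of the idea, with an illustrative movz/movk/br body standing in for the real trampoline emission:

// Sketch only: wrap an external-buffer Assembler in a CodeBufferCheckScope for debug builds.
#include <cstddef>
#include <cstdint>
#include "aarch64/assembler-aarch64.h"
#include "code-generation-scopes-vixl.h"  // assumption: header providing CodeBufferCheckScope

void EmitJumpTrampoline(uint8_t* buffer, size_t space, const void* target)
{
  vixl::aarch64::Assembler armAsm(buffer, space);
#ifdef VIXL_DEBUG
  // Asserts on destruction that no more than 'space' bytes were generated in the scope.
  vixl::CodeBufferCheckScope check(&armAsm, space,
                                   vixl::CodeBufferCheckScope::kDontReserveBufferSpace);
#endif
  const uint64_t addr = reinterpret_cast<uint64_t>(target);
  armAsm.movz(vixl::aarch64::x16, addr & 0xFFFF, 0);           // absolute jump via x16,
  armAsm.movk(vixl::aarch64::x16, (addr >> 16) & 0xFFFF, 16);  // much like the real trampoline
  armAsm.movk(vixl::aarch64::x16, (addr >> 32) & 0xFFFF, 32);
  armAsm.movk(vixl::aarch64::x16, (addr >> 48) & 0xFFFF, 48);
  armAsm.br(vixl::aarch64::x16);
  armAsm.FinalizeCode();
}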


@ -123,7 +123,7 @@ constexpr u32 MAX_FAR_HOST_BYTES_PER_INSTRUCTION = 128;
bool armIsCallerSavedRegister(u32 id);
s64 armGetPCDisplacement(const void* current, const void* target);
void armMoveAddressToReg(vixl::aarch64::Assembler* armAsm, const vixl::aarch64::XRegister& reg, const void* addr);
void armMoveAddressToReg(vixl::aarch64::Assembler* armAsm, const vixl::aarch64::Register& reg, const void* addr);
void armEmitMov(vixl::aarch64::Assembler* armAsm, const vixl::aarch64::Register& rd, u64 imm);
void armEmitJmp(vixl::aarch64::Assembler* armAsm, const void* ptr, bool force_inline);
void armEmitCall(vixl::aarch64::Assembler* armAsm, const void* ptr, bool force_inline);
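
armMoveAddressToReg() likewise now takes the generic Register and asserts IsX() at runtime (see the .cpp hunk above), since it materializes a full pointer, preferring a page-relative adrp+add pair when the target is close to the code buffer. A rough sketch of that near-range path with illustrative names; real code (like the diff's armMoveAddressToReg) also needs a movz/movk fallback for distant targets, which is omitted here:

// Sketch only: materialize a nearby address into a 64-bit register via adrp+add.
#include <cassert>
#include <cstdint>
#include "aarch64/assembler-aarch64.h"
using namespace vixl::aarch64;

void MoveNearbyAddressToReg(Assembler* armAsm, const Register& reg, const void* addr)
{
  assert(reg.IsX());  // the width contract moved from the XRegister type to an assert
  const uintptr_t cur = reinterpret_cast<uintptr_t>(armAsm->GetCursorAddress<const void*>());
  const uintptr_t page_delta =
    (reinterpret_cast<uintptr_t>(addr) & ~uintptr_t(0xFFF)) - (cur & ~uintptr_t(0xFFF));
  const int64_t lo12 = static_cast<int64_t>(reinterpret_cast<uintptr_t>(addr) & 0xFFF);
  armAsm->adrp(reg, static_cast<int64_t>(page_delta) >> 12);  // page containing 'addr'
  armAsm->add(reg, reg, lo12);                                // low 12 bits of 'addr'
}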