basic_block: Move all variables behind a public interface

Lioncash 2016-08-25 10:35:50 -04:00 committed by MerryMage
parent 1d8432487d
commit 0e12fb6a56
9 changed files with 185 additions and 66 deletions
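
Taken together, the call sites in the hunks below imply roughly the following public surface on `IR::Block`. This is a sketch inferred from this diff alone; return types, the assumed `InstructionList`/`Terminal` aliases, and the member layout are guesses, not the committed header.

```cpp
// Hypothetical sketch of the accessor surface implied by the call sites in
// this diff. Return types and member layout are assumptions; only the method
// names appear in the commit. InstructionList and Terminal are assumed to be
// aliases defined elsewhere in the IR.
namespace Dynarmic {
namespace IR {

class Block final {
public:
    Arm::LocationDescriptor Location() const;                 // was: block.location
    Arm::Cond GetCondition() const;                           // was: block.cond
    bool HasConditionFailedLocation() const;                  // was: block.cond_failed.is_initialized()
    Arm::LocationDescriptor ConditionFailedLocation() const;  // was: block.cond_failed.get()
    size_t ConditionFailedCycleCount() const;                 // was: block.cond_failed_cycle_count
    InstructionList& Instructions();                          // was: block.instructions
    Terminal GetTerminal() const;                             // was: block.terminal
    size_t CycleCount() const;                                // was: block.cycle_count

private:
    // The former public members presumably live here now.
};

} // namespace IR
} // namespace Dynarmic
```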


@@ -57,7 +57,7 @@ static IR::Inst* FindUseWithOpcode(IR::Inst* inst, IR::Opcode opcode) {
 }
 static void EraseInstruction(IR::Block& block, IR::Inst* inst) {
-    block.instructions.erase(block.instructions.iterator_to(*inst));
+    block.Instructions().erase(block.Instructions().iterator_to(*inst));
 }
 EmitX64::BlockDescriptor EmitX64::Emit(const Arm::LocationDescriptor descriptor, Dynarmic::IR::Block& block) {
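
EraseInstruction keeps working because Boost.Intrusive's `iterator_to` recovers an iterator from a node reference; for that, `Instructions()` presumably just hands back a mutable reference to the underlying intrusive list. A minimal sketch under that assumption:

```cpp
// Sketch, assuming Instructions() is a thin accessor over the former public
// intrusive-list member; returning a mutable reference keeps the existing
// iterator_to/erase pattern usable unchanged.
Block::InstructionList& Block::Instructions() {
    return instructions;
}
```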
@@ -92,8 +92,8 @@ EmitX64::BlockDescriptor EmitX64::Emit(const Arm::LocationDescriptor descriptor,
         reg_alloc.EndOfAllocScope();
     }
-    EmitAddCycles(block.cycle_count);
-    EmitTerminal(block.terminal, block.location);
+    EmitAddCycles(block.CycleCount());
+    EmitTerminal(block.GetTerminal(), block.Location());
     reg_alloc.AssertNoMoreUses();
@@ -1227,15 +1227,15 @@ static void FPThreeOp32(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block
     X64Reg operand = reg_alloc.UseRegister(b, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         DenormalsAreZero32(code, result, gpr_scratch);
         DenormalsAreZero32(code, operand, gpr_scratch);
     }
     (code->*fn)(result, R(operand));
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         FlushToZero32(code, result, gpr_scratch);
     }
-    if (block.location.FPSCR().DN()) {
+    if (block.Location().FPSCR().DN()) {
         DefaultNaN32(code, result);
     }
 }
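
The `block.Location().FPSCR().FTZ()` chains repeated through the FP emitters suggest `Location()` is a plain getter over the old public member; a sketch under that assumption:

```cpp
// Sketch, assuming Location() simply returns the stored descriptor that was
// previously read directly as block.location; the real body may differ.
Arm::LocationDescriptor Block::Location() const {
    return location;
}
```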
@@ -1248,15 +1248,15 @@ static void FPThreeOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block
     X64Reg operand = reg_alloc.UseRegister(b, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         DenormalsAreZero64(code, result, gpr_scratch);
         DenormalsAreZero64(code, operand, gpr_scratch);
     }
     (code->*fn)(result, R(operand));
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         FlushToZero64(code, result, gpr_scratch);
     }
-    if (block.location.FPSCR().DN()) {
+    if (block.Location().FPSCR().DN()) {
         DefaultNaN64(code, result);
     }
 }
@@ -1267,14 +1267,14 @@ static void FPTwoOp32(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block,
     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         DenormalsAreZero32(code, result, gpr_scratch);
     }
     (code->*fn)(result, R(result));
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         FlushToZero32(code, result, gpr_scratch);
     }
-    if (block.location.FPSCR().DN()) {
+    if (block.Location().FPSCR().DN()) {
         DefaultNaN32(code, result);
     }
 }
@@ -1285,14 +1285,14 @@ static void FPTwoOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block,
     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         DenormalsAreZero64(code, result, gpr_scratch);
     }
     (code->*fn)(result, R(result));
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         FlushToZero64(code, result, gpr_scratch);
     }
-    if (block.location.FPSCR().DN()) {
+    if (block.Location().FPSCR().DN()) {
         DefaultNaN64(code, result);
     }
 }
@@ -1403,14 +1403,14 @@ void EmitX64::EmitFPSingleToDouble(IR::Block& block, IR::Inst* inst) {
     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         DenormalsAreZero32(code, result, gpr_scratch);
     }
     code->CVTSS2SD(result, R(result));
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         FlushToZero64(code, result, gpr_scratch);
     }
-    if (block.location.FPSCR().DN()) {
+    if (block.Location().FPSCR().DN()) {
         DefaultNaN64(code, result);
     }
 }
@@ -1421,14 +1421,14 @@ void EmitX64::EmitFPDoubleToSingle(IR::Block& block, IR::Inst* inst) {
     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         DenormalsAreZero64(code, result, gpr_scratch);
     }
     code->CVTSD2SS(result, R(result));
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         FlushToZero32(code, result, gpr_scratch);
     }
-    if (block.location.FPSCR().DN()) {
+    if (block.Location().FPSCR().DN()) {
         DefaultNaN32(code, result);
     }
 }
@@ -1444,7 +1444,7 @@ void EmitX64::EmitFPSingleToS32(IR::Block& block, IR::Inst* inst) {
     // ARM saturates on conversion; this differs from x64 which returns a sentinel value.
     // Conversion to double is lossless, and allows for clamping.
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         DenormalsAreZero32(code, from, gpr_scratch);
     }
     code->CVTSS2SD(from, R(from));
@@ -1482,8 +1482,8 @@ void EmitX64::EmitFPSingleToU32(IR::Block& block, IR::Inst* inst) {
     //
     // FIXME: Inexact exception not correctly signalled with the below code
-    if (block.location.FPSCR().RMode() != Arm::FPSCR::RoundingMode::TowardsZero && !round_towards_zero) {
-        if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().RMode() != Arm::FPSCR::RoundingMode::TowardsZero && !round_towards_zero) {
+        if (block.Location().FPSCR().FTZ()) {
             DenormalsAreZero32(code, from, gpr_scratch);
         }
         code->CVTSS2SD(from, R(from));
@@ -1504,7 +1504,7 @@ void EmitX64::EmitFPSingleToU32(IR::Block& block, IR::Inst* inst) {
         X64Reg xmm_mask = reg_alloc.ScratchRegister(any_xmm);
         X64Reg gpr_mask = reg_alloc.ScratchRegister(any_gpr);
-        if (block.location.FPSCR().FTZ()) {
+        if (block.Location().FPSCR().FTZ()) {
             DenormalsAreZero32(code, from, gpr_scratch);
         }
         code->CVTSS2SD(from, R(from));
@@ -1540,7 +1540,7 @@ void EmitX64::EmitFPDoubleToS32(IR::Block& block, IR::Inst* inst) {
     // ARM saturates on conversion; this differs from x64 which returns a sentinel value.
-    if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().FTZ()) {
         DenormalsAreZero64(code, from, gpr_scratch);
     }
     // First time is to set flags
@@ -1575,8 +1575,8 @@ void EmitX64::EmitFPDoubleToU32(IR::Block& block, IR::Inst* inst) {
     // TODO: Use VCVTPD2UDQ when AVX512VL is available.
     // FIXME: Inexact exception not correctly signalled with the below code
-    if (block.location.FPSCR().RMode() != Arm::FPSCR::RoundingMode::TowardsZero && !round_towards_zero) {
-        if (block.location.FPSCR().FTZ()) {
+    if (block.Location().FPSCR().RMode() != Arm::FPSCR::RoundingMode::TowardsZero && !round_towards_zero) {
+        if (block.Location().FPSCR().FTZ()) {
            DenormalsAreZero64(code, from, gpr_scratch);
         }
         ZeroIfNaN64(code, from);
@@ -1596,7 +1596,7 @@ void EmitX64::EmitFPDoubleToU32(IR::Block& block, IR::Inst* inst) {
         X64Reg xmm_mask = reg_alloc.ScratchRegister(any_xmm);
         X64Reg gpr_mask = reg_alloc.ScratchRegister(any_gpr);
-        if (block.location.FPSCR().FTZ()) {
+        if (block.Location().FPSCR().FTZ()) {
            DenormalsAreZero64(code, from, gpr_scratch);
         }
         ZeroIfNaN64(code, from);
@@ -1933,19 +1933,19 @@ static CCFlags EmitCond(BlockOfCode* code, Arm::Cond cond) {
 }
 void EmitX64::EmitCondPrelude(const IR::Block& block) {
-    if (block.cond == Arm::Cond::AL) {
-        ASSERT(!block.cond_failed.is_initialized());
+    if (block.GetCondition() == Arm::Cond::AL) {
+        ASSERT(!block.HasConditionFailedLocation());
         return;
     }
-    ASSERT(block.cond_failed.is_initialized());
+    ASSERT(block.HasConditionFailedLocation());
 
-    CCFlags cc = EmitCond(code, block.cond);
+    CCFlags cc = EmitCond(code, block.GetCondition());
     // TODO: Improve, maybe.
     auto fixup = code->J_CC(cc, true);
-    EmitAddCycles(block.cond_failed_cycle_count);
-    EmitTerminalLinkBlock(IR::Term::LinkBlock{block.cond_failed.get()}, block.location);
+    EmitAddCycles(block.ConditionFailedCycleCount());
+    EmitTerminalLinkBlock(IR::Term::LinkBlock{block.ConditionFailedLocation()}, block.Location());
     code->SetJumpTarget(fixup);
 }
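
The replaced expressions `block.cond_failed.is_initialized()` and `block.cond_failed.get()` indicate the condition-failed state remains a boost::optional behind the new accessors; a sketch of how they presumably wrap it:

```cpp
// Sketch, inferred from the expressions these calls replaced; the bodies are
// assumptions, only the method names appear in the commit.
bool Block::HasConditionFailedLocation() const {
    return cond_failed.is_initialized();
}

Arm::LocationDescriptor Block::ConditionFailedLocation() const {
    return cond_failed.get();
}
```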