diff --git a/src/dynarmic/frontend/A32/translate/conditional_state.cpp b/src/dynarmic/frontend/A32/translate/conditional_state.cpp index 8bd875c7..26756574 100644 --- a/src/dynarmic/frontend/A32/translate/conditional_state.cpp +++ b/src/dynarmic/frontend/A32/translate/conditional_state.cpp @@ -17,14 +17,15 @@ namespace Dynarmic::A32 { -bool CondCanContinue(ConditionalState cond_state, const A32::IREmitter& ir) { +bool CondCanContinue(const ConditionalState cond_state, const A32::IREmitter& ir) { ASSERT_MSG(cond_state != ConditionalState::Break, "Should never happen."); - if (cond_state == ConditionalState::None) return true; // TODO: This is more conservative than necessary. - return std::all_of(ir.block.begin(), ir.block.end(), [](const IR::Inst& inst) { return !inst.WritesToCPSR(); }); + return std::all_of(ir.block.begin(), ir.block.end(), [](const IR::Inst& inst) { + return !WritesToCPSR(inst.GetOpcode()); + }); } bool IsConditionPassed(TranslatorVisitor& v, IR::Cond cond) { diff --git a/src/dynarmic/frontend/A32/translate/conditional_state.h b/src/dynarmic/frontend/A32/translate/conditional_state.h index 18c8b1cc..27e1c98d 100644 --- a/src/dynarmic/frontend/A32/translate/conditional_state.h +++ b/src/dynarmic/frontend/A32/translate/conditional_state.h @@ -27,7 +27,7 @@ enum class ConditionalState { Trailing, }; -bool CondCanContinue(ConditionalState cond_state, const A32::IREmitter& ir); +bool CondCanContinue(const ConditionalState cond_state, const A32::IREmitter& ir); bool IsConditionPassed(TranslatorVisitor& v, IR::Cond cond); } // namespace Dynarmic::A32 diff --git a/src/dynarmic/ir/microinstruction.cpp b/src/dynarmic/ir/microinstruction.cpp index c52b7bef..3f67a3ad 100644 --- a/src/dynarmic/ir/microinstruction.cpp +++ b/src/dynarmic/ir/microinstruction.cpp @@ -14,595 +14,10 @@ namespace Dynarmic::IR { -bool Inst::IsArithmeticShift() const { - return op == Opcode::ArithmeticShiftRight32 - || op == Opcode::ArithmeticShiftRight64; -} - -bool Inst::IsCircularShift() const { - return op == Opcode::RotateRight32 - || op == Opcode::RotateRight64 - || op == Opcode::RotateRightExtended; -} - -bool Inst::IsLogicalShift() const { - switch (op) { - case Opcode::LogicalShiftLeft32: - case Opcode::LogicalShiftLeft64: - case Opcode::LogicalShiftRight32: - case Opcode::LogicalShiftRight64: - return true; - - default: - return false; - } -} - -bool Inst::IsShift() const { - return IsArithmeticShift() - || IsCircularShift() - || IsLogicalShift(); -} - -bool Inst::IsBarrier() const { - switch (op) { - case Opcode::A32DataMemoryBarrier: - case Opcode::A32DataSynchronizationBarrier: - case Opcode::A32InstructionSynchronizationBarrier: - case Opcode::A64DataMemoryBarrier: - case Opcode::A64DataSynchronizationBarrier: - case Opcode::A64InstructionSynchronizationBarrier: - return true; - - default: - return false; - } -} - -bool Inst::IsSharedMemoryRead() const { - switch (op) { - case Opcode::A32ReadMemory8: - case Opcode::A32ReadMemory16: - case Opcode::A32ReadMemory32: - case Opcode::A32ReadMemory64: - case Opcode::A64ReadMemory8: - case Opcode::A64ReadMemory16: - case Opcode::A64ReadMemory32: - case Opcode::A64ReadMemory64: - case Opcode::A64ReadMemory128: - return true; - - default: - return false; - } -} - -bool Inst::IsSharedMemoryWrite() const { - switch (op) { - case Opcode::A32WriteMemory8: - case Opcode::A32WriteMemory16: - case Opcode::A32WriteMemory32: - case Opcode::A32WriteMemory64: - case Opcode::A64WriteMemory8: - case Opcode::A64WriteMemory16: - case Opcode::A64WriteMemory32: - case 
Opcode::A64WriteMemory64: - case Opcode::A64WriteMemory128: - return true; - - default: - return false; - } -} - -bool Inst::IsSharedMemoryReadOrWrite() const { - return IsSharedMemoryRead() - || IsSharedMemoryWrite(); -} - -bool Inst::IsExclusiveMemoryRead() const { - switch (op) { - case Opcode::A32ExclusiveReadMemory8: - case Opcode::A32ExclusiveReadMemory16: - case Opcode::A32ExclusiveReadMemory32: - case Opcode::A32ExclusiveReadMemory64: - case Opcode::A64ExclusiveReadMemory8: - case Opcode::A64ExclusiveReadMemory16: - case Opcode::A64ExclusiveReadMemory32: - case Opcode::A64ExclusiveReadMemory64: - case Opcode::A64ExclusiveReadMemory128: - return true; - - default: - return false; - } -} - -bool Inst::IsExclusiveMemoryWrite() const { - switch (op) { - case Opcode::A32ExclusiveWriteMemory8: - case Opcode::A32ExclusiveWriteMemory16: - case Opcode::A32ExclusiveWriteMemory32: - case Opcode::A32ExclusiveWriteMemory64: - case Opcode::A64ExclusiveWriteMemory8: - case Opcode::A64ExclusiveWriteMemory16: - case Opcode::A64ExclusiveWriteMemory32: - case Opcode::A64ExclusiveWriteMemory64: - case Opcode::A64ExclusiveWriteMemory128: - return true; - - default: - return false; - } -} - -bool Inst::IsMemoryRead() const { - return IsSharedMemoryRead() - || IsExclusiveMemoryRead(); -} - -bool Inst::IsMemoryWrite() const { - return IsSharedMemoryWrite() - || IsExclusiveMemoryWrite(); -} - -bool Inst::IsMemoryReadOrWrite() const { - return IsMemoryRead() - || IsMemoryWrite(); -} - -bool Inst::ReadsFromCPSR() const { - switch (op) { - case Opcode::A32GetCpsr: - case Opcode::A32GetCFlag: - case Opcode::A32GetGEFlags: - case Opcode::A32UpdateUpperLocationDescriptor: - case Opcode::A64GetCFlag: - case Opcode::A64GetNZCVRaw: - case Opcode::ConditionalSelect32: - case Opcode::ConditionalSelect64: - case Opcode::ConditionalSelectNZCV: - return true; - - default: - return false; - } -} - -bool Inst::WritesToCPSR() const { - switch (op) { - case Opcode::A32SetCpsr: - case Opcode::A32SetCpsrNZCVRaw: - case Opcode::A32SetCpsrNZCV: - case Opcode::A32SetCpsrNZCVQ: - case Opcode::A32SetCpsrNZ: - case Opcode::A32SetCpsrNZC: - case Opcode::A32OrQFlag: - case Opcode::A32SetGEFlags: - case Opcode::A32SetGEFlagsCompressed: - case Opcode::A32UpdateUpperLocationDescriptor: - case Opcode::A64SetNZCVRaw: - case Opcode::A64SetNZCV: - return true; - - default: - return false; - } -} - -bool Inst::WritesToSystemRegister() const { - switch (op) { - case Opcode::A64SetTPIDR: - return true; - default: - return false; - } -} - -bool Inst::ReadsFromCoreRegister() const { - switch (op) { - case Opcode::A32GetRegister: - case Opcode::A32GetExtendedRegister32: - case Opcode::A32GetExtendedRegister64: - case Opcode::A32GetVector: - case Opcode::A64GetW: - case Opcode::A64GetX: - case Opcode::A64GetS: - case Opcode::A64GetD: - case Opcode::A64GetQ: - case Opcode::A64GetSP: - return true; - - default: - return false; - } -} - -bool Inst::WritesToCoreRegister() const { - switch (op) { - case Opcode::A32SetRegister: - case Opcode::A32SetExtendedRegister32: - case Opcode::A32SetExtendedRegister64: - case Opcode::A32SetVector: - case Opcode::A32BXWritePC: - case Opcode::A64SetW: - case Opcode::A64SetX: - case Opcode::A64SetS: - case Opcode::A64SetD: - case Opcode::A64SetQ: - case Opcode::A64SetSP: - case Opcode::A64SetPC: - return true; - - default: - return false; - } -} - -bool Inst::ReadsFromFPCR() const { - switch (op) { - case Opcode::A32GetFpscr: - case Opcode::A32GetFpscrNZCV: - case Opcode::A64GetFPCR: - return true; - - default: - 
return false; - } -} - -bool Inst::WritesToFPCR() const { - switch (op) { - case Opcode::A32SetFpscr: - case Opcode::A32SetFpscrNZCV: - case Opcode::A64SetFPCR: - return true; - - default: - return false; - } -} - -bool Inst::ReadsFromFPSR() const { - return op == Opcode::A32GetFpscr - || op == Opcode::A32GetFpscrNZCV - || op == Opcode::A64GetFPSR - || ReadsFromFPSRCumulativeExceptionBits() - || ReadsFromFPSRCumulativeSaturationBit(); -} - -bool Inst::WritesToFPSR() const { - return op == Opcode::A32SetFpscr - || op == Opcode::A32SetFpscrNZCV - || op == Opcode::A64SetFPSR - || WritesToFPSRCumulativeExceptionBits() - || WritesToFPSRCumulativeSaturationBit(); -} - -bool Inst::ReadsFromFPSRCumulativeExceptionBits() const { - return ReadsFromAndWritesToFPSRCumulativeExceptionBits(); -} - -bool Inst::WritesToFPSRCumulativeExceptionBits() const { - return ReadsFromAndWritesToFPSRCumulativeExceptionBits(); -} - -bool Inst::ReadsFromAndWritesToFPSRCumulativeExceptionBits() const { - switch (op) { - case Opcode::FPAdd32: - case Opcode::FPAdd64: - case Opcode::FPCompare32: - case Opcode::FPCompare64: - case Opcode::FPDiv32: - case Opcode::FPDiv64: - case Opcode::FPMax32: - case Opcode::FPMax64: - case Opcode::FPMaxNumeric32: - case Opcode::FPMaxNumeric64: - case Opcode::FPMin32: - case Opcode::FPMin64: - case Opcode::FPMinNumeric32: - case Opcode::FPMinNumeric64: - case Opcode::FPMul32: - case Opcode::FPMul64: - case Opcode::FPMulAdd16: - case Opcode::FPMulAdd32: - case Opcode::FPMulAdd64: - case Opcode::FPMulSub16: - case Opcode::FPMulSub32: - case Opcode::FPMulSub64: - case Opcode::FPRecipEstimate16: - case Opcode::FPRecipEstimate32: - case Opcode::FPRecipEstimate64: - case Opcode::FPRecipExponent16: - case Opcode::FPRecipExponent32: - case Opcode::FPRecipExponent64: - case Opcode::FPRecipStepFused16: - case Opcode::FPRecipStepFused32: - case Opcode::FPRecipStepFused64: - case Opcode::FPRoundInt16: - case Opcode::FPRoundInt32: - case Opcode::FPRoundInt64: - case Opcode::FPRSqrtEstimate16: - case Opcode::FPRSqrtEstimate32: - case Opcode::FPRSqrtEstimate64: - case Opcode::FPRSqrtStepFused16: - case Opcode::FPRSqrtStepFused32: - case Opcode::FPRSqrtStepFused64: - case Opcode::FPSqrt32: - case Opcode::FPSqrt64: - case Opcode::FPSub32: - case Opcode::FPSub64: - case Opcode::FPHalfToDouble: - case Opcode::FPHalfToSingle: - case Opcode::FPSingleToDouble: - case Opcode::FPSingleToHalf: - case Opcode::FPDoubleToHalf: - case Opcode::FPDoubleToSingle: - case Opcode::FPDoubleToFixedS32: - case Opcode::FPDoubleToFixedS64: - case Opcode::FPDoubleToFixedU32: - case Opcode::FPDoubleToFixedU64: - case Opcode::FPHalfToFixedS32: - case Opcode::FPHalfToFixedS64: - case Opcode::FPHalfToFixedU32: - case Opcode::FPHalfToFixedU64: - case Opcode::FPSingleToFixedS32: - case Opcode::FPSingleToFixedS64: - case Opcode::FPSingleToFixedU32: - case Opcode::FPSingleToFixedU64: - case Opcode::FPFixedU32ToSingle: - case Opcode::FPFixedS32ToSingle: - case Opcode::FPFixedU32ToDouble: - case Opcode::FPFixedU64ToDouble: - case Opcode::FPFixedU64ToSingle: - case Opcode::FPFixedS32ToDouble: - case Opcode::FPFixedS64ToDouble: - case Opcode::FPFixedS64ToSingle: - case Opcode::FPVectorAdd32: - case Opcode::FPVectorAdd64: - case Opcode::FPVectorDiv32: - case Opcode::FPVectorDiv64: - case Opcode::FPVectorEqual16: - case Opcode::FPVectorEqual32: - case Opcode::FPVectorEqual64: - case Opcode::FPVectorFromSignedFixed32: - case Opcode::FPVectorFromSignedFixed64: - case Opcode::FPVectorFromUnsignedFixed32: - case 
Opcode::FPVectorFromUnsignedFixed64: - case Opcode::FPVectorGreater32: - case Opcode::FPVectorGreater64: - case Opcode::FPVectorGreaterEqual32: - case Opcode::FPVectorGreaterEqual64: - case Opcode::FPVectorMul32: - case Opcode::FPVectorMul64: - case Opcode::FPVectorMulAdd16: - case Opcode::FPVectorMulAdd32: - case Opcode::FPVectorMulAdd64: - case Opcode::FPVectorPairedAddLower32: - case Opcode::FPVectorPairedAddLower64: - case Opcode::FPVectorPairedAdd32: - case Opcode::FPVectorPairedAdd64: - case Opcode::FPVectorRecipEstimate16: - case Opcode::FPVectorRecipEstimate32: - case Opcode::FPVectorRecipEstimate64: - case Opcode::FPVectorRecipStepFused16: - case Opcode::FPVectorRecipStepFused32: - case Opcode::FPVectorRecipStepFused64: - case Opcode::FPVectorRoundInt16: - case Opcode::FPVectorRoundInt32: - case Opcode::FPVectorRoundInt64: - case Opcode::FPVectorRSqrtEstimate16: - case Opcode::FPVectorRSqrtEstimate32: - case Opcode::FPVectorRSqrtEstimate64: - case Opcode::FPVectorRSqrtStepFused16: - case Opcode::FPVectorRSqrtStepFused32: - case Opcode::FPVectorRSqrtStepFused64: - case Opcode::FPVectorSqrt32: - case Opcode::FPVectorSqrt64: - case Opcode::FPVectorSub32: - case Opcode::FPVectorSub64: - case Opcode::FPVectorToSignedFixed16: - case Opcode::FPVectorToSignedFixed32: - case Opcode::FPVectorToSignedFixed64: - case Opcode::FPVectorToUnsignedFixed16: - case Opcode::FPVectorToUnsignedFixed32: - case Opcode::FPVectorToUnsignedFixed64: - return true; - - default: - return false; - } -} - -bool Inst::ReadsFromFPSRCumulativeSaturationBit() const { - return false; -} - -bool Inst::WritesToFPSRCumulativeSaturationBit() const { - switch (op) { - case Opcode::SignedSaturatedAdd8: - case Opcode::SignedSaturatedAdd16: - case Opcode::SignedSaturatedAdd32: - case Opcode::SignedSaturatedAdd64: - case Opcode::SignedSaturatedDoublingMultiplyReturnHigh16: - case Opcode::SignedSaturatedDoublingMultiplyReturnHigh32: - case Opcode::SignedSaturatedSub8: - case Opcode::SignedSaturatedSub16: - case Opcode::SignedSaturatedSub32: - case Opcode::SignedSaturatedSub64: - case Opcode::UnsignedSaturatedAdd8: - case Opcode::UnsignedSaturatedAdd16: - case Opcode::UnsignedSaturatedAdd32: - case Opcode::UnsignedSaturatedAdd64: - case Opcode::UnsignedSaturatedSub8: - case Opcode::UnsignedSaturatedSub16: - case Opcode::UnsignedSaturatedSub32: - case Opcode::UnsignedSaturatedSub64: - case Opcode::VectorSignedSaturatedAbs8: - case Opcode::VectorSignedSaturatedAbs16: - case Opcode::VectorSignedSaturatedAbs32: - case Opcode::VectorSignedSaturatedAbs64: - case Opcode::VectorSignedSaturatedAccumulateUnsigned8: - case Opcode::VectorSignedSaturatedAccumulateUnsigned16: - case Opcode::VectorSignedSaturatedAccumulateUnsigned32: - case Opcode::VectorSignedSaturatedAccumulateUnsigned64: - case Opcode::VectorSignedSaturatedAdd8: - case Opcode::VectorSignedSaturatedAdd16: - case Opcode::VectorSignedSaturatedAdd32: - case Opcode::VectorSignedSaturatedAdd64: - case Opcode::VectorSignedSaturatedDoublingMultiplyHigh16: - case Opcode::VectorSignedSaturatedDoublingMultiplyHigh32: - case Opcode::VectorSignedSaturatedDoublingMultiplyHighRounding16: - case Opcode::VectorSignedSaturatedDoublingMultiplyHighRounding32: - case Opcode::VectorSignedSaturatedDoublingMultiplyLong16: - case Opcode::VectorSignedSaturatedDoublingMultiplyLong32: - case Opcode::VectorSignedSaturatedNarrowToSigned16: - case Opcode::VectorSignedSaturatedNarrowToSigned32: - case Opcode::VectorSignedSaturatedNarrowToSigned64: - case Opcode::VectorSignedSaturatedNarrowToUnsigned16: - 
case Opcode::VectorSignedSaturatedNarrowToUnsigned32: - case Opcode::VectorSignedSaturatedNarrowToUnsigned64: - case Opcode::VectorSignedSaturatedNeg8: - case Opcode::VectorSignedSaturatedNeg16: - case Opcode::VectorSignedSaturatedNeg32: - case Opcode::VectorSignedSaturatedNeg64: - case Opcode::VectorSignedSaturatedShiftLeft8: - case Opcode::VectorSignedSaturatedShiftLeft16: - case Opcode::VectorSignedSaturatedShiftLeft32: - case Opcode::VectorSignedSaturatedShiftLeft64: - case Opcode::VectorSignedSaturatedShiftLeftUnsigned8: - case Opcode::VectorSignedSaturatedShiftLeftUnsigned16: - case Opcode::VectorSignedSaturatedShiftLeftUnsigned32: - case Opcode::VectorSignedSaturatedShiftLeftUnsigned64: - case Opcode::VectorSignedSaturatedSub8: - case Opcode::VectorSignedSaturatedSub16: - case Opcode::VectorSignedSaturatedSub32: - case Opcode::VectorSignedSaturatedSub64: - case Opcode::VectorUnsignedSaturatedAccumulateSigned8: - case Opcode::VectorUnsignedSaturatedAccumulateSigned16: - case Opcode::VectorUnsignedSaturatedAccumulateSigned32: - case Opcode::VectorUnsignedSaturatedAccumulateSigned64: - case Opcode::VectorUnsignedSaturatedAdd8: - case Opcode::VectorUnsignedSaturatedAdd16: - case Opcode::VectorUnsignedSaturatedAdd32: - case Opcode::VectorUnsignedSaturatedAdd64: - case Opcode::VectorUnsignedSaturatedNarrow16: - case Opcode::VectorUnsignedSaturatedNarrow32: - case Opcode::VectorUnsignedSaturatedNarrow64: - case Opcode::VectorUnsignedSaturatedShiftLeft8: - case Opcode::VectorUnsignedSaturatedShiftLeft16: - case Opcode::VectorUnsignedSaturatedShiftLeft32: - case Opcode::VectorUnsignedSaturatedShiftLeft64: - case Opcode::VectorUnsignedSaturatedSub8: - case Opcode::VectorUnsignedSaturatedSub16: - case Opcode::VectorUnsignedSaturatedSub32: - case Opcode::VectorUnsignedSaturatedSub64: - return true; - - default: - return false; - } -} - -bool Inst::CausesCPUException() const { - return op == Opcode::Breakpoint - || op == Opcode::A32CallSupervisor - || op == Opcode::A32ExceptionRaised - || op == Opcode::A64CallSupervisor - || op == Opcode::A64ExceptionRaised; -} - -bool Inst::AltersExclusiveState() const { - return op == Opcode::A32ClearExclusive - || op == Opcode::A64ClearExclusive - || IsExclusiveMemoryRead() - || IsExclusiveMemoryWrite(); -} - -bool Inst::IsCoprocessorInstruction() const { - switch (op) { - case Opcode::A32CoprocInternalOperation: - case Opcode::A32CoprocSendOneWord: - case Opcode::A32CoprocSendTwoWords: - case Opcode::A32CoprocGetOneWord: - case Opcode::A32CoprocGetTwoWords: - case Opcode::A32CoprocLoadWords: - case Opcode::A32CoprocStoreWords: - return true; - - default: - return false; - } -} - -bool Inst::IsSetCheckBitOperation() const { - return op == Opcode::A32SetCheckBit - || op == Opcode::A64SetCheckBit; -} - -bool Inst::MayHaveSideEffects() const { - return op == Opcode::PushRSB - || op == Opcode::CallHostFunction - || op == Opcode::A64DataCacheOperationRaised - || op == Opcode::A64InstructionCacheOperationRaised - || IsSetCheckBitOperation() - || IsBarrier() - || CausesCPUException() - || WritesToCoreRegister() - || WritesToSystemRegister() - || WritesToCPSR() - || WritesToFPCR() - || WritesToFPSR() - || AltersExclusiveState() - || IsMemoryWrite() - || IsCoprocessorInstruction(); -} - -bool Inst::IsAPseudoOperation() const { - switch (op) { - case Opcode::GetCarryFromOp: - case Opcode::GetOverflowFromOp: - case Opcode::GetGEFromOp: - case Opcode::GetNZCVFromOp: - case Opcode::GetNZFromOp: - case Opcode::GetUpperFromOp: - case Opcode::GetLowerFromOp: - case 
Opcode::MostSignificantBit: - case Opcode::IsZero32: - case Opcode::IsZero64: - return true; - - default: - return false; - } -} - -bool Inst::MayGetNZCVFromOp() const { - switch (op) { - case Opcode::Add32: - case Opcode::Add64: - case Opcode::Sub32: - case Opcode::Sub64: - case Opcode::And32: - case Opcode::And64: - case Opcode::AndNot32: - case Opcode::AndNot64: - case Opcode::Eor32: - case Opcode::Eor64: - case Opcode::Or32: - case Opcode::Or64: - case Opcode::Not32: - case Opcode::Not64: - return true; - - default: - return false; - } -} - bool Inst::AreAllArgsImmediates() const { - return std::all_of(args.begin(), args.begin() + NumArgs(), [](const auto& value) { return value.IsImmediate(); }); + return std::all_of(args.begin(), args.begin() + NumArgs(), [](const auto& value) { + return value.IsImmediate(); + }); } Inst* Inst::GetAssociatedPseudoOperation(Opcode opcode) { @@ -623,36 +38,18 @@ Type Inst::GetType() const { return GetTypeOf(op); } -size_t Inst::NumArgs() const { - return GetNumArgsOf(op); -} - -Value Inst::GetArg(size_t index) const { - ASSERT_MSG(index < GetNumArgsOf(op), "Inst::GetArg: index {} >= number of arguments of {} ({})", index, op, GetNumArgsOf(op)); - ASSERT_MSG(!args[index].IsEmpty() || GetArgTypeOf(op, index) == IR::Type::Opaque, "Inst::GetArg: index {} is empty", index, args[index].GetType()); - - return args[index]; -} - -void Inst::SetArg(size_t index, Value value) { - ASSERT_MSG(index < GetNumArgsOf(op), "Inst::SetArg: index {} >= number of arguments of {} ({})", index, op, GetNumArgsOf(op)); - ASSERT_MSG(AreTypesCompatible(value.GetType(), GetArgTypeOf(op, index)), "Inst::SetArg: type {} of argument {} not compatible with operation {} ({})", value.GetType(), index, op, GetArgTypeOf(op, index)); - +void Inst::SetArg(size_t index, Value value) noexcept { + DEBUG_ASSERT_MSG(index < GetNumArgsOf(op), "Inst::SetArg: index {} >= number of arguments of {} ({})", index, op, GetNumArgsOf(op)); + DEBUG_ASSERT_MSG(AreTypesCompatible(value.GetType(), GetArgTypeOf(op, index)), "Inst::SetArg: type {} of argument {} not compatible with operation {} ({})", value.GetType(), index, op, GetArgTypeOf(op, index)); if (!args[index].IsImmediate()) { UndoUse(args[index]); } if (!value.IsImmediate()) { Use(value); } - args[index] = value; } -void Inst::Invalidate() { - ClearArgs(); - op = Opcode::Void; -} - void Inst::ClearArgs() { for (auto& value : args) { if (!value.IsImmediate()) { @@ -677,9 +74,9 @@ void Inst::ReplaceUsesWith(Value replacement) { void Inst::Use(const Value& value) { value.GetInst()->use_count++; - if (IsAPseudoOperation()) { + if (IsAPseudoOperation(op)) { if (op == Opcode::GetNZCVFromOp) { - ASSERT_MSG(value.GetInst()->MayGetNZCVFromOp(), "This value doesn't support the GetNZCVFromOp pseduo-op"); + ASSERT_MSG(MayGetNZCVFromOp(value.GetInst()->GetOpcode()), "This value doesn't support the GetNZCVFromOp pseduo-op"); } Inst* insert_point = value.GetInst(); @@ -694,7 +91,7 @@ void Inst::Use(const Value& value) { void Inst::UndoUse(const Value& value) { value.GetInst()->use_count--; - if (IsAPseudoOperation()) { + if (IsAPseudoOperation(op)) { Inst* insert_point = value.GetInst(); while (insert_point->next_pseudoop != this) { insert_point = insert_point->next_pseudoop; diff --git a/src/dynarmic/ir/microinstruction.h b/src/dynarmic/ir/microinstruction.h index 95eef2fb..a26a9d80 100644 --- a/src/dynarmic/ir/microinstruction.h +++ b/src/dynarmic/ir/microinstruction.h @@ -11,6 +11,7 @@ #include #include "dynarmic/ir/value.h" +#include "dynarmic/ir/opcodes.h" 
namespace Dynarmic::IR { @@ -26,94 +27,7 @@ class Inst final : public mcl::intrusive_list_node { public: explicit Inst(Opcode op) : op(op) {} - /// Determines whether or not this instruction performs an arithmetic shift. - bool IsArithmeticShift() const; - /// Determines whether or not this instruction performs a logical shift. - bool IsLogicalShift() const; - /// Determines whether or not this instruction performs a circular shift. - bool IsCircularShift() const; - /// Determines whether or not this instruction performs any kind of shift. - bool IsShift() const; - - /// Determines whether or not this instruction is a form of barrier. - bool IsBarrier() const; - - /// Determines whether or not this instruction performs a shared memory read. - bool IsSharedMemoryRead() const; - /// Determines whether or not this instruction performs a shared memory write. - bool IsSharedMemoryWrite() const; - /// Determines whether or not this instruction performs a shared memory read or write. - bool IsSharedMemoryReadOrWrite() const; - /// Determines whether or not this instruction performs an atomic memory read. - bool IsExclusiveMemoryRead() const; - /// Determines whether or not this instruction performs an atomic memory write. - bool IsExclusiveMemoryWrite() const; - - /// Determines whether or not this instruction performs any kind of memory read. - bool IsMemoryRead() const; - /// Determines whether or not this instruction performs any kind of memory write. - bool IsMemoryWrite() const; - /// Determines whether or not this instruction performs any kind of memory access. - bool IsMemoryReadOrWrite() const; - - /// Determines whether or not this instruction reads from the CPSR. - bool ReadsFromCPSR() const; - /// Determines whether or not this instruction writes to the CPSR. - bool WritesToCPSR() const; - - /// Determines whether or not this instruction writes to a system register. - bool WritesToSystemRegister() const; - - /// Determines whether or not this instruction reads from a core register. - bool ReadsFromCoreRegister() const; - /// Determines whether or not this instruction writes to a core register. - bool WritesToCoreRegister() const; - - /// Determines whether or not this instruction reads from the FPCR. - bool ReadsFromFPCR() const; - /// Determines whether or not this instruction writes to the FPCR. - bool WritesToFPCR() const; - - /// Determines whether or not this instruction reads from the FPSR. - bool ReadsFromFPSR() const; - /// Determines whether or not this instruction writes to the FPSR. - bool WritesToFPSR() const; - - /// Determines whether or not this instruction reads from the FPSR cumulative exception bits. - bool ReadsFromFPSRCumulativeExceptionBits() const; - /// Determines whether or not this instruction writes to the FPSR cumulative exception bits. - bool WritesToFPSRCumulativeExceptionBits() const; - /// Determines whether or not this instruction both reads from and writes to the FPSR cumulative exception bits. - bool ReadsFromAndWritesToFPSRCumulativeExceptionBits() const; - - /// Determines whether or not this instruction reads from the FPSR cumulative saturation bit. - bool ReadsFromFPSRCumulativeSaturationBit() const; - /// Determines whether or not this instruction writes to the FPSR cumulative saturation bit. - bool WritesToFPSRCumulativeSaturationBit() const; - - /// Determines whether or not this instruction alters memory-exclusivity. - bool AltersExclusiveState() const; - - /// Determines whether or not this instruction accesses a coprocessor. 
- bool IsCoprocessorInstruction() const; - - /// Determines whether or not this instruction causes a CPU exception. - bool CausesCPUException() const; - - /// Determines whether or not this instruction is a SetCheckBit operation. - bool IsSetCheckBitOperation() const; - - /// Determines whether or not this instruction may have side-effects. - bool MayHaveSideEffects() const; - - /// Determines whether or not this instruction is a pseduo-instruction. - /// Pseudo-instructions depend on their parent instructions for their semantics. - bool IsAPseudoOperation() const; - - /// Determines whether or not this instruction supports the GetNZCVFromOp pseudo-operation. - bool MayGetNZCVFromOp() const; - - /// Determines if all arguments of this instruction are immediates. + /// @brief Determines if all arguments of this instruction are immediates. bool AreAllArgsImmediates() const; size_t UseCount() const { return use_count; } @@ -121,7 +35,7 @@ public: /// Determines if there is a pseudo-operation associated with this instruction. inline bool HasAssociatedPseudoOperation() const noexcept { - return next_pseudoop && !IsAPseudoOperation(); + return next_pseudoop && !IsAPseudoOperation(op); } /// Gets a pseudo-operation associated with this instruction. Inst* GetAssociatedPseudoOperation(Opcode opcode); @@ -131,12 +45,21 @@ public: /// Get the type this instruction returns. Type GetType() const; /// Get the number of arguments this instruction has. - size_t NumArgs() const; + inline size_t NumArgs() const noexcept { + return GetNumArgsOf(op); + } - Value GetArg(size_t index) const; - void SetArg(size_t index, Value value); + inline Value GetArg(size_t index) const noexcept { + DEBUG_ASSERT_MSG(index < GetNumArgsOf(op), "Inst::GetArg: index {} >= number of arguments of {} ({})", index, op, GetNumArgsOf(op)); + DEBUG_ASSERT_MSG(!args[index].IsEmpty() || GetArgTypeOf(op, index) == IR::Type::Opaque, "Inst::GetArg: index {} is empty", index, args[index].GetType()); + return args[index]; + } + void SetArg(size_t index, Value value) noexcept; - void Invalidate(); + inline void Invalidate() noexcept { + ClearArgs(); + op = Opcode::Void; + } void ClearArgs(); void ReplaceUsesWith(Value replacement); diff --git a/src/dynarmic/ir/opcodes.cpp b/src/dynarmic/ir/opcodes.cpp index 88a52d4f..e7e73b70 100644 --- a/src/dynarmic/ir/opcodes.cpp +++ b/src/dynarmic/ir/opcodes.cpp @@ -17,9 +17,9 @@ namespace Dynarmic::IR { namespace OpcodeInfo { struct Meta { + std::vector arg_types; const char* name; Type type; - std::vector arg_types; }; constexpr Type Void = Type::Void; @@ -40,10 +40,10 @@ constexpr Type Cond = Type::Cond; constexpr Type Table = Type::Table; constexpr Type AccType = Type::AccType; -static const std::array opcode_info{ -#define OPCODE(name, type, ...) Meta{#name, type, {__VA_ARGS__}}, -#define A32OPC(name, type, ...) Meta{#name, type, {__VA_ARGS__}}, -#define A64OPC(name, type, ...) Meta{#name, type, {__VA_ARGS__}}, +alignas(64) static const std::array opcode_info{ +#define OPCODE(name, type, ...) Meta{{__VA_ARGS__}, #name, type}, +#define A32OPC(name, type, ...) Meta{{__VA_ARGS__}, #name, type}, +#define A64OPC(name, type, ...) 
Meta{{__VA_ARGS__}, #name, type}, +#include "./opcodes.inc" #undef OPCODE #undef A32OPC @@ -52,20 +52,24 @@ static const std::array opcode_info{ } // namespace OpcodeInfo -Type GetTypeOf(Opcode op) { - return OpcodeInfo::opcode_info.at(static_cast<size_t>(op)).type; +/// @brief Get return type of an opcode +Type GetTypeOf(Opcode op) noexcept { + return OpcodeInfo::opcode_info.at(size_t(op)).type; } -size_t GetNumArgsOf(Opcode op) { - return OpcodeInfo::opcode_info.at(static_cast<size_t>(op)).arg_types.size(); +/// @brief Get the number of arguments an opcode accepts +size_t GetNumArgsOf(Opcode op) noexcept { + return OpcodeInfo::opcode_info.at(size_t(op)).arg_types.size(); } -Type GetArgTypeOf(Opcode op, size_t arg_index) { - return OpcodeInfo::opcode_info.at(static_cast<size_t>(op)).arg_types.at(arg_index); +/// @brief Get the required type of an argument of an opcode +Type GetArgTypeOf(Opcode op, size_t arg_index) noexcept { + return OpcodeInfo::opcode_info.at(size_t(op)).arg_types.at(arg_index); } -std::string GetNameOf(Opcode op) { - return OpcodeInfo::opcode_info.at(static_cast<size_t>(op)).name; +/// @brief Get the name of an opcode. +std::string GetNameOf(Opcode op) noexcept { + return OpcodeInfo::opcode_info.at(size_t(op)).name; } } // namespace Dynarmic::IR diff --git a/src/dynarmic/ir/opcodes.h b/src/dynarmic/ir/opcodes.h index 404f4cdb..2af7a9b2 100644 --- a/src/dynarmic/ir/opcodes.h +++ b/src/dynarmic/ir/opcodes.h @@ -14,10 +14,8 @@ namespace Dynarmic::IR { enum class Type; -/** - * The Opcodes of our intermediate representation. - * Type signatures for each opcode can be found in opcodes.inc - */ +/// @brief The Opcodes of our intermediate representation. +/// Type signatures for each opcode can be found in opcodes.inc enum class Opcode { #define OPCODE(name, type, ...) name, #define A32OPC(name, type, ...) A32##name, @@ -31,17 +29,626 @@ enum class Opcode { constexpr size_t OpcodeCount = static_cast<size_t>(Opcode::NUM_OPCODE); -/// Get return type of an opcode -Type GetTypeOf(Opcode op); +Type GetTypeOf(Opcode op) noexcept; +size_t GetNumArgsOf(Opcode op) noexcept; +Type GetArgTypeOf(Opcode op, size_t arg_index) noexcept; +std::string GetNameOf(Opcode op) noexcept; -/// Get the number of arguments an opcode accepts -size_t GetNumArgsOf(Opcode op); +/// @brief Determines whether or not this instruction performs an arithmetic shift. +constexpr bool IsArithmeticShift(const Opcode op) noexcept { + return op == Opcode::ArithmeticShiftRight32 + || op == Opcode::ArithmeticShiftRight64; +} -/// Get the required type of an argument of an opcode -Type GetArgTypeOf(Opcode op, size_t arg_index); +/// @brief Determines whether or not this instruction performs a circular shift. +constexpr bool IsCircularShift(const Opcode op) noexcept { + return op == Opcode::RotateRight32 + || op == Opcode::RotateRight64 + || op == Opcode::RotateRightExtended; +} -/// Get the name of an opcode. -std::string GetNameOf(Opcode op); +/// @brief Determines whether or not this instruction performs a logical shift. +constexpr bool IsLogicalShift(const Opcode op) noexcept { + switch (op) { + case Opcode::LogicalShiftLeft32: + case Opcode::LogicalShiftLeft64: + case Opcode::LogicalShiftRight32: + case Opcode::LogicalShiftRight64: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction performs any kind of shift.
+constexpr bool IsShift(const Opcode op) noexcept { + return IsArithmeticShift(op) || IsCircularShift(op) || IsLogicalShift(op); +} + +/// @brief Determines whether or not this instruction is a form of barrier. +constexpr bool IsBarrier(const Opcode op) noexcept { + switch (op) { + case Opcode::A32DataMemoryBarrier: + case Opcode::A32DataSynchronizationBarrier: + case Opcode::A32InstructionSynchronizationBarrier: + case Opcode::A64DataMemoryBarrier: + case Opcode::A64DataSynchronizationBarrier: + case Opcode::A64InstructionSynchronizationBarrier: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction performs a shared memory read. +constexpr bool IsSharedMemoryRead(const Opcode op) noexcept { + switch (op) { + case Opcode::A32ReadMemory8: + case Opcode::A32ReadMemory16: + case Opcode::A32ReadMemory32: + case Opcode::A32ReadMemory64: + case Opcode::A64ReadMemory8: + case Opcode::A64ReadMemory16: + case Opcode::A64ReadMemory32: + case Opcode::A64ReadMemory64: + case Opcode::A64ReadMemory128: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction performs a shared memory write. +constexpr bool IsSharedMemoryWrite(const Opcode op) noexcept { + switch (op) { + case Opcode::A32WriteMemory8: + case Opcode::A32WriteMemory16: + case Opcode::A32WriteMemory32: + case Opcode::A32WriteMemory64: + case Opcode::A64WriteMemory8: + case Opcode::A64WriteMemory16: + case Opcode::A64WriteMemory32: + case Opcode::A64WriteMemory64: + case Opcode::A64WriteMemory128: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction performs a shared memory read or write. +constexpr bool IsSharedMemoryReadOrWrite(const Opcode op) noexcept { + return IsSharedMemoryRead(op) || IsSharedMemoryWrite(op); +} + +/// @brief Determines whether or not this instruction performs an atomic memory read. +constexpr bool IsExclusiveMemoryRead(const Opcode op) noexcept { + switch (op) { + case Opcode::A32ExclusiveReadMemory8: + case Opcode::A32ExclusiveReadMemory16: + case Opcode::A32ExclusiveReadMemory32: + case Opcode::A32ExclusiveReadMemory64: + case Opcode::A64ExclusiveReadMemory8: + case Opcode::A64ExclusiveReadMemory16: + case Opcode::A64ExclusiveReadMemory32: + case Opcode::A64ExclusiveReadMemory64: + case Opcode::A64ExclusiveReadMemory128: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction performs an atomic memory write. +constexpr bool IsExclusiveMemoryWrite(const Opcode op) noexcept { + switch (op) { + case Opcode::A32ExclusiveWriteMemory8: + case Opcode::A32ExclusiveWriteMemory16: + case Opcode::A32ExclusiveWriteMemory32: + case Opcode::A32ExclusiveWriteMemory64: + case Opcode::A64ExclusiveWriteMemory8: + case Opcode::A64ExclusiveWriteMemory16: + case Opcode::A64ExclusiveWriteMemory32: + case Opcode::A64ExclusiveWriteMemory64: + case Opcode::A64ExclusiveWriteMemory128: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction performs any kind of memory read. +constexpr bool IsMemoryRead(const Opcode op) noexcept { + return IsSharedMemoryRead(op) || IsExclusiveMemoryRead(op); +} + +/// @brief Determines whether or not this instruction performs any kind of memory write. 
+constexpr bool IsMemoryWrite(const Opcode op) noexcept { + return IsSharedMemoryWrite(op) || IsExclusiveMemoryWrite(op); +} + +/// @brief Determines whether or not this instruction performs any kind of memory access. +constexpr bool IsMemoryReadOrWrite(const Opcode op) noexcept { + return IsMemoryRead(op) || IsMemoryWrite(op); +} + +/// @brief Determines whether or not this instruction reads from the CPSR. +constexpr bool ReadsFromCPSR(const Opcode op) noexcept { + switch (op) { + case Opcode::A32GetCpsr: + case Opcode::A32GetCFlag: + case Opcode::A32GetGEFlags: + case Opcode::A32UpdateUpperLocationDescriptor: + case Opcode::A64GetCFlag: + case Opcode::A64GetNZCVRaw: + case Opcode::ConditionalSelect32: + case Opcode::ConditionalSelect64: + case Opcode::ConditionalSelectNZCV: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction writes to the CPSR. +constexpr bool WritesToCPSR(const Opcode op) noexcept { + switch (op) { + case Opcode::A32SetCpsr: + case Opcode::A32SetCpsrNZCVRaw: + case Opcode::A32SetCpsrNZCV: + case Opcode::A32SetCpsrNZCVQ: + case Opcode::A32SetCpsrNZ: + case Opcode::A32SetCpsrNZC: + case Opcode::A32OrQFlag: + case Opcode::A32SetGEFlags: + case Opcode::A32SetGEFlagsCompressed: + case Opcode::A32UpdateUpperLocationDescriptor: + case Opcode::A64SetNZCVRaw: + case Opcode::A64SetNZCV: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction writes to a system register. +constexpr bool WritesToSystemRegister(const Opcode op) noexcept { + switch (op) { + case Opcode::A64SetTPIDR: + return true; + default: + return false; + } +} + +/// @brief Determines whether or not this instruction reads from a core register. +constexpr bool ReadsFromCoreRegister(const Opcode op) noexcept { + switch (op) { + case Opcode::A32GetRegister: + case Opcode::A32GetExtendedRegister32: + case Opcode::A32GetExtendedRegister64: + case Opcode::A32GetVector: + case Opcode::A64GetW: + case Opcode::A64GetX: + case Opcode::A64GetS: + case Opcode::A64GetD: + case Opcode::A64GetQ: + case Opcode::A64GetSP: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction writes to a core register. +constexpr bool WritesToCoreRegister(const Opcode op) noexcept { + switch (op) { + case Opcode::A32SetRegister: + case Opcode::A32SetExtendedRegister32: + case Opcode::A32SetExtendedRegister64: + case Opcode::A32SetVector: + case Opcode::A32BXWritePC: + case Opcode::A64SetW: + case Opcode::A64SetX: + case Opcode::A64SetS: + case Opcode::A64SetD: + case Opcode::A64SetQ: + case Opcode::A64SetSP: + case Opcode::A64SetPC: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction reads from the FPCR. +constexpr bool ReadsFromFPCR(const Opcode op) noexcept { + switch (op) { + case Opcode::A32GetFpscr: + case Opcode::A32GetFpscrNZCV: + case Opcode::A64GetFPCR: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction writes to the FPCR. +constexpr bool WritesToFPCR(const Opcode op) noexcept { + switch (op) { + case Opcode::A32SetFpscr: + case Opcode::A32SetFpscrNZCV: + case Opcode::A64SetFPCR: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction both reads from and writes to the FPSR cumulative exception bits. 
+constexpr bool ReadsFromAndWritesToFPSRCumulativeExceptionBits(const Opcode op) noexcept { + switch (op) { + case Opcode::FPAdd32: + case Opcode::FPAdd64: + case Opcode::FPCompare32: + case Opcode::FPCompare64: + case Opcode::FPDiv32: + case Opcode::FPDiv64: + case Opcode::FPMax32: + case Opcode::FPMax64: + case Opcode::FPMaxNumeric32: + case Opcode::FPMaxNumeric64: + case Opcode::FPMin32: + case Opcode::FPMin64: + case Opcode::FPMinNumeric32: + case Opcode::FPMinNumeric64: + case Opcode::FPMul32: + case Opcode::FPMul64: + case Opcode::FPMulAdd16: + case Opcode::FPMulAdd32: + case Opcode::FPMulAdd64: + case Opcode::FPMulSub16: + case Opcode::FPMulSub32: + case Opcode::FPMulSub64: + case Opcode::FPRecipEstimate16: + case Opcode::FPRecipEstimate32: + case Opcode::FPRecipEstimate64: + case Opcode::FPRecipExponent16: + case Opcode::FPRecipExponent32: + case Opcode::FPRecipExponent64: + case Opcode::FPRecipStepFused16: + case Opcode::FPRecipStepFused32: + case Opcode::FPRecipStepFused64: + case Opcode::FPRoundInt16: + case Opcode::FPRoundInt32: + case Opcode::FPRoundInt64: + case Opcode::FPRSqrtEstimate16: + case Opcode::FPRSqrtEstimate32: + case Opcode::FPRSqrtEstimate64: + case Opcode::FPRSqrtStepFused16: + case Opcode::FPRSqrtStepFused32: + case Opcode::FPRSqrtStepFused64: + case Opcode::FPSqrt32: + case Opcode::FPSqrt64: + case Opcode::FPSub32: + case Opcode::FPSub64: + case Opcode::FPHalfToDouble: + case Opcode::FPHalfToSingle: + case Opcode::FPSingleToDouble: + case Opcode::FPSingleToHalf: + case Opcode::FPDoubleToHalf: + case Opcode::FPDoubleToSingle: + case Opcode::FPDoubleToFixedS32: + case Opcode::FPDoubleToFixedS64: + case Opcode::FPDoubleToFixedU32: + case Opcode::FPDoubleToFixedU64: + case Opcode::FPHalfToFixedS32: + case Opcode::FPHalfToFixedS64: + case Opcode::FPHalfToFixedU32: + case Opcode::FPHalfToFixedU64: + case Opcode::FPSingleToFixedS32: + case Opcode::FPSingleToFixedS64: + case Opcode::FPSingleToFixedU32: + case Opcode::FPSingleToFixedU64: + case Opcode::FPFixedU32ToSingle: + case Opcode::FPFixedS32ToSingle: + case Opcode::FPFixedU32ToDouble: + case Opcode::FPFixedU64ToDouble: + case Opcode::FPFixedU64ToSingle: + case Opcode::FPFixedS32ToDouble: + case Opcode::FPFixedS64ToDouble: + case Opcode::FPFixedS64ToSingle: + case Opcode::FPVectorAdd32: + case Opcode::FPVectorAdd64: + case Opcode::FPVectorDiv32: + case Opcode::FPVectorDiv64: + case Opcode::FPVectorEqual16: + case Opcode::FPVectorEqual32: + case Opcode::FPVectorEqual64: + case Opcode::FPVectorFromSignedFixed32: + case Opcode::FPVectorFromSignedFixed64: + case Opcode::FPVectorFromUnsignedFixed32: + case Opcode::FPVectorFromUnsignedFixed64: + case Opcode::FPVectorGreater32: + case Opcode::FPVectorGreater64: + case Opcode::FPVectorGreaterEqual32: + case Opcode::FPVectorGreaterEqual64: + case Opcode::FPVectorMul32: + case Opcode::FPVectorMul64: + case Opcode::FPVectorMulAdd16: + case Opcode::FPVectorMulAdd32: + case Opcode::FPVectorMulAdd64: + case Opcode::FPVectorPairedAddLower32: + case Opcode::FPVectorPairedAddLower64: + case Opcode::FPVectorPairedAdd32: + case Opcode::FPVectorPairedAdd64: + case Opcode::FPVectorRecipEstimate16: + case Opcode::FPVectorRecipEstimate32: + case Opcode::FPVectorRecipEstimate64: + case Opcode::FPVectorRecipStepFused16: + case Opcode::FPVectorRecipStepFused32: + case Opcode::FPVectorRecipStepFused64: + case Opcode::FPVectorRoundInt16: + case Opcode::FPVectorRoundInt32: + case Opcode::FPVectorRoundInt64: + case Opcode::FPVectorRSqrtEstimate16: + case Opcode::FPVectorRSqrtEstimate32: + case 
Opcode::FPVectorRSqrtEstimate64: + case Opcode::FPVectorRSqrtStepFused16: + case Opcode::FPVectorRSqrtStepFused32: + case Opcode::FPVectorRSqrtStepFused64: + case Opcode::FPVectorSqrt32: + case Opcode::FPVectorSqrt64: + case Opcode::FPVectorSub32: + case Opcode::FPVectorSub64: + case Opcode::FPVectorToSignedFixed16: + case Opcode::FPVectorToSignedFixed32: + case Opcode::FPVectorToSignedFixed64: + case Opcode::FPVectorToUnsignedFixed16: + case Opcode::FPVectorToUnsignedFixed32: + case Opcode::FPVectorToUnsignedFixed64: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction reads from the FPSR cumulative saturation bit. +constexpr bool ReadsFromFPSRCumulativeSaturationBit([[maybe_unused]] const Opcode op) noexcept { + return false; +} + +/// @brief Determines whether or not this instruction reads from the FPSR cumulative exception bits. +constexpr bool ReadsFromFPSRCumulativeExceptionBits(const Opcode op) noexcept { + return ReadsFromAndWritesToFPSRCumulativeExceptionBits(op); +} + +/// @brief Determines whether or not this instruction writes to the FPSR cumulative exception bits. +constexpr bool WritesToFPSRCumulativeExceptionBits(const Opcode op) noexcept { + return ReadsFromAndWritesToFPSRCumulativeExceptionBits(op); +} + +/// @brief Determines whether or not this instruction writes to the FPSR cumulative saturation bit. +constexpr bool WritesToFPSRCumulativeSaturationBit(const Opcode op) noexcept { + switch (op) { + case Opcode::SignedSaturatedAdd8: + case Opcode::SignedSaturatedAdd16: + case Opcode::SignedSaturatedAdd32: + case Opcode::SignedSaturatedAdd64: + case Opcode::SignedSaturatedDoublingMultiplyReturnHigh16: + case Opcode::SignedSaturatedDoublingMultiplyReturnHigh32: + case Opcode::SignedSaturatedSub8: + case Opcode::SignedSaturatedSub16: + case Opcode::SignedSaturatedSub32: + case Opcode::SignedSaturatedSub64: + case Opcode::UnsignedSaturatedAdd8: + case Opcode::UnsignedSaturatedAdd16: + case Opcode::UnsignedSaturatedAdd32: + case Opcode::UnsignedSaturatedAdd64: + case Opcode::UnsignedSaturatedSub8: + case Opcode::UnsignedSaturatedSub16: + case Opcode::UnsignedSaturatedSub32: + case Opcode::UnsignedSaturatedSub64: + case Opcode::VectorSignedSaturatedAbs8: + case Opcode::VectorSignedSaturatedAbs16: + case Opcode::VectorSignedSaturatedAbs32: + case Opcode::VectorSignedSaturatedAbs64: + case Opcode::VectorSignedSaturatedAccumulateUnsigned8: + case Opcode::VectorSignedSaturatedAccumulateUnsigned16: + case Opcode::VectorSignedSaturatedAccumulateUnsigned32: + case Opcode::VectorSignedSaturatedAccumulateUnsigned64: + case Opcode::VectorSignedSaturatedAdd8: + case Opcode::VectorSignedSaturatedAdd16: + case Opcode::VectorSignedSaturatedAdd32: + case Opcode::VectorSignedSaturatedAdd64: + case Opcode::VectorSignedSaturatedDoublingMultiplyHigh16: + case Opcode::VectorSignedSaturatedDoublingMultiplyHigh32: + case Opcode::VectorSignedSaturatedDoublingMultiplyHighRounding16: + case Opcode::VectorSignedSaturatedDoublingMultiplyHighRounding32: + case Opcode::VectorSignedSaturatedDoublingMultiplyLong16: + case Opcode::VectorSignedSaturatedDoublingMultiplyLong32: + case Opcode::VectorSignedSaturatedNarrowToSigned16: + case Opcode::VectorSignedSaturatedNarrowToSigned32: + case Opcode::VectorSignedSaturatedNarrowToSigned64: + case Opcode::VectorSignedSaturatedNarrowToUnsigned16: + case Opcode::VectorSignedSaturatedNarrowToUnsigned32: + case Opcode::VectorSignedSaturatedNarrowToUnsigned64: + case Opcode::VectorSignedSaturatedNeg8: + case
Opcode::VectorSignedSaturatedNeg16: + case Opcode::VectorSignedSaturatedNeg32: + case Opcode::VectorSignedSaturatedNeg64: + case Opcode::VectorSignedSaturatedShiftLeft8: + case Opcode::VectorSignedSaturatedShiftLeft16: + case Opcode::VectorSignedSaturatedShiftLeft32: + case Opcode::VectorSignedSaturatedShiftLeft64: + case Opcode::VectorSignedSaturatedShiftLeftUnsigned8: + case Opcode::VectorSignedSaturatedShiftLeftUnsigned16: + case Opcode::VectorSignedSaturatedShiftLeftUnsigned32: + case Opcode::VectorSignedSaturatedShiftLeftUnsigned64: + case Opcode::VectorSignedSaturatedSub8: + case Opcode::VectorSignedSaturatedSub16: + case Opcode::VectorSignedSaturatedSub32: + case Opcode::VectorSignedSaturatedSub64: + case Opcode::VectorUnsignedSaturatedAccumulateSigned8: + case Opcode::VectorUnsignedSaturatedAccumulateSigned16: + case Opcode::VectorUnsignedSaturatedAccumulateSigned32: + case Opcode::VectorUnsignedSaturatedAccumulateSigned64: + case Opcode::VectorUnsignedSaturatedAdd8: + case Opcode::VectorUnsignedSaturatedAdd16: + case Opcode::VectorUnsignedSaturatedAdd32: + case Opcode::VectorUnsignedSaturatedAdd64: + case Opcode::VectorUnsignedSaturatedNarrow16: + case Opcode::VectorUnsignedSaturatedNarrow32: + case Opcode::VectorUnsignedSaturatedNarrow64: + case Opcode::VectorUnsignedSaturatedShiftLeft8: + case Opcode::VectorUnsignedSaturatedShiftLeft16: + case Opcode::VectorUnsignedSaturatedShiftLeft32: + case Opcode::VectorUnsignedSaturatedShiftLeft64: + case Opcode::VectorUnsignedSaturatedSub8: + case Opcode::VectorUnsignedSaturatedSub16: + case Opcode::VectorUnsignedSaturatedSub32: + case Opcode::VectorUnsignedSaturatedSub64: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction reads from the FPSR. +constexpr bool ReadsFromFPSR(const Opcode op) noexcept { + return op == Opcode::A32GetFpscr + || op == Opcode::A32GetFpscrNZCV + || op == Opcode::A64GetFPSR + || ReadsFromFPSRCumulativeExceptionBits(op) + || ReadsFromFPSRCumulativeSaturationBit(op); +} + +/// @brief Determines whether or not this instruction writes to the FPSR. +constexpr bool WritesToFPSR(const Opcode op) noexcept { + return op == Opcode::A32SetFpscr + || op == Opcode::A32SetFpscrNZCV + || op == Opcode::A64SetFPSR + || WritesToFPSRCumulativeExceptionBits(op) + || WritesToFPSRCumulativeSaturationBit(op); +} + +/// @brief Determines whether or not this instruction causes a CPU exception. +constexpr bool CausesCPUException(const Opcode op) noexcept { + return op == Opcode::Breakpoint + || op == Opcode::A32CallSupervisor + || op == Opcode::A32ExceptionRaised + || op == Opcode::A64CallSupervisor + || op == Opcode::A64ExceptionRaised; +} + +/// @brief Determines whether or not this instruction alters memory-exclusivity. +constexpr bool AltersExclusiveState(const Opcode op) noexcept { + return op == Opcode::A32ClearExclusive + || op == Opcode::A64ClearExclusive + || IsExclusiveMemoryRead(op) + || IsExclusiveMemoryWrite(op); +} + +/// @brief Determines whether or not this instruction accesses a coprocessor. +constexpr bool IsCoprocessorInstruction(const Opcode op) noexcept { + switch (op) { + case Opcode::A32CoprocInternalOperation: + case Opcode::A32CoprocSendOneWord: + case Opcode::A32CoprocSendTwoWords: + case Opcode::A32CoprocGetOneWord: + case Opcode::A32CoprocGetTwoWords: + case Opcode::A32CoprocLoadWords: + case Opcode::A32CoprocStoreWords: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction is a SetCheckBit operation. 
+constexpr bool IsSetCheckBitOperation(const Opcode op) noexcept { + return op == Opcode::A32SetCheckBit + || op == Opcode::A64SetCheckBit; +} + +/// @brief Determines whether or not this instruction may have side-effects. +constexpr bool MayHaveSideEffects(const Opcode op) noexcept { + return op == Opcode::PushRSB + || op == Opcode::CallHostFunction + || op == Opcode::A64DataCacheOperationRaised + || op == Opcode::A64InstructionCacheOperationRaised + || IsSetCheckBitOperation(op) + || IsBarrier(op) + || CausesCPUException(op) + || WritesToCoreRegister(op) + || WritesToSystemRegister(op) + || WritesToCPSR(op) + || WritesToFPCR(op) + || WritesToFPSR(op) + || AltersExclusiveState(op) + || IsMemoryWrite(op) + || IsCoprocessorInstruction(op); +} + +/// @brief Determines whether or not this instruction is a pseduo-instruction. +/// @note Pseudo-instructions depend on their parent instructions for their semantics. +constexpr bool IsAPseudoOperation(const Opcode op) noexcept { + switch (op) { + case Opcode::GetCarryFromOp: + case Opcode::GetOverflowFromOp: + case Opcode::GetGEFromOp: + case Opcode::GetNZCVFromOp: + case Opcode::GetNZFromOp: + case Opcode::GetUpperFromOp: + case Opcode::GetLowerFromOp: + case Opcode::MostSignificantBit: + case Opcode::IsZero32: + case Opcode::IsZero64: + return true; + + default: + return false; + } +} + +/// @brief Determines whether or not this instruction supports the GetNZCVFromOp pseudo-operation. +constexpr bool MayGetNZCVFromOp(const Opcode op) noexcept { + switch (op) { + case Opcode::Add32: + case Opcode::Add64: + case Opcode::Sub32: + case Opcode::Sub64: + case Opcode::And32: + case Opcode::And64: + case Opcode::AndNot32: + case Opcode::AndNot64: + case Opcode::Eor32: + case Opcode::Eor64: + case Opcode::Or32: + case Opcode::Or64: + case Opcode::Not32: + case Opcode::Not64: + return true; + + default: + return false; + } +} } // namespace Dynarmic::IR diff --git a/src/dynarmic/ir/opt/a32_get_set_elimination_pass.cpp b/src/dynarmic/ir/opt/a32_get_set_elimination_pass.cpp index 4b743bff..06e159ba 100644 --- a/src/dynarmic/ir/opt/a32_get_set_elimination_pass.cpp +++ b/src/dynarmic/ir/opt/a32_get_set_elimination_pass.cpp @@ -68,7 +68,8 @@ void FlagsPass(IR::Block& block) { A32::IREmitter ir{block, A32::LocationDescriptor{block.Location()}, {}}; for (auto inst = block.rbegin(); inst != block.rend(); ++inst) { - switch (inst->GetOpcode()) { + auto const opcode = inst->GetOpcode(); + switch (opcode) { case IR::Opcode::A32GetCFlag: { do_get(c_flag, inst); break; @@ -167,7 +168,7 @@ void FlagsPass(IR::Block& block) { break; } default: { - if (inst->ReadsFromCPSR() || inst->WritesToCPSR()) { + if (ReadsFromCPSR(opcode) || WritesToCPSR(opcode)) { nzcvq = {}; nzcv = {}; nz = {}; @@ -254,7 +255,8 @@ void RegisterPass(IR::Block& block) { A32::IREmitter ir{block, A32::LocationDescriptor{block.Location()}, {}}; for (auto inst = block.begin(); inst != block.end(); ++inst) { - switch (inst->GetOpcode()) { + auto const opcode = inst->GetOpcode(); + switch (opcode) { case IR::Opcode::A32GetRegister: { const A32::Reg reg = inst->GetArg(0).GetA32RegRef(); ASSERT(reg != A32::Reg::PC); @@ -357,7 +359,7 @@ void RegisterPass(IR::Block& block) { break; } default: { - if (inst->ReadsFromCoreRegister() || inst->WritesToCoreRegister()) { + if (ReadsFromCoreRegister(opcode) || WritesToCoreRegister(opcode)) { reg_info = {}; ext_reg_info = {}; } diff --git a/src/dynarmic/ir/opt/a64_get_set_elimination_pass.cpp b/src/dynarmic/ir/opt/a64_get_set_elimination_pass.cpp index 
f4920156..4034eebf 100644 --- a/src/dynarmic/ir/opt/a64_get_set_elimination_pass.cpp +++ b/src/dynarmic/ir/opt/a64_get_set_elimination_pass.cpp @@ -72,7 +72,8 @@ void A64GetSetElimination(IR::Block& block) { }; for (auto inst = block.begin(); inst != block.end(); ++inst) { - switch (inst->GetOpcode()) { + auto const opcode = inst->GetOpcode(); + switch (opcode) { case IR::Opcode::A64GetW: { const size_t index = A64::RegNumber(inst->GetArg(0).GetA64RegRef()); do_get(reg_info.at(index), inst, TrackingType::W); @@ -144,10 +145,10 @@ void A64GetSetElimination(IR::Block& block) { break; } default: { - if (inst->ReadsFromCPSR() || inst->WritesToCPSR()) { + if (ReadsFromCPSR(opcode) || WritesToCPSR(opcode)) { nzcv_info = {}; } - if (inst->ReadsFromCoreRegister() || inst->WritesToCoreRegister()) { + if (ReadsFromCoreRegister(opcode) || WritesToCoreRegister(opcode)) { reg_info = {}; vec_info = {}; sp_info = {}; diff --git a/src/dynarmic/ir/opt/constant_propagation_pass.cpp b/src/dynarmic/ir/opt/constant_propagation_pass.cpp index 83ed8499..83530fc4 100644 --- a/src/dynarmic/ir/opt/constant_propagation_pass.cpp +++ b/src/dynarmic/ir/opt/constant_propagation_pass.cpp @@ -117,14 +117,14 @@ void FoldAdd(IR::Inst& inst, bool is_32_bit) { } } -// Folds AND operations based on the following: -// -// 1. imm_x & imm_y -> result -// 2. x & 0 -> 0 -// 3. 0 & y -> 0 -// 4. x & y -> y (where x has all bits set to 1) -// 5. x & y -> x (where y has all bits set to 1) -// +/// Folds AND operations based on the following: +/// +/// 1. imm_x & imm_y -> result +/// 2. x & 0 -> 0 +/// 3. 0 & y -> 0 +/// 4. x & y -> y (where x has all bits set to 1) +/// 5. x & y -> x (where y has all bits set to 1) +/// void FoldAND(IR::Inst& inst, bool is_32_bit) { if (FoldCommutative(inst, is_32_bit, [](u64 a, u64 b) { return a & b; })) { const auto rhs = inst.GetArg(1); @@ -136,10 +136,10 @@ void FoldAND(IR::Inst& inst, bool is_32_bit) { } } -// Folds byte reversal opcodes based on the following: -// -// 1. imm -> swap(imm) -// +/// Folds byte reversal opcodes based on the following: +/// +/// 1. imm -> swap(imm) +/// void FoldByteReverse(IR::Inst& inst, Op op) { const auto operand = inst.GetArg(0); @@ -159,12 +159,12 @@ void FoldByteReverse(IR::Inst& inst, Op op) { } } -// Folds division operations based on the following: -// -// 1. x / 0 -> 0 (NOTE: This is an ARM-specific behavior defined in the architecture reference manual) -// 2. imm_x / imm_y -> result -// 3. x / 1 -> x -// +/// Folds division operations based on the following: +/// +/// 1. x / 0 -> 0 (NOTE: This is an ARM-specific behavior defined in the architecture reference manual) +/// 2. imm_x / imm_y -> result +/// 3. x / 1 -> x +/// void FoldDivide(IR::Inst& inst, bool is_32_bit, bool is_signed) { const auto rhs = inst.GetArg(1); diff --git a/src/dynarmic/ir/opt/dead_code_elimination_pass.cpp b/src/dynarmic/ir/opt/dead_code_elimination_pass.cpp index 0d0de180..bda9f6ef 100644 --- a/src/dynarmic/ir/opt/dead_code_elimination_pass.cpp +++ b/src/dynarmic/ir/opt/dead_code_elimination_pass.cpp @@ -14,7 +14,7 @@ void DeadCodeElimination(IR::Block& block) { // We iterate over the instructions in reverse order. // This is because removing an instruction reduces the number of uses for earlier instructions. 
for (auto& inst : mcl::iterator::reverse(block)) { - if (!inst.HasUses() && !inst.MayHaveSideEffects()) { + if (!inst.HasUses() && !MayHaveSideEffects(inst.GetOpcode())) { inst.Invalidate(); } } diff --git a/src/dynarmic/ir/opt/verification_pass.cpp b/src/dynarmic/ir/opt/verification_pass.cpp index 8bc9e6f1..9252997f 100644 --- a/src/dynarmic/ir/opt/verification_pass.cpp +++ b/src/dynarmic/ir/opt/verification_pass.cpp @@ -8,6 +8,7 @@ #include #include +#include #include "dynarmic/ir/basic_block.h" #include "dynarmic/ir/microinstruction.h" @@ -29,7 +30,7 @@ void VerificationPass(const IR::Block& block) { } } - std::map actual_uses; + ankerl::unordered_dense::map actual_uses; for (const auto& inst : block) { for (size_t i = 0; i < inst.NumArgs(); i++) { const auto arg = inst.GetArg(i); diff --git a/src/dynarmic/ir/value.cpp b/src/dynarmic/ir/value.cpp index 20f94cb5..21c0a677 100644 --- a/src/dynarmic/ir/value.cpp +++ b/src/dynarmic/ir/value.cpp @@ -85,23 +85,23 @@ Value Value::EmptyNZCVImmediateMarker() { return result; } -bool Value::IsIdentity() const { +inline bool Value::IsIdentity() const noexcept { if (type == Type::Opaque) return inner.inst->GetOpcode() == Opcode::Identity; return false; } -bool Value::IsImmediate() const { +inline bool Value::IsEmpty() const noexcept { + return type == Type::Void; +} + +inline bool Value::IsImmediate() const noexcept { if (IsIdentity()) return inner.inst->GetArg(0).IsImmediate(); return type != Type::Opaque; } -bool Value::IsEmpty() const { - return type == Type::Void; -} - -Type Value::GetType() const { +inline Type Value::GetType() const noexcept { if (IsIdentity()) return inner.inst->GetArg(0).GetType(); if (type == Type::Opaque) @@ -199,7 +199,6 @@ AccType Value::GetAccType() const { s64 Value::GetImmediateAsS64() const { ASSERT(IsImmediate()); - switch (GetType()) { case IR::Type::U1: return s64(GetU1()); @@ -212,13 +211,12 @@ s64 Value::GetImmediateAsS64() const { case IR::Type::U64: return s64(GetU64()); default: - ASSERT_FALSE("GetImmediateAsS64 called on an incompatible Value type."); + UNREACHABLE(); } } u64 Value::GetImmediateAsU64() const { ASSERT(IsImmediate()); - switch (GetType()) { case IR::Type::U1: return u64(GetU1()); @@ -231,24 +229,8 @@ u64 Value::GetImmediateAsU64() const { case IR::Type::U64: return u64(GetU64()); default: - ASSERT_FALSE("GetImmediateAsU64 called on an incompatible Value type."); + UNREACHABLE(); } } -bool Value::IsSignedImmediate(s64 value) const { - return IsImmediate() && GetImmediateAsS64() == value; -} - -bool Value::IsUnsignedImmediate(u64 value) const { - return IsImmediate() && GetImmediateAsU64() == value; -} - -bool Value::HasAllBitsSet() const { - return IsSignedImmediate(-1); -} - -bool Value::IsZero() const { - return IsUnsignedImmediate(0); -} - } // namespace Dynarmic::IR diff --git a/src/dynarmic/ir/value.h b/src/dynarmic/ir/value.h index 8f70110a..fd65b20a 100644 --- a/src/dynarmic/ir/value.h +++ b/src/dynarmic/ir/value.h @@ -29,16 +29,13 @@ class Inst; enum class AccType; enum class Cond; -/** - * A representation of a value in the IR. - * A value may either be an immediate or the result of a microinstruction. - */ +/// @brief A representation of a value in the IR. +/// A value may either be an immediate or the result of a microinstruction. 
class Value { public: using CoprocessorInfo = std::array; - Value() - : type(Type::Void) {} + inline Value() noexcept : type(Type::Void) {} explicit Value(Inst* value); explicit Value(A32::Reg value); explicit Value(A32::ExtReg value); @@ -55,10 +52,10 @@ public: static Value EmptyNZCVImmediateMarker(); - bool IsIdentity() const; - bool IsEmpty() const; - bool IsImmediate() const; - Type GetType() const; + inline bool IsIdentity() const noexcept; + inline bool IsEmpty() const noexcept; + inline bool IsImmediate() const noexcept; + inline Type GetType() const noexcept; Inst* GetInst() const; Inst* GetInstRecursive() const; @@ -75,64 +72,53 @@ public: Cond GetCond() const; AccType GetAccType() const; - /** - * Retrieves the immediate of a Value instance as a signed 64-bit value. - * - * @pre The value contains either a U1, U8, U16, U32, or U64 value. - * Breaking this precondition will cause an assertion to be invoked. - */ + /// @brief Retrieves the immediate of a Value instance as a signed 64-bit value. + /// @pre The value contains either a U1, U8, U16, U32, or U64 value. + /// Breaking this precondition will cause an assertion to be invoked. s64 GetImmediateAsS64() const; - /** - * Retrieves the immediate of a Value instance as an unsigned 64-bit value. - * - * @pre The value contains either a U1, U8, U16, U32, or U64 value. - * Breaking this precondition will cause an assertion to be invoked. - */ + /// @brief Retrieves the immediate of a Value instance as an unsigned 64-bit value. + /// @pre The value contains either a U1, U8, U16, U32, or U64 value. + /// Breaking this precondition will cause an assertion to be invoked. u64 GetImmediateAsU64() const; - /** - * Determines whether or not the contained value matches the provided signed one. - * - * Note that this function will always return false if the contained - * value is not a a constant value. In other words, if IsImmediate() - * would return false on an instance, then so will this function. - * - * @param value The value to check against the contained value. - */ - bool IsSignedImmediate(s64 value) const; + /// @brief Determines whether or not the contained value matches the provided signed one. + /// Note that this function will always return false if the contained + /// value is not a a constant value. In other words, if IsImmediate() + /// would return false on an instance, then so will this function. + /// @param value The value to check against the contained value. + inline bool IsSignedImmediate(s64 value) const noexcept { + return IsImmediate() && GetImmediateAsS64() == value; + } - /** - * Determines whether or not the contained value matches the provided unsigned one. - * - * Note that this function will always return false if the contained - * value is not a a constant value. In other words, if IsImmediate() - * would return false on an instance, then so will this function. - * - * @param value The value to check against the contained value. - */ - bool IsUnsignedImmediate(u64 value) const; + /// @brief Determines whether or not the contained value matches the provided unsigned one. + /// + /// Note that this function will always return false if the contained + /// value is not a a constant value. In other words, if IsImmediate() + /// would return false on an instance, then so will this function. + /// @param value The value to check against the contained value. 
+ inline bool IsUnsignedImmediate(u64 value) const noexcept { + return IsImmediate() && GetImmediateAsU64() == value; + } + - /** - * Determines whether or not the contained constant value has all bits set. - * - * @pre The value contains either a U1, U8, U16, U32, or U64 value. - * Breaking this precondition will cause an assertion to be invoked. - */ - bool HasAllBitsSet() const; + /// @brief Determines whether or not the contained constant value has all bits set. + /// @pre The value contains either a U1, U8, U16, U32, or U64 value. + /// Breaking this precondition will cause an assertion to be invoked. + inline bool HasAllBitsSet() const noexcept { + return IsSignedImmediate(-1); + } - /** - * Whether or not the current value contains a representation of zero. - * - * Note that this function will always return false if the contained - * value is not a a constant value. In other words, if IsImmediate() - * would return false on an instance, then so will this function. - */ - bool IsZero() const; + /// @brief Whether or not the current value contains a representation of zero. + /// Note that this function will always return false if the contained + /// value is not a a constant value. In other words, if IsImmediate() + /// would return false on an instance, then so will this function. + inline bool IsZero() const noexcept { + return IsUnsignedImmediate(0); + } private: Type type; - union { Inst* inst; // type == Type::Opaque A32::Reg imm_a32regref;
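A minimal usage sketch (not part of the diff) of the pattern this change establishes: the opcode-level predicates become free constexpr functions in Dynarmic::IR, so a pass fetches the opcode once via Inst::GetOpcode() and queries it. The function name below is hypothetical; the real dead-code pass in dead_code_elimination_pass.cpp does the same thing but walks the block in reverse.

#include "dynarmic/ir/basic_block.h"
#include "dynarmic/ir/microinstruction.h"
#include "dynarmic/ir/opcodes.h"

namespace Dynarmic::IR {

// Hypothetical illustration: drop instructions whose results are unused and whose
// opcode has no side effects, mirroring the dead_code_elimination_pass.cpp hunk above.
inline void RemoveDeadInsts(Block& block) {
    for (auto& inst : block) {
        const Opcode op = inst.GetOpcode();
        if (!inst.HasUses() && !MayHaveSideEffects(op)) {
            inst.Invalidate();  // clears the arguments and turns the instruction into a Void op
        }
    }
}

}  // namespace Dynarmic::IR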