From 1422a6730853876bf026da02630d88e23e2e4aa2 Mon Sep 17 00:00:00 2001
From: Eugene Zelenko
Date: Wed, 25 Jan 2017 00:29:26 +0000
Subject: [PATCH] [AArch64] Fix some Clang-tidy modernize and Include What You Use warnings; other minor fixes (NFC).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@292996 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../AArch64/AArch64AddressTypePromotion.cpp  | 23 +++--
 lib/Target/AArch64/AArch64FastISel.cpp       | 91 ++++++++++++++-----
 lib/Target/AArch64/AArch64FrameLowering.cpp  | 32 ++++++-
 .../AArch64/AArch64LoadStoreOptimizer.cpp    | 30 ++++--
 .../AArch64/AArch64VectorByElementOpt.cpp    | 25 ++++-
 5 files changed, 153 insertions(+), 48 deletions(-)

diff --git a/lib/Target/AArch64/AArch64AddressTypePromotion.cpp b/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
index 0cbb2db1134..e1b8ee6d03c 100644
--- a/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
+++ b/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
@@ -31,16 +31,23 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
-#include "llvm/IR/Module.h"
 #include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
+#include

 using namespace llvm;

@@ -59,12 +66,12 @@ EnableMerge("aarch64-type-promotion-merge", cl::Hidden,
 //===----------------------------------------------------------------------===//

 namespace {
-class AArch64AddressTypePromotion : public FunctionPass {
+class AArch64AddressTypePromotion : public FunctionPass {
 public:
   static char ID;
-  AArch64AddressTypePromotion()
-      : FunctionPass(ID), Func(nullptr), ConsideredSExtType(nullptr) {
+
+  AArch64AddressTypePromotion() : FunctionPass(ID) {
     initializeAArch64AddressTypePromotionPass(*PassRegistry::getPassRegistry());
   }
@@ -76,10 +83,11 @@ public:

 private:
   /// The current function.
-  Function *Func;
+  Function *Func = nullptr;
+
   /// Filter out all sexts that does not have this type.
   /// Currently initialized with Int64Ty.
-  Type *ConsideredSExtType;
+  Type *ConsideredSExtType = nullptr;

   // This transformation requires dominator info.
   void getAnalysisUsage(AnalysisUsage &AU) const override {
@@ -129,7 +137,8 @@ private:
   void mergeSExts(ValueToInsts &ValToSExtendedUses,
                   SetOfInstructions &ToRemove);
 };
-} // end anonymous namespace.
+
+} // end anonymous namespace

 char AArch64AddressTypePromotion::ID = 0;

diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index fe2c2d4550a..030bd9e28e5 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -15,28 +15,62 @@
 #include "AArch64.h"
 #include "AArch64CallingConvention.h"
+#include "AArch64RegisterInfo.h"
 #include "AArch64Subtarget.h"
-#include "AArch64TargetMachine.h"
 #include "MCTargetDesc/AArch64AddressingModes.h"
+#include "Utils/AArch64BaseInfo.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/BranchProbabilityInfo.h"
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/GetElementPtrTypeIterator.h"
-#include "llvm/IR/GlobalAlias.h"
-#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCRegisterInfo.h"
 #include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include
+#include
+#include
+#include
+#include
+
 using namespace llvm;

 namespace {
@@ -50,48 +84,55 @@ class AArch64FastISel final : public FastISel {
    } BaseKind;

  private:
-   BaseKind Kind;
-   AArch64_AM::ShiftExtendType ExtType;
+   BaseKind Kind = RegBase;
+   AArch64_AM::ShiftExtendType ExtType = AArch64_AM::InvalidShiftExtend;
    union {
      unsigned Reg;
      int FI;
    } Base;
-   unsigned OffsetReg;
-   unsigned Shift;
-   int64_t Offset;
-   const GlobalValue *GV;
+   unsigned OffsetReg = 0;
+   unsigned Shift = 0;
+   int64_t Offset = 0;
+   const GlobalValue *GV = nullptr;

  public:
-   Address() : Kind(RegBase), ExtType(AArch64_AM::InvalidShiftExtend),
-     OffsetReg(0), Shift(0), Offset(0), GV(nullptr) { Base.Reg = 0; }
+   Address() { Base.Reg = 0; }
+
    void setKind(BaseKind K) { Kind = K; }
    BaseKind getKind() const { return Kind; }
    void setExtendType(AArch64_AM::ShiftExtendType E) { ExtType = E; }
    AArch64_AM::ShiftExtendType getExtendType() const { return ExtType; }
    bool isRegBase() const { return Kind == RegBase; }
    bool isFIBase() const { return Kind == FrameIndexBase; }
+
    void setReg(unsigned Reg) {
      assert(isRegBase() && "Invalid base register access!");
      Base.Reg = Reg;
    }
+
    unsigned getReg() const {
      assert(isRegBase() && "Invalid base register access!");
      return Base.Reg;
    }
+
    void setOffsetReg(unsigned Reg) {
      OffsetReg = Reg;
    }
+
    unsigned getOffsetReg() const { return OffsetReg; }
+
    void setFI(unsigned FI) {
      assert(isFIBase() && "Invalid base frame index access!");
      Base.FI = FI;
    }
+
    unsigned getFI() const {
      assert(isFIBase() && "Invalid base frame index access!");
      return Base.FI;
    }
+
    void setOffset(int64_t O) { Offset = O; }
    int64_t getOffset() { return Offset; }
    void setShift(unsigned S) { Shift = S; }
@@ -531,23 +572,23 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
   switch (Opcode) {
   default:
     break;
-  case Instruction::BitCast: {
+  case Instruction::BitCast:
     // Look through bitcasts.
     return computeAddress(U->getOperand(0), Addr, Ty);
-  }
-  case Instruction::IntToPtr: {
+
+  case Instruction::IntToPtr:
     // Look past no-op inttoptrs.
     if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
         TLI.getPointerTy(DL))
       return computeAddress(U->getOperand(0), Addr, Ty);
     break;
-  }
-  case Instruction::PtrToInt: {
+
+  case Instruction::PtrToInt:
     // Look past no-op ptrtoints.
     if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
       return computeAddress(U->getOperand(0), Addr, Ty);
     break;
-  }
+
   case Instruction::GetElementPtr: {
     Address SavedAddr = Addr;
     uint64_t TmpOffset = Addr.getOffset();
@@ -563,7 +604,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
         TmpOffset += SL->getElementOffset(Idx);
       } else {
         uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
-        for (;;) {
+        while (true) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
@@ -2813,8 +2854,8 @@ bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
   MVT DestVT;
   if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
     return false;
-  assert ((DestVT == MVT::f32 || DestVT == MVT::f64) &&
-          "Unexpected value type.");
+  assert((DestVT == MVT::f32 || DestVT == MVT::f64) &&
+         "Unexpected value type.");

   unsigned SrcReg = getRegForValue(I->getOperand(0));
   if (!SrcReg)
@@ -3521,11 +3562,11 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     updateValueMap(II, ResultReg);
     return true;
   }
-  case Intrinsic::trap: {
+  case Intrinsic::trap:
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
         .addImm(1);
     return true;
-  }
+
   case Intrinsic::sqrt: {
     Type *RetTy = II->getCalledFunction()->getReturnType();
@@ -5092,8 +5133,10 @@ bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
 }

 namespace llvm {
-llvm::FastISel *AArch64::createFastISel(FunctionLoweringInfo &FuncInfo,
+
+FastISel *AArch64::createFastISel(FunctionLoweringInfo &FuncInfo,
                                   const TargetLibraryInfo *LibInfo) {
   return new AArch64FastISel(FuncInfo, LibInfo);
 }
-}
+
+} // end namespace llvm
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index 65e94699ca3..5cce8f92db3 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -90,21 +90,42 @@
 #include "AArch64FrameLowering.h"
 #include "AArch64InstrInfo.h"
 #include "AArch64MachineFunctionInfo.h"
+#include "AArch64RegisterInfo.h"
 #include "AArch64Subtarget.h"
 #include "AArch64TargetMachine.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineMemOperand.h" #include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/RegisterScavenging.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/CallingConv.h" #include "llvm/IR/DataLayout.h" +#include "llvm/IR/DebugLoc.h" #include "llvm/IR/Function.h" +#include "llvm/MC/MCDwarf.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetOptions.h" +#include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/Target/TargetSubtargetInfo.h" +#include +#include +#include +#include using namespace llvm; @@ -319,7 +340,6 @@ bool AArch64FrameLowering::shouldCombineCSRLocalStackBump( static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec( MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc) { - unsigned NewOpc; bool NewIsUnscaled = false; switch (MBBI->getOpcode()) { @@ -870,15 +890,19 @@ static bool produceCompactUnwindFrame(MachineFunction &MF) { } namespace { + struct RegPairInfo { - RegPairInfo() : Reg1(AArch64::NoRegister), Reg2(AArch64::NoRegister) {} - unsigned Reg1; - unsigned Reg2; + unsigned Reg1 = AArch64::NoRegister; + unsigned Reg2 = AArch64::NoRegister; int FrameIdx; int Offset; bool IsGPR; + + RegPairInfo() = default; + bool isPaired() const { return Reg2 != AArch64::NoRegister; } }; + } // end anonymous namespace static void computeCalleeSaveRegisterPairs( diff --git a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp index d780644d884..443a14f7068 100644 --- a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp +++ b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp @@ -16,19 +16,29 @@ #include "AArch64Subtarget.h" #include "MCTargetDesc/AArch64AddressingModes.h" #include "llvm/ADT/BitVector.h" +#include "llvm/ADT/iterator_range.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" +#include "llvm/ADT/StringRef.h" #include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/IR/DebugLoc.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetInstrInfo.h" -#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetRegisterInfo.h" +#include +#include +#include +#include + using namespace llvm; #define DEBUG_TYPE "aarch64-ldst-opt" @@ -58,15 +68,15 @@ typedef struct LdStPairFlags { // If a matching instruction is found, MergeForward is set to true if the // merge is to remove the first instruction and replace the second with // a pair-wise insn, and false if the reverse is true. - bool MergeForward; + bool MergeForward = false; // SExtIdx gives the index of the result of the load pair that must be // extended. 
The value of SExtIdx assumes that the paired load produces the // value in this order: (I, returned iterator), i.e., -1 means no value has // to be extended, 0 means I, and 1 means the returned iterator. - int SExtIdx; + int SExtIdx = -1; - LdStPairFlags() : MergeForward(false), SExtIdx(-1) {} + LdStPairFlags() = default; void setMergeForward(bool V = true) { MergeForward = V; } bool getMergeForward() const { return MergeForward; } @@ -78,6 +88,7 @@ typedef struct LdStPairFlags { struct AArch64LoadStoreOpt : public MachineFunctionPass { static char ID; + AArch64LoadStoreOpt() : MachineFunctionPass(ID) { initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry()); } @@ -162,8 +173,10 @@ struct AArch64LoadStoreOpt : public MachineFunctionPass { StringRef getPassName() const override { return AARCH64_LOAD_STORE_OPT_NAME; } }; + char AArch64LoadStoreOpt::ID = 0; -} // namespace + +} // end anonymous namespace INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt", AARCH64_LOAD_STORE_OPT_NAME, false, false) @@ -246,7 +259,7 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc, default: if (IsValidLdStrOpc) *IsValidLdStrOpc = false; - return UINT_MAX; + return std::numeric_limits::max(); case AArch64::STRDui: case AArch64::STURDi: case AArch64::STRQui: @@ -1543,7 +1556,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB, case AArch64::LDURBBi: case AArch64::LDURHHi: case AArch64::LDURWi: - case AArch64::LDURXi: { + case AArch64::LDURXi: if (tryToPromoteLoadFromStore(MBBI)) { Modified = true; break; @@ -1551,7 +1564,6 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB, ++MBBI; break; } - } } // 2) Merge adjacent zero stores into a wider store. // e.g., diff --git a/lib/Target/AArch64/AArch64VectorByElementOpt.cpp b/lib/Target/AArch64/AArch64VectorByElementOpt.cpp index e3b1d7cea48..f53af2315ec 100644 --- a/lib/Target/AArch64/AArch64VectorByElementOpt.cpp +++ b/lib/Target/AArch64/AArch64VectorByElementOpt.cpp @@ -19,13 +19,27 @@ // is rewritten into // dup v3.4s, v2.s[1] // fmla v0.4s, v1.4s, v3.4s +// //===----------------------------------------------------------------------===// #include "AArch64InstrInfo.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetSchedule.h" +#include "llvm/MC/MCInstrDesc.h" +#include "llvm/MC/MCSchedule.h" +#include "llvm/Pass.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetSubtargetInfo.h" +#include using namespace llvm; @@ -41,14 +55,15 @@ namespace { struct AArch64VectorByElementOpt : public MachineFunctionPass { static char ID; - AArch64VectorByElementOpt() : MachineFunctionPass(ID) { - initializeAArch64VectorByElementOptPass(*PassRegistry::getPassRegistry()); - } const TargetInstrInfo *TII; MachineRegisterInfo *MRI; TargetSchedModel SchedModel; + AArch64VectorByElementOpt() : MachineFunctionPass(ID) { + initializeAArch64VectorByElementOptPass(*PassRegistry::getPassRegistry()); + } + /// Based only on latency of instructions, determine if it is cost efficient /// to replace the instruction InstDesc by the two instructions InstDescRep1 /// and InstDescRep2. 
@@ -90,8 +105,10 @@ struct AArch64VectorByElementOpt : public MachineFunctionPass {
     return AARCH64_VECTOR_BY_ELEMENT_OPT_NAME;
   }
 };
+
 char AArch64VectorByElementOpt::ID = 0;
-} // namespace
+
+} // end anonymous namespace

 INITIALIZE_PASS(AArch64VectorByElementOpt, "aarch64-vectorbyelement-opt",
                 AARCH64_VECTOR_BY_ELEMENT_OPT_NAME, false, false)
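
The recurring clang-tidy modernize change throughout this patch is moving default values out of hand-written constructor initializer lists into in-class default member initializers, with the constructor then written inline or simply defaulted. A minimal before/after sketch of that pattern, using a hypothetical PairInfo struct rather than any code from the patch:

    // Old style (hypothetical): defaults live in the constructor initializer list.
    struct PairInfoOld {
      PairInfoOld() : Reg1(0), Reg2(0), Offset(0) {}
      unsigned Reg1;
      unsigned Reg2;
      int Offset;
    };

    // New style, as applied by the patch to Address, RegPairInfo, and
    // LdStPairFlags: in-class default member initializers plus a defaulted
    // constructor.
    struct PairInfoNew {
      unsigned Reg1 = 0;
      unsigned Reg2 = 0;
      int Offset = 0;

      PairInfoNew() = default;
    };

Keeping the defaults next to the member declarations means every constructor picks them up automatically, which is why the existing constructors above could be reduced to "= default" without changing behavior.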