Make constant arrays that are passed to functions const.
In theory this allows the compiler to skip materializing the array on the stack. In practice clang often fails to do that, but that's a different story. NFC.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@231571 91177308-0d34-0410-b5e6-96231b3b80d8
commit ed0266d8ee (parent 62ffaaac7c)
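As context for the diff below, here is a minimal standalone sketch of the two idioms the patch applies. The isVowel helper and the character data are illustrative stand-ins, not code from LLVM; the same reasoning applies to the EVT, MVT, unsigned, and register tables touched in the hunks that follow.

#include <cstdio>
#include <initializer_list>

// A minimal sketch (not LLVM code) of the two idioms the patch applies:
// a read-only lookup table declared const, and a range-based for over a
// braced init list so no named mutable array is needed at all.
static bool isVowel(char C) {
  // Declared const: the table is never written, so the compiler is at least
  // allowed to keep it in read-only storage instead of materializing it on
  // the stack on every call (which, per the commit message, clang often
  // fails to do in practice).
  const char Vowels[] = {'a', 'e', 'i', 'o', 'u'};
  for (char V : Vowels)
    if (C == V)
      return true;
  return false;
}

int main() {
  // Iterating a braced init list directly, as the MCELFStreamer and
  // PPCRegisterInfo hunks below do with ELF symbol types and PPC registers.
  for (char C : {'x', 'e', 'q', 'o'})
    std::printf("%c -> %d\n", C, isVowel(C));
  return 0;
}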
@@ -1936,7 +1936,7 @@ SDNode *SelectionDAGISel::Select_INLINEASM(SDNode *N) {
   std::vector<SDValue> Ops(N->op_begin(), N->op_end());
   SelectInlineAsmMemoryOperands(Ops);
 
-  EVT VTs[] = { MVT::Other, MVT::Glue };
+  const EVT VTs[] = {MVT::Other, MVT::Glue};
   SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N), VTs, Ops);
   New->setNodeId(-1);
   return New.getNode();
@@ -122,12 +122,11 @@ void MCELFStreamer::EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) {
 // If neither T1 < T2 nor T2 < T1 according to this ordering, use T2 (the user
 // provided type).
 static unsigned CombineSymbolTypes(unsigned T1, unsigned T2) {
-  unsigned TypeOrdering[] = {ELF::STT_NOTYPE, ELF::STT_OBJECT, ELF::STT_FUNC,
-                             ELF::STT_GNU_IFUNC, ELF::STT_TLS};
-  for (unsigned i = 0; i != array_lengthof(TypeOrdering); ++i) {
-    if (T1 == TypeOrdering[i])
+  for (unsigned Type : {ELF::STT_NOTYPE, ELF::STT_OBJECT, ELF::STT_FUNC,
+                        ELF::STT_GNU_IFUNC, ELF::STT_TLS}) {
+    if (T1 == Type)
       return T2;
-    if (T2 == TypeOrdering[i])
+    if (T2 == Type)
       return T1;
   }
@@ -1055,7 +1055,7 @@ SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
   SDValue Ops[] = {N->getOperand(2), // Mem operand;
                    Chain};
 
-  EVT ResTys[] = {MVT::Untyped, MVT::Other};
+  const EVT ResTys[] = {MVT::Untyped, MVT::Other};
 
   SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
   SDValue SuperReg = SDValue(Ld, 0);
@@ -1077,8 +1077,8 @@ SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                    N->getOperand(2), // Incremental
                    Chain};
 
-  EVT ResTys[] = {MVT::i64, // Type of the write back register
-                  MVT::Untyped, MVT::Other};
+  const EVT ResTys[] = {MVT::i64, // Type of the write back register
+                        MVT::Untyped, MVT::Other};
 
   SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
 
@@ -1119,8 +1119,8 @@ SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                              unsigned Opc) {
   SDLoc dl(N);
   EVT VT = N->getOperand(2)->getValueType(0);
-  EVT ResTys[] = {MVT::i64, // Type of the write back register
-                  MVT::Other}; // Type for the Chain
+  const EVT ResTys[] = {MVT::i64, // Type of the write back register
+                        MVT::Other}; // Type for the Chain
 
   // Form a REG_SEQUENCE to force register allocation.
   bool Is128Bit = VT.getSizeInBits() == 128;
@@ -1184,7 +1184,7 @@ SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
 
   SDValue RegSeq = createQTuple(Regs);
 
-  EVT ResTys[] = {MVT::Untyped, MVT::Other};
+  const EVT ResTys[] = {MVT::Untyped, MVT::Other};
 
   unsigned LaneNo =
       cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
@@ -1224,8 +1224,8 @@ SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
 
   SDValue RegSeq = createQTuple(Regs);
 
-  EVT ResTys[] = {MVT::i64, // Type of the write back register
-                  MVT::Untyped, MVT::Other};
+  const EVT ResTys[] = {MVT::i64, // Type of the write back register
+                        MVT::Untyped, MVT::Other};
 
   unsigned LaneNo =
       cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
@@ -1309,8 +1309,8 @@ SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
 
   SDValue RegSeq = createQTuple(Regs);
 
-  EVT ResTys[] = {MVT::i64, // Type of the write back register
-                  MVT::Other};
+  const EVT ResTys[] = {MVT::i64, // Type of the write back register
+                        MVT::Other};
 
   unsigned LaneNo =
       cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
@@ -3086,7 +3086,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
 
       // Store exclusive double return a i32 value which is the return status
       // of the issued store.
-      EVT ResTys[] = { MVT::i32, MVT::Other };
+      const EVT ResTys[] = {MVT::i32, MVT::Other};
 
       bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
       // Place arguments in the right order.
@@ -569,14 +569,12 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
     setTargetDAGCombine(ISD::LOAD);
 
     // It is legal to extload from v4i8 to v4i16 or v4i32.
-    MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
-                  MVT::v4i16, MVT::v2i16,
-                  MVT::v2i32};
-    for (unsigned i = 0; i < 6; ++i) {
+    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
+                   MVT::v2i32}) {
       for (MVT VT : MVT::integer_vector_valuetypes()) {
-        setLoadExtAction(ISD::EXTLOAD, VT, Tys[i], Legal);
-        setLoadExtAction(ISD::ZEXTLOAD, VT, Tys[i], Legal);
-        setLoadExtAction(ISD::SEXTLOAD, VT, Tys[i], Legal);
+        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
+        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
+        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
       }
     }
   }
@@ -160,11 +160,8 @@ PPCRegisterInfo::getNoPreservedMask() const {
 }
 
 void PPCRegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
-  unsigned PseudoRegs[] = { PPC::ZERO, PPC::ZERO8, PPC::RM };
-  for (unsigned i = 0, ie = array_lengthof(PseudoRegs); i != ie; ++i) {
-    unsigned Reg = PseudoRegs[i];
-    Mask[Reg / 32] &= ~(1u << (Reg % 32));
-  }
+  for (unsigned PseudoReg : {PPC::ZERO, PPC::ZERO8, PPC::RM})
+    Mask[PseudoReg / 32] &= ~(1u << (PseudoReg % 32));
 }
 
 BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
@@ -172,16 +172,12 @@ SITargetLowering::SITargetLowering(TargetMachine &TM,
   setOperationAction(ISD::UDIV, MVT::i64, Expand);
   setOperationAction(ISD::UREM, MVT::i64, Expand);
 
-  // We only support LOAD/STORE and vector manipulation ops for vectors
-  // with > 4 elements.
-  MVT VecTypes[] = {
-    MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32
-  };
-
   setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
   setOperationAction(ISD::SELECT, MVT::i1, Promote);
 
-  for (MVT VT : VecTypes) {
+  // We only support LOAD/STORE and vector manipulation ops for vectors
+  // with > 4 elements.
+  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32}) {
     for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
       switch(Op) {
       case ISD::LOAD:
@@ -62,8 +62,8 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
 
 #ifndef NDEBUG
   // If the base register might conflict with our physical registers, bail out.
-  unsigned ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
-                           X86::ECX, X86::EAX, X86::EDI};
+  const unsigned ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
+                                 X86::ECX, X86::EAX, X86::EDI};
   assert(!isBaseRegConflictPossible(DAG, ClobberSet));
 #endif
 
@@ -228,8 +228,8 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
     return SDValue();
 
   // If the base register might conflict with our physical registers, bail out.
-  unsigned ClobberSet[] = {X86::RCX, X86::RSI, X86::RDI,
-                           X86::ECX, X86::ESI, X86::EDI};
+  const unsigned ClobberSet[] = {X86::RCX, X86::RSI, X86::RDI,
+                                 X86::ECX, X86::ESI, X86::EDI};
   if (isBaseRegConflictPossible(DAG, ClobberSet))
     return SDValue();
 