Fix Whitespace.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@116173 91177308-0d34-0410-b5e6-96231b3b80d8
Michael J. Spencer 2010-10-10 22:04:20 +00:00
parent 27287664c2
commit ec38de2ca8
2 changed files with 90 additions and 90 deletions

File 1 of 2

@@ -64,7 +64,7 @@ public:
  bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const;
};
} // end anonymous namespace
static unsigned getRelaxedOpcode(unsigned Op) {
  switch (Op) {
@@ -345,7 +345,7 @@ public:
  }
};
} // end anonymous namespace
TargetAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
                                               const std::string &TT) {

File 2 of 2

@@ -63,9 +63,9 @@ static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2);
static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
  if (TM.getSubtarget<X86Subtarget>().isTargetDarwin()) {
    if (is64Bit) return new X8664_MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
@@ -74,7 +74,7 @@ static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
    return new X8632_ELFTargetObjectFile(TM);
  } else if (TM.getSubtarget<X86Subtarget>().isTargetCOFF()) {
    return new TargetLoweringObjectFileCOFF();
  }
  llvm_unreachable("unknown subtarget type");
}
@@ -213,7 +213,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  }
  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
    setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
    if (Subtarget->is64Bit()) {
@@ -343,7 +343,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  // We may not have a libcall for MEMBARRIER so we should lower this.
  setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom);
  // On X86 and X86-64, atomic operations are lowered to locked instructions.
  // Locked instructions, in turn, have implicit fence semantics (all memory
  // operations are flushed before issuing the locked instruction, and they
@@ -745,7 +745,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::AND, SVT, Promote);
      AddPromotedToType (ISD::AND, SVT, MVT::v2i64);
      setOperationAction(ISD::OR, SVT, Promote);
@@ -1085,7 +1085,7 @@ unsigned X86TargetLowering::getJumpTableEncoding() const {
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    return MachineJumpTableInfo::EK_Custom32;
  // Otherwise, use the normal jump table encoding heuristics.
  return TargetLowering::getJumpTableEncoding();
}
@@ -1212,7 +1212,7 @@ bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
#include "X86GenCallingConv.inc"
bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
@@ -1299,7 +1299,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
        }
      }
    }
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
    Flag = Chain.getValue(1);
  }
@@ -1313,7 +1313,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
    MachineFunction &MF = DAG.getMachineFunction();
    X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
    unsigned Reg = FuncInfo->getSRetReturnReg();
    assert(Reg &&
           "SRetReturnReg should have been set in LowerFormalArguments().");
    SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
@@ -1478,7 +1478,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          DebugLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile*/false, /*AlwaysInline=*/true,
                       MachinePointerInfo(), MachinePointerInfo());
@@ -2893,15 +2893,15 @@ bool X86::isPSHUFLWMask(ShuffleVectorSDNode *N) {
static bool isPALIGNRMask(const SmallVectorImpl<int> &Mask, EVT VT,
                          bool hasSSSE3) {
  int i, e = VT.getVectorNumElements();
  // Do not handle v2i64 / v2f64 shuffles with palignr.
  if (e < 4 || !hasSSSE3)
    return false;
  for (i = 0; i != e; ++i)
    if (Mask[i] >= 0)
      break;
  // All undef, not a palignr.
  if (i == e)
    return false;
@@ -2912,13 +2912,13 @@ static bool isPALIGNRMask(const SmallVectorImpl<int> &Mask, EVT VT,
  bool NeedsUnary = false;
  int s = Mask[i] - i;
  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != e; ++i) {
    int m = Mask[i];
    if (m < 0)
      continue;
    Unary = Unary && (m < (int)e);
    NeedsUnary = NeedsUnary || (m < s);
@@ -3006,10 +3006,10 @@ bool X86::isMOVHLPSMask(ShuffleVectorSDNode *N) {
/// <2, 3, 2, 3>
bool X86::isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N) {
  unsigned NumElems = N->getValueType(0).getVectorNumElements();
  if (NumElems != 4)
    return false;
  return isUndefOrEqual(N->getMaskElt(0), 2) &&
         isUndefOrEqual(N->getMaskElt(1), 3) &&
         isUndefOrEqual(N->getMaskElt(2), 2) &&
@@ -3979,7 +3979,7 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
SDValue
X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                          SelectionDAG &DAG) const {
  // Check if the scalar load can be widened into a vector load. And if
  // the address is "base + cst" see if the cst can be "absorbed" into
  // the shuffle mask.
@@ -4047,12 +4047,12 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
  return SDValue();
}
/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
/// vector of type 'VT', see if the elements can be replaced by a single large
/// load which has the same value as a build_vector whose operands are 'elts'.
///
/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
///
/// FIXME: we'd also like to handle the case where the last elements are zero
/// rather than undef via VZEXT_LOAD, but we do not detect that case today.
/// There's even a handy isZeroNode for that purpose.
@@ -4060,16 +4060,16 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
                                        DebugLoc &DL, SelectionDAG &DAG) {
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElems = Elts.size();
  LoadSDNode *LDBase = NULL;
  unsigned LastLoadedElt = -1U;
  // For each element in the initializer, see if we've found a load or an undef.
  // If we don't find an initial load element, or later load elements are
  // non-consecutive, bail out.
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue Elt = Elts[i];
    if (!Elt.getNode() ||
        (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
      return SDValue();
@@ -4349,20 +4349,20 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  // Check for a build vector of consecutive loads.
  for (unsigned i = 0; i < NumElems; ++i)
    V[i] = Op.getOperand(i);
  // Check for elements which are consecutive loads.
  SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG);
  if (LD.getNode())
    return LD;
  // For SSE 4.1, use insertps to put the high elements into the low element.
  if (getSubtarget()->hasSSE41()) {
    SDValue Result;
    if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
      Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
    else
      Result = DAG.getUNDEF(VT);
    for (unsigned i = 1; i < NumElems; ++i) {
      if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
@@ -4370,7 +4370,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
    }
    return Result;
  }
  // Otherwise, expand into a number of unpckl*, start by extending each of
  // our (non-undef) elements to the full vector width with the element in the
  // bottom slot of the vector (which generates no code for SSE).
@@ -4396,7 +4396,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
      if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
          EltStride == NumElems/2)
        continue;
      V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
    }
    EltStride >>= 1;
@@ -5826,7 +5826,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
SDValue
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  if (Op.getValueType() == MVT::v1i64 &&
      Op.getOperand(0).getValueType() == MVT::i64)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
@@ -6082,12 +6082,12 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const EVT PtrVT, TLSModel::Model model,
                                   bool is64Bit) {
  DebugLoc dl = GA->getDebugLoc();
  // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
                                                         is64Bit ? 257 : 256));
  SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                                      DAG.getIntPtrConstant(0),
                                      MachinePointerInfo(Ptr), false, false, 0);
@@ -6108,7 +6108,7 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
  // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
  // exec)
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
@@ -6124,29 +6124,29 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GA->getGlobal();
  if (Subtarget->isTargetELF()) {
    // TODO: implement the "local dynamic" model
    // TODO: implement the "initial exec"model for pic executables
    // If GV is an alias then use the aliasee for determining
    // thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GV = GA->resolveAliasedGlobal(false);
    TLSModel::Model model
      = getTLSModel(GV, getTargetMachine().getRelocationModel());
    switch (model) {
      case TLSModel::GeneralDynamic:
      case TLSModel::LocalDynamic: // not implemented
        if (Subtarget->is64Bit())
          return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
        return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
      case TLSModel::InitialExec:
      case TLSModel::LocalExec:
        return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
@@ -6157,7 +6157,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
    unsigned char OpFlag = 0;
    unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
        X86ISD::WrapperRIP : X86ISD::Wrapper;
    // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
    // global base reg.
    bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) &&
@@ -6166,24 +6166,24 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
      OpFlag = X86II::MO_TLVP_PIC_BASE;
    else
      OpFlag = X86II::MO_TLVP;
    DebugLoc DL = Op.getDebugLoc();
    SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                                getPointerTy(),
                                                GA->getOffset(), OpFlag);
    SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
    // With PIC32, the address is actually $g + Offset.
    if (PIC32)
      Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg,
                                       DebugLoc(), getPointerTy()),
                           Offset);
    // Lowering the machine isd will make sure everything is in the right
    // location.
    SDValue Args[] = { Offset };
    SDValue Chain = DAG.getNode(X86ISD::TLSCALL, DL, MVT::Other, Args, 1);
    // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    MFI->setAdjustsStack(true);
@@ -6193,7 +6193,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
    unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
    return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
  }
  assert(false &&
         "TLS not implemented for this target.");
@@ -6280,7 +6280,7 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
}
SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
                                     SDValue StackSlot,
                                     SelectionDAG &DAG) const {
  // Build the FILD
  DebugLoc DL = Op.getDebugLoc();
@@ -6290,15 +6290,15 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
  else
    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
  unsigned ByteSize = SrcVT.getSizeInBits()/8;
  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
  MachineMemOperand *MMO =
    DAG.getMachineFunction()
    .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                          MachineMemOperand::MOLoad, ByteSize, ByteSize);
  SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
  SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
                                             X86ISD::FILD, DL,
@@ -6324,7 +6324,7 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
      DAG.getMachineFunction()
      .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                            MachineMemOperand::MOStore, SSFISize, SSFISize);
    Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
                                    Ops, array_lengthof(Ops),
                                    Op.getValueType(), MMO);
@@ -6516,7 +6516,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
      DAG.getMachineFunction()
      .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                            MachineMemOperand::MOLoad, 8, 8);
    SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
    SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
    SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3,
@@ -6582,8 +6582,8 @@ FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  unsigned Opc;
  switch (DstTy.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
@@ -6604,7 +6604,7 @@ FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
    SDValue Ops[] = {
      Chain, StackSlot, DAG.getValueType(TheVT)
    };
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                              MachineMemOperand::MOLoad, MemSize, MemSize);
@@ -6614,7 +6614,7 @@ FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  }
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                            MachineMemOperand::MOStore, MemSize, MemSize);
@@ -7236,7 +7236,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
    if (C && C->getAPIntValue() == 1)
      Cond = Cond.getOperand(0);
  }
@@ -7269,7 +7269,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
      if (NewSetCC.getNode()) {
        CC = NewSetCC.getOperand(0);
@@ -7343,7 +7343,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
    if (C && C->getAPIntValue() == 1)
      Cond = Cond.getOperand(0);
  }
@@ -7444,7 +7444,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
      if (NewSetCC.getNode()) {
        CC = NewSetCC.getOperand(0);
@@ -7579,7 +7579,7 @@ SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
                       DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
                       false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
@@ -8117,11 +8117,11 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
  int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                            MachineMemOperand::MOStore, 2, 2);
  SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
  SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
                                          DAG.getVTList(MVT::Other),
@@ -8278,7 +8278,7 @@ SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const {
                     Op.getOperand(1), DAG.getConstant(23, MVT::i32));
    ConstantInt *CI = ConstantInt::get(*Context, APInt(32, 0x3f800000U));
    std::vector<Constant*> CV(4, CI);
    Constant *C = ConstantVector::get(CV);
    SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
@@ -8318,13 +8318,13 @@ SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const {
                    R, M, Op);
    // a += a
    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
    C = ConstantVector::get(CVM2);
    CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
    M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                    MachinePointerInfo::getConstantPool(),
                    false, false, 16);
    // r = pblendv(r, psllw(r & (char16)63, 2), a);
    M = DAG.getNode(ISD::AND, dl, VT, R, M);
    M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
@@ -8335,7 +8335,7 @@ SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const {
                    R, M, Op);
    // a += a
    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
    // return pblendv(r, r+r, a);
    R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                    DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32),
@@ -8415,10 +8415,10 @@ SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{
  DebugLoc dl = Op.getDebugLoc();
  if (!Subtarget->hasSSE2()) {
    SDValue Chain = Op.getOperand(0);
    SDValue Zero = DAG.getConstant(0,
                                   Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
    SDValue Ops[] = {
      DAG.getRegister(X86::ESP, MVT::i32), // Base
@@ -8429,30 +8429,30 @@ SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{
      Zero,
      Chain
    };
    SDNode *Res =
      DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops,
                         array_lengthof(Ops));
    return SDValue(Res, 0);
  }
  unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
  if (!isDev)
    return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
  unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
  unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
  if (!Op1 && !Op2 && !Op3 && Op4)
    return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0));
  // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
  if (Op1 && !Op2 && !Op3 && !Op4)
    return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0));
  // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)),
  //           (MFENCE)>;
  return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
}
@@ -8512,10 +8512,10 @@ SDValue X86TargetLowering::LowerBIT_CONVERT(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT SrcVT = Op.getOperand(0).getValueType();
  EVT DstVT = Op.getValueType();
  assert((Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
          Subtarget->hasMMX() && !DisableMMX) &&
         "Unexpected custom BIT_CONVERT");
  assert((DstVT == MVT::i64 ||
          (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
         "Unexpected custom BIT_CONVERT");
  // i64 <=> MMX conversions are Legal.
@@ -9575,20 +9575,20 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
  // our load from the relocation, sticking it in either RDI (x86-64)
  // or EAX and doing an indirect call. The return value will then
  // be in the normal return register.
  const X86InstrInfo *TII
    = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction *F = BB->getParent();
  assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
  assert(MI->getOperand(3).isGlobal() && "This should be a global");
  if (Subtarget->is64Bit()) {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV64rm), X86::RDI)
    .addReg(X86::RIP)
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
@@ -9598,7 +9598,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
                                      TII->get(X86::MOV32rm), X86::EAX)
    .addReg(0)
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
@@ -9608,13 +9608,13 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
                                      TII->get(X86::MOV32rm), X86::EAX)
    .addReg(TII->getGlobalBaseReg(F))
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
  }
  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
@@ -9952,7 +9952,7 @@ unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
  // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
  if (Op.getOpcode() == X86ISD::SETCC_CARRY)
    return Op.getValueType().getScalarType().getSizeInBits();
  // Fallback case.
  return 1;
}
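
Not part of the commit itself, but for readers unfamiliar with whitespace-only changes: every hunk above differs from the original only in trailing blanks at the ends of lines, so the old and new sides of the diff render identically. A minimal standalone C++ sketch of that kind of cleanup follows; the program, its helper name, and its rewrite-in-place approach are illustrative assumptions, not code from the LLVM tree or from this revision.

    #include <fstream>
    #include <iostream>
    #include <string>
    #include <vector>

    // Illustrative only: trim trailing spaces and tabs from one line.
    static std::string stripTrailingWhitespace(const std::string &Line) {
      std::string::size_type End = Line.find_last_not_of(" \t");
      return End == std::string::npos ? std::string() : Line.substr(0, End + 1);
    }

    int main(int argc, char **argv) {
      if (argc != 2) {
        std::cerr << "usage: " << argv[0] << " <file>\n";
        return 1;
      }
      // Read the whole file, trimming each line as it is read.
      std::ifstream In(argv[1]);
      if (!In) {
        std::cerr << "error: cannot open " << argv[1] << "\n";
        return 1;
      }
      std::vector<std::string> Lines;
      for (std::string Line; std::getline(In, Line);)
        Lines.push_back(stripTrailingWhitespace(Line));
      In.close();
      // Write the cleaned lines back over the original file.
      std::ofstream Out(argv[1], std::ios::trunc);
      for (const std::string &Line : Lines)
        Out << Line << '\n';
      return 0;
    }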