Move matching for the x86 BMI BLSI/BLSMSK/BLSR instructions to isel patterns instead of a DAG combine. This weakens the ability to fold loads with them, because we aren't able to write patterns that match the same load twice. Maybe we should fix that if we care. In the meantime, the peephole optimizer will be able to fold some loads in its absence.
llvm-svn: 200824
This commit is contained in:
parent 2e0202b75e
commit 4c6c325efa
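
For reference, the bit-manipulation identities these instructions implement, and that the new isel patterns below match, look like this in C++. This is an illustrative sketch only, not part of the commit:

    #include <cstdint>

    // Illustrative only: the source-level idioms behind the BMI instructions,
    // matching the identities named in the patterns added below.
    uint32_t blsi(uint32_t x)   { return x & (0u - x); }  // isolate lowest set bit
    uint32_t blsr(uint32_t x)   { return x & (x - 1u); }  // reset lowest set bit
    uint32_t blsmsk(uint32_t x) { return x ^ (x - 1u); }  // mask up to lowest set bit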
@@ -14034,9 +14034,6 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
   case X86ISD::OR:      return "X86ISD::OR";
   case X86ISD::XOR:     return "X86ISD::XOR";
   case X86ISD::AND:     return "X86ISD::AND";
-  case X86ISD::BLSI:    return "X86ISD::BLSI";
-  case X86ISD::BLSMSK:  return "X86ISD::BLSMSK";
-  case X86ISD::BLSR:    return "X86ISD::BLSR";
   case X86ISD::BZHI:    return "X86ISD::BZHI";
   case X86ISD::BEXTR:   return "X86ISD::BEXTR";
   case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
@@ -18089,9 +18086,7 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
   if (R.getNode())
     return R;

-  // Create BLSI, BLSR, and BZHI instructions
-  // BLSI is X & (-X)
-  // BLSR is X & (X-1)
+  // Create BEXTR and BZHI instructions
   // BZHI is X & ((1 << Y) - 1)
   // BEXTR is ((X >> imm) & (2**size-1))
   if (VT == MVT::i32 || VT == MVT::i64) {
@@ -18099,28 +18094,6 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
     SDValue N1 = N->getOperand(1);
     SDLoc DL(N);

-    if (Subtarget->hasBMI()) {
-      // Check LHS for neg
-      if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 &&
-          isZero(N0.getOperand(0)))
-        return DAG.getNode(X86ISD::BLSI, DL, VT, N1);
-
-      // Check RHS for neg
-      if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 &&
-          isZero(N1.getOperand(0)))
-        return DAG.getNode(X86ISD::BLSI, DL, VT, N0);
-
-      // Check LHS for X-1
-      if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 &&
-          isAllOnes(N0.getOperand(1)))
-        return DAG.getNode(X86ISD::BLSR, DL, VT, N1);
-
-      // Check RHS for X-1
-      if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 &&
-          isAllOnes(N1.getOperand(1)))
-        return DAG.getNode(X86ISD::BLSR, DL, VT, N0);
-    }
-
    if (Subtarget->hasBMI2()) {
      // Check for (and (add (shl 1, Y), -1), X)
      if (N0.getOpcode() == ISD::ADD && isAllOnes(N0.getOperand(1))) {
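
The BZHI/BEXTR forms that PerformAndCombine continues to recognize correspond roughly to the following C++, per the comments kept above. This is an illustrative sketch only; shift amounts and field widths are assumed to be in range:

    #include <cstdint>

    // Illustrative only, following the retained comments:
    //   BZHI  is X & ((1 << Y) - 1)
    //   BEXTR is ((X >> imm) & (2**size - 1))
    uint32_t bzhi_idiom(uint32_t x, uint32_t y) {
      return x & ((1u << y) - 1u);             // assumes y < 32
    }
    uint32_t bextr_idiom(uint32_t x, uint32_t imm, uint32_t size) {
      return (x >> imm) & ((1u << size) - 1u); // assumes imm, size < 32
    }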
@@ -18396,7 +18369,6 @@ static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
-  EVT VT = N->getValueType(0);
   if (DCI.isBeforeLegalizeOps())
     return SDValue();

@@ -18406,28 +18378,6 @@ static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
       return RV;
   }

-  // Try forming BMI if it is available.
-  if (!Subtarget->hasBMI())
-    return SDValue();
-
-  if (VT != MVT::i32 && VT != MVT::i64)
-    return SDValue();
-
-  assert(Subtarget->hasBMI() && "Creating BLSMSK requires BMI instructions");
-
-  // Create BLSMSK instructions by finding X ^ (X-1)
-  SDValue N0 = N->getOperand(0);
-  SDValue N1 = N->getOperand(1);
-  SDLoc DL(N);
-
-  if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 &&
-      isAllOnes(N0.getOperand(1)))
-    return DAG.getNode(X86ISD::BLSMSK, DL, VT, N1);
-
-  if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 &&
-      isAllOnes(N1.getOperand(1)))
-    return DAG.getNode(X86ISD::BLSMSK, DL, VT, N0);
-
   return SDValue();
 }

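As a concrete check of the X ^ (X-1) identity that this removed combine relied on, and that the BLSMSK isel pattern below now matches, here is a worked example (not from the commit):

    // BLSMSK identity, worked on a small constant:
    //   x         = 0b101000
    //   x - 1     = 0b100111   (the borrow flips the trailing zeros and the lowest set bit)
    //   x ^ (x-1) = 0b001111   -> mask up to and including the lowest set bit
    static_assert((0b101000u ^ (0b101000u - 1u)) == 0b001111u,
                  "X ^ (X-1) yields the mask up to the lowest set bit");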
@@ -295,9 +295,6 @@ namespace llvm {
       ADD, SUB, ADC, SBB, SMUL,
       INC, DEC, OR, XOR, AND,

-      BLSI,   // BLSI - Extract lowest set isolated bit
-      BLSMSK, // BLSMSK - Get mask up to lowest set bit
-      BLSR,   // BLSR - Reset lowest set bit
       BZHI,   // BZHI - Zero high bits
       BEXTR,  // BEXTR - Bit field extract

@@ -249,9 +249,6 @@ def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
 def X86and_flag  : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
                           [SDNPCommutative]>;

-def X86blsi   : SDNode<"X86ISD::BLSI", SDTIntUnaryOp>;
-def X86blsmsk : SDNode<"X86ISD::BLSMSK", SDTIntUnaryOp>;
-def X86blsr   : SDNode<"X86ISD::BLSR", SDTIntUnaryOp>;
 def X86bzhi   : SDNode<"X86ISD::BZHI", SDTIntShiftOp>;
 def X86bextr  : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;

@@ -1978,30 +1975,47 @@ let Predicates = [HasBMI], Defs = [EFLAGS] in {
 }

 multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
-                  RegisterClass RC, X86MemOperand x86memop, SDNode OpNode,
-                  PatFrag ld_frag> {
+                  RegisterClass RC, X86MemOperand x86memop> {
+  let hasSideEffects = 0 in {
   def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
              !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"),
-             [(set RC:$dst, (OpNode RC:$src)), (implicit EFLAGS)]>, T8, VEX_4V;
+             []>, T8, VEX_4V;
+  let mayLoad = 1 in
   def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"),
-             [(set RC:$dst, (OpNode (ld_frag addr:$src))), (implicit EFLAGS)]>,
-             T8, VEX_4V;
+             []>, T8, VEX_4V;
+  }
 }

 let Predicates = [HasBMI], Defs = [EFLAGS] in {
-  defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem,
-                        X86blsr, loadi32>;
-  defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem,
-                        X86blsr, loadi64>, VEX_W;
-  defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem,
-                          X86blsmsk, loadi32>;
-  defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem,
-                          X86blsmsk, loadi64>, VEX_W;
-  defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem,
-                        X86blsi, loadi32>;
-  defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem,
-                        X86blsi, loadi64>, VEX_W;
+  defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem>;
+  defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem>, VEX_W;
+  defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem>;
+  defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem>, VEX_W;
+  defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem>;
+  defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem>, VEX_W;
 }

+//===----------------------------------------------------------------------===//
+// Pattern fragments to auto generate BMI instructions.
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasBMI] in {
+  // FIXME: patterns for the load versions are not implemented
+  def : Pat<(and GR32:$src, (add GR32:$src, -1)),
+            (BLSR32rr GR32:$src)>;
+  def : Pat<(and GR64:$src, (add GR64:$src, -1)),
+            (BLSR64rr GR64:$src)>;
+
+  def : Pat<(xor GR32:$src, (add GR32:$src, -1)),
+            (BLSMSK32rr GR32:$src)>;
+  def : Pat<(xor GR64:$src, (add GR64:$src, -1)),
+            (BLSMSK64rr GR64:$src)>;
+
+  def : Pat<(and GR32:$src, (ineg GR32:$src)),
+            (BLSI32rr GR32:$src)>;
+  def : Pat<(and GR64:$src, (ineg GR64:$src)),
+            (BLSI64rr GR64:$src)>;
+}
+
 multiclass bmi_bextr_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
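
The FIXME above is the load-folding gap described in the commit message: a memory-form pattern would have to match the same load twice. The kind of source it affects looks roughly like this (illustrative C++ only; with BMI enabled, the old combine allowed the load to be folded into the memory form of BLSR, while that folding now depends on the peephole optimizer):

    #include <cstdint>

    // Illustrative only: both uses of *p come from one load, which is the
    // "load the same thing twice" case the commit message mentions.
    uint32_t blsr_of_load(const uint32_t *p) {
      return *p & (*p - 1u);  // reset lowest set bit of the loaded value
    }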