[X86][SSE] Vectorized i8 and i16 shift operators

This patch ensures that SHL/SRL/SRA shifts of i8 and i16 vectors avoid scalarization. It builds on the existing vectorized i8 SHL implementation, which moves the shift amount's bits up to the sign-bit position and applies the 4-, 2- and 1-bit shifts as separate sign-bit-selected steps (a scalar sketch of this decomposition follows the list below), with several improvements:

1 - SSE41 targets can use (v)pblendvb directly with the sign bit instead of performing a comparison to feed into a VSELECT node.
2 - pre-SSE41 targets were masking and comparing against a 0x80 constant. We avoid this by using the fact that a set sign bit means a negative integer, which can be compared against zero to feed the VSELECT, avoiding the need for a constant mask (zero generation is much cheaper).
3 - SRA i8 needs each byte unpacked to the upper byte of an i16 so that the i16 psraw instruction can be used for correct sign extension. This takes more work than SHL/SRL, but perf tests indicate it is still beneficial.
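
To make the decomposition concrete, here is a minimal scalar C++ sketch of the two i8 tricks (the sign-bit step decomposition and the SRA unpack). This is standalone illustration code, not the SelectionDAG lowering itself; the function names and demo values are invented for this write-up, and the signed right shift is assumed to behave arithmetically, as psraw does.

  #include <cstdint>
  #include <cstdio>

  // Per-lane step decomposition: move the 3-bit shift amount up so its top bit
  // sits in the byte's sign bit (amt << 5), then at each step select between
  // the value and a fixed shift based on that sign bit, doubling the mask so
  // the next amount bit becomes the sign bit. In the vector code the select is
  // (v)pblendvb on SSE41, or a compare-against-zero feeding VSELECT before it.
  static uint8_t shl_i8_by_steps(uint8_t r, uint8_t amt) {
    uint8_t a = (uint8_t)(amt << 5);         // bit 2 of amt is now the sign bit
    r = (a & 0x80) ? (uint8_t)(r << 4) : r;  // r = select(sign(a), r << 4, r)
    a = (uint8_t)(a + a);                    // a += a: expose the next amount bit
    r = (a & 0x80) ? (uint8_t)(r << 2) : r;  // r = select(sign(a), r << 2, r)
    a = (uint8_t)(a + a);
    r = (a & 0x80) ? (uint8_t)(r << 1) : r;  // r = select(sign(a), r << 1, r)
    return r;
  }

  // SRA trick: place the byte in the upper half of a 16-bit lane so a 16-bit
  // arithmetic shift sign-extends it correctly, then shift back to the low
  // byte (the lowering does the same step decomposition at i16 width, then
  // srl-by-8 and packuswb the halves back together).
  static uint8_t sra_i8_via_i16(uint8_t r, uint8_t amt) {
    int16_t hi = (int16_t)((unsigned)r << 8); // byte in the upper half
    hi = (int16_t)(hi >> (amt & 7));          // arithmetic shift at i16 width
    return (uint8_t)((uint16_t)hi >> 8);      // logical shift back down
  }

  int main() {
    printf("0x5b <<  3 = 0x%02x\n", shl_i8_by_steps(0x5b, 3));  // 0xd8
    printf("0x90 >>s 2 = 0x%02x\n", sra_i8_via_i16(0x90, 2));   // 0xe4
    return 0;
  }

The per-lane (a & 0x80) ? x : r select is exactly what the sign-bit-driven VSELECT/PBLENDVB provides in the vector lowering.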

The i16 implementation is similar to the i8 one but simpler: we perform 8-, 4-, 2- and 1-bit shifts, but less shift masking is involved. The SSE41 use of (v)pblendvb does, however, require the i16 shift amount to be splatted to both bytes.
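
The same decomposition for i16, again as a hedged standalone sketch rather than the actual DAG construction (the function name is invented). Four selected steps cover the 4-bit shift amount; on SSE41 the real lowering also ORs in (amt << 4) so both bytes of each word carry the mask for the byte-wise (v)pblendvb.

  #include <cstdint>
  #include <cstdio>

  static uint16_t shl_i16_by_steps(uint16_t r, uint16_t amt) {
    uint16_t a = (uint16_t)(amt << 12);         // bit 3 of amt is now the sign bit
    r = (a & 0x8000) ? (uint16_t)(r << 8) : r;  // r = select(sign(a), r << 8, r)
    a = (uint16_t)(a + a);
    r = (a & 0x8000) ? (uint16_t)(r << 4) : r;  // r = select(sign(a), r << 4, r)
    a = (uint16_t)(a + a);
    r = (a & 0x8000) ? (uint16_t)(r << 2) : r;  // r = select(sign(a), r << 2, r)
    a = (uint16_t)(a + a);
    r = (a & 0x8000) ? (uint16_t)(r << 1) : r;  // r = select(sign(a), r << 1, r)
    return r;
  }

  int main() {
    printf("0x1234 << 5 = 0x%04x\n", shl_i16_by_steps(0x1234, 5)); // 0x4680
    return 0;
  }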

Tested on SSE2, SSE41 and AVX machines.

Differential Revision: http://reviews.llvm.org/D9474

llvm-svn: 239509
Simon Pilgrim 2015-06-11 07:46:37 +00:00
parent 15dd6a1961
commit c3425b72b9
8 changed files with 701 additions and 1376 deletions


@@ -17012,36 +17012,111 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
}
}
if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
if (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget->hasInt256())) {
MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
unsigned ShiftOpcode = Op->getOpcode();
auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
// On SSE41 targets we make use of the fact that VSELECT lowers
// to PBLENDVB which selects bytes based just on the sign bit.
if (Subtarget->hasSSE41()) {
V0 = DAG.getBitcast(VT, V0);
V1 = DAG.getBitcast(VT, V1);
Sel = DAG.getBitcast(VT, Sel);
return DAG.getBitcast(SelVT,
DAG.getNode(ISD::VSELECT, dl, VT, Sel, V0, V1));
}
// On pre-SSE41 targets we test for the sign bit by comparing to
// zero - a negative value will set all bits of the lanes to true
// and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
SDValue Z = getZeroVector(SelVT, Subtarget, DAG, dl);
SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
return DAG.getNode(ISD::VSELECT, dl, SelVT, C, V0, V1);
};
// Turn 'a' into a mask suitable for VSELECT: a = a << 5;
Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, dl, VT));
// We can safely do this using i16 shifts as we're only interested in
// the 3 lower bits of each byte.
Amt = DAG.getBitcast(ExtVT, Amt);
Amt = DAG.getNode(ISD::SHL, dl, ExtVT, Amt, DAG.getConstant(5, dl, ExtVT));
Amt = DAG.getBitcast(VT, Amt);
SDValue VSelM = DAG.getConstant(0x80, dl, VT);
SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
if (Op->getOpcode() == ISD::SHL || Op->getOpcode() == ISD::SRL) {
// r = VSELECT(r, shift(r, 4), a);
SDValue M =
DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
R = SignBitSelect(VT, Amt, M, R);
// r = VSELECT(r, shl(r, 4), a);
SDValue M = DAG.getNode(ISD::SHL, dl, VT, R, DAG.getConstant(4, dl, VT));
R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// a += a
Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
// r = VSELECT(r, shift(r, 2), a);
M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
R = SignBitSelect(VT, Amt, M, R);
// r = VSELECT(r, shl(r, 2), a);
M = DAG.getNode(ISD::SHL, dl, VT, R, DAG.getConstant(2, dl, VT));
R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// a += a
Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
// return VSELECT(r, shift(r, 1), a);
M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
R = SignBitSelect(VT, Amt, M, R);
return R;
}
// return VSELECT(r, r+r, a);
R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
DAG.getNode(ISD::ADD, dl, VT, R, R), R);
return R;
if (Op->getOpcode() == ISD::SRA) {
// For SRA we need to unpack each byte to the higher byte of an i16 vector
// so we can correctly sign extend. We don't care what happens to the
// lower byte.
SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), Amt);
SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), Amt);
SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), R);
SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), R);
ALo = DAG.getBitcast(ExtVT, ALo);
AHi = DAG.getBitcast(ExtVT, AHi);
RLo = DAG.getBitcast(ExtVT, RLo);
RHi = DAG.getBitcast(ExtVT, RHi);
// r = VSELECT(r, shift(r, 4), a);
SDValue MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
DAG.getConstant(4, dl, ExtVT));
SDValue MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
DAG.getConstant(4, dl, ExtVT));
RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
// a += a
ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
// r = VSELECT(r, shift(r, 2), a);
MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
DAG.getConstant(2, dl, ExtVT));
MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
DAG.getConstant(2, dl, ExtVT));
RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
// a += a
ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
// r = VSELECT(r, shift(r, 1), a);
MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
DAG.getConstant(1, dl, ExtVT));
MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
DAG.getConstant(1, dl, ExtVT));
RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
// Logical shift the result back to the lower byte, leaving a zero upper
// byte, meaning that we can safely pack with PACKUSWB.
RLo =
DAG.getNode(ISD::SRL, dl, ExtVT, RLo, DAG.getConstant(8, dl, ExtVT));
RHi =
DAG.getNode(ISD::SRL, dl, ExtVT, RHi, DAG.getConstant(8, dl, ExtVT));
return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
}
}
// It's worth extending once and using the v8i32 shifts for 16-bit types, but
@@ -17075,6 +17150,67 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
}
if (VT == MVT::v8i16) {
unsigned ShiftOpcode = Op->getOpcode();
auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
// On SSE41 targets we make use of the fact that VSELECT lowers
// to PBLENDVB which selects bytes based just on the sign bit.
if (Subtarget->hasSSE41()) {
MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
V0 = DAG.getBitcast(ExtVT, V0);
V1 = DAG.getBitcast(ExtVT, V1);
Sel = DAG.getBitcast(ExtVT, Sel);
return DAG.getBitcast(
VT, DAG.getNode(ISD::VSELECT, dl, ExtVT, Sel, V0, V1));
}
// On pre-SSE41 targets we splat the sign bit - a negative value will
// set all bits of the lanes to true and VSELECT uses that in
// its OR(AND(V0,C),AND(V1,~C)) lowering.
SDValue C =
DAG.getNode(ISD::SRA, dl, VT, Sel, DAG.getConstant(15, dl, VT));
return DAG.getNode(ISD::VSELECT, dl, VT, C, V0, V1);
};
// Turn 'a' into a mask suitable for VSELECT: a = a << 12;
if (Subtarget->hasSSE41()) {
// On SSE41 targets we need to replicate the shift mask in both
// bytes for PBLENDVB.
Amt = DAG.getNode(
ISD::OR, dl, VT,
DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(4, dl, VT)),
DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT)));
} else {
Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT));
}
// r = VSELECT(r, shift(r, 8), a);
SDValue M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(8, dl, VT));
R = SignBitSelect(Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// r = VSELECT(r, shift(r, 4), a);
M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
R = SignBitSelect(Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// r = VSELECT(r, shift(r, 2), a);
M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
R = SignBitSelect(Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// return VSELECT(r, shift(r, 1), a);
M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
R = SignBitSelect(Amt, M, R);
return R;
}
// Decompose 256-bit shifts into smaller 128-bit shifts.
if (VT.is256BitVector()) {
unsigned NumElems = VT.getVectorNumElements();


@@ -153,13 +153,13 @@ unsigned X86TTIImpl::getArithmeticInstrCost(
{ ISD::SHL, MVT::v4i64, 1 },
{ ISD::SRL, MVT::v4i64, 1 },
{ ISD::SHL, MVT::v32i8, 42 }, // cmpeqb sequence.
{ ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence.
{ ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
{ ISD::SRL, MVT::v32i8, 32*10 }, // Scalarized.
{ ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence.
{ ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
{ ISD::SRA, MVT::v32i8, 32*10 }, // Scalarized.
{ ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence.
{ ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
{ ISD::SRA, MVT::v4i64, 4*10 }, // Scalarized.
@@ -253,19 +253,19 @@ unsigned X86TTIImpl::getArithmeticInstrCost(
// to ISel. The cost model must return worst case assumptions because it is
// used for vectorization and we don't want to make vectorized code worse
// than scalar code.
{ ISD::SHL, MVT::v16i8, 30 }, // cmpeqb sequence.
{ ISD::SHL, MVT::v8i16, 8*10 }, // Scalarized.
{ ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
{ ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence.
{ ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence.
{ ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
{ ISD::SHL, MVT::v2i64, 2*10 }, // Scalarized.
{ ISD::SHL, MVT::v4i64, 4*10 }, // Scalarized.
{ ISD::SRL, MVT::v16i8, 16*10 }, // Scalarized.
{ ISD::SRL, MVT::v8i16, 8*10 }, // Scalarized.
{ ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence.
{ ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence.
{ ISD::SRL, MVT::v4i32, 4*10 }, // Scalarized.
{ ISD::SRL, MVT::v2i64, 2*10 }, // Scalarized.
{ ISD::SRA, MVT::v16i8, 16*10 }, // Scalarized.
{ ISD::SRA, MVT::v8i16, 8*10 }, // Scalarized.
{ ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence.
{ ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence.
{ ISD::SRA, MVT::v4i32, 4*10 }, // Scalarized.
{ ISD::SRA, MVT::v2i64, 2*10 }, // Scalarized.


@@ -29,9 +29,9 @@ entry:
define %shifttype8i16 @shift8i16(%shifttype8i16 %a, %shifttype8i16 %b) {
entry:
; SSE2: shift8i16
; SSE2: cost of 80 {{.*}} ashr
; SSE2: cost of 32 {{.*}} ashr
; SSE2-CODEGEN: shift8i16
; SSE2-CODEGEN: sarw %cl
; SSE2-CODEGEN: psraw
%0 = ashr %shifttype8i16 %a , %b
ret %shifttype8i16 %0
@@ -41,9 +41,9 @@ entry:
define %shifttype16i16 @shift16i16(%shifttype16i16 %a, %shifttype16i16 %b) {
entry:
; SSE2: shift16i16
; SSE2: cost of 160 {{.*}} ashr
; SSE2: cost of 64 {{.*}} ashr
; SSE2-CODEGEN: shift16i16
; SSE2-CODEGEN: sarw %cl
; SSE2-CODEGEN: psraw
%0 = ashr %shifttype16i16 %a , %b
ret %shifttype16i16 %0
@@ -53,9 +53,9 @@ entry:
define %shifttype32i16 @shift32i16(%shifttype32i16 %a, %shifttype32i16 %b) {
entry:
; SSE2: shift32i16
; SSE2: cost of 320 {{.*}} ashr
; SSE2: cost of 128 {{.*}} ashr
; SSE2-CODEGEN: shift32i16
; SSE2-CODEGEN: sarw %cl
; SSE2-CODEGEN: psraw
%0 = ashr %shifttype32i16 %a , %b
ret %shifttype32i16 %0
@@ -209,9 +209,9 @@ entry:
define %shifttype8i8 @shift8i8(%shifttype8i8 %a, %shifttype8i8 %b) {
entry:
; SSE2: shift8i8
; SSE2: cost of 80 {{.*}} ashr
; SSE2: cost of 32 {{.*}} ashr
; SSE2-CODEGEN: shift8i8
; SSE2-CODEGEN: sarw %cl
; SSE2-CODEGEN: psraw
%0 = ashr %shifttype8i8 %a , %b
ret %shifttype8i8 %0
@@ -221,9 +221,9 @@ entry:
define %shifttype16i8 @shift16i8(%shifttype16i8 %a, %shifttype16i8 %b) {
entry:
; SSE2: shift16i8
; SSE2: cost of 160 {{.*}} ashr
; SSE2: cost of 54 {{.*}} ashr
; SSE2-CODEGEN: shift16i8
; SSE2-CODEGEN: sarb %cl
; SSE2-CODEGEN: psraw
%0 = ashr %shifttype16i8 %a , %b
ret %shifttype16i8 %0
@@ -233,9 +233,9 @@ entry:
define %shifttype32i8 @shift32i8(%shifttype32i8 %a, %shifttype32i8 %b) {
entry:
; SSE2: shift32i8
; SSE2: cost of 320 {{.*}} ashr
; SSE2: cost of 108 {{.*}} ashr
; SSE2-CODEGEN: shift32i8
; SSE2-CODEGEN: sarb %cl
; SSE2-CODEGEN: psraw
%0 = ashr %shifttype32i8 %a , %b
ret %shifttype32i8 %0


@@ -29,9 +29,9 @@ entry:
define %shifttype8i16 @shift8i16(%shifttype8i16 %a, %shifttype8i16 %b) {
entry:
; SSE2: shift8i16
; SSE2: cost of 80 {{.*}} lshr
; SSE2: cost of 32 {{.*}} lshr
; SSE2-CODEGEN: shift8i16
; SSE2-CODEGEN: shrl %cl
; SSE2-CODEGEN: psrlw
%0 = lshr %shifttype8i16 %a , %b
ret %shifttype8i16 %0
@@ -41,9 +41,9 @@ entry:
define %shifttype16i16 @shift16i16(%shifttype16i16 %a, %shifttype16i16 %b) {
entry:
; SSE2: shift16i16
; SSE2: cost of 160 {{.*}} lshr
; SSE2: cost of 64 {{.*}} lshr
; SSE2-CODEGEN: shift16i16
; SSE2-CODEGEN: shrl %cl
; SSE2-CODEGEN: psrlw
%0 = lshr %shifttype16i16 %a , %b
ret %shifttype16i16 %0
@@ -53,9 +53,9 @@ entry:
define %shifttype32i16 @shift32i16(%shifttype32i16 %a, %shifttype32i16 %b) {
entry:
; SSE2: shift32i16
; SSE2: cost of 320 {{.*}} lshr
; SSE2: cost of 128 {{.*}} lshr
; SSE2-CODEGEN: shift32i16
; SSE2-CODEGEN: shrl %cl
; SSE2-CODEGEN: psrlw
%0 = lshr %shifttype32i16 %a , %b
ret %shifttype32i16 %0
@@ -209,9 +209,9 @@ entry:
define %shifttype8i8 @shift8i8(%shifttype8i8 %a, %shifttype8i8 %b) {
entry:
; SSE2: shift8i8
; SSE2: cost of 80 {{.*}} lshr
; SSE2: cost of 32 {{.*}} lshr
; SSE2-CODEGEN: shift8i8
; SSE2-CODEGEN: shrl %cl
; SSE2-CODEGEN: psrlw
%0 = lshr %shifttype8i8 %a , %b
ret %shifttype8i8 %0
@@ -221,9 +221,9 @@ entry:
define %shifttype16i8 @shift16i8(%shifttype16i8 %a, %shifttype16i8 %b) {
entry:
; SSE2: shift16i8
; SSE2: cost of 160 {{.*}} lshr
; SSE2: cost of 26 {{.*}} lshr
; SSE2-CODEGEN: shift16i8
; SSE2-CODEGEN: shrb %cl
; SSE2-CODEGEN: psrlw
%0 = lshr %shifttype16i8 %a , %b
ret %shifttype16i8 %0
@@ -233,9 +233,9 @@ entry:
define %shifttype32i8 @shift32i8(%shifttype32i8 %a, %shifttype32i8 %b) {
entry:
; SSE2: shift32i8
; SSE2: cost of 320 {{.*}} lshr
; SSE2: cost of 52 {{.*}} lshr
; SSE2-CODEGEN: shift32i8
; SSE2-CODEGEN: shrb %cl
; SSE2-CODEGEN: psrlw
%0 = lshr %shifttype32i8 %a , %b
ret %shifttype32i8 %0


@@ -29,9 +29,9 @@ entry:
define %shifttype8i16 @shift8i16(%shifttype8i16 %a, %shifttype8i16 %b) {
entry:
; SSE2: shift8i16
; SSE2: cost of 80 {{.*}} shl
; SSE2: cost of 32 {{.*}} shl
; SSE2-CODEGEN: shift8i16
; SSE2-CODEGEN: shll %cl
; SSE2-CODEGEN: psllw
%0 = shl %shifttype8i16 %a , %b
ret %shifttype8i16 %0
@@ -41,9 +41,9 @@ entry:
define %shifttype16i16 @shift16i16(%shifttype16i16 %a, %shifttype16i16 %b) {
entry:
; SSE2: shift16i16
; SSE2: cost of 160 {{.*}} shl
; SSE2: cost of 64 {{.*}} shl
; SSE2-CODEGEN: shift16i16
; SSE2-CODEGEN: shll %cl
; SSE2-CODEGEN: psllw
%0 = shl %shifttype16i16 %a , %b
ret %shifttype16i16 %0
@@ -53,9 +53,9 @@ entry:
define %shifttype32i16 @shift32i16(%shifttype32i16 %a, %shifttype32i16 %b) {
entry:
; SSE2: shift32i16
; SSE2: cost of 320 {{.*}} shl
; SSE2: cost of 128 {{.*}} shl
; SSE2-CODEGEN: shift32i16
; SSE2-CODEGEN: shll %cl
; SSE2-CODEGEN: psllw
%0 = shl %shifttype32i16 %a , %b
ret %shifttype32i16 %0
@@ -209,9 +209,9 @@ entry:
define %shifttype8i8 @shift8i8(%shifttype8i8 %a, %shifttype8i8 %b) {
entry:
; SSE2: shift8i8
; SSE2: cost of 80 {{.*}} shl
; SSE2: cost of 32 {{.*}} shl
; SSE2-CODEGEN: shift8i8
; SSE2-CODEGEN: shll
; SSE2-CODEGEN: psllw
%0 = shl %shifttype8i8 %a , %b
ret %shifttype8i8 %0
@@ -221,9 +221,9 @@ entry:
define %shifttype16i8 @shift16i8(%shifttype16i8 %a, %shifttype16i8 %b) {
entry:
; SSE2: shift16i8
; SSE2: cost of 30 {{.*}} shl
; SSE2: cost of 26 {{.*}} shl
; SSE2-CODEGEN: shift16i8
; SSE2-CODEGEN: cmpeqb
; SSE2-CODEGEN: psllw
%0 = shl %shifttype16i8 %a , %b
ret %shifttype16i8 %0
@@ -233,9 +233,9 @@ entry:
define %shifttype32i8 @shift32i8(%shifttype32i8 %a, %shifttype32i8 %b) {
entry:
; SSE2: shift32i8
; SSE2: cost of 60 {{.*}} shl
; SSE2: cost of 52 {{.*}} shl
; SSE2-CODEGEN: shift32i8
; SSE2-CODEGEN: cmpeqb
; SSE2-CODEGEN: psllw
%0 = shl %shifttype32i8 %a , %b
ret %shifttype32i8 %0


@@ -12,8 +12,8 @@ define <16 x i8> @shift(<16 x i8> %a, <16 x i8> %b) nounwind {
; Make sure we're masking and pcmp'ing the VSELECT condition vector.
; CHECK-WO-SSE4: psllw $5, [[REG1:%xmm.]]
; CHECK-WO-SSE4: pand [[REG1]], [[REG2:%xmm.]]
; CHECK-WO-SSE4: pcmpeqb {{%xmm., }}[[REG2]]
; CHECK-WO-SSE4: pxor [[REG2:%xmm.]], [[REG2:%xmm.]]
; CHECK-WO-SSE4: pcmpgtb {{%xmm., }}[[REG2]]
%1 = shl <16 x i8> %a, %b
ret <16 x i8> %1
}


@@ -302,49 +302,17 @@ define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; CHECK-LABEL: shl_32i8
; CHECK: vextracti128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpsllw $4, %xmm3, %xmm2
; CHECK-NEXT: vmovdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; CHECK-NEXT: vpand %xmm8, %xmm2, %xmm5
; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpsllw $5, %xmm2, %xmm2
; CHECK-NEXT: vmovdqa {{.*#+}} xmm9 = [224,224,224,224,224,224,224,224,224,224,224,224,224,224,224,224]
; CHECK-NEXT: vpand %xmm9, %xmm2, %xmm7
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; CHECK-NEXT: vpand %xmm7, %xmm2, %xmm4
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm4, %xmm4
; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
; CHECK-NEXT: vpsllw $2, %xmm3, %xmm4
; CHECK-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; CHECK-NEXT: vpand %xmm5, %xmm4, %xmm4
; CHECK-NEXT: vpaddb %xmm7, %xmm7, %xmm7
; CHECK-NEXT: vpand %xmm7, %xmm2, %xmm6
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm6, %xmm6
; CHECK-NEXT: vpblendvb %xmm6, %xmm4, %xmm3, %xmm3
; CHECK-NEXT: vpaddb %xmm3, %xmm3, %xmm4
; CHECK-NEXT: vpaddb %xmm7, %xmm7, %xmm6
; CHECK-NEXT: vpand %xmm6, %xmm2, %xmm6
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm6, %xmm6
; CHECK-NEXT: vpblendvb %xmm6, %xmm4, %xmm3, %xmm3
; CHECK-NEXT: vpsllw $4, %xmm0, %xmm4
; CHECK-NEXT: vpand %xmm8, %xmm4, %xmm4
; CHECK-NEXT: vpsllw $5, %xmm1, %xmm1
; CHECK-NEXT: vpand %xmm9, %xmm1, %xmm1
; CHECK-NEXT: vpand %xmm1, %xmm2, %xmm6
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm6, %xmm6
; CHECK-NEXT: vpblendvb %xmm6, %xmm4, %xmm0, %xmm0
; CHECK-NEXT: vpsllw $2, %xmm0, %xmm4
; CHECK-NEXT: vpand %xmm5, %xmm4, %xmm4
; CHECK-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpand %xmm1, %xmm2, %xmm5
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm5, %xmm5
; CHECK-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm0, %xmm0, %xmm4
; CHECK-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpand %xmm1, %xmm2, %xmm1
; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; CHECK-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
; CHECK: vpsllw $5, %ymm1, %ymm1
; CHECK-NEXT: vpsllw $4, %ymm0, %ymm2
; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; CHECK-NEXT: vpsllw $2, %ymm0, %ymm2
; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; CHECK-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; CHECK-NEXT: vpaddb %ymm0, %ymm0, %ymm2
; CHECK-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; CHECK-NEXT: retq
%shl = shl <32 x i8> %r, %a
ret <32 x i8> %shl
@@ -381,169 +349,30 @@ define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; CHECK-LABEL: ashr_32i8
; CHECK: vextracti128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpextrb $1, %xmm2, %ecx
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpextrb $1, %xmm3, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: vpextrb $0, %xmm2, %ecx
; CHECK-NEXT: vpextrb $0, %xmm3, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: movzbl %dl, %edx
; CHECK-NEXT: vpextrb $2, %xmm2, %ecx
; CHECK-NEXT: vpextrb $2, %xmm3, %esi
; CHECK-NEXT: sarb %cl, %sil
; CHECK-NEXT: vmovd %edx, %xmm4
; CHECK-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4
; CHECK-NEXT: movzbl %sil, %eax
; CHECK-NEXT: vpextrb $3, %xmm2, %ecx
; CHECK-NEXT: vpextrb $3, %xmm3, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $4, %xmm2, %ecx
; CHECK-NEXT: vpextrb $4, %xmm3, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $5, %xmm2, %ecx
; CHECK-NEXT: vpextrb $5, %xmm3, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: vpextrb $6, %xmm2, %ecx
; CHECK-NEXT: vpextrb $6, %xmm3, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpextrb $7, %xmm2, %ecx
; CHECK-NEXT: vpextrb $7, %xmm3, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $8, %xmm2, %ecx
; CHECK-NEXT: vpextrb $8, %xmm3, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $9, %xmm2, %ecx
; CHECK-NEXT: vpextrb $9, %xmm3, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: vpextrb $10, %xmm2, %ecx
; CHECK-NEXT: vpextrb $10, %xmm3, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpextrb $11, %xmm2, %ecx
; CHECK-NEXT: vpextrb $11, %xmm3, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $12, %xmm2, %ecx
; CHECK-NEXT: vpextrb $12, %xmm3, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $13, %xmm2, %ecx
; CHECK-NEXT: vpextrb $13, %xmm3, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: vpextrb $14, %xmm2, %ecx
; CHECK-NEXT: vpextrb $14, %xmm3, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $15, %xmm2, %ecx
; CHECK-NEXT: vpextrb $15, %xmm3, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: vpextrb $1, %xmm1, %ecx
; CHECK-NEXT: vpextrb $1, %xmm0, %esi
; CHECK-NEXT: sarb %cl, %sil
; CHECK-NEXT: movzbl %dl, %ecx
; CHECK-NEXT: vpinsrb $14, %ecx, %xmm4, %xmm2
; CHECK-NEXT: vpextrb $0, %xmm1, %ecx
; CHECK-NEXT: vpextrb $0, %xmm0, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: vpextrb $2, %xmm1, %ecx
; CHECK-NEXT: vpextrb $2, %xmm0, %edi
; CHECK-NEXT: sarb %cl, %dil
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
; CHECK-NEXT: movzbl %sil, %eax
; CHECK-NEXT: movzbl %dl, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm3
; CHECK-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: vpextrb $3, %xmm1, %ecx
; CHECK-NEXT: vpextrb $3, %xmm0, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
; CHECK-NEXT: vpextrb $4, %xmm1, %ecx
; CHECK-NEXT: vpextrb $4, %xmm0, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
; CHECK-NEXT: vpextrb $5, %xmm1, %ecx
; CHECK-NEXT: vpextrb $5, %xmm0, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: vpextrb $6, %xmm1, %ecx
; CHECK-NEXT: vpextrb $6, %xmm0, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpextrb $7, %xmm1, %ecx
; CHECK-NEXT: vpextrb $7, %xmm0, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
; CHECK-NEXT: vpextrb $8, %xmm1, %ecx
; CHECK-NEXT: vpextrb $8, %xmm0, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
; CHECK-NEXT: vpextrb $9, %xmm1, %ecx
; CHECK-NEXT: vpextrb $9, %xmm0, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: vpextrb $10, %xmm1, %ecx
; CHECK-NEXT: vpextrb $10, %xmm0, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpextrb $11, %xmm1, %ecx
; CHECK-NEXT: vpextrb $11, %xmm0, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
; CHECK-NEXT: vpextrb $12, %xmm1, %ecx
; CHECK-NEXT: vpextrb $12, %xmm0, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
; CHECK-NEXT: vpextrb $13, %xmm1, %ecx
; CHECK-NEXT: vpextrb $13, %xmm0, %eax
; CHECK-NEXT: sarb %cl, %al
; CHECK-NEXT: vpextrb $14, %xmm1, %ecx
; CHECK-NEXT: vpextrb $14, %xmm0, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpextrb $15, %xmm1, %ecx
; CHECK-NEXT: vpextrb $15, %xmm0, %edx
; CHECK-NEXT: sarb %cl, %dl
; CHECK-NEXT: vpinsrb $14, %eax, %xmm3, %xmm0
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
; CHECK-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; CHECK: vpsllw $5, %ymm1, %ymm1
; CHECK-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; CHECK-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-NEXT: vpsraw $4, %ymm3, %ymm4
; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; CHECK-NEXT: vpsraw $2, %ymm3, %ymm4
; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; CHECK-NEXT: vpsraw $1, %ymm3, %ymm4
; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; CHECK-NEXT: vpsrlw $8, %ymm2, %ymm2
; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-NEXT: vpsraw $4, %ymm0, %ymm3
; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; CHECK-NEXT: vpsraw $2, %ymm0, %ymm3
; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; CHECK-NEXT: vpsraw $1, %ymm0, %ymm3
; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; CHECK-NEXT: vpsrlw $8, %ymm0, %ymm0
; CHECK-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; CHECK-NEXT: retq
%ashr = ashr <32 x i8> %r, %a
ret <32 x i8> %ashr
@@ -580,169 +409,18 @@ define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
define <32 x i8> @lshr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; CHECK-LABEL: lshr_32i8
; CHECK: vextracti128 $1, %ymm1, %xmm2
; CHECK-NEXT: vpextrb $1, %xmm2, %ecx
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
; CHECK-NEXT: vpextrb $1, %xmm3, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: vpextrb $0, %xmm2, %ecx
; CHECK-NEXT: vpextrb $0, %xmm3, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: movzbl %dl, %edx
; CHECK-NEXT: vpextrb $2, %xmm2, %ecx
; CHECK-NEXT: vpextrb $2, %xmm3, %esi
; CHECK-NEXT: shrb %cl, %sil
; CHECK-NEXT: vmovd %edx, %xmm4
; CHECK-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4
; CHECK-NEXT: movzbl %sil, %eax
; CHECK-NEXT: vpextrb $3, %xmm2, %ecx
; CHECK-NEXT: vpextrb $3, %xmm3, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $4, %xmm2, %ecx
; CHECK-NEXT: vpextrb $4, %xmm3, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $5, %xmm2, %ecx
; CHECK-NEXT: vpextrb $5, %xmm3, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: vpextrb $6, %xmm2, %ecx
; CHECK-NEXT: vpextrb $6, %xmm3, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpextrb $7, %xmm2, %ecx
; CHECK-NEXT: vpextrb $7, %xmm3, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $8, %xmm2, %ecx
; CHECK-NEXT: vpextrb $8, %xmm3, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $9, %xmm2, %ecx
; CHECK-NEXT: vpextrb $9, %xmm3, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: vpextrb $10, %xmm2, %ecx
; CHECK-NEXT: vpextrb $10, %xmm3, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpextrb $11, %xmm2, %ecx
; CHECK-NEXT: vpextrb $11, %xmm3, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $12, %xmm2, %ecx
; CHECK-NEXT: vpextrb $12, %xmm3, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $13, %xmm2, %ecx
; CHECK-NEXT: vpextrb $13, %xmm3, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: vpextrb $14, %xmm2, %ecx
; CHECK-NEXT: vpextrb $14, %xmm3, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
; CHECK-NEXT: vpextrb $15, %xmm2, %ecx
; CHECK-NEXT: vpextrb $15, %xmm3, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: vpextrb $1, %xmm1, %ecx
; CHECK-NEXT: vpextrb $1, %xmm0, %esi
; CHECK-NEXT: shrb %cl, %sil
; CHECK-NEXT: movzbl %dl, %ecx
; CHECK-NEXT: vpinsrb $14, %ecx, %xmm4, %xmm2
; CHECK-NEXT: vpextrb $0, %xmm1, %ecx
; CHECK-NEXT: vpextrb $0, %xmm0, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: vpextrb $2, %xmm1, %ecx
; CHECK-NEXT: vpextrb $2, %xmm0, %edi
; CHECK-NEXT: shrb %cl, %dil
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
; CHECK-NEXT: movzbl %sil, %eax
; CHECK-NEXT: movzbl %dl, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm3
; CHECK-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: vpextrb $3, %xmm1, %ecx
; CHECK-NEXT: vpextrb $3, %xmm0, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
; CHECK-NEXT: vpextrb $4, %xmm1, %ecx
; CHECK-NEXT: vpextrb $4, %xmm0, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
; CHECK-NEXT: vpextrb $5, %xmm1, %ecx
; CHECK-NEXT: vpextrb $5, %xmm0, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: vpextrb $6, %xmm1, %ecx
; CHECK-NEXT: vpextrb $6, %xmm0, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpextrb $7, %xmm1, %ecx
; CHECK-NEXT: vpextrb $7, %xmm0, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
; CHECK-NEXT: vpextrb $8, %xmm1, %ecx
; CHECK-NEXT: vpextrb $8, %xmm0, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
; CHECK-NEXT: vpextrb $9, %xmm1, %ecx
; CHECK-NEXT: vpextrb $9, %xmm0, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: vpextrb $10, %xmm1, %ecx
; CHECK-NEXT: vpextrb $10, %xmm0, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpextrb $11, %xmm1, %ecx
; CHECK-NEXT: vpextrb $11, %xmm0, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
; CHECK-NEXT: vpextrb $12, %xmm1, %ecx
; CHECK-NEXT: vpextrb $12, %xmm0, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
; CHECK-NEXT: vpextrb $13, %xmm1, %ecx
; CHECK-NEXT: vpextrb $13, %xmm0, %eax
; CHECK-NEXT: shrb %cl, %al
; CHECK-NEXT: vpextrb $14, %xmm1, %ecx
; CHECK-NEXT: vpextrb $14, %xmm0, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpextrb $15, %xmm1, %ecx
; CHECK-NEXT: vpextrb $15, %xmm0, %edx
; CHECK-NEXT: shrb %cl, %dl
; CHECK-NEXT: vpinsrb $14, %eax, %xmm3, %xmm0
; CHECK-NEXT: movzbl %dl, %eax
; CHECK-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
; CHECK-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; CHECK: vpsllw $5, %ymm1, %ymm1
; CHECK-NEXT: vpsrlw $4, %ymm0, %ymm2
; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; CHECK-NEXT: vpsrlw $2, %ymm0, %ymm2
; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; CHECK-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; CHECK-NEXT: vpsrlw $1, %ymm0, %ymm2
; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; CHECK-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; CHECK-NEXT: retq
%lshr = lshr <32 x i8> %r, %a
ret <32 x i8> %lshr

File diff suppressed because it is too large.