diff --git a/include/llvm/IntrinsicsX86.td b/include/llvm/IntrinsicsX86.td
index a48a1ed2774..3ca9cb44419 100644
--- a/include/llvm/IntrinsicsX86.td
+++ b/include/llvm/IntrinsicsX86.td
@@ -669,16 +669,6 @@ let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
               Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
 }
 
-// Align ops
-let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_ssse3_palign_r :
-        Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty,
-                   llvm_v1i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_ssse3_palign_r_128 :
-        Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
-                   llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
-}
-
 //===----------------------------------------------------------------------===//
 // SSE4.1
 
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index a9bc5fdbd3a..212958025bd 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -2939,14 +2939,8 @@ let Constraints = "$src1 = $dst" in {
                    []>, OpSize;
 }
 
-def : Pat<(int_x86_ssse3_palign_r VR64:$src1, VR64:$src2, (i8 imm:$src3)),
-          (PALIGNR64rr VR64:$src1, VR64:$src2, (BYTE_imm imm:$src3))>,
-          Requires<[HasSSSE3]>;
-def : Pat<(int_x86_ssse3_palign_r VR64:$src1,
-                                  (memop64 addr:$src2),
-                                  (i8 imm:$src3)),
-          (PALIGNR64rm VR64:$src1, addr:$src2, (BYTE_imm imm:$src3))>,
-          Requires<[HasSSSE3]>;
+let AddedComplexity = 5 in {
+
 def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
           (PALIGNR64rr VR64:$src2, VR64:$src1,
                        (SHUFFLE_get_palign_imm VR64:$src3))>,
@@ -2968,16 +2962,6 @@ def : Pat<(v8i8 (palign:$src3 VR64:$src1, VR64:$src2)),
                        (SHUFFLE_get_palign_imm VR64:$src3))>,
           Requires<[HasSSSE3]>;
 
-def : Pat<(int_x86_ssse3_palign_r_128 VR128:$src1, VR128:$src2, (i8 imm:$src3)),
-          (PALIGNR128rr VR128:$src1, VR128:$src2, (BYTE_imm imm:$src3))>,
-          Requires<[HasSSSE3]>;
-def : Pat<(int_x86_ssse3_palign_r_128 VR128:$src1,
-                                      (memopv2i64 addr:$src2),
-                                      (i8 imm:$src3)),
-          (PALIGNR128rm VR128:$src1, addr:$src2, (BYTE_imm imm:$src3))>,
-          Requires<[HasSSSE3]>;
-
-let AddedComplexity = 5 in {
 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
           (PALIGNR128rr VR128:$src2, VR128:$src1,
                        (SHUFFLE_get_palign_imm VR128:$src3))>,
diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp
index 4d06b666816..0144210767d 100644
--- a/lib/VMCore/AutoUpgrade.cpp
+++ b/lib/VMCore/AutoUpgrade.cpp
@@ -19,6 +19,7 @@
 #include "llvm/IntrinsicInst.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/IRBuilder.h"
 #include <cstring>
 using namespace llvm;
 
@@ -277,8 +278,13 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
       // Calls to these intrinsics are transformed into vector multiplies.
       NewFn = 0;
       return true;
+    } else if (Name.compare(5, 18, "x86.ssse3.palign.r", 18) == 0 ||
+               Name.compare(5, 22, "x86.ssse3.palign.r.128", 22) == 0) {
+      // Calls to these intrinsics are transformed into vector shuffles, shifts,
+      // or 0.
+      NewFn = 0;
+      return true;
     }
-
     break;
   }
 
@@ -420,6 +426,118 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
 
     // Remove upgraded multiply.
    CI->eraseFromParent();
+  } else if (F->getName() == "llvm.x86.ssse3.palign.r") {
+    Value *Op1 = CI->getOperand(1);
+    Value *Op2 = CI->getOperand(2);
+    Value *Op3 = CI->getOperand(3);
+    unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
+    Value *Rep;
+    IRBuilder<> Builder(C);
+    Builder.SetInsertPoint(CI->getParent(), CI);
+
+    // If palignr is shifting the pair of input vectors less than 9 bytes,
+    // emit a shuffle instruction.
+    if (shiftVal <= 8) {
+      const Type *IntTy = Type::getInt32Ty(C);
+      const Type *EltTy = Type::getInt8Ty(C);
+      const Type *VecTy = VectorType::get(EltTy, 8);
+
+      Op2 = Builder.CreateBitCast(Op2, VecTy);
+      Op1 = Builder.CreateBitCast(Op1, VecTy);
+
+      llvm::SmallVector<Constant*, 8> Indices;
+      for (unsigned i = 0; i != 8; ++i)
+        Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
+
+      Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
+      Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
+      Rep = Builder.CreateBitCast(Rep, F->getReturnType());
+    }
+
+    // If palignr is shifting the pair of input vectors more than 8 but less
+    // than 16 bytes, emit a logical right shift of the destination.
+    else if (shiftVal < 16) {
+      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+      const Type *EltTy = Type::getInt64Ty(C);
+      const Type *VecTy = VectorType::get(EltTy, 1);
+
+      Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
+      Op2 = ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+      // Shift right by (shiftVal-8)*8 bits via the mmx psrl.q intrinsic.
+      Function *I =
+        Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_mmx_psrl_q);
+      Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
+    }
+
+    // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
+    else {
+      Rep = Constant::getNullValue(F->getReturnType());
+    }
+
+    // Replace any uses with our new instruction.
+    if (!CI->use_empty())
+      CI->replaceAllUsesWith(Rep);
+
+    // Remove upgraded instruction.
+    CI->eraseFromParent();
+
+  } else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
+    Value *Op1 = CI->getOperand(1);
+    Value *Op2 = CI->getOperand(2);
+    Value *Op3 = CI->getOperand(3);
+    unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
+    Value *Rep;
+    IRBuilder<> Builder(C);
+    Builder.SetInsertPoint(CI->getParent(), CI);
+
+    // If palignr is shifting the pair of input vectors less than 17 bytes,
+    // emit a shuffle instruction.
+    if (shiftVal <= 16) {
+      const Type *IntTy = Type::getInt32Ty(C);
+      const Type *EltTy = Type::getInt8Ty(C);
+      const Type *VecTy = VectorType::get(EltTy, 16);
+
+      Op2 = Builder.CreateBitCast(Op2, VecTy);
+      Op1 = Builder.CreateBitCast(Op1, VecTy);
+
+      llvm::SmallVector<Constant*, 16> Indices;
+      for (unsigned i = 0; i != 16; ++i)
+        Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
+
+      Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
+      Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
+      Rep = Builder.CreateBitCast(Rep, F->getReturnType());
+    }
+
+    // If palignr is shifting the pair of input vectors more than 16 but less
+    // than 32 bytes, emit a logical right shift of the destination.
+    else if (shiftVal < 32) {
+      const Type *EltTy = Type::getInt64Ty(C);
+      const Type *VecTy = VectorType::get(EltTy, 2);
+      const Type *IntTy = Type::getInt32Ty(C);
+
+      Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
+      Op2 = ConstantInt::get(IntTy, (shiftVal-16) * 8);
+
+      // Shift right by (shiftVal-16)*8 bits via the sse2 psrl.dq intrinsic.
+      Function *I =
+        Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_sse2_psrl_dq);
+      Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
+    }
+
+    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
+    else {
+      Rep = Constant::getNullValue(F->getReturnType());
+    }
+
+    // Replace any uses with our new instruction.
+    if (!CI->use_empty())
+      CI->replaceAllUsesWith(Rep);
+
+    // Remove upgraded instruction.
+    CI->eraseFromParent();
+
   } else {
     llvm_unreachable("Unknown function for CallInst upgrade.");
   }
diff --git a/test/Bitcode/ssse3_palignr.ll b/test/Bitcode/ssse3_palignr.ll
new file mode 100644
index 00000000000..d596dd5eb36
--- /dev/null
+++ b/test/Bitcode/ssse3_palignr.ll
@@ -0,0 +1 @@
+; RUN: llvm-dis < %s.bc | not grep {@llvm\\.palign}
diff --git a/test/Bitcode/ssse3_palignr.ll.bc b/test/Bitcode/ssse3_palignr.ll.bc
new file mode 100644
index 00000000000..642f4dedc41
Binary files /dev/null and b/test/Bitcode/ssse3_palignr.ll.bc differ
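
For reference, a rough sketch (not part of the patch or its test) of what the AutoUpgrade path above produces for the 128-bit intrinsic when the shift falls in the shuffle branch (shiftVal <= 16). The value names %a, %b, %res and the 4-byte shift are illustrative only:

    ; Old bitcode: a call to the removed intrinsic.
    %res = call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %a, <2 x i64> %b, i8 4)

    ; After upgrade (approximately): both operands are bitcast to <16 x i8>,
    ; shuffled starting at element 4 (the byte shift), and the result is
    ; bitcast back to the intrinsic's return type; uses of %res are replaced
    ; with %res.upgraded and the call is erased.
    %b.i8 = bitcast <2 x i64> %b to <16 x i8>
    %a.i8 = bitcast <2 x i64> %a to <16 x i8>
    %palignr = shufflevector <16 x i8> %b.i8, <16 x i8> %a.i8,
               <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11,
                           i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
    %res.upgraded = bitcast <16 x i8> %palignr to <2 x i64>

Shifts of 17 to 31 bytes instead go through llvm.x86.sse2.psrl.dq, and anything larger folds to zero, matching the three branches in UpgradeIntrinsicCall.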