From 53e7e0fcfb08b18df926a90caddb27074ac8be4c Mon Sep 17 00:00:00 2001
From: Eric Christopher
Date: Tue, 20 Apr 2010 00:59:54 +0000
Subject: [PATCH] Remove the palignr intrinsics now that we lower them to
 vector shuffles, shifts and null vectors. Autoupgrade these to what we'd
 lower them to.

Add a testcase to exercise this.

llvm-svn: 101851
---
 include/llvm/IntrinsicsX86.td    |  10 ---
 lib/Target/X86/X86InstrSSE.td    |  20 +-----
 lib/VMCore/AutoUpgrade.cpp       | 120 ++++++++++++++++++++++++++++++-
 test/Bitcode/ssse3_palignr.ll    |   1 +
 test/Bitcode/ssse3_palignr.ll.bc | Bin 0 -> 1280 bytes
 5 files changed, 122 insertions(+), 29 deletions(-)
 create mode 100644 test/Bitcode/ssse3_palignr.ll
 create mode 100644 test/Bitcode/ssse3_palignr.ll.bc

diff --git a/include/llvm/IntrinsicsX86.td b/include/llvm/IntrinsicsX86.td
index a48a1ed2774..3ca9cb44419 100644
--- a/include/llvm/IntrinsicsX86.td
+++ b/include/llvm/IntrinsicsX86.td
@@ -669,16 +669,6 @@ let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
       Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
 }
 
-// Align ops
-let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_ssse3_palign_r :
-        Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty,
-                   llvm_v1i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_ssse3_palign_r_128 :
-        Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
-                   llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
-}
-
 //===----------------------------------------------------------------------===//
 // SSE4.1
 
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index a9bc5fdbd3a..212958025bd 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -2939,14 +2939,8 @@ let Constraints = "$src1 = $dst" in {
                      []>, OpSize;
 }
-def : Pat<(int_x86_ssse3_palign_r VR64:$src1, VR64:$src2, (i8 imm:$src3)),
-          (PALIGNR64rr VR64:$src1, VR64:$src2, (BYTE_imm imm:$src3))>,
-          Requires<[HasSSSE3]>;
-def : Pat<(int_x86_ssse3_palign_r VR64:$src1,
-          (memop64 addr:$src2),
-          (i8 imm:$src3)),
-          (PALIGNR64rm VR64:$src1, addr:$src2, (BYTE_imm imm:$src3))>,
-          Requires<[HasSSSE3]>;
+let AddedComplexity = 5 in {
+
 def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
           (PALIGNR64rr VR64:$src2, VR64:$src1,
                        (SHUFFLE_get_palign_imm VR64:$src3))>,
           Requires<[HasSSSE3]>;
@@ -2968,16 +2962,6 @@ def : Pat<(v8i8 (palign:$src3 VR64:$src1, VR64:$src2)),
                        (SHUFFLE_get_palign_imm VR64:$src3))>,
           Requires<[HasSSSE3]>;
 
-def : Pat<(int_x86_ssse3_palign_r_128 VR128:$src1, VR128:$src2, (i8 imm:$src3)),
-          (PALIGNR128rr VR128:$src1, VR128:$src2, (BYTE_imm imm:$src3))>,
-          Requires<[HasSSSE3]>;
-def : Pat<(int_x86_ssse3_palign_r_128 VR128:$src1,
-          (memopv2i64 addr:$src2),
-          (i8 imm:$src3)),
-          (PALIGNR128rm VR128:$src1, addr:$src2, (BYTE_imm imm:$src3))>,
-          Requires<[HasSSSE3]>;
-
-let AddedComplexity = 5 in {
 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
           (PALIGNR128rr VR128:$src2, VR128:$src1,
                        (SHUFFLE_get_palign_imm VR128:$src3))>,
diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp
index 4d06b666816..0144210767d 100644
--- a/lib/VMCore/AutoUpgrade.cpp
+++ b/lib/VMCore/AutoUpgrade.cpp
@@ -19,6 +19,7 @@
 #include "llvm/IntrinsicInst.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/IRBuilder.h"
 #include <cstring>
 using namespace llvm;
 
@@ -277,8 +278,13 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
       // Calls to these intrinsics are transformed into vector multiplies.
       NewFn = 0;
       return true;
+    } else if (Name.compare(5, 18, "x86.ssse3.palign.r", 18) == 0 ||
+               Name.compare(5, 22, "x86.ssse3.palign.r.128", 22) == 0) {
+      // Calls to these intrinsics are transformed into vector shuffles, shifts,
+      // or 0.
+      NewFn = 0;
+      return true;
     }
-
     break;
   }
 
@@ -420,6 +426,118 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
 
     // Remove upgraded multiply.
     CI->eraseFromParent();
+  } else if (F->getName() == "llvm.x86.ssse3.palign.r") {
+    Value *Op1 = CI->getOperand(1);
+    Value *Op2 = CI->getOperand(2);
+    Value *Op3 = CI->getOperand(3);
+    unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
+    Value *Rep;
+    IRBuilder<> Builder(C);
+    Builder.SetInsertPoint(CI->getParent(), CI);
+
+    // If palignr is shifting the pair of input vectors less than 9 bytes,
+    // emit a shuffle instruction.
+    if (shiftVal <= 8) {
+      const Type *IntTy = Type::getInt32Ty(C);
+      const Type *EltTy = Type::getInt8Ty(C);
+      const Type *VecTy = VectorType::get(EltTy, 8);
+
+      Op2 = Builder.CreateBitCast(Op2, VecTy);
+      Op1 = Builder.CreateBitCast(Op1, VecTy);
+
+      llvm::SmallVector<Constant*, 8> Indices;
+      for (unsigned i = 0; i != 8; ++i)
+        Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
+
+      Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
+      Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
+      Rep = Builder.CreateBitCast(Rep, F->getReturnType());
+    }
+
+    // If palignr is shifting the pair of input vectors more than 8 but less
+    // than 16 bytes, emit a logical right shift of the destination.
+    else if (shiftVal < 16) {
+      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+      const Type *EltTy = Type::getInt64Ty(C);
+      const Type *VecTy = VectorType::get(EltTy, 1);
+
+      Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
+      // The shift amount is in bits; psrl.q takes it as a v1i64 splat.
+      Op2 = ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+      Function *I =
+        Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_mmx_psrl_q);
+      Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
+    }
+
+    // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
+    else {
+      Rep = Constant::getNullValue(F->getReturnType());
+    }
+
+    // Replace any uses with our new instruction.
+    if (!CI->use_empty())
+      CI->replaceAllUsesWith(Rep);
+
+    // Remove upgraded instruction.
+    CI->eraseFromParent();
+
+  } else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
+    Value *Op1 = CI->getOperand(1);
+    Value *Op2 = CI->getOperand(2);
+    Value *Op3 = CI->getOperand(3);
+    unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
+    Value *Rep;
+    IRBuilder<> Builder(C);
+    Builder.SetInsertPoint(CI->getParent(), CI);
+
+    // If palignr is shifting the pair of input vectors less than 17 bytes,
+    // emit a shuffle instruction.
+    if (shiftVal <= 16) {
+      const Type *IntTy = Type::getInt32Ty(C);
+      const Type *EltTy = Type::getInt8Ty(C);
+      const Type *VecTy = VectorType::get(EltTy, 16);
+
+      Op2 = Builder.CreateBitCast(Op2, VecTy);
+      Op1 = Builder.CreateBitCast(Op1, VecTy);
+
+      llvm::SmallVector<Constant*, 16> Indices;
+      for (unsigned i = 0; i != 16; ++i)
+        Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
+
+      Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
+      Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
+      Rep = Builder.CreateBitCast(Rep, F->getReturnType());
+    }
+
+    // If palignr is shifting the pair of input vectors more than 16 but less
+    // than 32 bytes, emit a logical right shift of the destination.
+    else if (shiftVal < 32) {
+      const Type *EltTy = Type::getInt64Ty(C);
+      const Type *VecTy = VectorType::get(EltTy, 2);
+      const Type *IntTy = Type::getInt32Ty(C);
+
+      Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
+      // The shift amount is in bits; psrl.dq takes it as an i32.
+      Op2 = ConstantInt::get(IntTy, (shiftVal-16) * 8);
+
+      Function *I =
+        Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_sse2_psrl_dq);
+      Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
+    }
+
+    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
+    else {
+      Rep = Constant::getNullValue(F->getReturnType());
+    }
+
+    // Replace any uses with our new instruction.
+    if (!CI->use_empty())
+      CI->replaceAllUsesWith(Rep);
+
+    // Remove upgraded instruction.
+    CI->eraseFromParent();
+
   } else {
     llvm_unreachable("Unknown function for CallInst upgrade.");
   }
diff --git a/test/Bitcode/ssse3_palignr.ll b/test/Bitcode/ssse3_palignr.ll
new file mode 100644
index 00000000000..d596dd5eb36
--- /dev/null
+++ b/test/Bitcode/ssse3_palignr.ll
@@ -0,0 +1 @@
+; RUN: llvm-dis < %s.bc | not grep {@llvm\\.palign}
diff --git a/test/Bitcode/ssse3_palignr.ll.bc b/test/Bitcode/ssse3_palignr.ll.bc
new file mode 100644
index 0000000000000000000000000000000000000000..642f4dedc4177e6ccc2e2a05bbd9a1a9fc516b84
GIT binary patch
literal 1280
zcmb`EUr1A76u{5j?(TQJ8}7QToFI1R{-9trMvRFFb8fmRg_aM|(^ayEVC7~kSy6X7
zQpjkn-g;RS;aeh<5Ix-R4<-on$)FHJ3j!aEUIYU>-)5NEi|FC|bN>Fm!*`a|ay_6}
zaIdof1b_)NtIIY6fR8~#1TbNYr0kEH=GX`)dg`-G+Thgj6;o&U1hV?T3(;fo
z@z#jo2|B_;#%7;BuNW$9aR-?Tm-US0#{9Dzh=Yg~K!>+Jy^V50wO*2bQ4LxEIwb%z
z#yr*G5~+7E4X#jFSK8FHZ&(lBgN>O2d7b;ag#Hb7aWZr}ceQO<(ie
zMvf%>Bq@^{1tpp(jpfJyt~~c$A<4p`PMIV{lE@KM(Cn8;(oNzyf(n{@2=d;j{7=-l
zH`y4I=ySVgp8EnMi3?lGb3cnpu{XJN08Np^m7O!XQc7#(oo0~SoYjGXA0UV0060W2
zL`)LkgcaKd2K?x7JL&=Or{JE0P8~qaTxLK*hc#Q{FGO}B6Gcdx{;3hSnQf}0R4jww
zycQa*SZtWQEeJ_avEcq6i;LtL
zQ4arbYSiXm2wk}#5E|!j|IKoxg4110dsK^lD&5k5m_qvKnQend&lC>N+&2?}Kk!WU
zG1}O}GlYXNVmQYNoM&SEWc$o!`Kzt0qik{;^gUKtz04DwUZ`1i1M_y|7nWWuHX{|`3;2_Cfoo3

literal 0
HcmV?d00001
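
For reference, here is roughly what the upgrade above produces for the
128-bit intrinsic. The module below is a hypothetical illustration: the
function @align and all value names are invented for this example, and the
exact value numbering IRBuilder emits may differ. A byte shift of 4 takes
the shuffle path, since shiftVal <= 16:

  define <2 x i64> @align(<2 x i64> %a, <2 x i64> %b) nounwind {
  entry:
    %r = call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %a,
                                                     <2 x i64> %b, i8 4)
    ret <2 x i64> %r
  }

  declare <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64>, <2 x i64>, i8)

After UpgradeIntrinsicCall runs, the call is replaced by a byte shuffle
whose indices start at the shift amount, so byte 0 of the result is byte 4
of %b and the top four bytes come from %a, matching palignr semantics:

  entry:
    %0 = bitcast <2 x i64> %b to <16 x i8>
    %1 = bitcast <2 x i64> %a to <16 x i8>
    %palignr = shufflevector <16 x i8> %0, <16 x i8> %1,
               <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10,
                           i32 11, i32 12, i32 13, i32 14, i32 15, i32 16,
                           i32 17, i32 18, i32 19>
    %2 = bitcast <16 x i8> %palignr to <2 x i64>
    ret <2 x i64> %2

A shift of 20 bytes would instead become a call to @llvm.x86.sse2.psrl.dq
with a bit count of (20-16)*8 = 32, and a shift of 32 bytes or more folds
to a zero vector, mirroring the shuffle/shift/null lowering this patch
relies on.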