While reviewing the Clang changes that add builtin support for the vsld, vsrd, and vsrad instructions, it was pointed out that the builtins generate the LLVM opcodes (shl, lshr, and ashr) rather than calls to the intrinsics. This patch changes the implementation of the vsld, vsrd, and vsrad instructions from intrinsics to VXForm_1 instructions and makes them legal with P8 Altivec. It also removes the definitions of the int_ppc_altivec_vsld, int_ppc_altivec_vsrd, and int_ppc_altivec_vsrad intrinsics.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@231378 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in: parent 684d323b9b, commit b98636a0f8
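For context (editor's sketch, not part of the commit): with this change the builtins produce ordinary IR shift operators on <2 x i64>, and on a P8 Altivec subtarget the backend is expected to select vsld, vsrd, and vsrad directly from those operators via the new VXForm_1 patterns below. A minimal LLVM IR illustration, with a hypothetical function name:

; Minimal sketch: the generic shift form the builtins now emit.
define <2 x i64> @shift_left_example(<2 x i64> %x, <2 x i64> %y) {
  ; On a POWER8 (P8 Altivec) target this shl is expected to select to vsld;
  ; lshr and ashr play the same role for vsrd and vsrad.
  %r = shl <2 x i64> %x, %y
  ret <2 x i64> %r
}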
@@ -517,7 +517,6 @@ def int_ppc_altivec_vslo : PowerPC_Vec_WWW_Intrinsic<"vslo">;
 def int_ppc_altivec_vslb : PowerPC_Vec_BBB_Intrinsic<"vslb">;
 def int_ppc_altivec_vslh : PowerPC_Vec_HHH_Intrinsic<"vslh">;
 def int_ppc_altivec_vslw : PowerPC_Vec_WWW_Intrinsic<"vslw">;
-def int_ppc_altivec_vsld : PowerPC_Vec_DDD_Intrinsic<"vsld">;
 
 // Right Shifts.
 def int_ppc_altivec_vsr : PowerPC_Vec_WWW_Intrinsic<"vsr">;
@@ -526,11 +525,9 @@ def int_ppc_altivec_vsro : PowerPC_Vec_WWW_Intrinsic<"vsro">;
 def int_ppc_altivec_vsrb : PowerPC_Vec_BBB_Intrinsic<"vsrb">;
 def int_ppc_altivec_vsrh : PowerPC_Vec_HHH_Intrinsic<"vsrh">;
 def int_ppc_altivec_vsrw : PowerPC_Vec_WWW_Intrinsic<"vsrw">;
-def int_ppc_altivec_vsrd : PowerPC_Vec_DDD_Intrinsic<"vsrd">;
 def int_ppc_altivec_vsrab : PowerPC_Vec_BBB_Intrinsic<"vsrab">;
 def int_ppc_altivec_vsrah : PowerPC_Vec_HHH_Intrinsic<"vsrah">;
 def int_ppc_altivec_vsraw : PowerPC_Vec_WWW_Intrinsic<"vsraw">;
-def int_ppc_altivec_vsrad : PowerPC_Vec_DDD_Intrinsic<"vsrad">;
 
 // Rotates.
 def int_ppc_altivec_vrlb : PowerPC_Vec_BBB_Intrinsic<"vrlb">;
@@ -574,14 +574,18 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
 
-      setOperationAction(ISD::SHL, MVT::v2i64, Expand);
-      setOperationAction(ISD::SRA, MVT::v2i64, Expand);
-      setOperationAction(ISD::SRL, MVT::v2i64, Expand);
+      if (Subtarget.hasP8Altivec()) {
+        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
+        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
+        setOperationAction(ISD::SRL, MVT::v2i64, Legal);
+
+        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
+      }
+      else {
+        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
+        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
+        setOperationAction(ISD::SRL, MVT::v2i64, Expand);
+
+        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
+      }
 
       // VSX v2i64 only supports non-arithmetic operations.
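Aside (editor's sketch, not from the commit): Legal keeps the v2i64 shift as a single node through instruction selection, while Expand asks the legalizer to break the operation up, which for a vector shift typically means element-wise unrolling into scalar i64 shifts. Roughly, on a pre-P8 subtarget the Expand path reduces a v2i64 shift to something like the IR below; the exact output depends on the legalizer and target:

; Rough, hand-written equivalent of unrolling a <2 x i64> shift into two
; scalar i64 shifts (the function name is illustrative only).
define <2 x i64> @shl_v2i64_unrolled(<2 x i64> %x, <2 x i64> %y) {
  %x0 = extractelement <2 x i64> %x, i32 0
  %y0 = extractelement <2 x i64> %y, i32 0
  %s0 = shl i64 %x0, %y0          ; lane 0 as a scalar shift
  %x1 = extractelement <2 x i64> %x, i32 1
  %y1 = extractelement <2 x i64> %y, i32 1
  %s1 = shl i64 %x1, %y1          ; lane 1 as a scalar shift
  %v0 = insertelement <2 x i64> undef, i64 %s0, i32 0
  %v1 = insertelement <2 x i64> %v0, i64 %s1, i32 1
  ret <2 x i64> %v1
}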
@@ -969,11 +969,17 @@ def VMINSD : VX1_Int_Ty<962, "vminsd", int_ppc_altivec_vminsd, v2i64>;
 def VMIDUD : VX1_Int_Ty<706, "vminud", int_ppc_altivec_vminud, v2i64>;
 } // isCommutable
 
 // Vector shifts
 def VRLD : VX1_Int_Ty<196, "vrld", int_ppc_altivec_vrld, v2i64>;
-def VSLD : VX1_Int_Ty<1476, "vsld", int_ppc_altivec_vsld, v2i64>;
-def VSRD : VX1_Int_Ty<1732, "vsrd", int_ppc_altivec_vsrd, v2i64>;
-def VSRAD : VX1_Int_Ty<964, "vsrad", int_ppc_altivec_vsrad, v2i64>;
+def VSLD : VXForm_1<1476, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+                    "vsld $vD, $vA, $vB", IIC_VecGeneral,
+                    [(set v2i64:$vD, (shl v2i64:$vA, v2i64:$vB))]>;
+def VSRD : VXForm_1<1732, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+                    "vsrd $vD, $vA, $vB", IIC_VecGeneral,
+                    [(set v2i64:$vD, (srl v2i64:$vA, v2i64:$vB))]>;
+def VSRAD : VXForm_1<964, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+                    "vsrad $vD, $vA, $vB", IIC_VecGeneral,
+                    [(set v2i64:$vD, (sra v2i64:$vA, v2i64:$vB))]>;
 
 // Vector Integer Arithmetic Instructions
 let isCommutable = 1 in {
@@ -14,20 +14,23 @@ define <2 x i64> @test_vrld(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
 }
 
 define <2 x i64> @test_vsld(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
-  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vsld(<2 x i64> %x, <2 x i64> %y)
+  %tmp = shl <2 x i64> %x, %y
   ret <2 x i64> %tmp
 ; CHECK-LABEL: @test_vsld
 ; CHECK: vsld 2, 2, 3
 }
 
 define <2 x i64> @test_vsrd(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
-  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vsrd(<2 x i64> %x, <2 x i64> %y)
-  ret <2 x i64> %tmp
+  %tmp = lshr <2 x i64> %x, %y
+  ret <2 x i64> %tmp
 ; CHECK-LABEL: @test_vsrd
 ; CHECK: vsrd 2, 2, 3
 }
 
 define <2 x i64> @test_vsrad(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
-  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vsrad(<2 x i64> %x, <2 x i64> %y)
-  ret <2 x i64> %tmp
+  %tmp = ashr <2 x i64> %x, %y
+  ret <2 x i64> %tmp
 ; CHECK-LABEL: @test_vsrad
 ; CHECK: vsrad 2, 2, 3
 }
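Editor's note: this hunk begins at line 14, so the test's RUN line is not visible here. A typical invocation for a test of this kind, given purely as an assumption about how it is driven rather than as the file's actual contents, compiles for a POWER8 CPU and pipes the assembly into FileCheck:

; Hypothetical RUN line (the real one lies outside this hunk):
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s

With the intrinsics gone, the updated functions check that plain shl, lshr, and ashr on <2 x i64> still produce vsld, vsrd, and vsrad.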