From d2725878ee60829a1214f3689b4178d608b512eb Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Wed, 13 May 2020 14:31:18 +0100 Subject: [PATCH] [InstCombine] Add vector tests for the or(shl(zext(x),32)|zext(y)) concat combines --- test/Transforms/InstCombine/or-concat.ll | 112 +++++++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/test/Transforms/InstCombine/or-concat.ll b/test/Transforms/InstCombine/or-concat.ll index 4148e4900f7..4bbe5896a7f 100644 --- a/test/Transforms/InstCombine/or-concat.ll +++ b/test/Transforms/InstCombine/or-concat.ll @@ -28,6 +28,23 @@ define i64 @concat_bswap32_unary_split(i64 %a0) { ret i64 %9 } +define <2 x i64> @concat_bswap32_unary_split_vector(<2 x i64> %a0) { +; CHECK-LABEL: @concat_bswap32_unary_split_vector( +; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> [[A0:%.*]]) +; CHECK-NEXT: ret <2 x i64> [[TMP1]] +; + %1 = lshr <2 x i64> %a0, <i64 32, i64 32> + %2 = trunc <2 x i64> %1 to <2 x i32> + %3 = trunc <2 x i64> %a0 to <2 x i32> + %4 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %2) + %5 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %3) + %6 = zext <2 x i32> %4 to <2 x i64> + %7 = zext <2 x i32> %5 to <2 x i64> + %8 = shl nuw <2 x i64> %7, <i64 32, i64 32> + %9 = or <2 x i64> %6, %8 + ret <2 x i64> %9 +} + define i64 @concat_bswap32_unary_flip(i64 %a0) { ; CHECK-LABEL: @concat_bswap32_unary_flip( ; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[A0:%.*]], 32 @@ -48,6 +65,26 @@ define i64 @concat_bswap32_unary_flip(i64 %a0) { ret i64 %9 } +define <2 x i64> @concat_bswap32_unary_flip_vector(<2 x i64> %a0) { +; CHECK-LABEL: @concat_bswap32_unary_flip_vector( +; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i64> [[A0:%.*]], <i64 32, i64 32> +; CHECK-NEXT: [[TMP2:%.*]] = shl <2 x i64> [[A0]], <i64 32, i64 32> +; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> [[TMP3]]) +; CHECK-NEXT: ret <2 x i64> [[TMP4]] +; + %1 = lshr <2 x i64> %a0, <i64 32, i64 32> + %2 = trunc <2 x i64> %1 to <2 x i32> + %3 = trunc <2 
x i64> %a0 to <2 x i32> + %4 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %2) + %5 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %3) + %6 = zext <2 x i32> %4 to <2 x i64> + %7 = zext <2 x i32> %5 to <2 x i64> + %8 = shl nuw <2 x i64> %6, <i64 32, i64 32> + %9 = or <2 x i64> %7, %8 + ret <2 x i64> %9 +} + define i64 @concat_bswap32_binary(i32 %a0, i32 %a1) { ; CHECK-LABEL: @concat_bswap32_binary( ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[A1:%.*]] to i64 @@ -66,7 +103,26 @@ define i64 @concat_bswap32_binary(i32 %a0, i32 %a1) { ret i64 %6 } +define <2 x i64> @concat_bswap32_binary_vector(<2 x i32> %a0, <2 x i32> %a1) { +; CHECK-LABEL: @concat_bswap32_binary_vector( +; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[A1:%.*]] to <2 x i64> +; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i32> [[A0:%.*]] to <2 x i64> +; CHECK-NEXT: [[TMP3:%.*]] = shl nuw <2 x i64> [[TMP2]], <i64 32, i64 32> +; CHECK-NEXT: [[TMP4:%.*]] = or <2 x i64> [[TMP3]], [[TMP1]] +; CHECK-NEXT: [[TMP5:%.*]] = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> [[TMP4]]) +; CHECK-NEXT: ret <2 x i64> [[TMP5]] +; + %1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a0) + %2 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a1) + %3 = zext <2 x i32> %1 to <2 x i64> + %4 = zext <2 x i32> %2 to <2 x i64> + %5 = shl nuw <2 x i64> %4, <i64 32, i64 32> + %6 = or <2 x i64> %3, %5 + ret <2 x i64> %6 +} + declare i32 @llvm.bswap.i32(i32) +declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>) ; BITREVERSE @@ -87,6 +143,23 @@ define i64 @concat_bitreverse32_unary_split(i64 %a0) { ret i64 %9 } +define <2 x i64> @concat_bitreverse32_unary_split_vector(<2 x i64> %a0) { +; CHECK-LABEL: @concat_bitreverse32_unary_split_vector( +; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[A0:%.*]]) +; CHECK-NEXT: ret <2 x i64> [[TMP1]] +; + %1 = lshr <2 x i64> %a0, <i64 32, i64 32> + %2 = trunc <2 x i64> %1 to <2 x i32> + %3 = trunc <2 x i64> %a0 to <2 x i32> + %4 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %2) + %5 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %3) + 
%6 = zext <2 x i32> %4 to <2 x i64> + %7 = zext <2 x i32> %5 to <2 x i64> + %8 = shl nuw <2 x i64> %7, <i64 32, i64 32> + %9 = or <2 x i64> %6, %8 + ret <2 x i64> %9 +} + define i64 @concat_bitreverse32_unary_flip(i64 %a0) { ; CHECK-LABEL: @concat_bitreverse32_unary_flip( ; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[A0:%.*]], 32 @@ -107,6 +180,26 @@ define i64 @concat_bitreverse32_unary_flip(i64 %a0) { ret i64 %9 } +define <2 x i64> @concat_bitreverse32_unary_flip_vector(<2 x i64> %a0) { +; CHECK-LABEL: @concat_bitreverse32_unary_flip_vector( +; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i64> [[A0:%.*]], <i64 32, i64 32> +; CHECK-NEXT: [[TMP2:%.*]] = shl <2 x i64> [[A0]], <i64 32, i64 32> +; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP3]]) +; CHECK-NEXT: ret <2 x i64> [[TMP4]] +; + %1 = lshr <2 x i64> %a0, <i64 32, i64 32> + %2 = trunc <2 x i64> %1 to <2 x i32> + %3 = trunc <2 x i64> %a0 to <2 x i32> + %4 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %2) + %5 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %3) + %6 = zext <2 x i32> %4 to <2 x i64> + %7 = zext <2 x i32> %5 to <2 x i64> + %8 = shl nuw <2 x i64> %6, <i64 32, i64 32> + %9 = or <2 x i64> %7, %8 + ret <2 x i64> %9 +} + define i64 @concat_bitreverse32_binary(i32 %a0, i32 %a1) { ; CHECK-LABEL: @concat_bitreverse32_binary( ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[A1:%.*]] to i64 @@ -125,4 +218,23 @@ define i64 @concat_bitreverse32_binary(i32 %a0, i32 %a1) { ret i64 %6 } +define <2 x i64> @concat_bitreverse32_binary_vector(<2 x i32> %a0, <2 x i32> %a1) { +; CHECK-LABEL: @concat_bitreverse32_binary_vector( +; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[A1:%.*]] to <2 x i64> +; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i32> [[A0:%.*]] to <2 x i64> +; CHECK-NEXT: [[TMP3:%.*]] = shl nuw <2 x i64> [[TMP2]], <i64 32, i64 32> +; CHECK-NEXT: [[TMP4:%.*]] = or <2 x i64> [[TMP3]], [[TMP1]] +; CHECK-NEXT: [[TMP5:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP4]]) +; CHECK-NEXT: ret <2 x i64> [[TMP5]] +; + %1 = 
tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %a0) + %2 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %a1) + %3 = zext <2 x i32> %1 to <2 x i64> + %4 = zext <2 x i32> %2 to <2 x i64> + %5 = shl nuw <2 x i64> %4, <i64 32, i64 32> + %6 = or <2 x i64> %3, %5 + ret <2 x i64> %6 +} + declare i32 @llvm.bitreverse.i32(i32) +declare <2 x i32> @llvm.bitreverse.v2i32(<2 x i32>)