llvm/test/CodeGen/AArch64/cond-sel.ll
Tim Northover 7bc8414ee9 Add explicit triples to AArch64 tests
Only Linux is supported at the moment, and other platforms quickly fault. As a
result these tests would fail on non-Linux hosts. It may be worth making the
tests more generic again as more platforms are supported.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@174170 91177308-0d34-0410-b5e6-96231b3b80d8
2013-02-01 11:40:47 +00:00

214 lines
6.6 KiB
LLVM

; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
@var32 = global i32 0
@var64 = global i64 0
; Basic CSEL selection: an icmp feeding a select should lower to a single
; csel whose condition code comes straight from the icmp predicate.
define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
; CHECK: test_csel:
; 32-bit select between two constants: materialise both with movz, then
; csel on "hi" (from the ugt predicate).
%tst1 = icmp ugt i32 %lhs32, %rhs32
%val1 = select i1 %tst1, i32 42, i32 52
store i32 %val1, i32* @var32
; CHECK: movz [[W52:w[0-9]+]], #52
; CHECK: movz [[W42:w[0-9]+]], #42
; CHECK: csel {{w[0-9]+}}, [[W42]], [[W52]], hi
; Mixed-width 64-bit case: the sign-extension of %rhs32 should fold into the
; compare as an sxtw extended-register operand; a separate sxtw is still
; expected to produce the extended value consumed by the csel itself.
%rhs64 = sext i32 %rhs32 to i64
%tst2 = icmp sle i64 %lhs64, %rhs64
%val2 = select i1 %tst2, i64 %lhs64, i64 %rhs64
store i64 %val2, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], [[RHS:w[0-9]+]], sxtw
; CHECK: sxtw [[EXT_RHS:x[0-9]+]], [[RHS]]
; CHECK: csel {{x[0-9]+}}, [[LHS]], [[EXT_RHS]], le
ret void
; CHECK: ret
}
; Floating-point selects: fcmp predicates that don't map to one AArch64
; condition code ("one" = ordered-and-not-equal, "ueq" = unordered-or-equal)
; should expand to a chain of two csel instructions after a single fcmp.
define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %rhs64) {
; CHECK: test_floatcsel:
%tst1 = fcmp one float %lhs32, %rhs32
; CHECK: fcmp {{s[0-9]+}}, {{s[0-9]+}}
; "one" is decomposed as (mi || gt): first csel picks on mi, the second
; re-selects the true value on gt.
%val1 = select i1 %tst1, i32 42, i32 52
store i32 %val1, i32* @var32
; CHECK: movz [[W52:w[0-9]+]], #52
; CHECK: movz [[W42:w[0-9]+]], #42
; CHECK: csel [[MAYBETRUE:w[0-9]+]], [[W42]], [[W52]], mi
; CHECK: csel {{w[0-9]+}}, [[W42]], [[MAYBETRUE]], gt
%tst2 = fcmp ueq double %lhs64, %rhs64
; CHECK: fcmp {{d[0-9]+}}, {{d[0-9]+}}
; "ueq" is decomposed as (eq || vs): equal, or unordered (vs = overflow flag,
; which fcmp sets for NaN operands).
%val2 = select i1 %tst2, i64 9, i64 15
store i64 %val2, i64* @var64
; CHECK: movz [[CONST15:x[0-9]+]], #15
; CHECK: movz [[CONST9:x[0-9]+]], #9
; CHECK: csel [[MAYBETRUE:x[0-9]+]], [[CONST9]], [[CONST15]], eq
; CHECK: csel {{x[0-9]+}}, [[CONST9]], [[MAYBETRUE]], vs
ret void
; CHECK: ret
}
; CSINC selection: select(cond, x, y+1) should lower to a single csinc
; rather than a separate add + csel.
define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
; CHECK: test_csinc:
; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
%tst1 = icmp ugt i32 %lhs32, %rhs32
%inc1 = add i32 %rhs32, 1
%val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
store volatile i32 %val1, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
; CHECK: csinc {{w[0-9]+}}, [[LHS]], [[RHS]], ls
; Here the incremented value is in the false slot, so the sle condition is
; used directly (le) with no inversion.
%rhs2 = add i32 %rhs32, 42
%tst2 = icmp sle i32 %lhs32, %rhs2
%inc2 = add i32 %rhs32, 1
%val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
store volatile i32 %val2, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
; CHECK: csinc {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le
; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
; 64-bit variants: the extended 32-bit rhs folds into the cmp as an
; extended-register operand (hence the w register in the cmp).
%rhs3 = sext i32 %rhs32 to i64
%tst3 = icmp ugt i64 %lhs64, %rhs3
%inc3 = add i64 %rhs3, 1
%val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
store volatile i64 %val3, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csinc {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls
%rhs4 = zext i32 %rhs32 to i64
%tst4 = icmp sle i64 %lhs64, %rhs4
%inc4 = add i64 %rhs4, 1
%val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
store volatile i64 %val4, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csinc {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le
ret void
; CHECK: ret
}
; CSINV selection: select(cond, x, ~y) should lower to a single csinv
; rather than a separate eor/mvn + csel. Structure mirrors test_csinc.
define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
; CHECK: test_csinv:
; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
%tst1 = icmp ugt i32 %lhs32, %rhs32
%inc1 = xor i32 -1, %rhs32
%val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
store volatile i32 %val1, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
; CHECK: csinv {{w[0-9]+}}, [[LHS]], [[RHS]], ls
; Inverted value in the false slot: sle maps directly to le.
%rhs2 = add i32 %rhs32, 42
%tst2 = icmp sle i32 %lhs32, %rhs2
%inc2 = xor i32 -1, %rhs32
%val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
store volatile i32 %val2, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
; CHECK: csinv {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le
; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
; 64-bit variants with sext/zext rhs folded into the cmp as an extend.
%rhs3 = sext i32 %rhs32 to i64
%tst3 = icmp ugt i64 %lhs64, %rhs3
%inc3 = xor i64 -1, %rhs3
%val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
store volatile i64 %val3, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csinv {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls
%rhs4 = zext i32 %rhs32 to i64
%tst4 = icmp sle i64 %lhs64, %rhs4
%inc4 = xor i64 -1, %rhs4
%val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
store volatile i64 %val4, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csinv {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le
ret void
; CHECK: ret
}
; CSNEG selection: select(cond, x, -y) should lower to a single csneg
; rather than a separate neg + csel. Structure mirrors test_csinc/test_csinv.
define void @test_csneg(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
; CHECK: test_csneg:
; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
%tst1 = icmp ugt i32 %lhs32, %rhs32
%inc1 = sub i32 0, %rhs32
%val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
store volatile i32 %val1, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
; CHECK: csneg {{w[0-9]+}}, [[LHS]], [[RHS]], ls
; Negated value in the false slot: sle maps directly to le.
%rhs2 = add i32 %rhs32, 42
%tst2 = icmp sle i32 %lhs32, %rhs2
%inc2 = sub i32 0, %rhs32
%val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
store volatile i32 %val2, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
; CHECK: csneg {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le
; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
; 64-bit variants with sext/zext rhs folded into the cmp as an extend.
%rhs3 = sext i32 %rhs32 to i64
%tst3 = icmp ugt i64 %lhs64, %rhs3
%inc3 = sub i64 0, %rhs3
%val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
store volatile i64 %val3, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csneg {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls
%rhs4 = zext i32 %rhs32 to i64
%tst4 = icmp sle i64 %lhs64, %rhs4
%inc4 = sub i64 0, %rhs4
%val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
store volatile i64 %val4, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csneg {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le
ret void
; CHECK: ret
}
; CSET-style lowering: zext of an i1 compare result should become
; "csinc wzr, wzr, <inverted cond>" (the CSET alias), i.e. 1 on the
; condition and 0 otherwise.
define void @test_cset(i32 %lhs, i32 %rhs, i64 %lhs64) {
; CHECK: test_cset:
; N.b. code is not optimal here (32-bit csinc would be better) but
; incoming DAG is too complex
%tst1 = icmp eq i32 %lhs, %rhs
%val1 = zext i1 %tst1 to i32
store i32 %val1, i32* @var32
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}
; CHECK: csinc {{w[0-9]+}}, wzr, wzr, ne
; ule inverts to hi for the csinc; the i64 result still only needs a
; w-register csinc since the value is 0 or 1.
%rhs64 = sext i32 %rhs to i64
%tst2 = icmp ule i64 %lhs64, %rhs64
%val2 = zext i1 %tst2 to i64
store i64 %val2, i64* @var64
; CHECK: csinc {{w[0-9]+}}, wzr, wzr, hi
ret void
; CHECK: ret
}
; CSETM-style lowering: sext of an i1 compare result should become
; "csinv wzr/xzr, wzr/xzr, <inverted cond>" (the CSETM alias), i.e. all-ones
; on the condition and 0 otherwise.
define void @test_csetm(i32 %lhs, i32 %rhs, i64 %lhs64) {
; CHECK: test_csetm:
%tst1 = icmp eq i32 %lhs, %rhs
%val1 = sext i1 %tst1 to i32
store i32 %val1, i32* @var32
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}
; CHECK: csinv {{w[0-9]+}}, wzr, wzr, ne
; 64-bit result needs the full x-register form; ule inverts to hi.
%rhs64 = sext i32 %rhs to i64
%tst2 = icmp ule i64 %lhs64, %rhs64
%val2 = sext i1 %tst2 to i64
store i64 %val2, i64* @var64
; CHECK: csinv {{x[0-9]+}}, xzr, xzr, hi
ret void
; CHECK: ret
}