archived-llvm/test/CodeGen/X86/wide-integer-cmp.ll
Reid Kleckner bfc1e953bc Revert "Correct dwarf unwind information in function epilogue for X86"
This reverts r317579, originally committed as r317100.

There is a design issue with marking CFI instructions duplicatable. Not
all targets support the CFIInstrInserter pass, and targets like Darwin
can't cope with duplicated prologue setup CFI instructions: compact
unwind info emission fails.

When the following code is compiled for arm64 on Mac at -O3, the CFI
instructions end up getting tail duplicated, which causes compact unwind
info emission to fail:
  int a, c, d, e, f, g, h, i, j, k, l, m;
  void n(int o, int *b) {
    if (g)
      f = 0;
    for (; f < o; f++) {
      m = a;
      if (l > j * k > i)
        j = i = k = d;
      h = b[c] - e;
    }
  }
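
A plausible way to reproduce (the exact invocation is not quoted in the
message; the target triple and file name below are assumptions) is to save
the snippet as repro.c and compile it with a clang build that still carries
the reverted patch:

  clang -target arm64-apple-ios -O3 -S repro.c -o -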

We get assembly that looks like this:
; BB#1:                                 ; %if.then
Lloh3:
	adrp	x9, _f@GOTPAGE
Lloh4:
	ldr	x9, [x9, _f@GOTPAGEOFF]
	mov	 w8, wzr
Lloh5:
	str		wzr, [x9]
	stp	x20, x19, [sp, #-16]!   ; 8-byte Folded Spill
	.cfi_def_cfa_offset 16
	.cfi_offset w19, -8
	.cfi_offset w20, -16
	cmp		w8, w0
	b.lt	LBB0_3
	b	LBB0_7
LBB0_2:                                 ; %entry.if.end_crit_edge
Lloh6:
	adrp	x8, _f@GOTPAGE
Lloh7:
	ldr	x8, [x8, _f@GOTPAGEOFF]
Lloh8:
	ldr		w8, [x8]
	stp	x20, x19, [sp, #-16]!   ; 8-byte Folded Spill
	.cfi_def_cfa_offset 16
	.cfi_offset w19, -8
	.cfi_offset w20, -16
	cmp		w8, w0
	b.ge	LBB0_7
LBB0_3:                                 ; %for.body.lr.ph

Note the multiple .cfi_def* directives. Compact unwind info emission
can't handle that.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317726 91177308-0d34-0410-b5e6-96231b3b80d8
2017-11-08 21:31:14 +00:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-linux-gnu %s -o - | FileCheck %s
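; To regenerate the CHECK lines after a codegen change, the script named above
; is rerun on this file rather than editing the assertions by hand; a typical
; invocation (assuming llc can be found, e.g. on PATH or via the script's llc
; option) looks like:
;   utils/update_llc_test_checks.py test/CodeGen/X86/wide-integer-cmp.ll
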
define i32 @branch_eq(i64 %a, i64 %b) {
; CHECK-LABEL: branch_eq:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: xorl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: xorl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: orl %ecx, %eax
; CHECK-NEXT: jne .LBB0_2
; CHECK-NEXT: # BB#1: # %bb1
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB0_2: # %bb2
; CHECK-NEXT: movl $2, %eax
; CHECK-NEXT: retl
entry:
  %cmp = icmp eq i64 %a, %b
  br i1 %cmp, label %bb1, label %bb2

bb1:
  ret i32 1

bb2:
  ret i32 2
}

define i32 @branch_slt(i64 %a, i64 %b) {
; CHECK-LABEL: branch_slt:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: jge .LBB1_2
; CHECK-NEXT: # BB#1: # %bb1
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB1_2: # %bb2
; CHECK-NEXT: movl $2, %eax
; CHECK-NEXT: retl
entry:
  %cmp = icmp slt i64 %a, %b
  br i1 %cmp, label %bb1, label %bb2

bb1:
  ret i32 1

bb2:
  ret i32 2
}

define i32 @branch_ule(i64 %a, i64 %b) {
; CHECK-LABEL: branch_ule:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: jb .LBB2_2
; CHECK-NEXT: # BB#1: # %bb1
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB2_2: # %bb2
; CHECK-NEXT: movl $2, %eax
; CHECK-NEXT: retl
entry:
  %cmp = icmp ule i64 %a, %b
  br i1 %cmp, label %bb1, label %bb2

bb1:
  ret i32 1

bb2:
  ret i32 2
}

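; In @set_gt below, the sgt predicate is presumably handled by swapping the
; operands and reusing the "less than" CMP/SBB sequence, which is why the
; test expects setl rather than setg.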
define i32 @set_gt(i64 %a, i64 %b) {
; CHECK-LABEL: set_gt:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: setl %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: retl
entry:
  %cmp = icmp sgt i64 %a, %b
  %res = select i1 %cmp, i32 1, i32 0
  ret i32 %res
}

define i32 @test_wide(i128 %a, i128 %b) {
; CHECK-LABEL: test_wide:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %esi, -8
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %esi
; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: jge .LBB4_2
; CHECK-NEXT: # BB#1: # %bb1
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB4_2: # %bb2
; CHECK-NEXT: movl $2, %eax
; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
entry:
  %cmp = icmp slt i128 %a, %b
  br i1 %cmp, label %bb1, label %bb2

bb1:
  ret i32 1

bb2:
  ret i32 2
}

; The low 32 bits of both operands are masked to zero, so the comparison of
; the low bits will be folded to a CARRY_FALSE node. Make sure the code can
; handle that.
define i32 @test_carry_false(i64 %a, i64 %b) {
; CHECK-LABEL: test_carry_false:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: jge .LBB5_2
; CHECK-NEXT: # BB#1: # %bb1
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB5_2: # %bb2
; CHECK-NEXT: movl $2, %eax
; CHECK-NEXT: retl
entry:
  %x = and i64 %a, -4294967296 ; 0xffffffff00000000
  %y = and i64 %b, -4294967296
  %cmp = icmp slt i64 %x, %y
  br i1 %cmp, label %bb1, label %bb2

bb1:
  ret i32 1

bb2:
  ret i32 2
}