Convert a CodeGen test into an MC test.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@207971 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Rafael Espindola 2014-05-05 15:34:13 +00:00
parent 1c87e2a3a8
commit 260b6b05b9
2 changed files with 208 additions and 161 deletions

View File

@ -1,161 +0,0 @@
; Use the -disable-cfi flag so that we get the compact unwind info in the
; emitted assembly. Compact unwind info is omitted when CFI directives
; are emitted.
;
; RUN: llc -march=arm64 -mtriple=arm64-apple-ios -disable-cfi < %s | FileCheck %s
;
; rdar://13070556
; Global used only by foo3/foo4 below to force values to be kept live.
@bar = common global i32 0, align 4
; Leaf function with no stack allocation and no saving/restoring
; of non-volatile registers.
; Case 1 of 4: should get the trivial frameless compact unwind encoding
; (0x02000000 per the CHECK lines at the bottom of this file).
define i32 @foo1(i32 %a) #0 {
entry:
%add = add nsw i32 %a, 42
ret i32 %add
}
; Leaf function with stack allocation but no saving/restoring
; of non-volatile registers.
; Case 2 of 4: the [36 x i32] alloca (144 bytes) forces a stack adjustment,
; which shows up in the compact unwind encoding (0x2009000 per the CHECKs).
; The body fills the array with 0..35, then sums it back up.
define i32 @foo2(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) #0 {
entry:
%stack = alloca [36 x i32], align 4
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv19 = phi i64 [ 0, %entry ], [ %indvars.iv.next20, %for.body ]
%arrayidx = getelementptr inbounds [36 x i32]* %stack, i64 0, i64 %indvars.iv19
%0 = trunc i64 %indvars.iv19 to i32
store i32 %0, i32* %arrayidx, align 4, !tbaa !0
%indvars.iv.next20 = add i64 %indvars.iv19, 1
%lftr.wideiv21 = trunc i64 %indvars.iv.next20 to i32
%exitcond22 = icmp eq i32 %lftr.wideiv21, 36
br i1 %exitcond22, label %for.body4, label %for.body
for.body4: ; preds = %for.body, %for.body4
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body4 ], [ 0, %for.body ]
%z1.016 = phi i32 [ %add, %for.body4 ], [ 0, %for.body ]
%arrayidx6 = getelementptr inbounds [36 x i32]* %stack, i64 0, i64 %indvars.iv
%1 = load i32* %arrayidx6, align 4, !tbaa !0
%add = add nsw i32 %1, %z1.016
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 36
br i1 %exitcond, label %for.end9, label %for.body4
for.end9: ; preds = %for.body4
ret i32 %add
}
; Leaf function with no stack allocation but with saving restoring of
; non-volatile registers.
; Case 3 of 4: the 18 volatile loads below must all stay live at once,
; so the register allocator is forced to spill into callee-saved registers
; (which must then be saved/restored) without needing any local stack slots.
; Expected compact unwind encoding: 0x200400f per the CHECK lines below.
define i32 @foo3(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) #1 {
entry:
%0 = load volatile i32* @bar, align 4, !tbaa !0
%1 = load volatile i32* @bar, align 4, !tbaa !0
%2 = load volatile i32* @bar, align 4, !tbaa !0
%3 = load volatile i32* @bar, align 4, !tbaa !0
%4 = load volatile i32* @bar, align 4, !tbaa !0
%5 = load volatile i32* @bar, align 4, !tbaa !0
%6 = load volatile i32* @bar, align 4, !tbaa !0
%7 = load volatile i32* @bar, align 4, !tbaa !0
%8 = load volatile i32* @bar, align 4, !tbaa !0
%9 = load volatile i32* @bar, align 4, !tbaa !0
%10 = load volatile i32* @bar, align 4, !tbaa !0
%11 = load volatile i32* @bar, align 4, !tbaa !0
%12 = load volatile i32* @bar, align 4, !tbaa !0
%13 = load volatile i32* @bar, align 4, !tbaa !0
%14 = load volatile i32* @bar, align 4, !tbaa !0
%15 = load volatile i32* @bar, align 4, !tbaa !0
%16 = load volatile i32* @bar, align 4, !tbaa !0
%17 = load volatile i32* @bar, align 4, !tbaa !0
; Fold the arguments in as -2*arg terms so they stay live too.
%factor = mul i32 %h, -2
%factor56 = mul i32 %g, -2
%factor57 = mul i32 %f, -2
%factor58 = mul i32 %e, -2
%factor59 = mul i32 %d, -2
%factor60 = mul i32 %c, -2
%factor61 = mul i32 %b, -2
; Sum of the first 17 loads; all values %0..%16 are live until used here.
%sum = add i32 %1, %0
%sum62 = add i32 %sum, %2
%sum63 = add i32 %sum62, %3
%sum64 = add i32 %sum63, %4
%sum65 = add i32 %sum64, %5
%sum66 = add i32 %sum65, %6
%sum67 = add i32 %sum66, %7
%sum68 = add i32 %sum67, %8
%sum69 = add i32 %sum68, %9
%sum70 = add i32 %sum69, %10
%sum71 = add i32 %sum70, %11
%sum72 = add i32 %sum71, %12
%sum73 = add i32 %sum72, %13
%sum74 = add i32 %sum73, %14
%sum75 = add i32 %sum74, %15
%sum76 = add i32 %sum75, %16
%sub10 = sub i32 %17, %sum76
%sub11 = add i32 %sub10, %factor
%sub12 = add i32 %sub11, %factor56
%sub13 = add i32 %sub12, %factor57
%sub14 = add i32 %sub13, %factor58
%sub15 = add i32 %sub14, %factor59
%sub16 = add i32 %sub15, %factor60
%add = add i32 %sub16, %factor61
ret i32 %add
}
; Leaf function with stack allocation and saving/restoring of non-volatile
; registers.
; Case 4 of 4: combines a [128 x i32] alloca (512 bytes) with enough live
; values to require callee-saved registers.
; Expected compact unwind encoding: 0x2021010 per the CHECK lines below.
define i32 @foo4(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) #0 {
entry:
%stack = alloca [128 x i32], align 4
%0 = zext i32 %a to i64
br label %for.body
for.cond2.preheader: ; preds = %for.body
%1 = sext i32 %f to i64
br label %for.body4
for.body: ; preds = %for.body, %entry
; First loop: stack[i] = a + i for i in 0..127.
%indvars.iv22 = phi i64 [ 0, %entry ], [ %indvars.iv.next23, %for.body ]
%2 = add nsw i64 %indvars.iv22, %0
%arrayidx = getelementptr inbounds [128 x i32]* %stack, i64 0, i64 %indvars.iv22
%3 = trunc i64 %2 to i32
store i32 %3, i32* %arrayidx, align 4, !tbaa !0
%indvars.iv.next23 = add i64 %indvars.iv22, 1
%lftr.wideiv25 = trunc i64 %indvars.iv.next23 to i32
%exitcond26 = icmp eq i32 %lftr.wideiv25, 128
br i1 %exitcond26, label %for.cond2.preheader, label %for.body
for.body4: ; preds = %for.body4, %for.cond2.preheader
; Second loop: sum 128 entries starting at offset f.
%indvars.iv = phi i64 [ 0, %for.cond2.preheader ], [ %indvars.iv.next, %for.body4 ]
%z1.018 = phi i32 [ 0, %for.cond2.preheader ], [ %add8, %for.body4 ]
%4 = add nsw i64 %indvars.iv, %1
%arrayidx7 = getelementptr inbounds [128 x i32]* %stack, i64 0, i64 %4
%5 = load i32* %arrayidx7, align 4, !tbaa !0
%add8 = add nsw i32 %5, %z1.018
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 128
br i1 %exitcond, label %for.end11, label %for.body4
for.end11: ; preds = %for.body4
ret i32 %add8
}
; #0 is additionally readnone; both pin the CPU so codegen is deterministic.
attributes #0 = { readnone "target-cpu"="cyclone" }
attributes #1 = { "target-cpu"="cyclone" }
; Legacy (pre-3.6) TBAA metadata syntax; the test predates typed-pointer removal.
!0 = metadata !{metadata !"int", metadata !1}
!1 = metadata !{metadata !"omnipotent char", metadata !2}
!2 = metadata !{metadata !"Simple C/C++ TBAA"}
; One __compact_unwind entry per function: range start, then the 32-bit
; encoding. The decimal .long values match the hex encodings in the comments.
; CHECK: .section __LD,__compact_unwind,regular,debug
; CHECK: .quad _foo1 ; Range Start
; CHECK: .long 33554432 ; Compact Unwind Encoding: 0x2000000
; CHECK: .quad _foo2 ; Range Start
; CHECK: .long 33591296 ; Compact Unwind Encoding: 0x2009000
; CHECK: .quad _foo3 ; Range Start
; CHECK: .long 33570831 ; Compact Unwind Encoding: 0x200400f
; CHECK: .quad _foo4 ; Range Start
; CHECK: .long 33689616 ; Compact Unwind Encoding: 0x2021010

View File

@ -0,0 +1,208 @@
// RUN: llvm-mc -triple=arm64-apple-ios -filetype=obj < %s | \
// RUN: llvm-readobj -sections -section-relocations -section-data | \
// RUN: FileCheck %s
//
// rdar://13070556
// FIXME: we should add compact unwind support to llvm-objdump -unwind-info
// CHECK: Section {
// CHECK: Index: 1
// CHECK-NEXT: Name: __compact_unwind
// CHECK-NEXT: Segment: __LD
// CHECK-NEXT: Address:
// CHECK-NEXT: Size:
// CHECK-NEXT: Offset:
// CHECK-NEXT: Alignment:
// CHECK-NEXT: RelocationOffset:
// CHECK-NEXT: RelocationCount:
// CHECK-NEXT: Type:
// CHECK-NEXT: Attributes [
// CHECK-NEXT: Debug
// CHECK-NEXT: ]
// CHECK-NEXT: Reserved1:
// CHECK-NEXT: Reserved2:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: 0x60 0 3 0 ARM64_RELOC_UNSIGNED 0 -
// CHECK-NEXT: 0x40 0 3 0 ARM64_RELOC_UNSIGNED 0 -
// CHECK-NEXT: 0x20 0 3 0 ARM64_RELOC_UNSIGNED 0 -
// CHECK-NEXT: 0x0 0 3 0 ARM64_RELOC_UNSIGNED 0 -
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
// CHECK-NEXT: 0000: 00000000 00000000 08000000 00000002
// CHECK-NEXT: 0010: 00000000 00000000 00000000 00000000
// CHECK-NEXT: 0020: 08000000 00000000 40000000 00900002
// CHECK-NEXT: 0030: 00000000 00000000 00000000 00000000
// CHECK-NEXT: 0040: 48000000 00000000 D4000000 0F400002
// CHECK-NEXT: 0050: 00000000 00000000 00000000 00000000
// CHECK-NEXT: 0060: 1C010000 00000000 54000000 10100202
// CHECK-NEXT: 0070: 00000000 00000000 00000000 00000000
// CHECK-NEXT: )
// CHECK-NEXT: }
; foo1: leaf, no stack adjustment, no callee-saved registers. Its
; compact unwind encoding is 0x02000000 ("00000002" little-endian in the
; first SectionData row checked above).
; NOTE: ';' is the comment character here because this targets the
; Darwin (arm64-apple) assembler dialect.
.section __TEXT,__text,regular,pure_instructions
.globl _foo1
.align 2
_foo1: ; @foo1
.cfi_startproc
; BB#0: ; %entry
add w0, w0, #42 ; =#42
ret
.cfi_endproc
; foo2: leaf with a 144-byte (36 x i32) stack frame but no callee-saved
; registers. The sp adjustment is what produces the 0x02009000 encoding
; ("00900002" little-endian in the SectionData checked above).
.globl _foo2
.align 2
_foo2: ; @foo2
.cfi_startproc
; BB#0: ; %entry
sub sp, sp, #144 ; =#144
Ltmp2:
.cfi_def_cfa_offset 144
mov x9, xzr
mov x8, sp
; First loop: store the index into each of the 36 array slots.
LBB1_1: ; %for.body
; =>This Inner Loop Header: Depth=1
str w9, [x8, x9, lsl #2]
add x9, x9, #1 ; =#1
cmp w9, #36 ; =#36
b.ne LBB1_1
; BB#2:
mov x9, xzr
mov w0, wzr
; Second loop: sum the 36 slots back into w0 (x9 is now a byte offset).
LBB1_3: ; %for.body4
; =>This Inner Loop Header: Depth=1
ldr w10, [x8, x9]
add x9, x9, #4 ; =#4
cmp w9, #144 ; =#144
add w0, w10, w0
b.ne LBB1_3
; BB#4: ; %for.end9
add sp, sp, #144 ; =#144
ret
.cfi_endproc
; foo3: leaf with no local stack slots, but it saves/restores the
; callee-saved pairs x19-x26 (64 bytes of pre-indexed stp stores). That
; register-save pattern yields the 0x0200400F encoding ("0F400002"
; little-endian in the SectionData checked above).
.globl _foo3
.align 2
_foo3: ; @foo3
.cfi_startproc
; BB#0: ; %entry
stp x26, x25, [sp, #-64]!
stp x24, x23, [sp, #16]
stp x22, x21, [sp, #32]
stp x20, x19, [sp, #48]
Ltmp3:
.cfi_def_cfa_offset 64
Ltmp4:
.cfi_offset w19, -16
Ltmp5:
.cfi_offset w20, -24
Ltmp6:
.cfi_offset w21, -32
Ltmp7:
.cfi_offset w22, -40
Ltmp8:
.cfi_offset w23, -48
Ltmp9:
.cfi_offset w24, -56
Ltmp10:
.cfi_offset w25, -64
Ltmp11:
.cfi_offset w26, -72
; Load the address of _bar via the GOT (adrp+ldr pair linked by the
; AdrpLdrGot .loh directive after the epilogue).
Lloh0:
adrp x8, _bar@GOTPAGE
Lloh1:
ldr x8, [x8, _bar@GOTPAGEOFF]
; 18 loads of bar, all live at once -- this is what forced the
; callee-saved registers (w19-w25) into use.
ldr w9, [x8]
ldr w10, [x8]
ldr w11, [x8]
ldr w12, [x8]
ldr w13, [x8]
ldr w14, [x8]
ldr w15, [x8]
ldr w16, [x8]
ldr w17, [x8]
ldr w0, [x8]
ldr w19, [x8]
ldr w20, [x8]
ldr w21, [x8]
ldr w22, [x8]
ldr w23, [x8]
ldr w24, [x8]
ldr w25, [x8]
ldr w8, [x8]
add w9, w10, w9
add w9, w9, w11
add w9, w9, w12
add w9, w9, w13
add w9, w9, w14
add w9, w9, w15
add w9, w9, w16
add w9, w9, w17
add w9, w9, w0
add w9, w9, w19
add w9, w9, w20
add w9, w9, w21
add w9, w9, w22
add w9, w9, w23
add w9, w9, w24
add w9, w9, w25
sub w8, w8, w9
; The IR's "-2*arg" terms became shifted subtracts: w8 -= arg << 1.
sub w8, w8, w7, lsl #1
sub w8, w8, w6, lsl #1
sub w8, w8, w5, lsl #1
sub w8, w8, w4, lsl #1
sub w8, w8, w3, lsl #1
sub w8, w8, w2, lsl #1
sub w0, w8, w1, lsl #1
; Epilogue: restore the callee-saved pairs in reverse order.
ldp x20, x19, [sp, #48]
ldp x22, x21, [sp, #32]
ldp x24, x23, [sp, #16]
ldp x26, x25, [sp], #64
ret
.loh AdrpLdrGot Lloh0, Lloh1
.cfi_endproc
; foo4: saves one callee-saved pair (x27/x28) AND allocates a 512-byte
; (128 x i32) local frame -- the combined case, encoded as 0x02021010
; ("10100202" little-endian in the SectionData checked above).
.globl _foo4
.align 2
_foo4: ; @foo4
.cfi_startproc
; BB#0: ; %entry
stp x28, x27, [sp, #-16]!
sub sp, sp, #512 ; =#512
Ltmp12:
.cfi_def_cfa_offset 528
Ltmp13:
.cfi_offset w27, -16
Ltmp14:
.cfi_offset w28, -24
; kill: W0<def> W0<kill> X0<def>
mov x9, xzr
ubfx x10, x0, #0, #32
mov x8, sp
; First loop: stack[i] = a + i for i in 0..127.
LBB3_1: ; %for.body
; =>This Inner Loop Header: Depth=1
add w11, w10, w9
str w11, [x8, x9, lsl #2]
add x9, x9, #1 ; =#1
cmp w9, #128 ; =#128
b.ne LBB3_1
; BB#2: ; %for.cond2.preheader
mov x9, xzr
mov w0, wzr
add x8, x8, w5, sxtw #2
; Second loop: sum 128 words starting at stack + 4*f (x9 is a byte offset).
LBB3_3: ; %for.body4
; =>This Inner Loop Header: Depth=1
ldr w10, [x8, x9]
add x9, x9, #4 ; =#4
cmp w9, #512 ; =#512
add w0, w10, w0
b.ne LBB3_3
; BB#4: ; %for.end11
add sp, sp, #512 ; =#512
ldp x28, x27, [sp], #16
ret
.cfi_endproc
.comm _bar,4,2 ; @bar
.subsections_via_symbols