llvm/test/CodeGen/PowerPC/fast-isel-fold.ll
Bill Schmidt 7248968fa5 [PowerPC] Add loads, stores, and related things to fast-isel.
This is the next big chunk of fast-isel code.  The primary purpose is
to implement selection of loads and stores, but there is a lot of
drag-along to support this.  The common code to analyze addresses for
both loads and stores is substantial.  It's also necessary to add the
materialization code for global values.
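
As an illustration (the function and the instruction noted in the comment are
hypothetical, not taken from this patch's tests), the address analysis is what
lets a constant getelementptr offset be folded into the displacement of a
single D-form access instead of computing the address with a separate add:

    define void @store_elt4(i32* %p, i32 %v) nounwind {
      %addr = getelementptr inbounds i32* %p, i64 4
      store i32 %v, i32* %addr, align 4   ; ideally a single stw with a
      ret void                            ; 16-byte displacement
    }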

Related to load-store processing is the code to fold loads into
integer extends, since otherwise we generate lots of redundant
instructions.  We also need to add some overrides to some FastEmit
routines to ensure we don't assign GPR 0 to a virtual register when
this would change the meaning of an instruction.
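
As a sketch of the zero-extend case (register numbers are illustrative), the
fold lets the extending load stand alone, since lbz already clears the upper
bits that a separate extend would clear again:

    %1 = load i8* @a, align 1
    %2 = zext i8 %1 to i32
      ; folded:     lbz 3, 0(4)
      ; not folded: lbz 3, 0(4)
      ;             rlwinm 3, 3, 0, 24, 31   ; redundant mask of bits that
      ;                                      ; lbz already zeroed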

I added handling for selecting a few binary arithmetic instructions, to
enable committing some test cases I wrote a while back.

Finally, a couple of miscellaneous changes:
 * I cleaned up some poor style from a previous patch in
   PPCISelLowering.cpp, pointed out by David Blaikie.
 * I enlarged the Addr.Offset field to avoid sign problems with 32-bit
   offsets. 



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@189636 91177308-0d34-0410-b5e6-96231b3b80d8
2013-08-30 02:29:45 +00:00

; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
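; Verify that a load feeding an integer extend is selected as a single
; extending load (lbz, lhz, lha, lwz, lwa) rather than a load followed by a
; separate extend instruction.  Sign-extending a byte still requires an
; extsb (checked in @t10), since PowerPC has no sign-extending byte load.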
@a = global i8 1, align 1
@b = global i16 2, align 2
@c = global i32 4, align 4

define i32 @t3() nounwind uwtable ssp {
; ELF64: t3
  %1 = load i8* @a, align 1
  %2 = zext i8 %1 to i32
; ELF64: lbz
; ELF64-NOT: rlwinm
  ret i32 %2
}

define i32 @t4() nounwind uwtable ssp {
; ELF64: t4
  %1 = load i16* @b, align 2
  %2 = zext i16 %1 to i32
; ELF64: lhz
; ELF64-NOT: rlwinm
  ret i32 %2
}

define i32 @t5() nounwind uwtable ssp {
; ELF64: t5
  %1 = load i16* @b, align 2
  %2 = sext i16 %1 to i32
; ELF64: lha
; ELF64-NOT: rlwinm
  ret i32 %2
}

define i32 @t6() nounwind uwtable ssp {
; ELF64: t6
  %1 = load i8* @a, align 2
  %2 = sext i8 %1 to i32
; ELF64: lbz
; ELF64-NOT: rlwinm
  ret i32 %2
}

define i64 @t7() nounwind uwtable ssp {
; ELF64: t7
  %1 = load i8* @a, align 1
  %2 = zext i8 %1 to i64
; ELF64: lbz
; ELF64-NOT: rldicl
  ret i64 %2
}

define i64 @t8() nounwind uwtable ssp {
; ELF64: t8
  %1 = load i16* @b, align 2
  %2 = zext i16 %1 to i64
; ELF64: lhz
; ELF64-NOT: rldicl
  ret i64 %2
}

define i64 @t9() nounwind uwtable ssp {
; ELF64: t9
  %1 = load i16* @b, align 2
  %2 = sext i16 %1 to i64
; ELF64: lha
; ELF64-NOT: extsh
  ret i64 %2
}

define i64 @t10() nounwind uwtable ssp {
; ELF64: t10
  %1 = load i8* @a, align 2
  %2 = sext i8 %1 to i64
; ELF64: lbz
; ELF64: extsb
  ret i64 %2
}

define i64 @t11() nounwind uwtable ssp {
; ELF64: t11
  %1 = load i32* @c, align 4
  %2 = zext i32 %1 to i64
; ELF64: lwz
; ELF64-NOT: rldicl
  ret i64 %2
}

define i64 @t12() nounwind uwtable ssp {
; ELF64: t12
  %1 = load i32* @c, align 4
  %2 = sext i32 %1 to i64
; ELF64: lwa
; ELF64-NOT: extsw
  ret i64 %2
}