llvm-mirror/test/CodeGen/AArch64/arm64-vector-insertion.ll
Evandro Menezes 701b911eac [AArch64] Improve code generation of constant vectors
Use the whole gamut of constant immediates available to set up a vector.
Instead of using, for example, `mov w0, #0xffff; dup v0.4s, w0`, which
transfers between register files, use the more efficient `movi v0.4s, #-1`
instead.  Not limited to just a few values, but any immediate value that can
be encoded by all the variants of `FMOV`, `MOVI`, `MVNI`, thus eliminating
the need for special-case patterns to handle them.

Differential revision: https://reviews.llvm.org/D42133

llvm-svn: 326718
2018-03-05 17:02:47 +00:00

32 lines
985 B
LLVM

; RUN: llc < %s -mtriple=arm64-eabi -mcpu=generic -aarch64-neon-syntax=apple | FileCheck %s
; test0f: insert a scalar float into lane 0 of an (otherwise zero) constant
; vector and store it. The constant <undef, 0.0, 0.0, 0.0> should be
; materialized entirely in the vector register file with a single
; "movi.2d #0" (zeroing the whole q-register), rather than moving an
; immediate through a GPR and duplicating it. Lane 0 is then overwritten
; with %a via a lane-to-lane mov before the 128-bit store.
define void @test0f(float* nocapture %x, float %a) #0 {
entry:
%0 = insertelement <4 x float> <float undef, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %a, i32 0
%1 = bitcast float* %x to <4 x float>*
store <4 x float> %0, <4 x float>* %1, align 16
ret void
; CHECK-LABEL: test0f
; CHECK: movi.2d v[[TEMP:[0-9]+]], #0
; CHECK: mov.s v[[TEMP]][0], v{{[0-9]+}}[0]
; CHECK: str q[[TEMP]], [x0]
; CHECK: ret
}
; test1f: same pattern as test0f but with a non-zero constant splat.
; The constant <undef, 1.0, 1.0, 1.0> should be materialized with a single
; vector-immediate "fmov.4s #1.0" (1.0 is encodable as an FP8 immediate),
; avoiding a GPR-to-vector transfer. Lane 0 is then replaced with %a
; (arriving in v0 per the AAPCS64 FP argument convention) before the
; 128-bit store to %x.
define void @test1f(float* nocapture %x, float %a) #0 {
entry:
%0 = insertelement <4 x float> <float undef, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, float %a, i32 0
%1 = bitcast float* %x to <4 x float>*
store <4 x float> %0, <4 x float>* %1, align 16
ret void
; CHECK-LABEL: test1f
; CHECK: fmov.4s v[[TEMP:[0-9]+]], #1.0
; CHECK: mov.s v[[TEMP]][0], v0[0]
; CHECK: str q[[TEMP]], [x0]
; CHECK: ret
}