llvm/test/CodeGen/ARM/shift_minsize.ll
Sjoerd Meijer e690485141 [SelectionDAG] Codesize: don't expand SHIFT to SHIFT_PARTS
And instead just generate a libcall. My motivating example on ARM was a simple:
  
  shl i64 %A, %B

for which the code bloat is quite significant. For other targets that also
accept __int128/i128, such as AArch64 and X86, it is likewise beneficial to
generate a libcall for these cases when optimising for minsize. On those 64-bit
targets, the 64-bit shifts are of course unaffected because the SHIFT/SHIFT_PARTS
lowering operation action is not set to custom/expand.
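
As a minimal sketch of that motivating case (the function name @shl64 is
illustrative, not taken from the patch), the IR looks like:

  define i64 @shl64(i64 %A, i64 %B) minsize optsize {
    %r = shl i64 %A, %B      ; at minsize this now lowers to a libcall
    ret i64 %r
  }

With this change, llc for arm-eabi lowers the shift to a call to the ARM EABI
runtime helper __aeabi_llsl rather than emitting the inline shift-parts
sequence; the tests below check this for shl, ashr, and lshr.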

Differential Revision: https://reviews.llvm.org/D57386



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@352736 91177308-0d34-0410-b5e6-96231b3b80d8
2019-01-31 08:07:30 +00:00


; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
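
; f0: at minsize, a variable 64-bit shift left should lower to a call to the
; __aeabi_llsl runtime helper rather than an inline shift-parts expansion.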
define i64 @f0(i64 %val, i64 %amt) minsize optsize {
; CHECK-LABEL: f0:
; CHECK: bl __aeabi_llsl
  %res = shl i64 %val, %amt
  ret i64 %res
}
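
; f1: the same shift with the result truncated to i32; the libcall should
; still be used.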
define i32 @f1(i64 %x, i64 %y) minsize optsize {
; CHECK-LABEL: f1:
; CHECK: bl __aeabi_llsl
  %a = shl i64 %x, %y
  %b = trunc i64 %a to i32
  ret i32 %b
}
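
; f2: a variable 64-bit arithmetic shift right should lower to __aeabi_lasr.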
define i32 @f2(i64 %x, i64 %y) minsize optsize {
; CHECK-LABEL: f2:
; CHECK: bl __aeabi_lasr
  %a = ashr i64 %x, %y
  %b = trunc i64 %a to i32
  ret i32 %b
}
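
; f3: a variable 64-bit logical shift right should lower to __aeabi_llsr.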
define i32 @f3(i64 %x, i64 %y) minsize optsize {
; CHECK-LABEL: f3:
; CHECK: bl __aeabi_llsr
  %a = lshr i64 %x, %y
  %b = trunc i64 %a to i32
  ret i32 %b
}