llvm/test/CodeGen/PowerPC/srl-mask.ll
Hal Finkel eda8f6708d PPC: Optimize rldicl generation for masked shifts
Masking operations (where only some number of the low bits are kept) are
selected to rldicl(x, 0, mb). If x is itself a logical right shift (which would
become rldicl(y, 64-n, n)), we might be able to fold the two instructions together:

  rldicl(rldicl(y, 64-n, n), 0, mb) -> rldicl(y, 64-n, mb) for n <= mb

The right shift is really a left rotate followed by a mask, and if the explicit
mask is a more-restrictive sub-mask of the mask implied by the shift, only one
rldicl is needed.
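
As a concrete check of the identity (a minimal C sketch, not part of the patch;
the helpers rotl64 and rldicl below are local stand-ins for the hardware
semantics): the test case further down shifts right by n = 35 and keeps the low
16 bits, so mb = 48 and a single rldicl with sh = 64-35 = 29, mb = 48 suffices.

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Rotate a 64-bit value left by sh bits, sh in [0, 63]. */
  static uint64_t rotl64(uint64_t x, unsigned sh) {
    return sh ? (x << sh) | (x >> (64 - sh)) : x;
  }

  /* rldicl(x, sh, mb): rotate left by sh, then keep bits mb..63 in
     PowerPC (big-endian) bit numbering, i.e. the low 64-mb bits. */
  static uint64_t rldicl(uint64_t x, unsigned sh, unsigned mb) {
    uint64_t mask = mb ? (~(uint64_t)0 >> mb) : ~(uint64_t)0;
    return rotl64(x, sh) & mask;
  }

  int main(void) {
    uint64_t x = 0x123456789abcdef0ULL;
    /* Unfolded: lshr by 35 is rldicl(x, 29, 35); the and keeps 16 bits (mb = 48). */
    uint64_t two = rldicl(rldicl(x, 64 - 35, 35), 0, 48);
    /* Folded: a single rldicl, valid because n = 35 <= mb = 48. */
    uint64_t one = rldicl(x, 64 - 35, 48);
    assert(two == one);
    printf("0x%016llx\n", (unsigned long long)one);
    return 0;
  }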

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@195185 91177308-0d34-0410-b5e6-96231b3b80d8
2013-11-20 01:10:15 +00:00

; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
define i64 @foo(i64 %x) #0 {
entry:
; CHECK-LABEL: @foo
%a = lshr i64 %x, 35
%b = and i64 %a, 65535
; CHECK: rldicl 3, 3, 29, 48
ret i64 %b
; CHECK: blr
}
attributes #0 = { nounwind }