llvm/test/CodeGen/RISCV/mul.ll
Alex Bradbury 1efb7b6d36 [RISCV] Introduce codegen patterns for RV64M-only instructions
As discussed on llvm-dev
<http://lists.llvm.org/pipermail/llvm-dev/2018-December/128497.html>, we have
to be careful when trying to select the *w RV64M instructions. i32 is not a
legal type for RV64 in the RISC-V backend, so operations have been promoted by
the time they reach instruction selection. Information about whether the
operation was originally a 32-bit operation has been lost, and it's easy to
write incorrect patterns.

As with the variable 32-bit shifts, a DAG combine on ANY_EXTEND will produce
a SIGN_EXTEND if this is likely to result in sdiv/udiv/urem being selected
(and so save the instructions needed to sext/zext the input operands).

Differential Revision: https://reviews.llvm.org/D53230


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@350993 91177308-0d34-0410-b5e6-96231b3b80d8
2019-01-12 07:43:06 +00:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IM %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IM %s
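
; These tests cover multiplication lowering on RV32 and RV64, with and without
; the M extension. Without M, multiplies are lowered to __mulsi3/__muldi3
; libcalls; with M, hardware multiply instructions are selected, and i32
; multiplies on RV64IM should select mulw.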
define signext i32 @square(i32 %a) nounwind {
; RV32I-LABEL: square:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp)
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: square:
; RV32IM: # %bb.0:
; RV32IM-NEXT: mul a0, a0, a0
; RV32IM-NEXT: ret
;
; RV64I-LABEL: square:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: square:
; RV64IM: # %bb.0:
; RV64IM-NEXT: mulw a0, a0, a0
; RV64IM-NEXT: ret
%1 = mul i32 %a, %a
ret i32 %1
}
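
; As @square, but multiplying two distinct arguments.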
define signext i32 @mul(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: mul:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp)
; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mul:
; RV32IM: # %bb.0:
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mul:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mul:
; RV64IM: # %bb.0:
; RV64IM-NEXT: mulw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, %b
ret i32 %1
}
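
; Multiplication by a small constant: the constant is materialized with addi
; and the multiply still selects mul (RV32IM) or mulw (RV64IM).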
define signext i32 @mul_constant(i32 %a) nounwind {
; RV32I-LABEL: mul_constant:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp)
; RV32I-NEXT: addi a1, zero, 5
; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mul_constant:
; RV32IM: # %bb.0:
; RV32IM-NEXT: addi a1, zero, 5
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mul_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a1, zero, 5
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mul_constant:
; RV64IM: # %bb.0:
; RV64IM-NEXT: addi a1, zero, 5
; RV64IM-NEXT: mulw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 5
ret i32 %1
}
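
; Multiplication by a power of two is strength-reduced to a left shift, so no
; libcall or M-extension instruction is needed in any configuration.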
define i32 @mul_pow2(i32 %a) nounwind {
; RV32I-LABEL: mul_pow2:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 3
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mul_pow2:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a0, a0, 3
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mul_pow2:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 3
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mul_pow2:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a0, a0, 3
; RV64IM-NEXT: ret
%1 = mul i32 %a, 8
ret i32 %1
}
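
; A full i64 multiply. RV32I and RV64I call __muldi3; RV32IM expands it to a
; mul/mulhu/add sequence over the 32-bit halves, and RV64IM uses a single mul.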
define i64 @mul64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: mul64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp)
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mul64:
; RV32IM: # %bb.0:
; RV32IM-NEXT: mul a3, a0, a3
; RV32IM-NEXT: mulhu a4, a0, a2
; RV32IM-NEXT: add a3, a4, a3
; RV32IM-NEXT: mul a1, a1, a2
; RV32IM-NEXT: add a1, a3, a1
; RV32IM-NEXT: mul a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mul64:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mul64:
; RV64IM: # %bb.0:
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i64 %a, %b
ret i64 %1
}
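
; An i64 multiply by a constant, expanded on RV32IM the same way as @mul64.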
define i64 @mul64_constant(i64 %a) nounwind {
; RV32I-LABEL: mul64_constant:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp)
; RV32I-NEXT: addi a2, zero, 5
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mul64_constant:
; RV32IM: # %bb.0:
; RV32IM-NEXT: addi a2, zero, 5
; RV32IM-NEXT: mul a1, a1, a2
; RV32IM-NEXT: mulhu a3, a0, a2
; RV32IM-NEXT: add a1, a3, a1
; RV32IM-NEXT: mul a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mul64_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a1, zero, 5
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mul64_constant:
; RV64IM: # %bb.0:
; RV64IM-NEXT: addi a1, zero, 5
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i64 %a, 5
ret i64 %1
}
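
; The upper 32 bits of the signed 64-bit product of two sign-extended i32
; values. RV32IM selects mulh directly; RV64IM sign-extends the operands and
; uses a 64-bit mul followed by a logical shift right.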
define i32 @mulhs(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: mulhs:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp)
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: srai a3, a2, 31
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mulhs:
; RV32IM: # %bb.0:
; RV32IM-NEXT: mulh a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulhs:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulhs:
; RV64IM: # %bb.0:
; RV64IM-NEXT: sext.w a1, a1
; RV64IM-NEXT: sext.w a0, a0
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
%1 = sext i32 %a to i64
%2 = sext i32 %b to i64
%3 = mul i64 %1, %2
%4 = lshr i64 %3, 32
%5 = trunc i64 %4 to i32
ret i32 %5
}
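
; The upper 32 bits of the unsigned 64-bit product of two zero-extended i32
; values. RV32IM selects mulhu directly; RV64IM uses a 64-bit mul followed by
; a logical shift right, relying on the zeroext argument attributes.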
define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
; RV32I-LABEL: mulhu:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp)
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: mv a1, zero
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: lw ra, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mulhu:
; RV32IM: # %bb.0:
; RV32IM-NEXT: mulhu a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulhu:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulhu:
; RV64IM: # %bb.0:
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
%1 = zext i32 %a to i64
%2 = zext i32 %b to i64
%3 = mul i64 %1, %2
%4 = lshr i64 %3, 32
%5 = trunc i64 %4 to i32
ret i32 %5
}