mirror of
https://github.com/RPCS3/llvm.git
synced 2024-11-27 21:50:29 +00:00
71589ff672
Enable enableMultipleCopyHints() on X86. Original Patch by @jonpa: While enabling the mischeduler for SystemZ, it was discovered that for some reason a test needed one extra seemingly needless COPY (test/CodeGen/SystemZ/call-03.ll). The handling for that is resulted in this patch, which improves the register coalescing by providing not just one copy hint, but a sorted list of copy hints. On SystemZ, this gives ~12500 less register moves on SPEC, as well as marginally less spilling. Instead of improving just the SystemZ backend, the improvement has been implemented in common-code (calculateSpillWeightAndHint(). This gives a lot of test failures, but since this should be a general improvement I hope that the involved targets will help and review the test updates. Differential Revision: https://reviews.llvm.org/D38128 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@342578 91177308-0d34-0410-b5e6-96231b3b80d8
149 lines
3.8 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i386-unknown-unknown | FileCheck %s --check-prefix=X32
; i8 sext of a plain (non-zeroext) i1: only bit 0 of the incoming value is
; defined, so codegen must mask with `andb $1` before negating to produce
; 0 or -1.
define i8 @select_i8_neg1_or_0(i1 %a) {
; X64-LABEL: select_i8_neg1_or_0:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andb $1, %al
; X64-NEXT: negb %al
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X32-LABEL: select_i8_neg1_or_0:
; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $1, %al
; X32-NEXT: negb %al
; X32-NEXT: retl
%b = sext i1 %a to i8
ret i8 %b
}
; Same as above, but the `zeroext` attribute guarantees the upper bits are
; already zero, so the `andb $1` mask is omitted — only the negate remains.
define i8 @select_i8_neg1_or_0_zeroext(i1 zeroext %a) {
; X64-LABEL: select_i8_neg1_or_0_zeroext:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: negb %al
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X32-LABEL: select_i8_neg1_or_0_zeroext:
; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: negb %al
; X32-NEXT: retl
%b = sext i1 %a to i8
ret i8 %b
}
; i16 sext of a plain i1: mask and negate are done on the full 32-bit
; register, then the result is implicitly truncated to $ax (the `kill`
; comment marks the sub-register extraction).
define i16 @select_i16_neg1_or_0(i1 %a) {
; X64-LABEL: select_i16_neg1_or_0:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $1, %eax
; X64-NEXT: negl %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X32-LABEL: select_i16_neg1_or_0:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: negl %eax
; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
%b = sext i1 %a to i16
ret i16 %b
}
; i16 variant with `zeroext`: the `andl $1` mask is dropped since the input
; is already zero-extended by the caller.
define i16 @select_i16_neg1_or_0_zeroext(i1 zeroext %a) {
; X64-LABEL: select_i16_neg1_or_0_zeroext:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: negl %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X32-LABEL: select_i16_neg1_or_0_zeroext:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: negl %eax
; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
%b = sext i1 %a to i16
ret i16 %b
}
; i32 sext of a plain i1: mask to bit 0 then negate; no sub-register kill
; needed since the result is returned in the full 32-bit register.
define i32 @select_i32_neg1_or_0(i1 %a) {
; X64-LABEL: select_i32_neg1_or_0:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $1, %eax
; X64-NEXT: negl %eax
; X64-NEXT: retq
;
; X32-LABEL: select_i32_neg1_or_0:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: negl %eax
; X32-NEXT: retl
%b = sext i1 %a to i32
ret i32 %b
}
; i32 variant with `zeroext`: just a negate — the mask is unnecessary.
define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) {
; X64-LABEL: select_i32_neg1_or_0_zeroext:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: negl %eax
; X64-NEXT: retq
;
; X32-LABEL: select_i32_neg1_or_0_zeroext:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: negl %eax
; X32-NEXT: retl
%b = sext i1 %a to i32
ret i32 %b
}
; i64 sext of a plain i1. On X64 the 32-bit `andl` implicitly zeroes the
; upper half, so a single `negq` yields 0/-1. On X32 the i64 result is split
; across eax:edx; `movl %eax, %edx` duplicates the 0/-1 mask into the high
; half.
define i64 @select_i64_neg1_or_0(i1 %a) {
; X64-LABEL: select_i64_neg1_or_0:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $1, %eax
; X64-NEXT: negq %rax
; X64-NEXT: retq
;
; X32-LABEL: select_i64_neg1_or_0:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: negl %eax
; X32-NEXT: movl %eax, %edx
; X32-NEXT: retl
%b = sext i1 %a to i64
ret i64 %b
}
; i64 variant with `zeroext`: the mask is dropped; X32 still mirrors the
; low-half mask into edx for the high 32 bits of the return value.
define i64 @select_i64_neg1_or_0_zeroext(i1 zeroext %a) {
; X64-LABEL: select_i64_neg1_or_0_zeroext:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: negq %rax
; X64-NEXT: retq
;
; X32-LABEL: select_i64_neg1_or_0_zeroext:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: negl %eax
; X32-NEXT: movl %eax, %edx
; X32-NEXT: retl
%b = sext i1 %a to i64
ret i64 %b
}