mirror of
https://github.com/RPCS3/llvm.git
synced 2024-12-13 23:18:58 +00:00
4c690f3954
This re-applies r268760, reverted in r268794. Fixes http://llvm.org/PR27670 The original imp-defs assertion was way overzealous: forward all implicit operands, except imp-defs of the new super-reg def (r268787 for GR64, but also possible for GR16->GR32), or imp-uses of the new super-reg use. While there, mark the source use as Undef, and add an imp-use of the old source reg: that should cover any case of dead super-regs. At the stage the pass runs, flags are unlikely to matter anyway; still, let's be as correct as possible. Also add MIR tests for the various interesting cases. Original commit message: Codesize is less (16) or equal (8), and we avoid partial dependencies. Differential Revision: http://reviews.llvm.org/D19999 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@268831 91177308-0d34-0410-b5e6-96231b3b80d8
71 lines
2.1 KiB
LLVM
; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
; Exercise the byte/word -> dword mov fixup (X86FixupBWInsts) in both its
; enabled (BWON) and disabled (BWOFF) states, on 64-bit and 32-bit targets.
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=1 -mtriple=x86_64-- < %s | FileCheck --check-prefix=X64 --check-prefix=BWON64 %s
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=0 -mtriple=x86_64-- < %s | FileCheck --check-prefix=X64 --check-prefix=BWOFF64 %s
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=1 -mtriple=i386-- < %s | FileCheck --check-prefix=X32 --check-prefix=BWON32 %s
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=0 -mtriple=i386-- < %s | FileCheck --check-prefix=X32 --check-prefix=BWOFF32 %s

target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
; With the fixup enabled (BWON), the 8-bit register-to-register movb is
; widened to movl to avoid a partial-register dependency (see commit note:
; codesize is equal for the 8-bit case, and the false dependency is removed).
define i8 @test_movb(i8 %a0) {
; BWON64-LABEL: test_movb:
; BWON64: # BB#0:
; BWON64-NEXT: movl %edi, %eax
; BWON64-NEXT: retq
;
; BWOFF64-LABEL: test_movb:
; BWOFF64: # BB#0:
; BWOFF64-NEXT: movb %dil, %al
; BWOFF64-NEXT: retq
;
; X32-LABEL: test_movb:
; X32: # BB#0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: retl
  ret i8 %a0
}
; With the fixup enabled (BWON), the 16-bit movw becomes movl (64-bit) or a
; movzwl load (32-bit): smaller or equal codesize, and no partial-register
; dependency from the 16-bit operand-size write.
define i16 @test_movw(i16 %a0) {
; BWON64-LABEL: test_movw:
; BWON64: # BB#0:
; BWON64-NEXT: movl %edi, %eax
; BWON64-NEXT: retq
;
; BWOFF64-LABEL: test_movw:
; BWOFF64: # BB#0:
; BWOFF64-NEXT: movw %di, %ax
; BWOFF64-NEXT: retq
;
; BWON32-LABEL: test_movw:
; BWON32: # BB#0:
; BWON32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; BWON32-NEXT: retl
;
; BWOFF32-LABEL: test_movw:
; BWOFF32: # BB#0:
; BWOFF32-NEXT: movw {{[0-9]+}}(%esp), %ax
; BWOFF32-NEXT: retl
  ret i16 %a0
}
; Verify we don't mess with H-reg copies (only generated in 32-bit mode).
; The 32-bit lowering uses %ah to grab the high byte of the argument; the
; fixup pass must leave that movb alone (widening an H-reg copy would be
; incorrect), so the X32 check lines are identical for BWON and BWOFF.
define i8 @test_movb_hreg(i16 %a0) {
; X64-LABEL: test_movb_hreg:
; X64: # BB#0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shrl $8, %eax
; X64-NEXT: addb %dil, %al
; X64-NEXT: retq
;
; X32-LABEL: test_movb_hreg:
; X32: # BB#0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addb %al, %ah
; X32-NEXT: movb %ah, %al
; X32-NEXT: retl
  %tmp0 = trunc i16 %a0 to i8
  %tmp1 = lshr i16 %a0, 8
  %tmp2 = trunc i16 %tmp1 to i8
  %tmp3 = add i8 %tmp0, %tmp2
  ret i8 %tmp3
}