mirror of
https://github.com/RPCS3/llvm.git
synced 2024-12-13 14:47:00 +00:00
4c690f3954
This re-applies r268760, reverted in r268794. Fixes http://llvm.org/PR27670 The original imp-defs assertion was way overzealous: forward all implicit operands, except imp-defs of the new super-reg def (r268787 for GR64, but also possible for GR16->GR32), or imp-uses of the new super-reg use. While there, mark the source use as Undef, and add an imp-use of the old source reg: that should cover any case of dead super-regs. At the stage the pass runs, flags are unlikely to matter anyway; still, let's be as correct as possible. Also add MIR tests for the various interesting cases. Original commit message: Codesize is less (16) or equal (8), and we avoid partial dependencies. Differential Revision: http://reviews.llvm.org/D19999 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@268831 91177308-0d34-0410-b5e6-96231b3b80d8
68 lines
1.9 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
define i8 @mask8(i8 %x) {
; CHECK-LABEL: mask8:
; CHECK:       ## BB#0:
; CHECK-NEXT:    kmovb %edi, %k0
; CHECK-NEXT:    knotb %k0, %k0
; CHECK-NEXT:    kmovb %k0, %eax
; CHECK-NEXT:    retq
; NOT of an i8 viewed as an <8 x i1> mask; expected to lower to a single
; knotb on an AVX-512 k-register (kmovb in, knotb, kmovb out).
  %m0 = bitcast i8 %x to <8 x i1>
  %m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
  %ret = bitcast <8 x i1> %m1 to i8
  ret i8 %ret
}
define void @mask8_mem(i8* %ptr) {
; CHECK-LABEL: mask8_mem:
; CHECK:       ## BB#0:
; CHECK-NEXT:    kmovb (%rdi), %k0
; CHECK-NEXT:    knotb %k0, %k0
; CHECK-NEXT:    kmovb %k0, (%rdi)
; CHECK-NEXT:    retq
; Same mask-NOT as @mask8, but with a memory source and destination:
; expected to load/store the mask directly via kmovb without a GPR round-trip.
  %x = load i8, i8* %ptr, align 4
  %m0 = bitcast i8 %x to <8 x i1>
  %m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
  %ret = bitcast <8 x i1> %m1 to i8
  store i8 %ret, i8* %ptr, align 4
  ret void
}
define i8 @mand8(i8 %x, i8 %y) {
; CHECK-LABEL: mand8:
; CHECK:       ## BB#0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    xorl %esi, %eax
; CHECK-NEXT:    andl %esi, %edi
; CHECK-NEXT:    orl %eax, %edi
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
; (x & y) | (x ^ y) on i8 masks that arrive in GPRs: per the CHECK lines the
; logic stays in 32-bit GPR instructions (no kmov round-trip through k-regs).
  %ma = bitcast i8 %x to <8 x i1>
  %mb = bitcast i8 %y to <8 x i1>
  %mc = and <8 x i1> %ma, %mb
  %md = xor <8 x i1> %ma, %mb
  %me = or <8 x i1> %mc, %md
  %ret = bitcast <8 x i1> %me to i8
  ret i8 %ret
}
define i8 @mand8_mem(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-LABEL: mand8_mem:
; CHECK:       ## BB#0:
; CHECK-NEXT:    kmovb (%rdi), %k0
; CHECK-NEXT:    kmovb (%rsi), %k1
; CHECK-NEXT:    kandb %k1, %k0, %k2
; CHECK-NEXT:    kxorb %k1, %k0, %k0
; CHECK-NEXT:    korb %k0, %k2, %k0
; CHECK-NEXT:    kmovb %k0, %eax
; CHECK-NEXT:    retq
; Same (a & b) | (a ^ b) computation as @mand8, but with the masks loaded
; from memory: expected to use AVX-512 k-register ops (kandb/kxorb/korb).
  %ma = load <8 x i1>, <8 x i1>* %x
  %mb = load <8 x i1>, <8 x i1>* %y
  %mc = and <8 x i1> %ma, %mb
  %md = xor <8 x i1> %ma, %mb
  %me = or <8 x i1> %mc, %md
  %ret = bitcast <8 x i1> %me to i8
  ret i8 %ret
}