2007-10-12 21:30:57 +00:00
|
|
|
//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
|
2009-07-31 20:07:27 +00:00
|
|
|
//
|
2006-02-21 19:13:53 +00:00
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-29 20:36:04 +00:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2009-07-31 20:07:27 +00:00
|
|
|
//
|
2006-02-21 19:13:53 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file describes the X86 SSE instruction set, defining the instructions,
|
|
|
|
// and properties of the instructions which are needed for code generation,
|
|
|
|
// machine code emission, and analysis.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2006-10-07 21:55:32 +00:00
|
|
|
|
2006-03-18 01:23:20 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SSE specific DAG Nodes.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2007-01-05 07:55:56 +00:00
|
|
|
def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
|
|
|
|
SDTCisFP<0>, SDTCisInt<2> ]>;
|
2008-07-17 16:51:19 +00:00
|
|
|
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
|
|
|
|
SDTCisFP<1>, SDTCisVT<3, i8>]>;
|
2007-01-05 07:55:56 +00:00
|
|
|
|
2006-11-10 21:43:37 +00:00
|
|
|
def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
|
|
|
|
def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
|
2006-04-05 23:38:46 +00:00
|
|
|
def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
|
2006-03-22 02:53:00 +00:00
|
|
|
[SDNPCommutative, SDNPAssociative]>;
|
2007-01-05 07:55:56 +00:00
|
|
|
def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
|
|
|
|
[SDNPCommutative, SDNPAssociative]>;
|
2006-04-05 23:38:46 +00:00
|
|
|
def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
|
2006-03-22 02:53:00 +00:00
|
|
|
[SDNPCommutative, SDNPAssociative]>;
|
2007-07-10 00:05:58 +00:00
|
|
|
def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
|
|
|
|
def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
|
2007-01-05 07:55:56 +00:00
|
|
|
def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
|
2007-10-01 18:12:48 +00:00
|
|
|
def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
|
2007-09-29 00:00:36 +00:00
|
|
|
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
|
2009-07-31 20:07:27 +00:00
|
|
|
def X86pshufb : SDNode<"X86ISD::PSHUFB",
|
Generate better code for v8i16 shuffles on SSE2
Generate better code for v16i8 shuffles on SSE2 (avoids stack)
Generate pshufb for v8i16 and v16i8 shuffles on SSSE3 where it is fewer uops.
Document the shuffle matching logic and add some FIXMEs for later further
cleanups.
New tests that test the above.
Examples:
New:
_shuf2:
pextrw $7, %xmm0, %eax
punpcklqdq %xmm1, %xmm0
pshuflw $128, %xmm0, %xmm0
pinsrw $2, %eax, %xmm0
Old:
_shuf2:
pextrw $2, %xmm0, %eax
pextrw $7, %xmm0, %ecx
pinsrw $2, %ecx, %xmm0
pinsrw $3, %eax, %xmm0
movd %xmm1, %eax
pinsrw $4, %eax, %xmm0
ret
=========
New:
_shuf4:
punpcklqdq %xmm1, %xmm0
pshufb LCPI1_0, %xmm0
Old:
_shuf4:
pextrw $3, %xmm0, %eax
movsd %xmm1, %xmm0
pextrw $3, %xmm1, %ecx
pinsrw $4, %ecx, %xmm0
pinsrw $5, %eax, %xmm0
========
New:
_shuf1:
pushl %ebx
pushl %edi
pushl %esi
pextrw $1, %xmm0, %eax
rolw $8, %ax
movd %xmm0, %ecx
rolw $8, %cx
pextrw $5, %xmm0, %edx
pextrw $4, %xmm0, %esi
pextrw $3, %xmm0, %edi
pextrw $2, %xmm0, %ebx
movaps %xmm0, %xmm1
pinsrw $0, %ecx, %xmm1
pinsrw $1, %eax, %xmm1
rolw $8, %bx
pinsrw $2, %ebx, %xmm1
rolw $8, %di
pinsrw $3, %edi, %xmm1
rolw $8, %si
pinsrw $4, %esi, %xmm1
rolw $8, %dx
pinsrw $5, %edx, %xmm1
pextrw $7, %xmm0, %eax
rolw $8, %ax
movaps %xmm1, %xmm0
pinsrw $7, %eax, %xmm0
popl %esi
popl %edi
popl %ebx
ret
Old:
_shuf1:
subl $252, %esp
movaps %xmm0, (%esp)
movaps %xmm0, 16(%esp)
movaps %xmm0, 32(%esp)
movaps %xmm0, 48(%esp)
movaps %xmm0, 64(%esp)
movaps %xmm0, 80(%esp)
movaps %xmm0, 96(%esp)
movaps %xmm0, 224(%esp)
movaps %xmm0, 208(%esp)
movaps %xmm0, 192(%esp)
movaps %xmm0, 176(%esp)
movaps %xmm0, 160(%esp)
movaps %xmm0, 144(%esp)
movaps %xmm0, 128(%esp)
movaps %xmm0, 112(%esp)
movzbl 14(%esp), %eax
movd %eax, %xmm1
movzbl 22(%esp), %eax
movd %eax, %xmm2
punpcklbw %xmm1, %xmm2
movzbl 42(%esp), %eax
movd %eax, %xmm1
movzbl 50(%esp), %eax
movd %eax, %xmm3
punpcklbw %xmm1, %xmm3
punpcklbw %xmm2, %xmm3
movzbl 77(%esp), %eax
movd %eax, %xmm1
movzbl 84(%esp), %eax
movd %eax, %xmm2
punpcklbw %xmm1, %xmm2
movzbl 104(%esp), %eax
movd %eax, %xmm1
punpcklbw %xmm1, %xmm0
punpcklbw %xmm2, %xmm0
movaps %xmm0, %xmm1
punpcklbw %xmm3, %xmm1
movzbl 127(%esp), %eax
movd %eax, %xmm0
movzbl 135(%esp), %eax
movd %eax, %xmm2
punpcklbw %xmm0, %xmm2
movzbl 155(%esp), %eax
movd %eax, %xmm0
movzbl 163(%esp), %eax
movd %eax, %xmm3
punpcklbw %xmm0, %xmm3
punpcklbw %xmm2, %xmm3
movzbl 188(%esp), %eax
movd %eax, %xmm0
movzbl 197(%esp), %eax
movd %eax, %xmm2
punpcklbw %xmm0, %xmm2
movzbl 217(%esp), %eax
movd %eax, %xmm4
movzbl 225(%esp), %eax
movd %eax, %xmm0
punpcklbw %xmm4, %xmm0
punpcklbw %xmm2, %xmm0
punpcklbw %xmm3, %xmm0
punpcklbw %xmm1, %xmm0
addl $252, %esp
ret
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@65311 91177308-0d34-0410-b5e6-96231b3b80d8
2009-02-23 08:49:38 +00:00
|
|
|
SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
|
|
|
|
SDTCisSameAs<0,2>]>>;
|
2008-02-11 04:19:36 +00:00
|
|
|
def X86pextrb : SDNode<"X86ISD::PEXTRB",
|
|
|
|
SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
|
|
|
|
def X86pextrw : SDNode<"X86ISD::PEXTRW",
|
|
|
|
SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
|
2009-07-31 20:07:27 +00:00
|
|
|
def X86pinsrb : SDNode<"X86ISD::PINSRB",
|
2008-02-11 04:19:36 +00:00
|
|
|
SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
|
|
|
|
SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
|
2009-07-31 20:07:27 +00:00
|
|
|
def X86pinsrw : SDNode<"X86ISD::PINSRW",
|
2008-02-11 04:19:36 +00:00
|
|
|
SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
|
|
|
|
SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
|
2009-07-31 20:07:27 +00:00
|
|
|
def X86insrtps : SDNode<"X86ISD::INSERTPS",
|
2008-02-11 04:19:36 +00:00
|
|
|
SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
|
2009-07-24 00:33:09 +00:00
|
|
|
SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
|
2008-05-09 21:53:03 +00:00
|
|
|
def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
|
|
|
|
SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
|
|
|
|
def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
|
|
|
|
[SDNPHasChain, SDNPMayLoad]>;
|
2008-05-29 08:22:04 +00:00
|
|
|
def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
|
|
|
|
def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
|
2008-07-17 16:51:19 +00:00
|
|
|
def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
|
|
|
|
def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
|
|
|
|
def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
|
|
|
|
def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
|
|
|
|
def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
|
|
|
|
def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
|
|
|
|
def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
|
|
|
|
def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
|
|
|
|
def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
|
|
|
|
def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
|
2006-03-25 09:37:23 +00:00
|
|
|
|
2010-03-28 05:07:17 +00:00
|
|
|
def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
|
|
|
|
SDTCisVT<1, v4f32>,
|
|
|
|
SDTCisVT<2, v4f32>]>;
|
2009-07-29 00:28:05 +00:00
|
|
|
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
|
|
|
|
|
2006-10-07 21:55:32 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SSE Complex Patterns
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// These are 'extloads' from a scalar to the low element of a vector, zeroing
|
|
|
|
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
|
|
|
|
// forms.
|
2009-04-08 21:14:34 +00:00
|
|
|
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
|
2008-01-10 07:59:24 +00:00
|
|
|
[SDNPHasChain, SDNPMayLoad]>;
|
2009-04-08 21:14:34 +00:00
|
|
|
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
|
2008-01-10 07:59:24 +00:00
|
|
|
[SDNPHasChain, SDNPMayLoad]>;
|
2006-10-07 21:55:32 +00:00
|
|
|
|
|
|
|
def ssmem : Operand<v4f32> {
|
|
|
|
let PrintMethod = "printf32mem";
|
2009-07-30 01:56:29 +00:00
|
|
|
let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
|
2009-08-10 18:41:10 +00:00
|
|
|
let ParserMatchClass = X86MemAsmOperand;
|
2006-10-07 21:55:32 +00:00
|
|
|
}
|
|
|
|
def sdmem : Operand<v2f64> {
|
|
|
|
let PrintMethod = "printf64mem";
|
2009-07-30 01:56:29 +00:00
|
|
|
let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
|
2009-08-10 18:41:10 +00:00
|
|
|
let ParserMatchClass = X86MemAsmOperand;
|
2006-10-07 21:55:32 +00:00
|
|
|
}
|
|
|
|
|
2006-02-21 20:00:20 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2006-03-17 19:55:52 +00:00
|
|
|
// SSE pattern fragments
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2006-03-18 01:23:20 +00:00
|
|
|
def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
|
|
|
|
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
|
2007-06-25 15:19:03 +00:00
|
|
|
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
|
2006-03-23 07:44:07 +00:00
|
|
|
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
|
2006-03-17 19:55:52 +00:00
|
|
|
|
2007-07-27 17:16:43 +00:00
|
|
|
// Like 'store', but always requires vector alignment.
|
2007-07-18 20:23:34 +00:00
|
|
|
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
|
2008-10-15 06:50:19 +00:00
|
|
|
(store node:$val, node:$ptr), [{
|
|
|
|
return cast<StoreSDNode>(N)->getAlignment() >= 16;
|
2007-07-18 20:23:34 +00:00
|
|
|
}]>;
|
|
|
|
|
2007-07-27 17:16:43 +00:00
|
|
|
// Like 'load', but always requires vector alignment.
|
2008-10-15 06:50:19 +00:00
|
|
|
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
|
|
|
|
return cast<LoadSDNode>(N)->getAlignment() >= 16;
|
2007-07-18 20:23:34 +00:00
|
|
|
}]>;
|
|
|
|
|
2010-05-25 17:33:22 +00:00
|
|
|
def alignedloadfsf32 : PatFrag<(ops node:$ptr),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(f32 (alignedload node:$ptr))>;
|
2010-05-25 17:33:22 +00:00
|
|
|
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(f64 (alignedload node:$ptr))>;
|
2010-05-25 17:33:22 +00:00
|
|
|
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(v4f32 (alignedload node:$ptr))>;
|
2010-05-25 17:33:22 +00:00
|
|
|
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(v2f64 (alignedload node:$ptr))>;
|
2010-05-25 17:33:22 +00:00
|
|
|
def alignedloadv4i32 : PatFrag<(ops node:$ptr),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(v4i32 (alignedload node:$ptr))>;
|
2010-05-25 17:33:22 +00:00
|
|
|
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(v2i64 (alignedload node:$ptr))>;
|
2007-07-18 20:23:34 +00:00
|
|
|
|
|
|
|
// Like 'load', but uses special alignment checks suitable for use in
|
|
|
|
// memory operands in most SSE instructions, which are required to
|
2010-01-11 16:29:42 +00:00
|
|
|
// be naturally aligned on some targets but not on others. If the subtarget
|
|
|
|
// allows unaligned accesses, match any load, though this may require
|
|
|
|
// setting a feature bit in the processor (on startup, for example).
|
|
|
|
// Opteron 10h and later implement such a feature.
|
2008-10-15 06:50:19 +00:00
|
|
|
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
|
2010-01-11 16:29:42 +00:00
|
|
|
return Subtarget->hasVectorUAMem()
|
|
|
|
|| cast<LoadSDNode>(N)->getAlignment() >= 16;
|
2007-07-18 20:23:34 +00:00
|
|
|
}]>;
|
|
|
|
|
2007-07-27 17:16:43 +00:00
|
|
|
def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
|
|
|
|
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
|
2007-07-18 20:23:34 +00:00
|
|
|
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
|
|
|
|
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
|
|
|
|
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
|
|
|
|
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
|
2008-02-09 23:46:37 +00:00
|
|
|
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
|
2007-07-18 20:23:34 +00:00
|
|
|
|
2007-08-11 09:52:53 +00:00
|
|
|
// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
|
|
|
|
// 16-byte boundary.
|
2008-02-09 23:46:37 +00:00
|
|
|
// FIXME: 8 byte alignment for mmx reads is not required
|
2008-10-16 00:03:00 +00:00
|
|
|
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
|
2008-10-15 06:50:19 +00:00
|
|
|
return cast<LoadSDNode>(N)->getAlignment() >= 8;
|
2007-08-11 09:52:53 +00:00
|
|
|
}]>;
|
|
|
|
|
|
|
|
def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
|
|
|
|
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
|
|
|
|
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
|
|
|
|
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
|
|
|
|
|
2010-02-16 20:50:18 +00:00
|
|
|
// MOVNT Support
|
|
|
|
// Like 'store', but requires the non-temporal bit to be set
|
|
|
|
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
|
|
|
|
(st node:$val, node:$ptr), [{
|
|
|
|
if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
|
|
|
|
return ST->isNonTemporal();
|
|
|
|
return false;
|
|
|
|
}]>;
|
|
|
|
|
|
|
|
def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
|
|
|
|
(st node:$val, node:$ptr), [{
|
|
|
|
if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
|
|
|
|
return ST->isNonTemporal() && !ST->isTruncatingStore() &&
|
|
|
|
ST->getAddressingMode() == ISD::UNINDEXED &&
|
|
|
|
ST->getAlignment() >= 16;
|
|
|
|
return false;
|
|
|
|
}]>;
|
|
|
|
|
|
|
|
def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
|
|
|
|
(st node:$val, node:$ptr), [{
|
|
|
|
if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
|
|
|
|
return ST->isNonTemporal() &&
|
|
|
|
ST->getAlignment() < 16;
|
|
|
|
return false;
|
|
|
|
}]>;
|
|
|
|
|
2006-03-30 07:33:32 +00:00
|
|
|
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
|
|
|
|
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
|
2006-03-29 23:07:14 +00:00
|
|
|
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
|
|
|
|
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
|
2006-03-29 18:47:40 +00:00
|
|
|
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
|
|
|
|
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
|
|
|
|
|
2008-09-24 23:27:55 +00:00
|
|
|
def vzmovl_v2i64 : PatFrag<(ops node:$src),
|
|
|
|
(bitconvert (v2i64 (X86vzmovl
|
|
|
|
(v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
|
|
|
|
def vzmovl_v4i32 : PatFrag<(ops node:$src),
|
|
|
|
(bitconvert (v4i32 (X86vzmovl
|
|
|
|
(v4i32 (scalar_to_vector (loadi32 node:$src))))))>;
|
|
|
|
|
|
|
|
def vzload_v2i64 : PatFrag<(ops node:$src),
|
|
|
|
(bitconvert (v2i64 (X86vzload node:$src)))>;
|
|
|
|
|
|
|
|
|
2006-03-24 07:29:27 +00:00
|
|
|
def fp32imm0 : PatLeaf<(f32 fpimm), [{
|
|
|
|
return N->isExactlyValue(+0.0);
|
|
|
|
}]>;
|
|
|
|
|
2009-10-28 06:30:34 +00:00
|
|
|
// BYTE_imm - Transform bit immediates into byte immediates.
|
|
|
|
def BYTE_imm : SDNodeXForm<imm, [{
|
2006-04-04 21:49:39 +00:00
|
|
|
// Transformation function: imm >> 3
|
2008-09-12 16:56:44 +00:00
|
|
|
return getI32Imm(N->getZExtValue() >> 3);
|
2006-04-04 21:49:39 +00:00
|
|
|
}]>;
|
|
|
|
|
2006-03-22 08:01:21 +00:00
|
|
|
// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
|
|
|
|
// SHUFP* etc. imm.
|
2009-04-27 18:41:29 +00:00
|
|
|
def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
|
2006-03-22 08:01:21 +00:00
|
|
|
return getI8Imm(X86::getShuffleSHUFImmediate(N));
|
2006-03-22 02:53:00 +00:00
|
|
|
}]>;
|
|
|
|
|
2009-07-31 20:07:27 +00:00
|
|
|
// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
|
2006-03-29 23:07:14 +00:00
|
|
|
// PSHUFHW imm.
|
2009-04-27 18:41:29 +00:00
|
|
|
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
|
2006-03-29 23:07:14 +00:00
|
|
|
return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
|
|
|
|
}]>;
|
|
|
|
|
2009-07-31 20:07:27 +00:00
|
|
|
// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
|
2006-03-29 23:07:14 +00:00
|
|
|
// PSHUFLW imm.
|
2009-04-27 18:41:29 +00:00
|
|
|
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
|
2006-03-29 23:07:14 +00:00
|
|
|
return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
|
|
|
|
}]>;
|
|
|
|
|
2009-10-19 02:17:23 +00:00
|
|
|
// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
|
|
|
|
// a PALIGNR imm.
|
|
|
|
def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
|
|
|
|
return getI8Imm(X86::getShufflePALIGNRImmediate(N));
|
|
|
|
}]>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
|
|
|
|
return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
|
2008-09-25 20:50:48 +00:00
|
|
|
}]>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def movddup : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
|
2006-03-28 02:43:26 +00:00
|
|
|
}]>;
|
2006-03-24 02:58:06 +00:00
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
|
Fixed a bug which causes x86 be to incorrectly match
shuffle v, undef, <2, ?, 3, ?>
to movhlps
It should match to unpckhps instead.
Added proper matching code for
shuffle v, undef, <2, 3, 2, 3>
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@31519 91177308-0d34-0410-b5e6-96231b3b80d8
2006-11-07 22:14:24 +00:00
|
|
|
}]>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
|
2006-04-06 23:23:56 +00:00
|
|
|
}]>;
|
|
|
|
|
2009-11-07 23:17:15 +00:00
|
|
|
def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
|
2006-04-06 23:23:56 +00:00
|
|
|
}]>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def movlp : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
|
2006-04-11 00:19:04 +00:00
|
|
|
}]>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def movl : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
|
2006-04-14 21:59:03 +00:00
|
|
|
}]>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
|
2006-04-14 21:59:03 +00:00
|
|
|
}]>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
|
2006-03-28 00:39:58 +00:00
|
|
|
}]>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
|
2006-03-28 02:43:26 +00:00
|
|
|
}]>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
|
Handle canonical form of e.g.
vector_shuffle v1, v1, <0, 4, 1, 5, 2, 6, 3, 7>
This is turned into
vector_shuffle v1, <undef>, <0, 0, 1, 1, 2, 2, 3, 3>
by dag combiner.
It would match a {p}unpckl on x86.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27437 91177308-0d34-0410-b5e6-96231b3b80d8
2006-04-05 07:20:06 +00:00
|
|
|
}]>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
|
2007-05-17 18:44:37 +00:00
|
|
|
}]>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
|
2009-04-24 12:40:33 +00:00
|
|
|
}]>;
|
2006-03-22 18:59:22 +00:00
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
|
2006-03-30 19:54:57 +00:00
|
|
|
}], SHUFFLE_get_shuf_imm>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def shufp : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
|
2009-04-24 12:40:33 +00:00
|
|
|
}], SHUFFLE_get_shuf_imm>;
|
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
|
|
|
|
}], SHUFFLE_get_pshufhw_imm>;
|
2009-04-24 12:40:33 +00:00
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
|
|
|
|
}], SHUFFLE_get_pshuflw_imm>;
|
2009-04-24 12:40:33 +00:00
|
|
|
|
2009-10-19 02:17:23 +00:00
|
|
|
def palign : PatFrag<(ops node:$lhs, node:$rhs),
|
|
|
|
(vector_shuffle node:$lhs, node:$rhs), [{
|
|
|
|
return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
|
|
|
|
}], SHUFFLE_get_palign_imm>;
|
|
|
|
|
2006-03-17 19:55:52 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2006-02-21 20:00:20 +00:00
|
|
|
// SSE scalar FP Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
2006-02-21 19:13:53 +00:00
|
|
|
|
2009-10-29 18:10:34 +00:00
|
|
|
// CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after
|
|
|
|
// instruction selection into a branch sequence.
|
|
|
|
let Uses = [EFLAGS], usesCustomInserter = 1 in {
|
2006-02-21 20:00:20 +00:00
|
|
|
def CMOV_FR32 : I<0, Pseudo,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
|
2006-02-21 20:00:20 +00:00
|
|
|
"#CMOV_FR32 PSEUDO!",
|
2007-09-29 00:00:36 +00:00
|
|
|
[(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
|
2007-09-25 01:57:46 +00:00
|
|
|
EFLAGS))]>;
|
2007-09-29 00:00:36 +00:00
|
|
|
def CMOV_FR64 : I<0, Pseudo,
|
2007-09-25 01:57:46 +00:00
|
|
|
(outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
|
|
|
|
"#CMOV_FR64 PSEUDO!",
|
2007-09-29 00:00:36 +00:00
|
|
|
[(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
|
2007-09-25 01:57:46 +00:00
|
|
|
EFLAGS))]>;
|
2007-09-29 00:00:36 +00:00
|
|
|
def CMOV_V4F32 : I<0, Pseudo,
|
2007-09-25 01:57:46 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
|
|
|
|
"#CMOV_V4F32 PSEUDO!",
|
|
|
|
[(set VR128:$dst,
|
2007-09-29 00:00:36 +00:00
|
|
|
(v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
|
2007-09-25 01:57:46 +00:00
|
|
|
EFLAGS)))]>;
|
2007-09-29 00:00:36 +00:00
|
|
|
def CMOV_V2F64 : I<0, Pseudo,
|
2007-09-25 01:57:46 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
|
|
|
|
"#CMOV_V2F64 PSEUDO!",
|
|
|
|
[(set VR128:$dst,
|
2007-09-29 00:00:36 +00:00
|
|
|
(v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
|
2007-09-25 01:57:46 +00:00
|
|
|
EFLAGS)))]>;
|
2007-09-29 00:00:36 +00:00
|
|
|
def CMOV_V2I64 : I<0, Pseudo,
|
2007-09-25 01:57:46 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
|
|
|
|
"#CMOV_V2I64 PSEUDO!",
|
|
|
|
[(set VR128:$dst,
|
2007-09-29 00:00:36 +00:00
|
|
|
(v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
|
2007-09-25 01:57:46 +00:00
|
|
|
EFLAGS)))]>;
|
2006-02-21 19:13:53 +00:00
|
|
|
}
|
2006-02-21 19:26:52 +00:00
|
|
|
|
2010-06-19 01:32:46 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SSE 1 & 2 Instructions Classes
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
|
|
|
|
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
2010-06-21 21:28:07 +00:00
|
|
|
RegisterClass RC, X86MemOperand x86memop> {
|
2010-06-19 01:32:46 +00:00
|
|
|
let isCommutable = 1 in {
|
|
|
|
def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
|
|
|
|
OpcodeStr, [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
|
|
|
|
}
|
2010-06-21 21:28:07 +00:00
|
|
|
def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
|
2010-06-19 01:32:46 +00:00
|
|
|
OpcodeStr, [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
|
|
|
|
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
|
|
|
|
string asm, string SSEVer, string FPSizeStr,
|
2010-06-21 21:28:07 +00:00
|
|
|
Operand memopr, ComplexPattern mem_cpat> {
|
2010-06-19 01:32:46 +00:00
|
|
|
def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
|
|
|
|
asm, [(set RC:$dst, (
|
|
|
|
!nameconcat<Intrinsic>("int_x86_sse",
|
|
|
|
!strconcat(SSEVer, !strconcat("_",
|
|
|
|
!strconcat(OpcodeStr, FPSizeStr))))
|
|
|
|
RC:$src1, RC:$src2))]>;
|
2010-06-21 21:28:07 +00:00
|
|
|
def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
|
2010-06-19 01:32:46 +00:00
|
|
|
asm, [(set RC:$dst, (
|
|
|
|
!nameconcat<Intrinsic>("int_x86_sse",
|
|
|
|
!strconcat(SSEVer, !strconcat("_",
|
|
|
|
!strconcat(OpcodeStr, FPSizeStr))))
|
|
|
|
RC:$src1, mem_cpat:$src2))]>;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// sse12_fp_packed - SSE 1 & 2 packed instructions class
|
|
|
|
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
RegisterClass RC, ValueType vt,
|
|
|
|
X86MemOperand x86memop, PatFrag mem_frag,
|
2010-06-19 02:44:01 +00:00
|
|
|
Domain d, bit MayLoad = 0> {
|
2010-06-19 01:32:46 +00:00
|
|
|
let isCommutable = 1 in
|
|
|
|
def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
|
|
|
|
OpcodeStr, [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))],d>;
|
2010-06-19 02:44:01 +00:00
|
|
|
let mayLoad = MayLoad in
|
|
|
|
def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
|
|
|
|
OpcodeStr, [(set RC:$dst, (OpNode RC:$src1,
|
|
|
|
(mem_frag addr:$src2)))],d>;
|
2010-06-19 01:32:46 +00:00
|
|
|
}
|
|
|
|
|
2010-06-19 04:09:22 +00:00
|
|
|
/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
|
|
|
|
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
|
|
|
|
string OpcodeStr, X86MemOperand x86memop,
|
|
|
|
list<dag> pat_rr, list<dag> pat_rm> {
|
|
|
|
let isCommutable = 1 in
|
|
|
|
def rr : PI<opc, MRMSrcReg, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, RC:$src2), OpcodeStr, pat_rr, d>;
|
|
|
|
def rm : PI<opc, MRMSrcMem, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, x86memop:$src2), OpcodeStr, pat_rm, d>;
|
|
|
|
}
|
|
|
|
|
2010-06-19 01:32:46 +00:00
|
|
|
/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
|
|
|
|
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
|
|
|
|
string asm, string SSEVer, string FPSizeStr,
|
2010-06-21 21:28:07 +00:00
|
|
|
X86MemOperand x86memop, PatFrag mem_frag,
|
2010-06-19 01:32:46 +00:00
|
|
|
Domain d> {
|
|
|
|
def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
|
|
|
|
asm, [(set RC:$dst, (
|
|
|
|
!nameconcat<Intrinsic>("int_x86_sse",
|
|
|
|
!strconcat(SSEVer, !strconcat("_",
|
|
|
|
!strconcat(OpcodeStr, FPSizeStr))))
|
|
|
|
RC:$src1, RC:$src2))], d>;
|
2010-06-21 21:28:07 +00:00
|
|
|
def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
|
2010-06-19 01:32:46 +00:00
|
|
|
asm, [(set RC:$dst, (
|
|
|
|
!nameconcat<Intrinsic>("int_x86_sse",
|
|
|
|
!strconcat(SSEVer, !strconcat("_",
|
|
|
|
!strconcat(OpcodeStr, FPSizeStr))))
|
|
|
|
RC:$src1, (mem_frag addr:$src2)))], d>;
|
|
|
|
}
|
|
|
|
|
2010-06-22 18:09:32 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SSE 1 & 2 - Move Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2010-06-22 22:38:56 +00:00
|
|
|
class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
|
|
|
|
SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
|
|
|
|
[(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
|
|
|
|
|
|
|
|
// Loading from memory automatically zeroing upper bits.
|
|
|
|
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
|
|
|
|
PatFrag mem_pat, string OpcodeStr> :
|
|
|
|
SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set RC:$dst, (mem_pat addr:$src))]>;
|
|
|
|
|
2010-06-22 18:09:32 +00:00
|
|
|
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
|
|
|
|
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
|
|
|
|
// is used instead. Register-to-register movss/movsd is not modeled as an
|
|
|
|
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
|
|
|
|
// in terms of a copy, and just mentioned, we don't use movss/movsd for copies.
|
2010-06-22 22:38:56 +00:00
|
|
|
let isAsmParserOnly = 1 in {
|
|
|
|
def VMOVSSrr : sse12_move_rr<FR32, v4f32,
|
|
|
|
"movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
|
|
|
|
def VMOVSDrr : sse12_move_rr<FR64, v2f64,
|
|
|
|
"movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
|
|
|
|
|
|
|
|
let canFoldAsLoad = 1, isReMaterializable = 1 in {
|
|
|
|
def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
|
|
|
|
|
|
|
|
let AddedComplexity = 20 in
|
|
|
|
def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-06-22 18:09:32 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2010-06-22 22:38:56 +00:00
|
|
|
def MOVSSrr : sse12_move_rr<FR32, v4f32,
|
|
|
|
"movss\t{$src2, $dst|$dst, $src2}">, XS;
|
|
|
|
def MOVSDrr : sse12_move_rr<FR64, v2f64,
|
|
|
|
"movsd\t{$src2, $dst|$dst, $src2}">, XD;
|
2010-06-22 18:09:32 +00:00
|
|
|
}
|
2010-02-28 00:17:42 +00:00
|
|
|
|
2010-06-22 18:09:32 +00:00
|
|
|
let canFoldAsLoad = 1, isReMaterializable = 1 in {
|
2010-06-22 22:38:56 +00:00
|
|
|
def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
|
|
|
|
|
|
|
|
let AddedComplexity = 20 in
|
|
|
|
def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
|
2010-06-22 18:09:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
let AddedComplexity = 15 in {
|
2010-02-28 00:17:42 +00:00
|
|
|
// Extract the low 32-bit value from one vector and insert it into another.
|
|
|
|
def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
|
2010-03-08 18:57:56 +00:00
|
|
|
(MOVSSrr (v4f32 VR128:$src1),
|
2010-05-24 14:48:17 +00:00
|
|
|
(EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
|
2010-06-22 18:09:32 +00:00
|
|
|
// Extract the low 64-bit value from one vector and insert it into another.
|
|
|
|
def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
|
|
|
|
(MOVSDrr (v2f64 VR128:$src1),
|
|
|
|
(EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
|
|
|
|
}
|
2010-02-28 00:17:42 +00:00
|
|
|
|
|
|
|
// Implicitly promote a 32-bit scalar to a vector.
|
|
|
|
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
|
2010-05-24 14:48:17 +00:00
|
|
|
(INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
|
2010-06-22 18:09:32 +00:00
|
|
|
// Implicitly promote a 64-bit scalar to a vector.
|
|
|
|
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
|
|
|
|
(INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
|
2010-02-28 00:17:42 +00:00
|
|
|
|
2010-06-22 18:09:32 +00:00
|
|
|
let AddedComplexity = 20 in {
|
2010-02-28 00:17:42 +00:00
|
|
|
// MOVSSrm zeros the high parts of the register; represent this
|
|
|
|
// with SUBREG_TO_REG.
|
|
|
|
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
|
2010-05-24 14:48:17 +00:00
|
|
|
(SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
|
2010-02-28 00:17:42 +00:00
|
|
|
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
|
2010-05-24 14:48:17 +00:00
|
|
|
(SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
|
2010-02-28 00:17:42 +00:00
|
|
|
def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
|
2010-05-24 14:48:17 +00:00
|
|
|
(SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
|
2010-06-22 18:09:32 +00:00
|
|
|
// MOVSDrm zeros the high parts of the register; represent this
|
|
|
|
// with SUBREG_TO_REG.
|
|
|
|
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
|
|
|
|
(SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
|
|
|
|
def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
|
|
|
|
(SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
|
|
|
|
def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
|
|
|
|
(SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
|
|
|
|
def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
|
|
|
|
(SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
|
|
|
|
def : Pat<(v2f64 (X86vzload addr:$src)),
|
|
|
|
(SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
|
2010-02-28 00:17:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Store scalar value to memory.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movss\t{$src, $dst|$dst, $src}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(store FR32:$src, addr:$dst)]>;
|
2010-06-22 18:09:32 +00:00
|
|
|
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
|
|
|
|
"movsd\t{$src, $dst|$dst, $src}",
|
|
|
|
[(store FR64:$src, addr:$dst)]>;
|
2006-02-21 19:26:52 +00:00
|
|
|
|
2010-06-22 22:38:56 +00:00
|
|
|
let isAsmParserOnly = 1 in {
|
|
|
|
def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
|
|
|
|
"movss\t{$src, $dst|$dst, $src}",
|
|
|
|
[(store FR32:$src, addr:$dst)]>, XS, VEX_4V;
|
|
|
|
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
|
|
|
|
"movsd\t{$src, $dst|$dst, $src}",
|
|
|
|
[(store FR64:$src, addr:$dst)]>, XD, VEX_4V;
|
|
|
|
}
|
|
|
|
|
2010-02-28 00:17:42 +00:00
|
|
|
// Extract and store.
|
|
|
|
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
|
|
|
|
addr:$dst),
|
|
|
|
(MOVSSmr addr:$dst,
|
2010-05-24 14:48:17 +00:00
|
|
|
(EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
|
2010-06-22 18:09:32 +00:00
|
|
|
def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
|
|
|
|
addr:$dst),
|
|
|
|
(MOVSDmr addr:$dst,
|
|
|
|
(EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
|
|
|
|
|
2010-06-25 20:22:12 +00:00
|
|
|
// Move Aligned/Unaligned floating point values
|
|
|
|
multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
|
|
|
|
X86MemOperand x86memop, PatFrag ld_frag,
|
|
|
|
string asm, Domain d,
|
|
|
|
bit IsReMaterializable = 1> {
|
|
|
|
let neverHasSideEffects = 1 in
|
2010-06-25 23:33:42 +00:00
|
|
|
def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
|
|
|
|
!strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
|
2010-06-25 20:22:12 +00:00
|
|
|
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
|
2010-06-25 23:33:42 +00:00
|
|
|
def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
|
|
|
|
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
|
2010-06-25 20:22:12 +00:00
|
|
|
[(set RC:$dst, (ld_frag addr:$src))], d>;
|
|
|
|
}
|
|
|
|
|
2010-06-25 23:33:42 +00:00
|
|
|
let isAsmParserOnly = 1 in {
|
|
|
|
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
|
|
|
|
"movaps", SSEPackedSingle>, VEX;
|
|
|
|
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
|
|
|
|
"movapd", SSEPackedDouble>, OpSize, VEX;
|
|
|
|
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
|
|
|
|
"movups", SSEPackedSingle>, VEX;
|
|
|
|
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
|
|
|
|
"movupd", SSEPackedDouble, 0>, OpSize, VEX;
|
|
|
|
}
|
2010-06-25 20:22:12 +00:00
|
|
|
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
|
2010-06-25 23:33:42 +00:00
|
|
|
"movaps", SSEPackedSingle>, TB;
|
2010-06-25 20:22:12 +00:00
|
|
|
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
|
2010-06-25 23:33:42 +00:00
|
|
|
"movapd", SSEPackedDouble>, TB, OpSize;
|
2010-06-25 20:22:12 +00:00
|
|
|
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
|
2010-06-25 23:33:42 +00:00
|
|
|
"movups", SSEPackedSingle>, TB;
|
2010-06-25 20:22:12 +00:00
|
|
|
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
|
2010-06-25 23:33:42 +00:00
|
|
|
"movupd", SSEPackedDouble, 0>, TB, OpSize;
|
2010-06-25 20:22:12 +00:00
|
|
|
|
2010-06-25 23:33:42 +00:00
|
|
|
// Store forms of the packed moves (register to memory).
// AVX (VEX-encoded) variants first, then legacy SSE.
let isAsmParserOnly = 1 in {
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movups\t{$src, $dst|$dst, $src}",
                     [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movupd\t{$src, $dst|$dst, $src}",
                     [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
}

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;
|
|
|
|
|
|
|
|
// Intrinsic forms of MOVUPS/D load and store
let isAsmParserOnly = 1 in {
  let canFoldAsLoad = 1, isReMaterializable = 1 in
  def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
                           "movups\t{$src, $dst|$dst, $src}",
                           [(set VR128:$dst,
                             (int_x86_sse_loadu_ps addr:$src))]>, VEX;
  def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
                           "movupd\t{$src, $dst|$dst, $src}",
                           [(set VR128:$dst,
                             (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
  def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
                           (ins f128mem:$dst, VR128:$src),
                           "movups\t{$src, $dst|$dst, $src}",
                           [(int_x86_sse_storeu_ps addr:$dst,
                                                   VR128:$src)]>, VEX;
  def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
                           (ins f128mem:$dst, VR128:$src),
                           "movupd\t{$src, $dst|$dst, $src}",
                           [(int_x86_sse2_storeu_pd addr:$dst,
                                                    VR128:$src)]>, VEX;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;

def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
|
|
|
|
|
|
|
|
// Move Low/High packed floating point values
//
// FIX: the PSrm def previously hard-coded VR128 in its (outs)/(ins) lists
// while its selection pattern used the RC parameter; PDrm already used RC.
// Use RC consistently so the multiclass is correct for any register class
// it is instantiated with (all current users pass VR128, so the emitted
// records are unchanged).
multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
                                 PatFrag mov_frag, string base_opc,
                                 string asm_opr> {
  // Packed-single form: the 64-bit load is bitcast to v4f32 for the pattern.
  def PSrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(!strconcat(base_opc,"s"), asm_opr),
     [(set RC:$dst,
       (mov_frag RC:$src1,
              (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
              SSEPackedSingle>, TB;

  // Packed-double form.
  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(!strconcat(base_opc,"d"), asm_opr),
     [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))))],
              SSEPackedDouble>, TB, OpSize;
}
|
|
|
|
|
2010-06-25 23:33:42 +00:00
|
|
|
// MOVLPS/MOVLPD and MOVHPS/MOVHPD load forms, AVX then legacy SSE.
let isAsmParserOnly = 1, AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
}

let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                                    "\t{$src2, $dst|$dst, $src2}">;
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                                    "\t{$src2, $dst|$dst, $src2}">;
}
|
|
|
|
|
2010-06-25 23:33:42 +00:00
|
|
|
// MOVLPS/MOVLPD store forms: store the low 64 bits of the XMM register.
let isAsmParserOnly = 1 in {
def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                   (iPTR 0))), addr:$dst)]>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (v2f64 VR128:$src),
                                   (iPTR 0))), addr:$dst)]>, VEX;
}

def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;
|
|
|
|
|
|
|
|
// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
// MOVHPS/MOVHPD store the high 64 bits via an unpckh + low-element store.
let isAsmParserOnly = 1 in {
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract
                                   (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                           (undef)), (iPTR 0))), addr:$dst)]>,
                     VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract
                                   (v2f64 (unpckh VR128:$src, (undef))),
                                   (iPTR 0))), addr:$dst)]>,
                     VEX;
}

def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>;
|
|
|
|
|
2010-06-25 23:33:42 +00:00
|
|
|
// MOVLHPS/MOVHLPS register-to-register shuffles, AVX then legacy SSE.
let isAsmParserOnly = 1, AddedComplexity = 20 in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
                        VEX_4V;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
                        VEX_4V;
}

let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
}
|
|
|
|
|
|
|
|
// Select MOVHPSrm when the second movlhps operand is an X86vzload.
def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
          (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;

// movddup of a register can be implemented as MOVLHPS with itself.
let AddedComplexity = 20 in {
  def : Pat<(v4f32 (movddup VR128:$src, (undef))),
            (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
  def : Pat<(v2i64 (movddup VR128:$src, (undef))),
            (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
}
|
|
|
|
|
2010-06-22 18:09:32 +00:00
|
|
|
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

// sse12_cvt_s - scalar conversions: reg and mem source forms with a
// pattern built from the given SDNode.
multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                       string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
}

// sse12_cvt_p - packed conversions; same shape but with an execution Domain.
multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                       string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
}
|
|
|
|
|
|
|
|
// sse12_vcvt_avx - AVX 3-operand scalar conversions. Patterns are left
// empty; these exist for the assembler/disassembler.
multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                          string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              asm, []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src), asm, []>;
}
|
|
|
|
|
|
|
|
// Scalar truncating and int-to-fp conversions, AVX then legacy SSE.
let isAsmParserOnly = 1 in {
defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                              "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                              "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTSI2SS  : sse12_vcvt_avx<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                              "cvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}">, XS,
                              VEX_4V;
defm VCVTSI2SD  : sse12_vcvt_avx<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                              "cvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}">, XD,
                              VEX_4V;
}

defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                             "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                             "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
defm CVTSI2SS  : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                             "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
defm CVTSI2SD  : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                             "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2010-06-25 18:06:22 +00:00
|
|
|
// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).

// Packed intrinsic conversion (carries an execution Domain).
multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
                          string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (Int SrcRC:$src))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (Int (ld_frag addr:$src)))], d>;
}

// Scalar intrinsic conversion.
multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
                          string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (Int SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
}
|
|
|
|
|
2010-06-25 18:06:22 +00:00
|
|
|
// Three-address (tied or VEX) packed intrinsic conversion.
multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
            RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
            PatFrag ld_frag, string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2), asm,
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], d>;
}

// Three-address (tied or VEX) scalar intrinsic conversion.
multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
            RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
            PatFrag ld_frag, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2), asm,
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
}
|
|
|
|
|
|
|
|
// Intrinsic forms of CVTSS2SI/CVTSD2SI, AVX then legacy SSE.
let isAsmParserOnly = 1 in {
  defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                         f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS,
                         VEX;
  defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                         f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD,
                         VEX;
}
defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                      f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS;
defm Int_CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                      f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD;
|
|
|
|
|
2010-06-25 18:06:22 +00:00
|
|
|
|
|
|
|
// Intrinsic forms of CVTSI2SS/CVTSI2SD.
// FIX: Int_CVTSI2SD previously printed the mnemonic "cvtsi2ss"; it must be
// "cvtsi2sd" (the encoding already carried the XD prefix for the SD form).
let Constraints = "$src1 = $dst" in {
  defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse_cvtsi2ss, i32mem, loadi32,
                        "cvtsi2ss\t{$src2, $dst|$dst, $src2}">, XS;
  defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse2_cvtsi2sd, i32mem, loadi32,
                        "cvtsi2sd\t{$src2, $dst|$dst, $src2}">, XD;
}
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2010-06-25 18:06:22 +00:00
|
|
|
// Instructions below don't have an AVX form.
// XMM <-> MMX packed conversions (intrinsic forms only).
defm Int_CVTPS2PI  : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
                       f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
                       SSEPackedSingle>, TB;
defm Int_CVTPD2PI  : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
                       f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
                       SSEPackedDouble>, TB, OpSize;
defm Int_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
                       f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
                       SSEPackedSingle>, TB;
defm Int_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
                       f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
                       SSEPackedDouble>, TB, OpSize;
defm Int_CVTPI2PD  : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
                       i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
                       SSEPackedDouble>, TB, OpSize;

let Constraints = "$src1 = $dst" in {
  defm Int_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
                        int_x86_sse_cvtpi2ps,
                        i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                        SSEPackedSingle>, TB;
}
|
|
|
|
|
|
|
|
/// SSE 1 Only

// Aliases for intrinsics
// FIX: Int_VCVTTSD2SI previously printed the mnemonic "cvttss2si"; the SD
// (XD-prefixed) form must print "cvttsd2si".
let isAsmParserOnly = 1, Pattern = []<dag> in {
defm Int_VCVTTSS2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
                        int_x86_sse_cvttss2si, f32mem, load,
                        "cvttss2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS;
defm Int_VCVTTSD2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
                        int_x86_sse2_cvttsd2si, f128mem, load,
                        "cvttsd2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD;
}
|
2010-06-25 18:06:22 +00:00
|
|
|
// FIX: Int_CVTTSD2SI previously printed the mnemonic "cvttss2si"; the SD
// (XD-prefixed) form must print "cvttsd2si".
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                       f32mem, load, "cvttss2si\t{$src, $dst|$dst, $src}">,
                       XS;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                       f128mem, load, "cvttsd2si\t{$src, $dst|$dst, $src}">,
                       XD;
|
|
|
|
|
2010-06-25 23:47:23 +00:00
|
|
|
// Assembler-only AVX forms of CVTSS2SI / CVTDQ2PS (no selection patterns).
let isAsmParserOnly = 1, Pattern = []<dag> in {
  defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
                               "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
  defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load,
                               "cvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle>, TB, VEX;
}
|
2010-06-25 18:06:22 +00:00
|
|
|
// Legacy SSE CVTSS2SI / CVTDQ2PS, assembler use only (no patterns; the
// ld_frag arguments are dummies). Fixed comment typo: "avaiable".
let Pattern = []<dag> in {
defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
                          "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load /*dummy*/,
                          "cvtdq2ps\t{$src, $dst|$dst, $src}",
                          SSEPackedSingle>, TB; /* PD SSE3 form is available */
}
|
2010-06-24 23:37:07 +00:00
|
|
|
|
2010-06-25 20:29:27 +00:00
|
|
|
/// SSE 2 Only

// Scalar double -> single conversion.
def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                     "cvtsd2ss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (fround FR64:$src))]>;
// Memory form only selected when optimizing for size.
def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                   "cvtsd2ss\t{$src, $dst|$dst, $src}",
                   [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
                   Requires<[HasSSE2, OptForSize]>;

// Packed single -> dword (patterns live on the intrinsic forms below).
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
|
|
|
|
|
|
|
|
// SSE2 instructions with XS prefix
// Scalar single -> double conversion.
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                   Requires<[HasSSE2]>;
// Memory form only selected when optimizing for size.
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                   Requires<[HasSSE2, OptForSize]>;

// When optimizing for speed, extload is done as a separate load + convert.
def : Pat<(extloadf32 addr:$src),
          (CVTSS2SDrr (MOVSSrm addr:$src))>,
      Requires<[HasSSE2, OptForSpeed]>;
|
|
|
|
|
|
|
|
// SSE2 instructions without OpSize prefix
// Intrinsic forms of CVTDQ2PS.
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                       TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                       TB, Requires<[HasSSE2]>;

// SSE2 instructions with XS prefix
// Intrinsic forms of CVTDQ2PD.
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                       XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                       XS, Requires<[HasSSE2]>;
|
|
|
|
|
|
|
|
// Intrinsic forms of CVTPS2DQ.
def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (memop addr:$src)))]>;

// SSE2 packed instructions with XS prefix
// CVTTPS2DQ (patterns live on the intrinsic forms below).
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>;

def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                              (int_x86_sse2_cvttps2dq VR128:$src))]>,
                        XS, Requires<[HasSSE2]>;
def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                           (memop addr:$src)))]>,
                        XS, Requires<[HasSSE2]>;
|
|
|
|
|
|
|
|
// SSE2 packed instructions with XD prefix
// Intrinsic forms of CVTPD2DQ / CVTTPD2DQ.
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                       XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                       XD, Requires<[HasSSE2]>;

def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                             (memop addr:$src)))]>;
|
|
|
|
|
|
|
|
// SSE2 instructions without OpSize prefix
// CVTPS2PD (patterns live on the intrinsic forms).
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;

def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                       TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                       TB, Requires<[HasSSE2]>;
|
|
|
|
|
|
|
|
// CVTPD2PS (patterns live on the intrinsic forms).
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;

def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;
|
|
|
|
|
|
|
|
// Match intrinsics which expect XMM operand(s).
// Aliases for intrinsics
// Tied-operand intrinsic forms of CVTSD2SS / CVTSS2SD.
let Constraints = "$src1 = $dst" in {
def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
                                                                 VR128:$src2))]>;
def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                        "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
                                                     (load addr:$src2)))]>;
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                                               VR128:$src2))]>, XS,
                      Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                      "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                                   (load addr:$src2)))]>, XS,
                      Requires<[HasSSE2]>;
}
|
|
|
|
|
2010-06-22 18:09:32 +00:00
|
|
|
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
//===----------------------------------------------------------------------===//

// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions.
// Emits the condition-code ($cc) forms plus assembler-only forms that take
// the raw immediate directly.
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                            string asm, string asm_alt> {
  def rr : SIi8<0xC2, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
                asm, []>;
  let mayLoad = 1 in
  def rm : SIi8<0xC2, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
                asm, []>;
  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1 in {
    def rr_alt : SIi8<0xC2, MRMSrcReg,
                      (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
                      asm_alt, []>;
    let mayLoad = 1 in
    def rm_alt : SIi8<0xC2, MRMSrcMem,
                      (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
                      asm_alt, []>;
  }
}
|
2010-06-24 00:32:06 +00:00
|
|
|
|
2010-06-24 20:48:23 +00:00
|
|
|
// Scalar compares, AVX (VEX) then legacy SSE (tied-operand) forms.
let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
  defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
                  "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                  "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                  XS, VEX_4V;
  defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
                  "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                  "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                  XD, VEX_4V;
}

let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
  defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
                  "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                  "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
  defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
                  "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                  "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
}
|
2010-06-24 00:32:06 +00:00
|
|
|
|
2010-06-24 22:04:40 +00:00
|
|
|
// sse12_cmp_scalar_int - intrinsic forms of the scalar compares.
// FIX: the rm form previously hard-coded f32mem:$src, silently ignoring the
// x86memop parameter — wrong for the SD instantiations (which pass f64mem).
// Use the parameter so each instantiation gets the operand size it asked for.
multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
                                Intrinsic Int, string asm> {
  def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
                (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
                [(set VR128:$dst, (Int VR128:$src1,
                                       VR128:$src, imm:$cc))]>;
  def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
                (ins VR128:$src1, x86memop:$src, SSECC:$cc), asm,
                [(set VR128:$dst, (Int VR128:$src1,
                                       (load addr:$src), imm:$cc))]>;
}
|
|
|
|
|
|
|
|
// Aliases to match intrinsics which expect XMM operand(s).
let isAsmParserOnly = 1 in {
  defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
                      "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
                      XS, VEX_4V;
  defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
                      "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
                      XD, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
  defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
                      "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
}
|
|
|
|
|
|
|
|
|
2010-06-24 20:48:23 +00:00
|
|
|
// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
                         ValueType vt, X86MemOperand x86memop,
                         PatFrag ld_frag, string OpcodeStr, Domain d> {
  def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
             [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
  def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
             [(set EFLAGS, (OpNode (vt RC:$src1),
                                   (ld_frag addr:$src2)))], d>;
}
|
|
|
|
|
2007-09-14 21:48:26 +00:00
|
|
|
// Ordered/unordered FP compares that set EFLAGS: AVX forms first, then
// legacy SSE, each with plain and intrinsic (Int_*) variants.
let Defs = [EFLAGS] in {
  let isAsmParserOnly = 1 in {
    defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                                  "ucomiss", SSEPackedSingle>, VEX;
    defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                                  "ucomisd", SSEPackedDouble>, OpSize, VEX;
    let Pattern = []<dag> in {
      defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                                   "comiss", SSEPackedSingle>, VEX;
      defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                                   "comisd", SSEPackedDouble>, OpSize, VEX;
    }

    defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                                      load, "ucomiss", SSEPackedSingle>, VEX;
    defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                                      load, "ucomisd", SSEPackedDouble>, OpSize,
                                      VEX;

    defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
                                     load, "comiss", SSEPackedSingle>, VEX;
    defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
                                     load, "comisd", SSEPackedDouble>, OpSize,
                                     VEX;
  }
  defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                               "ucomiss", SSEPackedSingle>, TB;
  defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                               "ucomisd", SSEPackedDouble>, TB, OpSize;

  let Pattern = []<dag> in {
    defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                                "comiss", SSEPackedSingle>, TB;
    defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                                "comisd", SSEPackedDouble>, TB, OpSize;
  }

  defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                                   load, "ucomiss", SSEPackedSingle>, TB;
  defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                                   load, "ucomisd", SSEPackedDouble>, TB, OpSize;

  defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
                                  "comiss", SSEPackedSingle>, TB;
  defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
                                  "comisd", SSEPackedDouble>, TB, OpSize;
} // Defs = [EFLAGS]
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2010-06-24 20:48:23 +00:00
|
|
|
// sse12_cmp_packed - sse 1 & 2 compare packed instructions (CMPPS/CMPPD).
// 'asm' is the form with the condition code mnemonic (cmp${cc}ps); 'asm_alt'
// is the explicit-immediate form accepted only by the assembly parser.
multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
                            Intrinsic Int, string asm, string asm_alt,
                            Domain d> {
  // reg-reg with comparison-code operand.
  def rri : PIi8<0xC2, MRMSrcReg,
             (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
             [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
  // reg-mem with comparison-code operand.
  def rmi : PIi8<0xC2, MRMSrcMem,
             (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
             [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;

  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1 in {
    def rri_alt : PIi8<0xC2, MRMSrcReg,
               (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
               asm_alt, [], d>;
    def rmi_alt : PIi8<0xC2, MRMSrcMem,
               (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
               asm_alt, [], d>;
  }
}
|
2010-06-24 20:48:23 +00:00
|
|
|
|
2010-06-24 00:15:50 +00:00
|
|
|
// Packed FP compares. VEX (three-operand) forms first, then the legacy
// two-operand SSE forms which tie $src1 to $dst.
let isAsmParserOnly = 1 in {
  defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
                 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
                 SSEPackedSingle>, VEX_4V;
  defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
                 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
                 SSEPackedDouble>, OpSize, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
                 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
                 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
                 SSEPackedSingle>, TB;
  defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
                 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
                 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
                 SSEPackedDouble>, TB, OpSize;
}

// Select CMPPS/CMPPD for X86cmpps/X86cmppd nodes carrying an explicit
// condition-code immediate.
def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
          (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
          (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SSE 1 & 2 - Shuffle Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// sse12_shuffle - sse 1 & 2 shuffle instructions (SHUFPS/SHUFPD).
/// The shuffle mask is carried as an i8 immediate ($src3) matched through
/// the shufp pattern fragment.
multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
                         ValueType vt, string asm, PatFrag mem_frag,
                         Domain d, bit IsConvertibleToThreeAddress = 0> {
  // reg-mem form.
  def rmi : PIi8<0xC6, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, f128mem:$src2, i8imm:$src3), asm,
                   [(set VR128:$dst, (vt (shufp:$src3
                            VR128:$src1, (mem_frag addr:$src2))))], d>;
  // reg-reg form; SHUFPS can be converted to PSHUFD (three-address) when
  // IsConvertibleToThreeAddress is set by the instantiation.
  let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
    def rri : PIi8<0xC6, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1, VR128:$src2, i8imm:$src3), asm,
                     [(set VR128:$dst,
                           (vt (shufp:$src3 VR128:$src1, VR128:$src2)))], d>;
}
|
|
|
|
|
|
|
|
// VEX (three-operand) shuffle forms.
let isAsmParserOnly = 1 in {
  defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
             "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
             memopv4f32, SSEPackedSingle>, VEX_4V;
  // Fixed: the Intel-syntax operand list previously printed "$src2, $src2"
  // instead of "$src1, $src2", inconsistent with VSHUFPS above.
  defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
             "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
             memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
}
|
|
|
|
|
|
|
|
// Legacy two-operand shuffle forms ($src1 tied to $dst).
let Constraints = "$src1 = $dst" in {
  defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
                    "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
                    TB;
  defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
                    "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv2f64, SSEPackedDouble>, TB, OpSize;
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SSE 1 & 2 - Unpack Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
/// (UNPCKH/UNPCKL). OpNode is the shuffle pattern fragment (unpckh/unpckl)
/// matched for the given vector type.
multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
                                   PatFrag mem_frag, RegisterClass RC,
                                   X86MemOperand x86memop, string asm,
                                   Domain d> {
    // reg-reg form.
    def rr : PI<opc, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1, RC:$src2)))], d>;
    // reg-mem form.
    def rm : PI<opc, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1,
                                       (mem_frag addr:$src2))))], d>;
}
|
|
|
|
|
|
|
|
// Unpack/interleave instantiations. AddedComplexity prefers these patterns
// over generic shuffle lowering.
let AddedComplexity = 10 in {
  // VEX (three-operand) forms.
  let isAsmParserOnly = 1 in {
    defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
          VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                          SSEPackedSingle>, VEX_4V;
    defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
          VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                          SSEPackedDouble>, OpSize, VEX_4V;
    defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
          VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                          SSEPackedSingle>, VEX_4V;
    defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
          VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                          SSEPackedDouble>, OpSize, VEX_4V;
  }

  // Legacy two-operand SSE forms ($src1 tied to $dst).
  let Constraints = "$src1 = $dst" in {
    defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
          VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
                          SSEPackedSingle>, TB;
    defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
          VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
                          SSEPackedDouble>, TB, OpSize;
    defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
          VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
                          SSEPackedSingle>, TB;
    defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
          VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
                          SSEPackedDouble>, TB, OpSize;
  } // Constraints = "$src1 = $dst"
} // AddedComplexity
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SSE 1 & 2 - Extract Floating-Point Sign mask
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// sse12_extr_sign_mask - sse 1 & 2 sign-mask extraction (MOVMSKPS/PD).
/// (The previous comment said "unpack and interleave" — a copy-paste error.)
/// Extracts the packed FP sign bits into a GR32 register via the intrinsic.
multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
                                Domain d> {
  def rr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set GR32:$dst, (Int RC:$src))], d>;
}
|
|
|
|
|
|
|
|
// Mask creation
defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
                                     SSEPackedSingle>, TB;
defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
                                     SSEPackedDouble>, TB, OpSize;

// VEX-encoded forms; assembler-only at this point.
let isAsmParserOnly = 1 in {
  defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
                                        "movmskps", SSEPackedSingle>, VEX;
  defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
                                        "movmskpd", SSEPackedDouble>, OpSize,
                                        VEX;
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2010-06-22 18:09:32 +00:00
|
|
|
// Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
|
|
|
|
// names that start with 'Fs'.
|
2007-05-02 23:11:52 +00:00
|
|
|
|
|
|
|
// Alias instructions that map fld0 to pxor for sse.
// Rematerializable and as cheap as a move; code-gen only (never parsed or
// printed as these mnemonics).
// FIXME: Set encoding to pseudo!
let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
    canFoldAsLoad = 1 in {
  def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
                   [(set FR32:$dst, fp32imm0)]>,
                   Requires<[HasSSE1]>, TB, OpSize;
  def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
                   [(set FR64:$dst, fpimm0)]>,
                   Requires<[HasSSE2]>, TB, OpSize;
}
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2010-06-22 18:09:32 +00:00
|
|
|
// Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
// bits are disregarded.
let neverHasSideEffects = 1 in {
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                     "movaps\t{$src, $dst|$dst, $src}", []>;
def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                     "movapd\t{$src, $dst|$dst, $src}", []>;
}
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2010-06-22 18:09:32 +00:00
|
|
|
// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded. The loads must be aligned (alignedloadfsf32/64) and
// may be folded or rematerialized.
let canFoldAsLoad = 1, isReMaterializable = 1 in {
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
}
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2010-06-22 18:17:40 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SSE 1 & 2 - Logical Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2010-05-28 22:47:03 +00:00
|
|
|
/// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
/// operating on scalar FP register classes (FR32/FR64). Instantiates VEX
/// three-operand forms and legacy two-operand forms for both ps and pd.
multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
                                       SDNode OpNode, bit MayLoad = 0> {
  let isAsmParserOnly = 1 in {
    defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode, FR32,
                f32, f128mem, memopfsf32, SSEPackedSingle, MayLoad>, VEX_4V;

    defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode, FR64,
                f64, f128mem, memopfsf64, SSEPackedDouble, MayLoad>, OpSize,
                VEX_4V;
  }

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "ps\t{$src2, $dst|$dst, $src2}"), OpNode, FR32, f32,
                f128mem, memopfsf32, SSEPackedSingle, MayLoad>, TB;

    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "pd\t{$src2, $dst|$dst, $src2}"), OpNode, FR64, f64,
                f128mem, memopfsf64, SSEPackedDouble, MayLoad>, TB, OpSize;
  }
}
|
|
|
|
|
2007-05-02 23:11:52 +00:00
|
|
|
// Alias bitwise logical operations using SSE logical ops on packed FP values.
defm FsAND  : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
defm FsOR   : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
defm FsXOR  : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;

// ANDN has no SDNode to match (undef OpNode, empty Pattern) and is not
// commutable; MayLoad = 1.
let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
  defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef, 1>;
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2010-06-22 18:17:40 +00:00
|
|
|
/// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops on VR128.
/// When HasPat is set, the four entries of Pattern override the default
/// selection dags in order: [ps rr, pd rr, ps rm, pd rm].
multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
                                   SDNode OpNode, int HasPat = 0,
                                   list<list<dag>> Pattern = []> {
  let isAsmParserOnly = 1 in {
    defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
         !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
         f128mem,
         !if(HasPat, Pattern[0], // rr
                     [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
                                                      VR128:$src2)))]),
         !if(HasPat, Pattern[2], // rm
                     [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
                                               (memopv2i64 addr:$src2)))])>,
         VEX_4V;

    defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
         !strconcat(OpcodeStr, "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
         f128mem,
         !if(HasPat, Pattern[1], // rr
                     [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                               (bc_v2i64 (v2f64
                                                          VR128:$src2))))]),
         !if(HasPat, Pattern[3], // rm
                     [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                               (memopv2i64 addr:$src2)))])>,
         OpSize, VEX_4V;
  }

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
         !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"), f128mem,
         !if(HasPat, Pattern[0], // rr
                     [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
                                                      VR128:$src2)))]),
         !if(HasPat, Pattern[2], // rm
                     [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
                                               (memopv2i64 addr:$src2)))])>, TB;

    defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
         !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"), f128mem,
         !if(HasPat, Pattern[1], // rr
                     [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                               (bc_v2i64 (v2f64
                                                          VR128:$src2))))]),
         !if(HasPat, Pattern[3], // rm
                     [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                               (memopv2i64 addr:$src2)))])>,
         TB, OpSize;
  }
}
|
|
|
|
|
|
|
|
// Packed FP logical instantiations. AND/OR/XOR use the default patterns;
// ANDN cannot be expressed with a single commutative SDNode, so it supplies
// explicit patterns (HasPat = 1) for all four forms.
defm AND  : sse12_fp_packed_logical<0x54, "and", and>;
defm OR   : sse12_fp_packed_logical<0x56, "or", or>;
defm XOR  : sse12_fp_packed_logical<0x57, "xor", xor>;
let isCommutable = 0 in
  defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
    // single r+r
    [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
                                       (bc_v2i64 (v4i32 immAllOnesV))),
                                  VR128:$src2)))],
    // double r+r
    [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
                           (bc_v2i64 (v2f64 VR128:$src2))))],
    // single r+m
    [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
                                       (bc_v2i64 (v4i32 immAllOnesV))),
                                  (memopv2i64 addr:$src2))))],
    // double r+m
    [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
                           (memopv2i64 addr:$src2)))]]>;
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SSE 1 & 2 - Arithmetic Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2010-05-27 18:17:40 +00:00
|
|
|
/// basic_sse12_fp_binop_rm - SSE 1 & 2 binops come in both scalar and
/// vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements unmodified (therefore these cannot be commuted).
///
/// These three forms can each be reg+reg or reg+mem.
///
multiclass basic_sse12_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                   SDNode OpNode> {
  // VEX (three-operand) forms; assembler-only at this point.
  let isAsmParserOnly = 1 in {
    defm V#NAME#SS : sse12_fp_scalar<opc,
        !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
        OpNode, FR32, f32mem>, XS, VEX_4V;

    defm V#NAME#SD : sse12_fp_scalar<opc,
        !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
        OpNode, FR64, f64mem>, XD, VEX_4V;

    defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
                VR128, v4f32, f128mem, memopv4f32, SSEPackedSingle>,
                VEX_4V;

    defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
                VR128, v2f64, f128mem, memopv2f64, SSEPackedDouble>,
                OpSize, VEX_4V;

    defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       "", "_ss", ssmem, sse_load_f32>, XS, VEX_4V;

    defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       "2", "_sd", sdmem, sse_load_f64>, XD, VEX_4V;
  }

  // Legacy two-operand SSE forms ($src1 tied to $dst).
  let Constraints = "$src1 = $dst" in {
    defm SS : sse12_fp_scalar<opc,
                    !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                    OpNode, FR32, f32mem>, XS;

    defm SD : sse12_fp_scalar<opc,
                    !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                    OpNode, FR64, f64mem>, XD;

    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "ps\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v4f32,
                f128mem, memopv4f32, SSEPackedSingle>, TB;

    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "pd\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v2f64,
                f128mem, memopv2f64, SSEPackedDouble>, TB, OpSize;

    defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
       "", "_ss", ssmem, sse_load_f32>, XS;

    defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
       "2", "_sd", sdmem, sse_load_f64>, XD;
  }
}
|
|
|
|
|
2006-10-07 20:35:44 +00:00
|
|
|
// Arithmetic instructions
defm ADD : basic_sse12_fp_binop_rm<0x58, "add", fadd>;
defm MUL : basic_sse12_fp_binop_rm<0x59, "mul", fmul>;

// Subtraction and division are not commutative.
let isCommutable = 0 in {
  defm SUB : basic_sse12_fp_binop_rm<0x5C, "sub", fsub>;
  defm DIV : basic_sse12_fp_binop_rm<0x5E, "div", fdiv>;
}
|
2006-10-07 20:35:44 +00:00
|
|
|
|
2010-05-27 18:17:40 +00:00
|
|
|
/// sse12_fp_binop_rm - Other SSE 1 & 2 binops
///
/// This multiclass is like basic_sse12_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form. Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
multiclass sse12_fp_binop_rm<bits<8> opc, string OpcodeStr,
                             SDNode OpNode> {
  // VEX (three-operand) forms; assembler-only at this point.
  let isAsmParserOnly = 1 in {
    // Scalar operation, reg+reg.
    defm V#NAME#SS : sse12_fp_scalar<opc,
        !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
        OpNode, FR32, f32mem>, XS, VEX_4V;

    defm V#NAME#SD : sse12_fp_scalar<opc,
        !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
        OpNode, FR64, f64mem>, XD, VEX_4V;

    defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
                VR128, v4f32, f128mem, memopv4f32, SSEPackedSingle>,
                VEX_4V;

    defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
                VR128, v2f64, f128mem, memopv2f64, SSEPackedDouble>,
                OpSize, VEX_4V;

    defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       "", "_ss", ssmem, sse_load_f32>, XS, VEX_4V;

    defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       "2", "_sd", sdmem, sse_load_f64>, XD, VEX_4V;

    // Full-vector intrinsic forms.
    defm V#NAME#PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       "", "_ps", f128mem, memopv4f32, SSEPackedSingle>, VEX_4V;

    defm V#NAME#PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       "2", "_pd", f128mem, memopv2f64, SSEPackedDouble>, OpSize,
       VEX_4V;
  }

  // Legacy two-operand SSE forms ($src1 tied to $dst).
  let Constraints = "$src1 = $dst" in {
    // Scalar operation, reg+reg.
    defm SS : sse12_fp_scalar<opc,
                    !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                    OpNode, FR32, f32mem>, XS;
    defm SD : sse12_fp_scalar<opc,
                    !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                    OpNode, FR64, f64mem>, XD;
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "ps\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v4f32,
                f128mem, memopv4f32, SSEPackedSingle>, TB;

    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "pd\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v2f64,
                f128mem, memopv2f64, SSEPackedDouble>, TB, OpSize;

    defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
       "", "_ss", ssmem, sse_load_f32>, XS;

    defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
       "2", "_sd", sdmem, sse_load_f64>, XD;

    // Full-vector intrinsic forms.
    defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
       "", "_ps", f128mem, memopv4f32, SSEPackedSingle>, TB;

    defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
       "2", "_pd", f128mem, memopv2f64, SSEPackedDouble>, TB, OpSize;
  }
}
|
|
|
|
|
2010-06-18 01:12:56 +00:00
|
|
|
// MAX/MIN are not commutable (their behavior differs depending on operand
// order for NaN and signed-zero inputs, per the X86fmax/X86fmin nodes).
let isCommutable = 0 in {
  defm MAX : sse12_fp_binop_rm<0x5F, "max", X86fmax>;
  defm MIN : sse12_fp_binop_rm<0x5D, "min", X86fmin>;
}
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Arithmetic
|
|
|
|
|
|
|
|
/// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
|
2007-05-02 23:11:52 +00:00
|
|
|
///
|
2007-07-10 00:05:58 +00:00
|
|
|
/// In addition, we also have a special variant of the scalar form here to
|
|
|
|
/// represent the associated intrinsic operation. This form is unlike the
|
|
|
|
/// plain scalar form, in that it takes an entire vector (instead of a
|
|
|
|
/// scalar) and leaves the top elements undefined.
|
|
|
|
///
|
|
|
|
/// And, we have a special variant form for a full-vector intrinsic form.
|
|
|
|
///
|
|
|
|
/// These four forms can each have a reg or a mem operand, so there are a
|
|
|
|
/// total of eight "instructions".
|
|
|
|
///
|
|
|
|
multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr,
|
|
|
|
SDNode OpNode,
|
|
|
|
Intrinsic F32Int,
|
|
|
|
Intrinsic V4F32Int,
|
|
|
|
bit Commutable = 0> {
|
|
|
|
// Scalar operation, reg.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
|
2007-07-10 00:05:58 +00:00
|
|
|
[(set FR32:$dst, (OpNode FR32:$src))]> {
|
2007-05-02 23:11:52 +00:00
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Scalar operation, mem.
|
2009-12-18 07:40:29 +00:00
|
|
|
def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
|
2009-12-18 07:40:29 +00:00
|
|
|
[(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
|
2009-12-22 17:47:23 +00:00
|
|
|
Requires<[HasSSE1, OptForSize]>;
|
2009-07-31 20:07:27 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Vector operation, reg.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
|
2007-07-10 00:05:58 +00:00
|
|
|
[(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]> {
|
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Vector operation, mem.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
|
2007-07-18 20:23:34 +00:00
|
|
|
[(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Intrinsic operation, reg.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
|
2007-07-10 00:05:58 +00:00
|
|
|
[(set VR128:$dst, (F32Int VR128:$src))]> {
|
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Intrinsic operation, mem.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
|
2007-07-10 00:05:58 +00:00
|
|
|
[(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Vector intrinsic operation, reg
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
|
2007-07-10 00:05:58 +00:00
|
|
|
[(set VR128:$dst, (V4F32Int VR128:$src))]> {
|
|
|
|
let isCommutable = Commutable;
|
2007-05-02 23:11:52 +00:00
|
|
|
}
|
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Vector intrinsic operation, mem
|
2007-08-02 21:06:40 +00:00
|
|
|
def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
|
2008-05-23 00:37:07 +00:00
|
|
|
[(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
}
|
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Square root.
|
|
|
|
defm SQRT : sse1_fp_unop_rm<0x51, "sqrt", fsqrt,
|
|
|
|
int_x86_sse_sqrt_ss, int_x86_sse_sqrt_ps>;
|
|
|
|
|
|
|
|
// Reciprocal approximations. Note that these typically require refinement
|
|
|
|
// in order to obtain suitable precision.
|
|
|
|
defm RSQRT : sse1_fp_unop_rm<0x52, "rsqrt", X86frsqrt,
|
|
|
|
int_x86_sse_rsqrt_ss, int_x86_sse_rsqrt_ps>;
|
|
|
|
defm RCP : sse1_fp_unop_rm<0x53, "rcp", X86frcp,
|
|
|
|
int_x86_sse_rcp_ss, int_x86_sse_rcp_ps>;
|
|
|
|
|
2008-03-08 00:58:38 +00:00
|
|
|
// Prefetch intrinsic.
|
|
|
|
def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
|
|
|
|
"prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
|
|
|
|
def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
|
|
|
|
"prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
|
|
|
|
def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
|
|
|
|
"prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
|
|
|
|
def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
|
|
|
|
"prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
|
|
|
|
// Non-temporal stores
|
2010-02-16 20:50:18 +00:00
|
|
|
def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movntps\t{$src, $dst|$dst, $src}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
|
|
|
|
|
2010-02-16 20:50:18 +00:00
|
|
|
let AddedComplexity = 400 in { // Prefer non-temporal versions
|
|
|
|
def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
|
|
|
|
"movntps\t{$src, $dst|$dst, $src}",
|
|
|
|
[(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
|
|
|
|
|
|
|
|
def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
|
|
|
|
"movntdq\t{$src, $dst|$dst, $src}",
|
|
|
|
[(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
|
|
|
|
|
|
|
|
def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
|
|
|
|
"movnti\t{$src, $dst|$dst, $src}",
|
|
|
|
[(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
|
|
|
|
TB, Requires<[HasSSE2]>;
|
|
|
|
|
|
|
|
def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
|
|
|
|
"movnti\t{$src, $dst|$dst, $src}",
|
|
|
|
[(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
|
|
|
|
TB, Requires<[HasSSE2]>;
|
|
|
|
}
|
|
|
|
|
2007-05-02 23:11:52 +00:00
|
|
|
// Load, store, and memory fence
|
2010-05-20 01:23:41 +00:00
|
|
|
def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
|
|
|
|
TB, Requires<[HasSSE1]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
|
|
|
|
// MXCSR register
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
|
2007-07-31 20:11:57 +00:00
|
|
|
"stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
|
|
|
|
// Alias instructions that map zero vector to pxor / xorp* for sse.
|
2008-12-03 18:15:48 +00:00
|
|
|
// We set canFoldAsLoad because this can be converted to a constant-pool
|
2008-12-03 05:21:24 +00:00
|
|
|
// load of an all-zeros value if folding it would be beneficial.
|
2010-02-05 21:30:49 +00:00
|
|
|
// FIXME: Change encoding to pseudo!
|
2009-08-11 22:17:52 +00:00
|
|
|
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
|
2010-03-31 00:40:13 +00:00
|
|
|
isCodeGenOnly = 1 in {
|
|
|
|
def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
|
|
|
|
[(set VR128:$dst, (v4f32 immAllZerosV))]>;
|
|
|
|
def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
|
|
|
|
[(set VR128:$dst, (v2f64 immAllZerosV))]>;
|
|
|
|
let ExeDomain = SSEPackedInt in
|
|
|
|
def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
|
Fix a long standing deficiency in the X86 backend: we would
sometimes emit "zero" and "all one" vectors multiple times,
for example:
_test2:
pcmpeqd %mm0, %mm0
movq %mm0, _M1
pcmpeqd %mm0, %mm0
movq %mm0, _M2
ret
instead of:
_test2:
pcmpeqd %mm0, %mm0
movq %mm0, _M1
movq %mm0, _M2
ret
This patch fixes this by always arranging for zero/one vectors
to be defined as v4i32 or v2i32 (SSE/MMX) instead of letting them be
any random type. This ensures they get trivially CSE'd on the dag.
This fix is also important for LegalizeDAGTypes, as it gets unhappy
when the x86 backend wants BUILD_VECTOR(i64 0) to be legal even when
'i64' isn't legal.
This patch makes the following changes:
1) X86TargetLowering::LowerBUILD_VECTOR now lowers 0/1 vectors into
their canonical types.
2) The now-dead patterns are removed from the SSE/MMX .td files.
3) All the patterns in the .td file that referred to immAllOnesV or
immAllZerosV in the wrong form now use *_bc to match them with a
bitcast wrapped around them.
4) X86DAGToDAGISel::SelectScalarSSELoad is generalized to handle
bitcast'd zero vectors, which simplifies the code actually.
5) getShuffleVectorZeroOrUndef is updated to generate a shuffle that
is legal, instead of generating one that is illegal and expecting
a later legalize pass to clean it up.
6) isZeroShuffle is generalized to handle bitcast of zeros.
7) several other minor tweaks.
This patch is definite goodness, but has the potential to cause random
code quality regressions. Please be on the lookout for these and let
me know if they happen.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@44310 91177308-0d34-0410-b5e6-96231b3b80d8
2007-11-25 00:24:49 +00:00
|
|
|
[(set VR128:$dst, (v4i32 immAllZerosV))]>;
|
2010-03-31 00:40:13 +00:00
|
|
|
}
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2010-03-31 00:40:13 +00:00
|
|
|
def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
|
|
|
|
def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
|
|
|
|
def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
|
2008-03-12 07:02:50 +00:00
|
|
|
|
2010-02-28 00:17:42 +00:00
|
|
|
def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
|
2010-05-24 14:48:17 +00:00
|
|
|
(f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2009-07-31 20:07:27 +00:00
|
|
|
//===---------------------------------------------------------------------===//
|
2007-05-02 23:11:52 +00:00
|
|
|
// SSE2 Instructions
|
2009-07-31 20:07:27 +00:00
|
|
|
//===---------------------------------------------------------------------===//
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Arithmetic
|
|
|
|
|
|
|
|
/// sse2_fp_unop_rm - SSE2 unops come in both scalar and vector forms.
|
2006-10-07 21:17:13 +00:00
|
|
|
///
|
2007-07-10 00:05:58 +00:00
|
|
|
/// In addition, we also have a special variant of the scalar form here to
|
|
|
|
/// represent the associated intrinsic operation. This form is unlike the
|
|
|
|
/// plain scalar form, in that it takes an entire vector (instead of a
|
|
|
|
/// scalar) and leaves the top elements undefined.
|
|
|
|
///
|
|
|
|
/// And, we have a special variant form for a full-vector intrinsic form.
|
|
|
|
///
|
|
|
|
/// These four forms can each have a reg or a mem operand, so there are a
|
|
|
|
/// total of eight "instructions".
|
|
|
|
///
|
|
|
|
multiclass sse2_fp_unop_rm<bits<8> opc, string OpcodeStr,
|
|
|
|
SDNode OpNode,
|
|
|
|
Intrinsic F64Int,
|
|
|
|
Intrinsic V2F64Int,
|
|
|
|
bit Commutable = 0> {
|
|
|
|
// Scalar operation, reg.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
|
2007-07-10 00:05:58 +00:00
|
|
|
[(set FR64:$dst, (OpNode FR64:$src))]> {
|
2006-10-07 21:17:13 +00:00
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Scalar operation, mem.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
|
2007-07-10 00:05:58 +00:00
|
|
|
[(set FR64:$dst, (OpNode (load addr:$src)))]>;
|
2009-07-31 20:07:27 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Vector operation, reg.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
|
2007-07-10 00:05:58 +00:00
|
|
|
[(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]> {
|
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
2006-02-22 02:26:30 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Vector operation, mem.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
|
2007-07-18 20:23:34 +00:00
|
|
|
[(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
|
2006-04-14 21:59:03 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Intrinsic operation, reg.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
|
2007-07-10 00:05:58 +00:00
|
|
|
[(set VR128:$dst, (F64Int VR128:$src))]> {
|
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
2006-02-22 02:26:30 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Intrinsic operation, mem.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
|
2007-07-10 00:05:58 +00:00
|
|
|
[(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
|
2006-10-07 05:50:25 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Vector intrinsic operation, reg
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
|
2007-07-10 00:05:58 +00:00
|
|
|
[(set VR128:$dst, (V2F64Int VR128:$src))]> {
|
|
|
|
let isCommutable = Commutable;
|
2007-05-02 23:11:52 +00:00
|
|
|
}
|
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Vector intrinsic operation, mem
|
2007-08-02 21:06:40 +00:00
|
|
|
def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
|
2008-05-23 00:37:07 +00:00
|
|
|
[(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
|
2006-04-03 23:49:17 +00:00
|
|
|
}
|
2006-02-21 20:00:20 +00:00
|
|
|
|
2007-07-10 00:05:58 +00:00
|
|
|
// Square root.
|
|
|
|
defm SQRT : sse2_fp_unop_rm<0x51, "sqrt", fsqrt,
|
|
|
|
int_x86_sse2_sqrt_sd, int_x86_sse2_sqrt_pd>;
|
|
|
|
|
|
|
|
// There is no f64 version of the reciprocal approximation instructions.
|
|
|
|
|
2009-07-31 20:07:27 +00:00
|
|
|
//===---------------------------------------------------------------------===//
|
2006-02-21 20:00:20 +00:00
|
|
|
// SSE integer instructions
|
2010-03-25 18:52:04 +00:00
|
|
|
let ExeDomain = SSEPackedInt in {
|
2006-02-21 20:00:20 +00:00
|
|
|
|
|
|
|
// Move Instructions
|
2008-01-11 06:59:07 +00:00
|
|
|
let neverHasSideEffects = 1 in
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movdqa\t{$src, $dst|$dst, $src}", []>;
|
2008-12-03 18:15:48 +00:00
|
|
|
let canFoldAsLoad = 1, mayLoad = 1 in
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movdqa\t{$src, $dst|$dst, $src}",
|
2007-07-20 00:27:43 +00:00
|
|
|
[/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
|
2008-01-11 06:59:07 +00:00
|
|
|
let mayStore = 1 in
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movdqa\t{$src, $dst|$dst, $src}",
|
2007-07-20 00:27:43 +00:00
|
|
|
[/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
|
2008-12-03 18:15:48 +00:00
|
|
|
let canFoldAsLoad = 1, mayLoad = 1 in
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movdqu\t{$src, $dst|$dst, $src}",
|
2007-07-20 00:27:43 +00:00
|
|
|
[/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
|
2006-04-14 23:32:40 +00:00
|
|
|
XS, Requires<[HasSSE2]>;
|
2008-01-11 06:59:07 +00:00
|
|
|
let mayStore = 1 in
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movdqu\t{$src, $dst|$dst, $src}",
|
2007-07-20 00:27:43 +00:00
|
|
|
[/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
|
2006-04-14 23:32:40 +00:00
|
|
|
XS, Requires<[HasSSE2]>;
|
2006-03-23 07:44:07 +00:00
|
|
|
|
2007-07-18 20:23:34 +00:00
|
|
|
// Intrinsic forms of MOVDQU load and store
|
2008-12-03 18:15:48 +00:00
|
|
|
let canFoldAsLoad = 1 in
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movdqu\t{$src, $dst|$dst, $src}",
|
2007-07-18 20:23:34 +00:00
|
|
|
[(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
|
|
|
|
XS, Requires<[HasSSE2]>;
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movdqu\t{$src, $dst|$dst, $src}",
|
2007-07-18 20:23:34 +00:00
|
|
|
[(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
|
|
|
|
XS, Requires<[HasSSE2]>;
|
2006-10-07 18:39:00 +00:00
|
|
|
|
2008-03-05 08:11:27 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2006-10-07 19:02:31 +00:00
|
|
|
multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
|
|
|
|
bit Commutable = 0> {
|
2010-05-25 17:33:22 +00:00
|
|
|
def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, VR128:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
2006-10-07 18:39:00 +00:00
|
|
|
[(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> {
|
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
2010-05-25 17:33:22 +00:00
|
|
|
def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, i128mem:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
2006-10-07 18:39:00 +00:00
|
|
|
[(set VR128:$dst, (IntId VR128:$src1,
|
2010-05-25 17:33:22 +00:00
|
|
|
(bitconvert (memopv2i64
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
addr:$src2))))]>;
|
2006-10-07 18:39:00 +00:00
|
|
|
}
|
|
|
|
|
2008-05-03 00:52:09 +00:00
|
|
|
multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
|
|
|
|
string OpcodeStr,
|
|
|
|
Intrinsic IntId, Intrinsic IntId2> {
|
2010-05-25 17:33:22 +00:00
|
|
|
def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, VR128:$src2),
|
2008-05-03 00:52:09 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, i128mem:$src2),
|
2008-05-03 00:52:09 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst, (IntId VR128:$src1,
|
2009-07-31 20:07:27 +00:00
|
|
|
(bitconvert (memopv2i64 addr:$src2))))]>;
|
2010-05-25 17:33:22 +00:00
|
|
|
def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, i32i8imm:$src2),
|
2008-05-03 00:52:09 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
|
|
|
|
}
|
|
|
|
|
2006-10-07 19:14:49 +00:00
|
|
|
/// PDI_binop_rm - Simple SSE2 binary operator.
|
|
|
|
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
ValueType OpVT, bit Commutable = 0> {
|
2010-05-25 17:33:22 +00:00
|
|
|
def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, VR128:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
2006-10-07 19:14:49 +00:00
|
|
|
[(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> {
|
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
2010-05-25 17:33:22 +00:00
|
|
|
def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, i128mem:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
2006-10-07 19:14:49 +00:00
|
|
|
[(set VR128:$dst, (OpVT (OpNode VR128:$src1,
|
2009-07-31 20:07:27 +00:00
|
|
|
(bitconvert (memopv2i64 addr:$src2)))))]>;
|
2006-10-07 19:14:49 +00:00
|
|
|
}
|
2006-10-07 19:34:33 +00:00
|
|
|
|
|
|
|
/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
|
|
|
|
///
|
|
|
|
/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
|
|
|
|
/// to collapse (bitconvert VT to VT) into its operand.
|
|
|
|
///
|
|
|
|
multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
bit Commutable = 0> {
|
2009-07-31 20:07:27 +00:00
|
|
|
def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, VR128:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> {
|
|
|
|
let isCommutable = Commutable;
|
2007-04-10 22:10:25 +00:00
|
|
|
}
|
2009-07-31 20:07:27 +00:00
|
|
|
def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, i128mem:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst, (OpNode VR128:$src1,
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(memopv2i64 addr:$src2)))]>;
|
2007-04-10 22:10:25 +00:00
|
|
|
}
|
2006-10-07 19:14:49 +00:00
|
|
|
|
2008-03-05 08:19:16 +00:00
|
|
|
} // Constraints = "$src1 = $dst"
|
2010-03-25 18:52:04 +00:00
|
|
|
} // ExeDomain = SSEPackedInt
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2006-03-23 01:57:24 +00:00
|
|
|
// 128-bit Integer Arithmetic
|
2006-10-07 19:14:49 +00:00
|
|
|
|
|
|
|
defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
|
|
|
|
defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
|
|
|
|
defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
|
2006-10-07 19:34:33 +00:00
|
|
|
defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
|
2006-04-13 00:43:35 +00:00
|
|
|
|
2006-10-07 19:02:31 +00:00
|
|
|
defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
|
|
|
|
defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
|
|
|
|
defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
|
|
|
|
defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
|
2006-04-13 00:43:35 +00:00
|
|
|
|
2006-10-07 19:14:49 +00:00
|
|
|
defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
|
|
|
|
defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
|
|
|
|
defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
|
2006-10-07 19:34:33 +00:00
|
|
|
defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
|
2006-04-13 00:43:35 +00:00
|
|
|
|
2006-10-07 19:02:31 +00:00
|
|
|
defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
|
|
|
|
defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
|
|
|
|
defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
|
|
|
|
defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
|
2006-10-07 18:48:46 +00:00
|
|
|
|
2006-10-07 19:14:49 +00:00
|
|
|
defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
|
2006-04-13 05:24:54 +00:00
|
|
|
|
2006-10-07 19:02:31 +00:00
|
|
|
defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
|
|
|
|
defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w , 1>;
|
|
|
|
defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
|
2006-04-13 06:11:45 +00:00
|
|
|
|
2006-10-07 19:02:31 +00:00
|
|
|
defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
|
2006-04-13 06:11:45 +00:00
|
|
|
|
2006-10-07 19:02:31 +00:00
|
|
|
defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
|
|
|
|
defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
|
2006-10-07 07:06:17 +00:00
|
|
|
|
|
|
|
|
2006-10-07 19:02:31 +00:00
|
|
|
defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
|
|
|
|
defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
|
|
|
|
defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
|
|
|
|
defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
|
2009-05-28 02:04:00 +00:00
|
|
|
defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
|
2006-03-29 23:07:14 +00:00
|
|
|
|
2006-10-07 07:06:17 +00:00
|
|
|
|
2008-05-03 00:52:09 +00:00
|
|
|
defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
|
|
|
|
int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
|
|
|
|
defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
|
|
|
|
int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
|
|
|
|
defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
|
|
|
|
int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
|
|
|
|
|
|
|
|
defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
|
|
|
|
int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
|
|
|
|
defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
|
|
|
|
int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
|
2008-05-13 17:52:09 +00:00
|
|
|
defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
|
2008-05-03 00:52:09 +00:00
|
|
|
int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
|
|
|
|
|
|
|
|
defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
|
|
|
|
int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
|
2008-05-13 01:47:52 +00:00
|
|
|
defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
|
2008-05-03 00:52:09 +00:00
|
|
|
int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
|
2006-10-07 07:06:17 +00:00
|
|
|
|
2006-10-07 19:49:05 +00:00
|
|
|
// 128-bit logical shifts.
|
2010-03-25 18:52:04 +00:00
|
|
|
let Constraints = "$src1 = $dst", neverHasSideEffects = 1,
|
|
|
|
ExeDomain = SSEPackedInt in {
|
2007-05-02 23:11:52 +00:00
|
|
|
def PSLLDQri : PDIi8<0x73, MRM7r,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pslldq\t{$src2, $dst|$dst, $src2}", []>;
|
2007-05-02 23:11:52 +00:00
|
|
|
def PSRLDQri : PDIi8<0x73, MRM3r,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"psrldq\t{$src2, $dst|$dst, $src2}", []>;
|
2007-05-02 23:11:52 +00:00
|
|
|
// PSRADQri doesn't exist in SSE[1-3].
|
2006-04-04 21:49:39 +00:00
|
|
|
}
|
|
|
|
|
2006-10-07 19:49:05 +00:00
|
|
|
let Predicates = [HasSSE2] in {
|
|
|
|
def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
|
2009-10-28 06:30:34 +00:00
|
|
|
(v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
|
2006-10-07 19:49:05 +00:00
|
|
|
def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
|
2009-10-28 06:30:34 +00:00
|
|
|
(v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
|
2008-10-02 05:56:52 +00:00
|
|
|
def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
|
|
|
|
(v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
|
|
|
|
def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
|
|
|
|
(v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
|
2007-01-05 07:55:56 +00:00
|
|
|
def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
|
2009-10-28 06:30:34 +00:00
|
|
|
(v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
|
2008-05-29 08:22:04 +00:00
|
|
|
|
|
|
|
// Shift up / down and insert zero's.
|
|
|
|
def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
|
2009-10-28 06:30:34 +00:00
|
|
|
(v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
|
2008-05-29 08:22:04 +00:00
|
|
|
def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
|
2009-10-28 06:30:34 +00:00
|
|
|
(v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
|
2006-10-07 19:49:05 +00:00
|
|
|
}
|
|
|
|
|
2006-03-29 23:07:14 +00:00
|
|
|
// Logical
|
2006-10-07 19:37:30 +00:00
|
|
|
defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
|
|
|
|
defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or , 1>;
|
|
|
|
defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
|
2006-03-29 23:07:14 +00:00
|
|
|
|
2010-03-25 18:52:04 +00:00
|
|
|
let Constraints = "$src1 = $dst", ExeDomain = SSEPackedInt in {
|
2007-05-02 23:11:52 +00:00
|
|
|
def PANDNrr : PDI<0xDF, MRMSrcReg,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pandn\t{$src2, $dst|$dst, $src2}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
|
|
|
|
VR128:$src2)))]>;
|
|
|
|
|
|
|
|
def PANDNrm : PDI<0xDF, MRMSrcMem,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pandn\t{$src2, $dst|$dst, $src2}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
|
2007-08-02 21:17:01 +00:00
|
|
|
(memopv2i64 addr:$src2))))]>;
|
2006-03-29 23:07:14 +00:00
|
|
|
}
|
2006-03-25 09:37:23 +00:00
|
|
|
|
2006-10-07 06:47:08 +00:00
|
|
|
// SSE2 Integer comparison
|
2010-06-25 21:05:35 +00:00
|
|
|
defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
|
|
|
|
defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
|
|
|
|
defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
|
2007-05-02 23:11:52 +00:00
|
|
|
defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
|
|
|
|
defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
|
|
|
|
defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
|
2006-04-14 23:32:40 +00:00
|
|
|
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
|
2008-05-12 23:09:43 +00:00
|
|
|
(PCMPEQBrr VR128:$src1, VR128:$src2)>;
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
|
2008-05-12 23:09:43 +00:00
|
|
|
(PCMPEQBrm VR128:$src1, addr:$src2)>;
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
|
2008-05-12 23:09:43 +00:00
|
|
|
(PCMPEQWrr VR128:$src1, VR128:$src2)>;
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
|
2008-05-12 23:09:43 +00:00
|
|
|
(PCMPEQWrm VR128:$src1, addr:$src2)>;
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
|
2008-05-12 23:09:43 +00:00
|
|
|
(PCMPEQDrr VR128:$src1, VR128:$src2)>;
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
|
2008-05-12 23:09:43 +00:00
|
|
|
(PCMPEQDrm VR128:$src1, addr:$src2)>;
|
|
|
|
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
|
2008-05-12 23:09:43 +00:00
|
|
|
(PCMPGTBrr VR128:$src1, VR128:$src2)>;
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
|
2008-05-12 23:09:43 +00:00
|
|
|
(PCMPGTBrm VR128:$src1, addr:$src2)>;
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
|
2008-05-12 23:09:43 +00:00
|
|
|
(PCMPGTWrr VR128:$src1, VR128:$src2)>;
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
|
2008-05-12 23:09:43 +00:00
|
|
|
(PCMPGTWrm VR128:$src1, addr:$src2)>;
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
|
2008-05-12 23:09:43 +00:00
|
|
|
(PCMPGTDrr VR128:$src1, VR128:$src2)>;
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
|
2008-05-12 23:09:43 +00:00
|
|
|
(PCMPGTDrm VR128:$src1, addr:$src2)>;
|
|
|
|
|
|
|
|
|
2006-03-29 23:07:14 +00:00
|
|
|
// Pack instructions
|
2006-10-07 19:02:31 +00:00
|
|
|
defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
|
|
|
|
defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
|
|
|
|
defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
|
2006-03-29 23:07:14 +00:00
|
|
|
|
2010-03-25 18:52:04 +00:00
|
|
|
let ExeDomain = SSEPackedInt in {
|
|
|
|
|
2006-03-29 23:07:14 +00:00
|
|
|
// Shuffle and unpack instructions
|
2009-10-19 02:17:23 +00:00
|
|
|
let AddedComplexity = 5 in {
|
2006-04-04 19:12:30 +00:00
|
|
|
def PSHUFDri : PDIi8<0x70, MRMSrcReg,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2009-04-27 18:41:29 +00:00
|
|
|
[(set VR128:$dst, (v4i32 (pshufd:$src2
|
|
|
|
VR128:$src1, (undef))))]>;
|
2006-04-04 19:12:30 +00:00
|
|
|
def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2009-04-27 18:41:29 +00:00
|
|
|
[(set VR128:$dst, (v4i32 (pshufd:$src2
|
Optimize splat of a scalar load into a shuffle of a vector load when it's legal. e.g.
vector_shuffle (scalar_to_vector (i32 load (ptr + 4))), undef, <0, 0, 0, 0>
=>
vector_shuffle (v4i32 load ptr), undef, <1, 1, 1, 1>
iff ptr is 16-byte aligned (or can be made into 16-byte aligned).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@90984 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-09 21:00:30 +00:00
|
|
|
(bc_v4i32 (memopv2i64 addr:$src1)),
|
2009-07-31 20:07:27 +00:00
|
|
|
(undef))))]>;
|
2009-11-07 08:45:53 +00:00
|
|
|
}
|
2006-03-29 23:07:14 +00:00
|
|
|
|
|
|
|
// SSE2 with ImmT == Imm8 and XS prefix.
|
2006-04-04 19:12:30 +00:00
|
|
|
def PSHUFHWri : Ii8<0x70, MRMSrcReg,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2009-04-27 18:41:29 +00:00
|
|
|
[(set VR128:$dst, (v8i16 (pshufhw:$src2 VR128:$src1,
|
|
|
|
(undef))))]>,
|
2006-03-29 23:07:14 +00:00
|
|
|
XS, Requires<[HasSSE2]>;
|
2006-04-04 19:12:30 +00:00
|
|
|
def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2009-04-27 18:41:29 +00:00
|
|
|
[(set VR128:$dst, (v8i16 (pshufhw:$src2
|
2009-07-31 20:07:27 +00:00
|
|
|
(bc_v8i16 (memopv2i64 addr:$src1)),
|
|
|
|
(undef))))]>,
|
2006-03-29 23:07:14 +00:00
|
|
|
XS, Requires<[HasSSE2]>;
|
|
|
|
|
|
|
|
// SSE2 with ImmT == Imm8 and XD prefix.
|
2006-04-04 19:12:30 +00:00
|
|
|
def PSHUFLWri : Ii8<0x70, MRMSrcReg,
|
2009-04-27 18:41:29 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2009-04-27 18:41:29 +00:00
|
|
|
[(set VR128:$dst, (v8i16 (pshuflw:$src2 VR128:$src1,
|
|
|
|
(undef))))]>,
|
2006-03-29 23:07:14 +00:00
|
|
|
XD, Requires<[HasSSE2]>;
|
2006-04-04 19:12:30 +00:00
|
|
|
def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
|
2009-04-27 18:41:29 +00:00
|
|
|
(outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2009-04-27 18:41:29 +00:00
|
|
|
[(set VR128:$dst, (v8i16 (pshuflw:$src2
|
|
|
|
(bc_v8i16 (memopv2i64 addr:$src1)),
|
|
|
|
(undef))))]>,
|
2006-03-29 23:07:14 +00:00
|
|
|
XD, Requires<[HasSSE2]>;
|
|
|
|
|
2010-06-01 17:02:50 +00:00
|
|
|
// Unpack instructions
|
|
|
|
multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
|
|
|
|
PatFrag unp_frag, PatFrag bc_frag> {
|
|
|
|
def rr : PDI<opc, MRMSrcReg,
|
|
|
|
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
|
|
|
|
!strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
|
|
|
|
def rm : PDI<opc, MRMSrcMem,
|
|
|
|
(outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
|
|
|
|
!strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst, (unp_frag VR128:$src1,
|
|
|
|
(bc_frag (memopv2i64
|
|
|
|
addr:$src2))))]>;
|
|
|
|
}
|
2006-03-25 09:37:23 +00:00
|
|
|
|
2008-03-05 08:19:16 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2010-06-01 17:02:50 +00:00
|
|
|
defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
|
|
|
|
defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
|
|
|
|
defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
|
|
|
|
|
|
|
|
/// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
|
|
|
|
/// knew to collapse (bitconvert VT to VT) into its operand.
|
2009-07-31 20:07:27 +00:00
|
|
|
def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"punpcklqdq\t{$src2, $dst|$dst, $src2}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst,
|
2009-04-27 18:41:29 +00:00
|
|
|
(v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
|
2009-07-31 20:07:27 +00:00
|
|
|
def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"punpcklqdq\t{$src2, $dst|$dst, $src2}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst,
|
2009-04-27 18:41:29 +00:00
|
|
|
(v2i64 (unpckl VR128:$src1,
|
|
|
|
(memopv2i64 addr:$src2))))]>;
|
2009-07-31 20:07:27 +00:00
|
|
|
|
2010-06-01 17:02:50 +00:00
|
|
|
defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
|
|
|
|
defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
|
|
|
|
defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
|
|
|
|
|
|
|
|
/// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
|
|
|
|
/// knew to collapse (bitconvert VT to VT) into its operand.
|
2009-07-31 20:07:27 +00:00
|
|
|
def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"punpckhqdq\t{$src2, $dst|$dst, $src2}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst,
|
2009-04-27 18:41:29 +00:00
|
|
|
(v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
|
2009-07-31 20:07:27 +00:00
|
|
|
def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"punpckhqdq\t{$src2, $dst|$dst, $src2}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst,
|
2009-04-27 18:41:29 +00:00
|
|
|
(v2i64 (unpckh VR128:$src1,
|
|
|
|
(memopv2i64 addr:$src2))))]>;
|
2006-03-23 01:57:24 +00:00
|
|
|
}
|
2006-03-21 07:09:35 +00:00
|
|
|
|
2006-03-31 19:22:53 +00:00
|
|
|
// Extract / Insert
|
2006-04-14 23:32:40 +00:00
|
|
|
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2006-05-16 07:21:53 +00:00
|
|
|
[(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
|
2008-02-11 04:19:36 +00:00
|
|
|
imm:$src2))]>;
|
2008-03-05 08:19:16 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2007-05-02 23:11:52 +00:00
|
|
|
def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1,
|
2007-05-02 23:11:52 +00:00
|
|
|
GR32:$src2, i32i8imm:$src3),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst,
|
2008-02-11 04:19:36 +00:00
|
|
|
(X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1,
|
2007-05-02 23:11:52 +00:00
|
|
|
i16mem:$src2, i32i8imm:$src3),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst,
|
2008-02-11 04:19:36 +00:00
|
|
|
(X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
|
|
|
|
imm:$src3))]>;
|
2006-03-31 19:22:53 +00:00
|
|
|
}
|
|
|
|
|
2006-03-30 00:33:26 +00:00
|
|
|
// Mask creation
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"pmovmskb\t{$src, $dst|$dst, $src}",
|
2006-05-16 07:21:53 +00:00
|
|
|
[(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
|
2006-03-30 00:33:26 +00:00
|
|
|
|
2006-04-11 06:57:30 +00:00
|
|
|
// Conditional store
|
2007-09-11 19:55:27 +00:00
|
|
|
let Uses = [EDI] in
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
|
2007-07-31 20:11:57 +00:00
|
|
|
"maskmovdqu\t{$mask, $src|$src, $mask}",
|
2007-09-11 19:55:27 +00:00
|
|
|
[(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
|
2006-04-11 06:57:30 +00:00
|
|
|
|
2009-02-10 22:06:28 +00:00
|
|
|
let Uses = [RDI] in
|
|
|
|
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
|
|
|
|
"maskmovdqu\t{$mask, $src|$src, $mask}",
|
|
|
|
[(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
|
|
|
|
|
2010-03-25 18:52:04 +00:00
|
|
|
} // ExeDomain = SSEPackedInt
|
|
|
|
|
2006-03-25 06:03:26 +00:00
|
|
|
// Non-temporal stores
|
2010-02-16 20:50:18 +00:00
|
|
|
def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
|
|
|
|
"movntpd\t{$src, $dst|$dst, $src}",
|
|
|
|
[(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
|
2010-03-25 18:52:04 +00:00
|
|
|
let ExeDomain = SSEPackedInt in
|
2010-02-16 20:50:18 +00:00
|
|
|
def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
|
|
|
|
"movntdq\t{$src, $dst|$dst, $src}",
|
|
|
|
[(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
|
|
|
|
def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
|
2010-02-15 20:53:01 +00:00
|
|
|
"movnti\t{$src, $dst|$dst, $src}",
|
|
|
|
[(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
|
|
|
|
TB, Requires<[HasSSE2]>;
|
2010-02-15 17:02:56 +00:00
|
|
|
|
2010-02-16 20:50:18 +00:00
|
|
|
let AddedComplexity = 400 in { // Prefer non-temporal versions
|
|
|
|
def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
|
|
|
|
"movntpd\t{$src, $dst|$dst, $src}",
|
|
|
|
[(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
|
|
|
|
|
2010-03-25 18:52:04 +00:00
|
|
|
let ExeDomain = SSEPackedInt in
|
2010-02-16 20:50:18 +00:00
|
|
|
def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
|
|
|
|
"movntdq\t{$src, $dst|$dst, $src}",
|
|
|
|
[(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
|
|
|
|
}
|
|
|
|
|
2006-04-14 07:43:12 +00:00
|
|
|
// Flush cache
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
|
2006-04-14 07:43:12 +00:00
|
|
|
TB, Requires<[HasSSE2]>;
|
|
|
|
|
|
|
|
// Load, store, and memory fence
|
2010-02-12 23:54:57 +00:00
|
|
|
def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
|
2006-04-14 07:43:12 +00:00
|
|
|
"lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
|
2010-02-12 23:54:57 +00:00
|
|
|
def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
|
2006-04-14 07:43:12 +00:00
|
|
|
"mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
|
2006-03-25 06:03:26 +00:00
|
|
|
|
2010-05-20 01:35:50 +00:00
|
|
|
// Pause. This "instruction" is encoded as "rep; nop", so even though it
|
2010-05-26 18:03:53 +00:00
|
|
|
// was introduced with SSE2, it's backward compatible.
|
2010-05-20 01:35:50 +00:00
|
|
|
def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
|
|
|
|
|
2008-02-16 01:24:58 +00:00
|
|
|
//TODO: custom lower this so as to never even generate the noop
|
2010-02-23 06:54:29 +00:00
|
|
|
def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
|
2008-02-16 01:24:58 +00:00
|
|
|
(i8 0)), (NOOP)>;
|
|
|
|
def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
|
|
|
|
def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
|
2010-02-23 06:54:29 +00:00
|
|
|
def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
|
2008-02-16 01:24:58 +00:00
|
|
|
(i8 1)), (MFENCE)>;
|
|
|
|
|
2006-03-26 09:53:12 +00:00
|
|
|
// Alias instructions that map zero vector to pxor / xorp* for sse.
|
2008-12-03 18:15:48 +00:00
|
|
|
// We set canFoldAsLoad because this can be converted to a constant-pool
|
2008-12-03 05:21:24 +00:00
|
|
|
// load of an all-ones value if folding it would be beneficial.
|
2009-08-11 22:17:52 +00:00
|
|
|
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
|
2010-03-30 22:46:55 +00:00
|
|
|
isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
|
2010-02-05 21:30:49 +00:00
|
|
|
// FIXME: Change encoding to pseudo.
|
|
|
|
def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
|
Fix a long standing deficiency in the X86 backend: we would
sometimes emit "zero" and "all one" vectors multiple times,
for example:
_test2:
pcmpeqd %mm0, %mm0
movq %mm0, _M1
pcmpeqd %mm0, %mm0
movq %mm0, _M2
ret
instead of:
_test2:
pcmpeqd %mm0, %mm0
movq %mm0, _M1
movq %mm0, _M2
ret
This patch fixes this by always arranging for zero/one vectors
to be defined as v4i32 or v2i32 (SSE/MMX) instead of letting them be
any random type. This ensures they get trivially CSE'd on the dag.
This fix is also important for LegalizeDAGTypes, as it gets unhappy
when the x86 backend wants BUILD_VECTOR(i64 0) to be legal even when
'i64' isn't legal.
This patch makes the following changes:
1) X86TargetLowering::LowerBUILD_VECTOR now lowers 0/1 vectors into
their canonical types.
2) The now-dead patterns are removed from the SSE/MMX .td files.
3) All the patterns in the .td file that referred to immAllOnesV or
immAllZerosV in the wrong form now use *_bc to match them with a
bitcast wrapped around them.
4) X86DAGToDAGISel::SelectScalarSSELoad is generalized to handle
bitcast'd zero vectors, which simplifies the code actually.
5) getShuffleVectorZeroOrUndef is updated to generate a shuffle that
is legal, instead of generating one that is illegal and expecting
a later legalize pass to clean it up.
6) isZeroShuffle is generalized to handle bitcast of zeros.
7) several other minor tweaks.
This patch is definite goodness, but has the potential to cause random
code quality regressions. Please be on the lookout for these and let
me know if they happen.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@44310 91177308-0d34-0410-b5e6-96231b3b80d8
2007-11-25 00:24:49 +00:00
|
|
|
[(set VR128:$dst, (v4i32 immAllOnesV))]>;
|
2006-03-27 07:00:16 +00:00
|
|
|
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movd\t{$src, $dst|$dst, $src}",
|
2006-04-03 20:53:28 +00:00
|
|
|
[(set VR128:$dst,
|
2006-05-16 07:21:53 +00:00
|
|
|
(v4i32 (scalar_to_vector GR32:$src)))]>;
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movd\t{$src, $dst|$dst, $src}",
|
2006-04-03 20:53:28 +00:00
|
|
|
[(set VR128:$dst,
|
|
|
|
(v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
|
2006-11-16 23:33:25 +00:00
|
|
|
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movd\t{$src, $dst|$dst, $src}",
|
2006-12-05 18:45:06 +00:00
|
|
|
[(set FR32:$dst, (bitconvert GR32:$src))]>;
|
|
|
|
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movd\t{$src, $dst|$dst, $src}",
|
2006-12-14 19:43:11 +00:00
|
|
|
[(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
|
2006-12-05 18:45:06 +00:00
|
|
|
|
2006-04-03 20:53:28 +00:00
|
|
|
// SSE2 instructions with XS prefix
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movq\t{$src, $dst|$dst, $src}",
|
2006-04-03 20:53:28 +00:00
|
|
|
[(set VR128:$dst,
|
|
|
|
(v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
|
|
|
|
Requires<[HasSSE2]>;
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movq\t{$src, $dst|$dst, $src}",
|
2006-11-16 23:33:25 +00:00
|
|
|
[(store (i64 (vector_extract (v2i64 VR128:$src),
|
|
|
|
(iPTR 0))), addr:$dst)]>;
|
|
|
|
|
2010-02-28 00:17:42 +00:00
|
|
|
def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
|
2010-05-24 14:48:17 +00:00
|
|
|
(f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
|
2010-02-28 00:17:42 +00:00
|
|
|
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movd\t{$src, $dst|$dst, $src}",
|
2006-05-16 07:21:53 +00:00
|
|
|
[(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
|
2006-06-15 08:14:54 +00:00
|
|
|
(iPTR 0)))]>;
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movd\t{$src, $dst|$dst, $src}",
|
2006-04-03 20:53:28 +00:00
|
|
|
[(store (i32 (vector_extract (v4i32 VR128:$src),
|
2006-06-15 08:14:54 +00:00
|
|
|
(iPTR 0))), addr:$dst)]>;
|
2006-04-03 20:53:28 +00:00
|
|
|
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movd\t{$src, $dst|$dst, $src}",
|
2006-12-14 19:43:11 +00:00
|
|
|
[(set GR32:$dst, (bitconvert FR32:$src))]>;
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movd\t{$src, $dst|$dst, $src}",
|
2006-12-14 19:43:11 +00:00
|
|
|
[(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
|
2006-12-05 18:45:06 +00:00
|
|
|
|
2006-04-11 22:28:25 +00:00
|
|
|
// Store / copy lower 64-bits of a XMM register.
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movq\t{$src, $dst|$dst, $src}",
|
2006-04-11 22:28:25 +00:00
|
|
|
[(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
|
|
|
|
|
Now generating perfect (I think) code for "vector set" with a single non-zero
scalar value.
e.g.
_mm_set_epi32(0, a, 0, 0);
==>
movd 4(%esp), %xmm0
pshufd $69, %xmm0, %xmm0
_mm_set_epi8(0, 0, 0, 0, 0, a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
==>
movzbw 4(%esp), %ax
movzwl %ax, %eax
pxor %xmm0, %xmm0
pinsrw $5, %eax, %xmm0
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27923 91177308-0d34-0410-b5e6-96231b3b80d8
2006-04-21 01:05:10 +00:00
|
|
|
// movd / movq to XMM register zero-extends
|
2007-12-15 03:00:47 +00:00
|
|
|
let AddedComplexity = 15 in {
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movd\t{$src, $dst|$dst, $src}",
|
2008-05-09 21:53:03 +00:00
|
|
|
[(set VR128:$dst, (v4i32 (X86vzmovl
|
2008-05-08 00:57:18 +00:00
|
|
|
(v4i32 (scalar_to_vector GR32:$src)))))]>;
|
2007-12-15 03:00:47 +00:00
|
|
|
// This is X86-64 only.
|
|
|
|
def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
|
|
|
|
"mov{d|q}\t{$src, $dst|$dst, $src}",
|
2008-05-09 21:53:03 +00:00
|
|
|
[(set VR128:$dst, (v2i64 (X86vzmovl
|
2008-05-08 00:57:18 +00:00
|
|
|
(v2i64 (scalar_to_vector GR64:$src)))))]>;
|
2007-12-15 03:00:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
let AddedComplexity = 20 in {
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movd\t{$src, $dst|$dst, $src}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst,
|
2008-05-09 21:53:03 +00:00
|
|
|
(v4i32 (X86vzmovl (v4i32 (scalar_to_vector
|
2008-05-08 00:57:18 +00:00
|
|
|
(loadi32 addr:$src))))))]>;
|
2008-05-22 18:56:56 +00:00
|
|
|
|
|
|
|
def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
|
|
|
|
(MOVZDI2PDIrm addr:$src)>;
|
|
|
|
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
|
|
|
|
(MOVZDI2PDIrm addr:$src)>;
|
Disable some DAG combiner optimizations that may be
wrong for volatile loads and stores. In fact this
is almost all of them! There are three types of
problems: (1) it is wrong to change the width of
a volatile memory access. These may be used to
do memory mapped i/o, in which case a load can have
an effect even if the result is not used. Consider
loading an i32 but only using the lower 8 bits. It
is wrong to change this into a load of an i8, because
you are no longer tickling the other three bytes. It
is also unwise to make a load/store wider. For
example, changing an i16 load into an i32 load is
wrong no matter how aligned things are, since the
fact of loading an additional 2 bytes can have
i/o side-effects. (2) it is wrong to change the
number of volatile load/stores: they may be counted
by the hardware. (3) it is wrong to change a volatile
load/store that requires one memory access into one
that requires several. For example on x86-32, you
can store a double in one processor operation, but to
store an i64 requires two (two i32 stores). In a
multi-threaded program you may want to bitcast an i64
to a double and store as a double because that will
occur atomically, and be indivisible to other threads.
So it would be wrong to convert the store-of-double
into a store of an i64, because this will become two
i32 stores - no longer atomic. My policy here is
to say that the number of processor operations for
an illegal operation is undefined. So it is alright
to change a store of an i64 (requires at least two
stores; but could be validly lowered to memcpy for
example) into a store of double (one processor op).
In short, if the new store is legal and has the same
size then I say that the transform is ok. It would
also be possible to say that transforms are always
ok if before they were illegal, whether after they
are illegal or not, but that's more awkward to do
and I doubt it buys us anything much.
However this exposed an interesting thing - on x86-32
a store of i64 is considered legal! That is because
operations are marked legal by default, regardless of
whether the type is legal or not. In some ways this
is clever: before type legalization this means that
operations on illegal types are considered legal;
after type legalization there are no illegal types
so now operations are only legal if they really are.
But I consider this to be too cunning for mere mortals.
Better to do things explicitly by testing AfterLegalize.
So I have changed things so that operations with illegal
types are considered illegal - indeed they can never
map to a machine operation. However this means that
the DAG combiner is more conservative because before
it was "accidentally" performing transforms where the
type was illegal because the operation was nonetheless
marked legal. So in a few such places I added a check
on AfterLegalize, which I suppose was actually just
forgotten before. This causes the DAG combiner to do
slightly more than it used to, which resulted in the X86
backend blowing up because it got a slightly surprising
node it wasn't expecting, so I tweaked it.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@52254 91177308-0d34-0410-b5e6-96231b3b80d8
2008-06-13 19:07:40 +00:00
|
|
|
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
|
|
|
|
(MOVZDI2PDIrm addr:$src)>;
|
2008-05-22 18:56:56 +00:00
|
|
|
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movq\t{$src, $dst|$dst, $src}",
|
2007-12-15 03:00:47 +00:00
|
|
|
[(set VR128:$dst,
|
2008-05-09 21:53:03 +00:00
|
|
|
(v2i64 (X86vzmovl (v2i64 (scalar_to_vector
|
2008-05-08 00:57:18 +00:00
|
|
|
(loadi64 addr:$src))))))]>, XS,
|
2007-12-15 03:00:47 +00:00
|
|
|
Requires<[HasSSE2]>;
|
2006-03-21 23:01:21 +00:00
|
|
|
|
2008-05-22 18:56:56 +00:00
|
|
|
def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
|
|
|
|
(MOVZQI2PQIrm addr:$src)>;
|
|
|
|
def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
|
|
|
|
(MOVZQI2PQIrm addr:$src)>;
|
2008-05-09 21:53:03 +00:00
|
|
|
def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
|
2008-05-10 00:59:18 +00:00
|
|
|
}
|
2008-05-09 21:53:03 +00:00
|
|
|
|
2007-12-15 03:00:47 +00:00
|
|
|
// Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in
|
|
|
|
// IA32 document. movq xmm1, xmm2 does clear the high bits.
|
|
|
|
let AddedComplexity = 15 in
|
|
|
|
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
|
|
|
"movq\t{$src, $dst|$dst, $src}",
|
2008-05-09 21:53:03 +00:00
|
|
|
[(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
|
2007-12-15 03:00:47 +00:00
|
|
|
XS, Requires<[HasSSE2]>;
|
|
|
|
|
2008-05-20 18:24:47 +00:00
|
|
|
let AddedComplexity = 20 in {
|
2007-12-15 03:00:47 +00:00
|
|
|
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
|
|
|
|
"movq\t{$src, $dst|$dst, $src}",
|
2008-05-09 21:53:03 +00:00
|
|
|
[(set VR128:$dst, (v2i64 (X86vzmovl
|
2008-05-20 18:24:47 +00:00
|
|
|
(loadv2i64 addr:$src))))]>,
|
2007-12-15 03:00:47 +00:00
|
|
|
XS, Requires<[HasSSE2]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2008-05-20 18:24:47 +00:00
|
|
|
def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
|
|
|
|
(MOVZPQILo2PQIrm addr:$src)>;
|
|
|
|
}
|
|
|
|
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
// Instructions for the disassembler
|
|
|
|
// xr = XMM register
|
|
|
|
// xm = mem64
|
|
|
|
|
|
|
|
def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
|
|
|
"movq\t{$src, $dst|$dst, $src}", []>, XS;
|
|
|
|
|
2009-07-31 20:07:27 +00:00
|
|
|
//===---------------------------------------------------------------------===//
|
2007-05-02 23:11:52 +00:00
|
|
|
// SSE3 Instructions
|
2009-07-31 20:07:27 +00:00
|
|
|
//===---------------------------------------------------------------------===//
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2010-06-22 18:09:32 +00:00
|
|
|
// Conversion Instructions
|
|
|
|
def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
|
|
|
|
"cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
|
|
|
|
def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
|
|
|
"cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
|
|
|
|
def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
|
|
|
|
"cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
|
|
|
|
def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
|
|
|
"cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
|
|
|
|
|
2007-05-02 23:11:52 +00:00
|
|
|
// Move Instructions
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movshdup\t{$src, $dst|$dst, $src}",
|
2009-04-27 18:41:29 +00:00
|
|
|
[(set VR128:$dst, (v4f32 (movshdup
|
|
|
|
VR128:$src, (undef))))]>;
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movshdup\t{$src, $dst|$dst, $src}",
|
2009-04-27 18:41:29 +00:00
|
|
|
[(set VR128:$dst, (movshdup
|
|
|
|
(memopv4f32 addr:$src), (undef)))]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movsldup\t{$src, $dst|$dst, $src}",
|
2009-04-27 18:41:29 +00:00
|
|
|
[(set VR128:$dst, (v4f32 (movsldup
|
|
|
|
VR128:$src, (undef))))]>;
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movsldup\t{$src, $dst|$dst, $src}",
|
2009-04-27 18:41:29 +00:00
|
|
|
[(set VR128:$dst, (movsldup
|
|
|
|
(memopv4f32 addr:$src), (undef)))]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movddup\t{$src, $dst|$dst, $src}",
|
2009-04-27 18:41:29 +00:00
|
|
|
[(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"movddup\t{$src, $dst|$dst, $src}",
|
2008-09-25 20:50:48 +00:00
|
|
|
[(set VR128:$dst,
|
2009-04-27 18:41:29 +00:00
|
|
|
(v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
|
|
|
|
(undef))))]>;
|
2008-09-25 20:50:48 +00:00
|
|
|
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
|
|
|
|
(undef)),
|
2008-09-25 20:50:48 +00:00
|
|
|
(MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
|
2009-04-29 22:47:44 +00:00
|
|
|
|
|
|
|
let AddedComplexity = 5 in {
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
|
2008-09-25 20:50:48 +00:00
|
|
|
(MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
|
2009-04-29 22:47:44 +00:00
|
|
|
def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
|
|
|
|
(MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
|
|
|
|
def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
|
|
|
|
(MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
|
|
|
|
def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
|
|
|
|
(MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
|
|
|
|
}
|
2007-05-02 23:11:52 +00:00
|
|
|
|
|
|
|
// Arithmetic
|
2008-03-05 08:19:16 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2007-05-02 23:11:52 +00:00
|
|
|
def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"addsubps\t{$src2, $dst|$dst, $src2}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
|
|
|
|
VR128:$src2))]>;
|
|
|
|
def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"addsubps\t{$src2, $dst|$dst, $src2}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
|
2008-05-23 00:37:07 +00:00
|
|
|
(memop addr:$src2)))]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"addsubpd\t{$src2, $dst|$dst, $src2}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
|
|
|
|
VR128:$src2))]>;
|
|
|
|
def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
"addsubpd\t{$src2, $dst|$dst, $src2}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
|
2008-05-23 00:37:07 +00:00
|
|
|
(memop addr:$src2)))]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
}
|
|
|
|
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
|
2007-07-31 20:11:57 +00:00
|
|
|
"lddqu\t{$src, $dst|$dst, $src}",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
|
|
|
|
|
|
|
|
// Horizontal ops
|
|
|
|
class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
: S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
|
|
|
|
class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
: S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
2008-05-23 00:37:07 +00:00
|
|
|
[(set VR128:$dst, (v4f32 (IntId VR128:$src1, (memop addr:$src2))))]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
: S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
2007-05-02 23:11:52 +00:00
|
|
|
[(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
|
|
|
|
class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
|
Change instruction description to split OperandList into OutOperandList and
InOperandList. This gives one piece of important information: # of results
produced by an instruction.
An example of the change:
def ADD32rr : I<0x01, MRMDestReg, (ops GR32:$dst, GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
=>
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"add{l} {$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40033 91177308-0d34-0410-b5e6-96231b3b80d8
2007-07-19 01:14:50 +00:00
|
|
|
: S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
|
2007-07-31 20:11:57 +00:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
2008-05-23 00:37:07 +00:00
|
|
|
[(set VR128:$dst, (v2f64 (IntId VR128:$src1, (memopv2f64 addr:$src2))))]>;
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2008-03-05 08:19:16 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2007-05-02 23:11:52 +00:00
|
|
|
def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
|
|
|
|
def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
|
|
|
|
def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
|
|
|
|
def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
|
|
|
|
def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
|
|
|
|
def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
|
|
|
|
def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
|
|
|
|
def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Thread synchronization
|
2010-02-12 23:54:57 +00:00
|
|
|
def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
|
2010-02-12 23:54:57 +00:00
|
|
|
def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
|
2007-05-02 23:11:52 +00:00
|
|
|
[(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
|
|
|
|
|
|
|
|
// vector_shuffle v1, <undef> <1, 1, 3, 3>
|
|
|
|
let AddedComplexity = 15 in
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
|
2007-05-02 23:11:52 +00:00
|
|
|
(MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
|
|
|
|
let AddedComplexity = 20 in
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
|
2007-05-02 23:11:52 +00:00
|
|
|
(MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
|
|
|
|
|
|
|
|
// vector_shuffle v1, <undef> <0, 0, 2, 2>
|
|
|
|
let AddedComplexity = 15 in
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
|
2007-05-02 23:11:52 +00:00
|
|
|
(MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
|
|
|
|
let AddedComplexity = 20 in
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
|
2007-05-02 23:11:52 +00:00
|
|
|
(MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
|
|
|
|
|
2009-07-31 20:07:27 +00:00
|
|
|
//===---------------------------------------------------------------------===//
|
2007-05-02 23:11:52 +00:00
|
|
|
// SSSE3 Instructions
|
2009-07-31 20:07:27 +00:00
|
|
|
//===---------------------------------------------------------------------===//
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2007-08-10 06:22:27 +00:00
|
|
|
/// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8.
|
2008-02-09 23:46:37 +00:00
|
|
|
multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
|
|
|
|
Intrinsic IntId64, Intrinsic IntId128> {
|
|
|
|
def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR64:$dst, (IntId64 VR64:$src))]>;
|
|
|
|
|
|
|
|
def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR64:$dst,
|
|
|
|
(IntId64 (bitconvert (memopv8i8 addr:$src))))]>;
|
|
|
|
|
|
|
|
def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR128:$dst, (IntId128 VR128:$src))]>,
|
|
|
|
OpSize;
|
2007-08-10 06:22:27 +00:00
|
|
|
|
2008-02-09 23:46:37 +00:00
|
|
|
def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
|
|
|
|
(ins i128mem:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR128:$dst,
|
|
|
|
(IntId128
|
|
|
|
(bitconvert (memopv16i8 addr:$src))))]>, OpSize;
|
2007-08-10 06:22:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16.
|
2008-02-09 23:46:37 +00:00
|
|
|
multiclass SS3I_unop_rm_int_16<bits<8> opc, string OpcodeStr,
|
|
|
|
Intrinsic IntId64, Intrinsic IntId128> {
|
|
|
|
def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
|
|
|
|
(ins VR64:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR64:$dst, (IntId64 VR64:$src))]>;
|
|
|
|
|
|
|
|
def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
|
|
|
|
(ins i64mem:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR64:$dst,
|
|
|
|
(IntId64
|
|
|
|
(bitconvert (memopv4i16 addr:$src))))]>;
|
|
|
|
|
|
|
|
def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR128:$dst, (IntId128 VR128:$src))]>,
|
|
|
|
OpSize;
|
2007-08-10 06:22:27 +00:00
|
|
|
|
2008-02-09 23:46:37 +00:00
|
|
|
def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
|
|
|
|
(ins i128mem:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR128:$dst,
|
|
|
|
(IntId128
|
|
|
|
(bitconvert (memopv8i16 addr:$src))))]>, OpSize;
|
2007-08-10 06:22:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32.
/// Mirrors SS3I_unop_rm_int_16 but with v2i32 (MMX) / v4i32 (SSE) memory
/// operand types for the load-folding variants.
multiclass SS3I_unop_rm_int_32<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId64, Intrinsic IntId128> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst, (IntId64 VR64:$src))]>;

  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins i64mem:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst,
                     (IntId64
                      (bitconvert (memopv2i32 addr:$src))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
                    OpSize;

  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv4i32 addr:$src))))]>, OpSize;
}
|
|
|
|
|
2007-08-10 06:22:27 +00:00
|
|
|
// Packed absolute value: PABSB/PABSW/PABSD (SSSE3), one defm per element
// width, each expanding to rr64/rm64/rr128/rm128 via the unop multiclasses.
defm PABSB       : SS3I_unop_rm_int_8 <0x1C, "pabsb",
                                       int_x86_ssse3_pabs_b,
                                       int_x86_ssse3_pabs_b_128>;
defm PABSW       : SS3I_unop_rm_int_16<0x1D, "pabsw",
                                       int_x86_ssse3_pabs_w,
                                       int_x86_ssse3_pabs_w_128>;
defm PABSD       : SS3I_unop_rm_int_32<0x1E, "pabsd",
                                       int_x86_ssse3_pabs_d,
                                       int_x86_ssse3_pabs_d_128>;
|
|
|
|
|
|
|
|
/// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8.
/// Two-address ($src1 tied to $dst); the Commutable bit lets individual
/// instantiations (e.g. pmulhrsw) opt in to operand commutation.
let Constraints = "$src1 = $dst" in {
multiclass SS3I_binop_rm_int_8<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId64, Intrinsic IntId128,
                               bit Commutable = 0> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src1, VR64:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
    let isCommutable = Commutable;
  }
  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins VR64:$src1, i64mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR64:$dst,
                     (IntId64 VR64:$src1,
                      (bitconvert (memopv8i8 addr:$src2))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                    OpSize {
    let isCommutable = Commutable;
  }
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId128 VR128:$src1,
                       (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
}
|
|
|
|
|
|
|
|
/// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16.
/// Same structure as SS3I_binop_rm_int_8, with v4i16/v8i16 memory operands.
let Constraints = "$src1 = $dst" in {
multiclass SS3I_binop_rm_int_16<bits<8> opc, string OpcodeStr,
                                Intrinsic IntId64, Intrinsic IntId128,
                                bit Commutable = 0> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src1, VR64:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
    let isCommutable = Commutable;
  }
  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins VR64:$src1, i64mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR64:$dst,
                     (IntId64 VR64:$src1,
                      (bitconvert (memopv4i16 addr:$src2))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                    OpSize {
    let isCommutable = Commutable;
  }
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId128 VR128:$src1,
                       (bitconvert (memopv8i16 addr:$src2))))]>, OpSize;
}
}
|
|
|
|
|
|
|
|
/// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32.
/// Same structure as SS3I_binop_rm_int_8, with v2i32/v4i32 memory operands.
let Constraints = "$src1 = $dst" in {
multiclass SS3I_binop_rm_int_32<bits<8> opc, string OpcodeStr,
                                Intrinsic IntId64, Intrinsic IntId128,
                                bit Commutable = 0> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src1, VR64:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
    let isCommutable = Commutable;
  }
  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins VR64:$src1, i64mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR64:$dst,
                     (IntId64 VR64:$src1,
                      (bitconvert (memopv2i32 addr:$src2))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                    OpSize {
    let isCommutable = Commutable;
  }
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId128 VR128:$src1,
                       (bitconvert (memopv4i32 addr:$src2))))]>, OpSize;
}
}
|
|
|
|
|
2010-04-17 07:38:24 +00:00
|
|
|
// SSSE3 horizontal add/sub, multiply-add, rounded multiply, shuffle and
// sign operations. Note pmulhrsw is marked commutable (trailing 1).
let ImmT = NoImm in {  // None of these have i8 immediate fields.
defm PHADDW      : SS3I_binop_rm_int_16<0x01, "phaddw",
                                        int_x86_ssse3_phadd_w,
                                        int_x86_ssse3_phadd_w_128>;
defm PHADDD      : SS3I_binop_rm_int_32<0x02, "phaddd",
                                        int_x86_ssse3_phadd_d,
                                        int_x86_ssse3_phadd_d_128>;
defm PHADDSW     : SS3I_binop_rm_int_16<0x03, "phaddsw",
                                        int_x86_ssse3_phadd_sw,
                                        int_x86_ssse3_phadd_sw_128>;
defm PHSUBW      : SS3I_binop_rm_int_16<0x05, "phsubw",
                                        int_x86_ssse3_phsub_w,
                                        int_x86_ssse3_phsub_w_128>;
defm PHSUBD      : SS3I_binop_rm_int_32<0x06, "phsubd",
                                        int_x86_ssse3_phsub_d,
                                        int_x86_ssse3_phsub_d_128>;
defm PHSUBSW     : SS3I_binop_rm_int_16<0x07, "phsubsw",
                                        int_x86_ssse3_phsub_sw,
                                        int_x86_ssse3_phsub_sw_128>;
defm PMADDUBSW   : SS3I_binop_rm_int_8 <0x04, "pmaddubsw",
                                        int_x86_ssse3_pmadd_ub_sw,
                                        int_x86_ssse3_pmadd_ub_sw_128>;
defm PMULHRSW    : SS3I_binop_rm_int_16<0x0B, "pmulhrsw",
                                        int_x86_ssse3_pmul_hr_sw,
                                        int_x86_ssse3_pmul_hr_sw_128, 1>;

defm PSHUFB      : SS3I_binop_rm_int_8 <0x00, "pshufb",
                                        int_x86_ssse3_pshuf_b,
                                        int_x86_ssse3_pshuf_b_128>;
defm PSIGNB      : SS3I_binop_rm_int_8 <0x08, "psignb",
                                        int_x86_ssse3_psign_b,
                                        int_x86_ssse3_psign_b_128>;
defm PSIGNW      : SS3I_binop_rm_int_16<0x09, "psignw",
                                        int_x86_ssse3_psign_w,
                                        int_x86_ssse3_psign_w_128>;
defm PSIGND      : SS3I_binop_rm_int_32<0x0A, "psignd",
                                        int_x86_ssse3_psign_d,
                                        int_x86_ssse3_psign_d_128>;
}
|
2007-08-10 06:22:27 +00:00
|
|
|
|
2010-04-15 01:40:20 +00:00
|
|
|
// palignr patterns.
// The instructions carry no selection patterns here; matching is done via
// the explicit Pat<> entries below (operands are swapped there because
// palignr concatenates $src1:$src2 and shifts right).
let Constraints = "$src1 = $dst" in {
  def PALIGNR64rr  : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
                           (ins VR64:$src1, VR64:$src2, i8imm:$src3),
                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                           []>;
  def PALIGNR64rm  : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
                           (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                           []>;

  def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
                           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                           []>, OpSize;
  def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
                           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                           []>, OpSize;
}
|
2007-05-02 23:11:52 +00:00
|
|
|
|
2010-04-20 00:59:54 +00:00
|
|
|
// Select palign shuffles to PALIGNR. The source operands are intentionally
// swapped relative to the shuffle node, matching palignr's operand order.
let AddedComplexity = 5 in {
def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
          (PALIGNR64rr VR64:$src2, VR64:$src1,
                       (SHUFFLE_get_palign_imm VR64:$src3))>,
      Requires<[HasSSSE3]>;
def : Pat<(v2i32 (palign:$src3 VR64:$src1, VR64:$src2)),
          (PALIGNR64rr VR64:$src2, VR64:$src1,
                       (SHUFFLE_get_palign_imm VR64:$src3))>,
      Requires<[HasSSSE3]>;
def : Pat<(v2f32 (palign:$src3 VR64:$src1, VR64:$src2)),
          (PALIGNR64rr VR64:$src2, VR64:$src1,
                       (SHUFFLE_get_palign_imm VR64:$src3))>,
      Requires<[HasSSSE3]>;
def : Pat<(v4i16 (palign:$src3 VR64:$src1, VR64:$src2)),
          (PALIGNR64rr VR64:$src2, VR64:$src1,
                       (SHUFFLE_get_palign_imm VR64:$src3))>,
      Requires<[HasSSSE3]>;
def : Pat<(v8i8 (palign:$src3 VR64:$src1, VR64:$src2)),
          (PALIGNR64rr VR64:$src2, VR64:$src1,
                       (SHUFFLE_get_palign_imm VR64:$src3))>,
      Requires<[HasSSSE3]>;

def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
          (PALIGNR128rr VR128:$src2, VR128:$src1,
                        (SHUFFLE_get_palign_imm VR128:$src3))>,
      Requires<[HasSSSE3]>;
def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
          (PALIGNR128rr VR128:$src2, VR128:$src1,
                        (SHUFFLE_get_palign_imm VR128:$src3))>,
      Requires<[HasSSSE3]>;
def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
          (PALIGNR128rr VR128:$src2, VR128:$src1,
                        (SHUFFLE_get_palign_imm VR128:$src3))>,
      Requires<[HasSSSE3]>;
def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
          (PALIGNR128rr VR128:$src2, VR128:$src1,
                        (SHUFFLE_get_palign_imm VR128:$src3))>,
      Requires<[HasSSSE3]>;
}
|
2009-10-19 02:17:23 +00:00
|
|
|
|
Generate better code for v8i16 shuffles on SSE2
Generate better code for v16i8 shuffles on SSE2 (avoids stack)
Generate pshufb for v8i16 and v16i8 shuffles on SSSE3 where it is fewer uops.
Document the shuffle matching logic and add some FIXMEs for later further
cleanups.
New tests that test the above.
Examples:
New:
_shuf2:
pextrw $7, %xmm0, %eax
punpcklqdq %xmm1, %xmm0
pshuflw $128, %xmm0, %xmm0
pinsrw $2, %eax, %xmm0
Old:
_shuf2:
pextrw $2, %xmm0, %eax
pextrw $7, %xmm0, %ecx
pinsrw $2, %ecx, %xmm0
pinsrw $3, %eax, %xmm0
movd %xmm1, %eax
pinsrw $4, %eax, %xmm0
ret
=========
New:
_shuf4:
punpcklqdq %xmm1, %xmm0
pshufb LCPI1_0, %xmm0
Old:
_shuf4:
pextrw $3, %xmm0, %eax
movsd %xmm1, %xmm0
pextrw $3, %xmm1, %ecx
pinsrw $4, %ecx, %xmm0
pinsrw $5, %eax, %xmm0
========
New:
_shuf1:
pushl %ebx
pushl %edi
pushl %esi
pextrw $1, %xmm0, %eax
rolw $8, %ax
movd %xmm0, %ecx
rolw $8, %cx
pextrw $5, %xmm0, %edx
pextrw $4, %xmm0, %esi
pextrw $3, %xmm0, %edi
pextrw $2, %xmm0, %ebx
movaps %xmm0, %xmm1
pinsrw $0, %ecx, %xmm1
pinsrw $1, %eax, %xmm1
rolw $8, %bx
pinsrw $2, %ebx, %xmm1
rolw $8, %di
pinsrw $3, %edi, %xmm1
rolw $8, %si
pinsrw $4, %esi, %xmm1
rolw $8, %dx
pinsrw $5, %edx, %xmm1
pextrw $7, %xmm0, %eax
rolw $8, %ax
movaps %xmm1, %xmm0
pinsrw $7, %eax, %xmm0
popl %esi
popl %edi
popl %ebx
ret
Old:
_shuf1:
subl $252, %esp
movaps %xmm0, (%esp)
movaps %xmm0, 16(%esp)
movaps %xmm0, 32(%esp)
movaps %xmm0, 48(%esp)
movaps %xmm0, 64(%esp)
movaps %xmm0, 80(%esp)
movaps %xmm0, 96(%esp)
movaps %xmm0, 224(%esp)
movaps %xmm0, 208(%esp)
movaps %xmm0, 192(%esp)
movaps %xmm0, 176(%esp)
movaps %xmm0, 160(%esp)
movaps %xmm0, 144(%esp)
movaps %xmm0, 128(%esp)
movaps %xmm0, 112(%esp)
movzbl 14(%esp), %eax
movd %eax, %xmm1
movzbl 22(%esp), %eax
movd %eax, %xmm2
punpcklbw %xmm1, %xmm2
movzbl 42(%esp), %eax
movd %eax, %xmm1
movzbl 50(%esp), %eax
movd %eax, %xmm3
punpcklbw %xmm1, %xmm3
punpcklbw %xmm2, %xmm3
movzbl 77(%esp), %eax
movd %eax, %xmm1
movzbl 84(%esp), %eax
movd %eax, %xmm2
punpcklbw %xmm1, %xmm2
movzbl 104(%esp), %eax
movd %eax, %xmm1
punpcklbw %xmm1, %xmm0
punpcklbw %xmm2, %xmm0
movaps %xmm0, %xmm1
punpcklbw %xmm3, %xmm1
movzbl 127(%esp), %eax
movd %eax, %xmm0
movzbl 135(%esp), %eax
movd %eax, %xmm2
punpcklbw %xmm0, %xmm2
movzbl 155(%esp), %eax
movd %eax, %xmm0
movzbl 163(%esp), %eax
movd %eax, %xmm3
punpcklbw %xmm0, %xmm3
punpcklbw %xmm2, %xmm3
movzbl 188(%esp), %eax
movd %eax, %xmm0
movzbl 197(%esp), %eax
movd %eax, %xmm2
punpcklbw %xmm0, %xmm2
movzbl 217(%esp), %eax
movd %eax, %xmm4
movzbl 225(%esp), %eax
movd %eax, %xmm0
punpcklbw %xmm4, %xmm0
punpcklbw %xmm2, %xmm0
punpcklbw %xmm3, %xmm0
punpcklbw %xmm1, %xmm0
addl $252, %esp
ret
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@65311 91177308-0d34-0410-b5e6-96231b3b80d8
2009-02-23 08:49:38 +00:00
|
|
|
// Select the X86pshufb DAG node to PSHUFB, folding the mask load when
// it comes from memory.
def : Pat<(X86pshufb VR128:$src, VR128:$mask),
          (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
          (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
|
|
|
|
|
2009-07-31 20:07:27 +00:00
|
|
|
//===---------------------------------------------------------------------===//
|
2006-03-21 23:01:21 +00:00
|
|
|
// Non-Instruction Patterns
|
2009-07-31 20:07:27 +00:00
|
|
|
//===---------------------------------------------------------------------===//
|
2006-03-21 23:01:21 +00:00
|
|
|
|
2009-07-31 20:07:27 +00:00
|
|
|
// extload f32 -> f64. This matches load+fextend because we have a hack in
|
|
|
|
// the isel (PreprocessForFPConvert) that can introduce loads after dag
|
|
|
|
// combine.
|
Significantly simplify and improve handling of FP function results on x86-32.
This case returns the value in ST(0) and then has to convert it to an SSE
register. This causes significant codegen ugliness in some cases. For
example in the trivial fp-stack-direct-ret.ll testcase we used to generate:
_bar:
subl $28, %esp
call L_foo$stub
fstpl 16(%esp)
movsd 16(%esp), %xmm0
movsd %xmm0, 8(%esp)
fldl 8(%esp)
addl $28, %esp
ret
because we move the result of foo() into an XMM register, then have to
move it back for the return of bar.
Instead of hacking ever-more special cases into the call result lowering code
we take a much simpler approach: on x86-32, fp return is modeled as always
returning into an f80 register which is then truncated to f32 or f64 as needed.
Similarly for a result, we model it as an extension to f80 + return.
This exposes the truncate and extensions to the dag combiner, allowing target
independent code to hack on them, eliminating them in this case. This gives
us this code for the example above:
_bar:
subl $12, %esp
call L_foo$stub
addl $12, %esp
ret
The nasty aspect of this is that these conversions are not legal, but we want
the second pass of dag combiner (post-legalize) to be able to hack on them.
To handle this, we lie to legalize and say they are legal, then custom expand
them on entry to the isel pass (PreprocessForFPConvert). This is gross, but
less gross than the code it is replacing :)
This also allows us to generate better code in several other cases. For
example on fp-stack-ret-conv.ll, we now generate:
_test:
subl $12, %esp
call L_foo$stub
fstps 8(%esp)
movl 16(%esp), %eax
cvtss2sd 8(%esp), %xmm0
movsd %xmm0, (%eax)
addl $12, %esp
ret
where before we produced (incidentally, the old bad code is identical to what
gcc produces):
_test:
subl $12, %esp
call L_foo$stub
fstpl (%esp)
cvtsd2ss (%esp), %xmm0
cvtss2sd %xmm0, %xmm0
movl 16(%esp), %eax
movsd %xmm0, (%eax)
addl $12, %esp
ret
Note that we generate slightly worse code on pr1505b.ll due to a scheduling
deficiency that is unrelated to this patch.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@46307 91177308-0d34-0410-b5e6-96231b3b80d8
2008-01-24 08:07:48 +00:00
|
|
|
// Since these loads aren't folded into the fextend, we have to match it
// explicitly here.
let Predicates = [HasSSE2] in
 // (fextend (loadf32 addr)) -> CVTSS2SDrm: convert f32 load to f64 directly.
 def : Pat<(fextend (loadf32 addr:$src)),
           (CVTSS2SDrm addr:$src)>;
|
|
|
|
|
2006-03-24 02:58:06 +00:00
|
|
|
// bit_convert
// All 128-bit bitconverts between vector types are register-class no-ops:
// select them to the source register unchanged.
let Predicates = [HasSSE2] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
|
2006-03-22 02:53:00 +00:00
|
|
|
|
Now generating perfect (I think) code for "vector set" with a single non-zero
scalar value.
e.g.
_mm_set_epi32(0, a, 0, 0);
==>
movd 4(%esp), %xmm0
pshufd $69, %xmm0, %xmm0
_mm_set_epi8(0, 0, 0, 0, 0, a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
==>
movzbw 4(%esp), %ax
movzwl %ax, %eax
pxor %xmm0, %xmm0
pinsrw $5, %eax, %xmm0
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27923 91177308-0d34-0410-b5e6-96231b3b80d8
2006-04-21 01:05:10 +00:00
|
|
|
// Move scalar to XMM zero-extended
// movd to XMM register zero-extends
let AddedComplexity = 15 in {
// Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
          (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
          (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
          (MOVSSrr (v4f32 (V_SET0PS)),
                   (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
          (MOVSSrr (v4i32 (V_SET0PI)),
                   (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
}
|
2006-03-24 23:15:12 +00:00
|
|
|
|
2006-03-22 02:53:00 +00:00
|
|
|
// Splat v2f64 / v2i64
// A two-element splat is just an unpack of the register with itself.
let AddedComplexity = 10 in {
def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
          (UNPCKLPDrr VR128:$src, VR128:$src)>,   Requires<[HasSSE2]>;
def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
          (UNPCKHPDrr VR128:$src, VR128:$src)>,   Requires<[HasSSE2]>;
def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
          (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
          (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
|
2006-03-29 03:04:49 +00:00
|
|
|
|
2006-04-18 21:55:35 +00:00
|
|
|
// Special unary SHUFPSrri case.
def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
          (SHUFPSrri VR128:$src1, VR128:$src1,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>;
// Prefer PSHUFD when SSE2 is available (single-source form).
let AddedComplexity = 5 in
def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
          (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
      Requires<[HasSSE2]>;
// Special unary SHUFPDrri case.
def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
          (SHUFPDrri VR128:$src1, VR128:$src1,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>,
      Requires<[HasSSE2]>;
// Special unary SHUFPDrri case.
def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
          (SHUFPDrri VR128:$src1, VR128:$src1,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>,
      Requires<[HasSSE2]>;
// Unary v4f32 shuffle with PSHUF* in order to fold a load.
def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
          (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
      Requires<[HasSSE2]>;
|
2008-09-26 23:41:32 +00:00
|
|
|
|
2006-04-10 22:35:16 +00:00
|
|
|
// Special binary v4i32 shuffle cases with SHUFPS.
def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
          (SHUFPSrri VR128:$src1, VR128:$src2,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>,
      Requires<[HasSSE2]>;
// Same, with the second operand folded from memory.
def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
          (SHUFPSrmi VR128:$src1, addr:$src2,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>,
      Requires<[HasSSE2]>;
// Special binary v2i64 shuffle cases using SHUFPDrri.
def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
          (SHUFPDrri VR128:$src1, VR128:$src2,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>,
      Requires<[HasSSE2]>;
|
2006-03-30 07:33:32 +00:00
|
|
|
|
Handle canonical form of e.g.
vector_shuffle v1, v1, <0, 4, 1, 5, 2, 6, 3, 7>
This is turned into
vector_shuffle v1, <undef>, <0, 0, 1, 1, 2, 2, 3, 3>
by dag combiner.
It would match a {p}unpckl on x86.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27437 91177308-0d34-0410-b5e6-96231b3b80d8
2006-04-05 07:20:06 +00:00
|
|
|
// vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
// When optimizing for speed, a PSHUFD is preferred over an unpack with self.
let AddedComplexity = 15 in {
def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
          (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
      Requires<[OptForSpeed, HasSSE2]>;
def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
          (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
      Requires<[OptForSpeed, HasSSE2]>;
}
|
2006-04-19 21:15:24 +00:00
|
|
|
// Fallback: select unpckl-with-undef shuffles to the unpack instructions
// using the source register for both operands.
let AddedComplexity = 10 in {
def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
          (UNPCKLPSrr VR128:$src, VR128:$src)>;
def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
          (PUNPCKLBWrr VR128:$src, VR128:$src)>;
def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
          (PUNPCKLWDrr VR128:$src, VR128:$src)>;
def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
          (PUNPCKLDQrr VR128:$src, VR128:$src)>;
}
|
Handle canonical form of e.g.
vector_shuffle v1, v1, <0, 4, 1, 5, 2, 6, 3, 7>
This is turned into
vector_shuffle v1, <undef>, <0, 0, 1, 1, 2, 2, 3, 3>
by dag combiner.
It would match a {p}unpckl on x86.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27437 91177308-0d34-0410-b5e6-96231b3b80d8
2006-04-05 07:20:06 +00:00
|
|
|
|
2007-05-17 18:44:37 +00:00
|
|
|
// vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
// When optimizing for speed, a PSHUFD is preferred over an unpack with self.
let AddedComplexity = 15 in {
def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
          (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
      Requires<[OptForSpeed, HasSSE2]>;
def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
          (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
      Requires<[OptForSpeed, HasSSE2]>;
}
|
2007-05-17 18:44:37 +00:00
|
|
|
// Fallback: select unpckh-with-undef shuffles to the high-unpack
// instructions using the source register for both operands.
let AddedComplexity = 10 in {
def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
          (UNPCKHPSrr VR128:$src, VR128:$src)>;
def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
          (PUNPCKHBWrr VR128:$src, VR128:$src)>;
def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
          (PUNPCKHWDrr VR128:$src, VR128:$src)>;
def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
          (PUNPCKHDQrr VR128:$src, VR128:$src)>;
}
|
|
|
|
|
2008-09-26 23:41:32 +00:00
|
|
|
let AddedComplexity = 20 in {
// vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
}
|
2006-04-19 20:37:34 +00:00
|
|
|
|
Fixed a bug which caused x86 to incorrectly match
shuffle v, undef, <2, ?, 3, ?>
to movhlps
It should match to unpckhps instead.
Added proper matching code for
shuffle v, undef, <2, 3, 2, 3>
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@31519 91177308-0d34-0410-b5e6-96231b3b80d8
2006-11-07 22:14:24 +00:00
|
|
|
let AddedComplexity = 20 in {
// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
// (integer element types use the FP moves; the bits are identical).
def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
}
|
2006-04-24 21:58:20 +00:00
|
|
|
|
2008-05-23 21:23:16 +00:00
|
|
|
// (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVLPSmr addr:$src1, VR128:$src2)>;
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVLPDmr addr:$src1, VR128:$src2)>;
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
|
|
|
|
addr:$src1),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVLPSmr addr:$src1, VR128:$src2)>;
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVLPDmr addr:$src1, VR128:$src2)>;
|
2008-05-23 21:23:16 +00:00
|
|
|
|
2006-10-09 21:42:15 +00:00
|
|
|
let AddedComplexity = 15 in {
|
2006-04-24 21:58:20 +00:00
|
|
|
// Setting the lowest element in the vector.
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
|
2010-02-28 00:17:42 +00:00
|
|
|
(MOVSSrr (v4i32 VR128:$src1),
|
2010-05-24 14:48:17 +00:00
|
|
|
(EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
|
2010-02-28 00:17:42 +00:00
|
|
|
(MOVSDrr (v2i64 VR128:$src1),
|
2010-05-24 14:48:17 +00:00
|
|
|
(EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
|
2006-04-24 23:34:56 +00:00
|
|
|
|
2010-02-28 00:17:42 +00:00
|
|
|
// vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
|
2010-05-24 14:48:17 +00:00
|
|
|
(MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
|
2010-02-28 00:17:42 +00:00
|
|
|
Requires<[HasSSE2]>;
|
2009-04-27 18:41:29 +00:00
|
|
|
def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
|
2010-05-24 14:48:17 +00:00
|
|
|
(MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
|
2010-02-28 00:17:42 +00:00
|
|
|
Requires<[HasSSE2]>;
|
2006-10-09 21:42:15 +00:00
|
|
|
}
|
2006-05-03 20:32:03 +00:00
|
|
|
|
2009-06-19 07:00:55 +00:00
|
|
|
// vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
|
|
|
|
// fall back to this for SSE1)
|
|
|
|
def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
|
2009-07-31 20:07:27 +00:00
|
|
|
(SHUFPSrri VR128:$src2, VR128:$src1,
|
2010-02-26 01:14:30 +00:00
|
|
|
(SHUFFLE_get_shuf_imm VR128:$src3))>;
|
2009-06-19 07:00:55 +00:00
|
|
|
|
2006-04-24 23:34:56 +00:00
|
|
|
// Set lowest element and zero upper elements.
|
2008-05-09 21:53:03 +00:00
|
|
|
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
|
2008-05-08 22:35:02 +00:00
|
|
|
(MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
|
2006-04-24 23:34:56 +00:00
|
|
|
|
2006-04-12 21:21:57 +00:00
|
|
|
// Some special case pandn patterns.
|
|
|
|
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
|
|
|
|
VR128:$src2)),
|
2006-06-20 00:25:29 +00:00
|
|
|
(PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
|
2006-04-12 21:21:57 +00:00
|
|
|
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
|
|
|
|
VR128:$src2)),
|
2006-06-20 00:25:29 +00:00
|
|
|
(PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
|
2006-04-12 21:21:57 +00:00
|
|
|
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
|
|
|
|
VR128:$src2)),
|
2006-06-20 00:25:29 +00:00
|
|
|
(PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
|
2006-04-12 21:21:57 +00:00
|
|
|
|
|
|
|
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
|
2008-05-23 00:37:07 +00:00
|
|
|
(memop addr:$src2))),
|
2006-06-20 00:25:29 +00:00
|
|
|
(PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
|
2006-04-12 21:21:57 +00:00
|
|
|
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
|
2008-05-23 00:37:07 +00:00
|
|
|
(memop addr:$src2))),
|
2006-06-20 00:25:29 +00:00
|
|
|
(PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
|
2006-04-12 21:21:57 +00:00
|
|
|
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
|
2008-05-23 00:37:07 +00:00
|
|
|
(memop addr:$src2))),
|
2006-06-20 00:25:29 +00:00
|
|
|
(PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
|
X86 target specific DAG combine: turn build_vector (load x), (load x+4),
(load x+8), (load x+12), <0, 1, 2, 3> to a single 128-bit load (aligned and
unaligned).
e.g.
__m128 test(float a, float b, float c, float d) {
return _mm_set_ps(d, c, b, a);
}
_test:
movups 4(%esp), %xmm0
ret
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@29042 91177308-0d34-0410-b5e6-96231b3b80d8
2006-07-07 08:33:52 +00:00
|
|
|
|
2007-11-17 03:58:34 +00:00
|
|
|
// vector -> vector casts
|
|
|
|
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
|
|
|
|
(Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
|
|
|
|
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
|
|
|
|
(Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
|
2008-09-05 23:07:03 +00:00
|
|
|
def : Pat<(v2f64 (sint_to_fp (v2i32 VR64:$src))),
|
|
|
|
(Int_CVTPI2PDrr VR64:$src)>, Requires<[HasSSE2]>;
|
|
|
|
def : Pat<(v2i32 (fp_to_sint (v2f64 VR128:$src))),
|
|
|
|
(Int_CVTTPD2PIrr VR128:$src)>, Requires<[HasSSE2]>;
|
2007-11-17 03:58:34 +00:00
|
|
|
|
2007-07-20 00:27:43 +00:00
|
|
|
// Use movaps / movups for SSE integer load / store (one byte shorter).
|
2007-07-27 17:16:43 +00:00
|
|
|
def : Pat<(alignedloadv4i32 addr:$src),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVAPSrm addr:$src)>;
|
2007-07-27 17:16:43 +00:00
|
|
|
def : Pat<(loadv4i32 addr:$src),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVUPSrm addr:$src)>;
|
2007-07-20 00:27:43 +00:00
|
|
|
def : Pat<(alignedloadv2i64 addr:$src),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVAPSrm addr:$src)>;
|
2007-07-20 00:27:43 +00:00
|
|
|
def : Pat<(loadv2i64 addr:$src),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVUPSrm addr:$src)>;
|
2007-07-20 00:27:43 +00:00
|
|
|
|
|
|
|
def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVAPSmr addr:$dst, VR128:$src)>;
|
2007-07-20 00:27:43 +00:00
|
|
|
def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVAPSmr addr:$dst, VR128:$src)>;
|
2007-07-20 00:27:43 +00:00
|
|
|
def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVAPSmr addr:$dst, VR128:$src)>;
|
2007-07-20 00:27:43 +00:00
|
|
|
def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVAPSmr addr:$dst, VR128:$src)>;
|
2007-07-20 00:27:43 +00:00
|
|
|
def : Pat<(store (v2i64 VR128:$src), addr:$dst),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVUPSmr addr:$dst, VR128:$src)>;
|
2007-07-20 00:27:43 +00:00
|
|
|
def : Pat<(store (v4i32 VR128:$src), addr:$dst),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVUPSmr addr:$dst, VR128:$src)>;
|
2007-07-20 00:27:43 +00:00
|
|
|
def : Pat<(store (v8i16 VR128:$src), addr:$dst),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVUPSmr addr:$dst, VR128:$src)>;
|
2007-07-20 00:27:43 +00:00
|
|
|
def : Pat<(store (v16i8 VR128:$src), addr:$dst),
|
2010-02-26 01:14:30 +00:00
|
|
|
(MOVUPSmr addr:$dst, VR128:$src)>;
|
2009-07-31 20:07:27 +00:00
|
|
|
|
2008-02-03 07:18:54 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SSE4.1 Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2008-10-10 23:51:03 +00:00
|
|
|
multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd,
|
2008-02-03 07:18:54 +00:00
|
|
|
string OpcodeStr,
|
|
|
|
Intrinsic V4F32Int,
|
2008-02-04 05:34:34 +00:00
|
|
|
Intrinsic V2F64Int> {
|
2008-02-03 07:18:54 +00:00
|
|
|
// Intrinsic operation, reg.
|
|
|
|
// Vector intrinsic operation, reg
|
2009-07-31 20:07:27 +00:00
|
|
|
def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
|
2008-02-04 06:00:24 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
|
2008-02-03 07:18:54 +00:00
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2008-02-04 05:34:34 +00:00
|
|
|
[(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
|
|
|
|
OpSize;
|
2008-02-03 07:18:54 +00:00
|
|
|
|
|
|
|
// Vector intrinsic operation, mem
|
2009-12-18 07:40:29 +00:00
|
|
|
def PSm_Int : Ii8<opcps, MRMSrcMem,
|
2008-02-04 06:00:24 +00:00
|
|
|
(outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
|
2008-02-03 07:18:54 +00:00
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2008-05-23 00:37:07 +00:00
|
|
|
[(set VR128:$dst,
|
|
|
|
(V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>,
|
2009-12-18 07:40:29 +00:00
|
|
|
TA, OpSize,
|
2009-12-22 17:47:23 +00:00
|
|
|
Requires<[HasSSE41]>;
|
2008-02-03 07:18:54 +00:00
|
|
|
|
|
|
|
// Vector intrinsic operation, reg
|
2008-03-14 07:39:27 +00:00
|
|
|
def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
|
2008-02-04 06:00:24 +00:00
|
|
|
(outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
|
2008-02-03 07:18:54 +00:00
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2008-02-04 05:34:34 +00:00
|
|
|
[(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
|
|
|
|
OpSize;
|
2008-02-03 07:18:54 +00:00
|
|
|
|
|
|
|
// Vector intrinsic operation, mem
|
2008-03-14 07:39:27 +00:00
|
|
|
def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
|
2008-02-04 06:00:24 +00:00
|
|
|
(outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
|
2008-02-03 07:18:54 +00:00
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2008-05-23 00:37:07 +00:00
|
|
|
[(set VR128:$dst,
|
|
|
|
(V2F64Int (memopv2f64 addr:$src1),imm:$src2))]>,
|
2008-02-04 05:34:34 +00:00
|
|
|
OpSize;
|
2008-02-03 07:18:54 +00:00
|
|
|
}
|
|
|
|
|
2008-10-10 23:51:03 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
|
|
|
multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
|
|
|
|
string OpcodeStr,
|
|
|
|
Intrinsic F32Int,
|
|
|
|
Intrinsic F64Int> {
|
|
|
|
// Intrinsic operation, reg.
|
|
|
|
def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
|
2009-07-31 20:07:27 +00:00
|
|
|
(outs VR128:$dst),
|
2008-10-10 23:51:03 +00:00
|
|
|
(ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst,
|
2008-10-10 23:51:03 +00:00
|
|
|
(F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
|
|
|
|
OpSize;
|
|
|
|
|
|
|
|
// Intrinsic operation, mem.
|
2009-07-31 20:07:27 +00:00
|
|
|
def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
|
|
|
|
(outs VR128:$dst),
|
2008-10-10 23:51:03 +00:00
|
|
|
(ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-10-10 23:51:03 +00:00
|
|
|
"ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst,
|
2008-10-10 23:51:03 +00:00
|
|
|
(F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
|
|
|
|
OpSize;
|
|
|
|
|
|
|
|
// Intrinsic operation, reg.
|
|
|
|
def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
|
2009-07-31 20:07:27 +00:00
|
|
|
(outs VR128:$dst),
|
2008-10-10 23:51:03 +00:00
|
|
|
(ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst,
|
2008-10-10 23:51:03 +00:00
|
|
|
(F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
|
|
|
|
OpSize;
|
|
|
|
|
|
|
|
// Intrinsic operation, mem.
|
|
|
|
def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
|
2009-07-31 20:07:27 +00:00
|
|
|
(outs VR128:$dst),
|
2008-10-10 23:51:03 +00:00
|
|
|
(ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst,
|
2008-10-10 23:51:03 +00:00
|
|
|
(F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
|
|
|
|
OpSize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-02-03 07:18:54 +00:00
|
|
|
// FP round - roundss, roundps, roundsd, roundpd
|
2008-10-10 23:51:03 +00:00
|
|
|
defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round",
|
|
|
|
int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
|
|
|
|
defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
|
|
|
|
int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
|
2008-02-04 05:34:34 +00:00
|
|
|
|
|
|
|
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
|
|
|
|
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
|
|
|
|
Intrinsic IntId128> {
|
|
|
|
def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
|
|
|
|
def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
|
|
|
|
(ins i128mem:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR128:$dst,
|
|
|
|
(IntId128
|
|
|
|
(bitconvert (memopv8i16 addr:$src))))]>, OpSize;
|
|
|
|
}
|
|
|
|
|
|
|
|
defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
|
|
|
|
int_x86_sse41_phminposuw>;
|
|
|
|
|
|
|
|
/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
|
2008-03-05 08:19:16 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2008-02-04 05:34:34 +00:00
|
|
|
multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
|
|
|
|
Intrinsic IntId128, bit Commutable = 0> {
|
2008-02-09 23:46:37 +00:00
|
|
|
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, VR128:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
|
|
|
|
OpSize {
|
2008-02-04 05:34:34 +00:00
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
2008-02-09 23:46:37 +00:00
|
|
|
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, i128mem:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst,
|
|
|
|
(IntId128 VR128:$src1,
|
|
|
|
(bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
|
2008-02-04 05:34:34 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq",
|
|
|
|
int_x86_sse41_pcmpeqq, 1>;
|
|
|
|
defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw",
|
|
|
|
int_x86_sse41_packusdw, 0>;
|
|
|
|
defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb",
|
|
|
|
int_x86_sse41_pminsb, 1>;
|
|
|
|
defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd",
|
|
|
|
int_x86_sse41_pminsd, 1>;
|
|
|
|
defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud",
|
|
|
|
int_x86_sse41_pminud, 1>;
|
|
|
|
defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw",
|
|
|
|
int_x86_sse41_pminuw, 1>;
|
|
|
|
defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb",
|
|
|
|
int_x86_sse41_pmaxsb, 1>;
|
|
|
|
defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd",
|
|
|
|
int_x86_sse41_pmaxsd, 1>;
|
|
|
|
defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud",
|
|
|
|
int_x86_sse41_pmaxud, 1>;
|
|
|
|
defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw",
|
|
|
|
int_x86_sse41_pmaxuw, 1>;
|
2008-02-04 06:00:24 +00:00
|
|
|
|
2008-12-18 21:42:19 +00:00
|
|
|
defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq, 1>;
|
|
|
|
|
2008-07-17 16:51:19 +00:00
|
|
|
def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
|
|
|
|
(PCMPEQQrr VR128:$src1, VR128:$src2)>;
|
|
|
|
def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
|
|
|
|
(PCMPEQQrm VR128:$src1, addr:$src2)>;
|
|
|
|
|
2008-02-09 01:38:08 +00:00
|
|
|
/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
|
2008-03-05 08:19:16 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2008-05-23 17:49:40 +00:00
|
|
|
multiclass SS41I_binop_patint<bits<8> opc, string OpcodeStr, ValueType OpVT,
|
|
|
|
SDNode OpNode, Intrinsic IntId128,
|
|
|
|
bit Commutable = 0> {
|
2008-02-09 01:38:08 +00:00
|
|
|
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, VR128:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
2008-05-23 17:49:40 +00:00
|
|
|
[(set VR128:$dst, (OpNode (OpVT VR128:$src1),
|
|
|
|
VR128:$src2))]>, OpSize {
|
2008-02-09 01:38:08 +00:00
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
|
|
|
def rr_int : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, VR128:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
|
|
|
|
OpSize {
|
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
|
|
|
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, i128mem:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst,
|
2010-02-18 06:33:42 +00:00
|
|
|
(OpVT (OpNode VR128:$src1, (memop addr:$src2))))]>, OpSize;
|
2008-02-09 01:38:08 +00:00
|
|
|
def rm_int : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, i128mem:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst,
|
2008-05-23 00:37:07 +00:00
|
|
|
(IntId128 VR128:$src1, (memop addr:$src2)))]>,
|
2008-02-09 01:38:08 +00:00
|
|
|
OpSize;
|
|
|
|
}
|
|
|
|
}
|
2010-03-30 18:49:01 +00:00
|
|
|
|
|
|
|
/// SS48I_binop_rm - Simple SSE41 binary operator.
|
|
|
|
let Constraints = "$src1 = $dst" in {
|
|
|
|
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
ValueType OpVT, bit Commutable = 0> {
|
2010-05-25 17:33:22 +00:00
|
|
|
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
|
2010-03-30 18:49:01 +00:00
|
|
|
(ins VR128:$src1, VR128:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
|
|
|
|
OpSize {
|
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
2010-05-25 17:33:22 +00:00
|
|
|
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
|
2010-03-30 18:49:01 +00:00
|
|
|
(ins VR128:$src1, i128mem:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst, (OpNode VR128:$src1,
|
|
|
|
(bc_v4i32 (memopv2i64 addr:$src2))))]>,
|
|
|
|
OpSize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, 1>;
|
2008-02-09 01:38:08 +00:00
|
|
|
|
2008-03-14 07:39:27 +00:00
|
|
|
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
|
2008-03-05 08:19:16 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2008-02-04 06:00:24 +00:00
|
|
|
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
|
|
|
|
Intrinsic IntId128, bit Commutable = 0> {
|
2008-03-14 07:39:27 +00:00
|
|
|
def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
|
2008-02-09 23:46:37 +00:00
|
|
|
(ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-02-10 18:47:57 +00:00
|
|
|
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst,
|
2008-02-09 23:46:37 +00:00
|
|
|
(IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
|
|
|
|
OpSize {
|
2008-02-04 06:00:24 +00:00
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
2008-03-14 07:39:27 +00:00
|
|
|
def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
|
2008-02-09 23:46:37 +00:00
|
|
|
(ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
2008-02-10 18:47:57 +00:00
|
|
|
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2008-02-09 23:46:37 +00:00
|
|
|
[(set VR128:$dst,
|
|
|
|
(IntId128 VR128:$src1,
|
|
|
|
(bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
|
|
|
|
OpSize;
|
2008-02-04 06:00:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps",
|
|
|
|
int_x86_sse41_blendps, 0>;
|
|
|
|
defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd",
|
|
|
|
int_x86_sse41_blendpd, 0>;
|
|
|
|
defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw",
|
|
|
|
int_x86_sse41_pblendw, 0>;
|
|
|
|
defm DPPS : SS41I_binop_rmi_int<0x40, "dpps",
|
|
|
|
int_x86_sse41_dpps, 1>;
|
|
|
|
defm DPPD : SS41I_binop_rmi_int<0x41, "dppd",
|
|
|
|
int_x86_sse41_dppd, 1>;
|
|
|
|
defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw",
|
2010-04-08 00:52:02 +00:00
|
|
|
int_x86_sse41_mpsadbw, 0>;
|
2008-02-09 01:38:08 +00:00
|
|
|
|
2008-02-09 23:46:37 +00:00
|
|
|
|
2008-03-14 07:39:27 +00:00
|
|
|
/// SS41I_ternary_int - SSE 4.1 ternary operator
|
2008-03-05 08:19:16 +00:00
|
|
|
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
|
2008-02-10 18:47:57 +00:00
|
|
|
multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
|
|
|
|
def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, VR128:$src2),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-02-10 18:47:57 +00:00
|
|
|
"\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
|
|
|
|
[(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
|
|
|
|
OpSize;
|
|
|
|
|
|
|
|
def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, i128mem:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
|
|
|
|
[(set VR128:$dst,
|
|
|
|
(IntId VR128:$src1,
|
|
|
|
(bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
|
|
|
|
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
|
|
|
|
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
|
|
|
|
|
|
|
|
|
2008-02-09 23:46:37 +00:00
|
|
|
multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
|
|
|
|
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
|
|
|
|
|
|
|
|
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
2008-09-24 23:27:55 +00:00
|
|
|
[(set VR128:$dst,
|
|
|
|
(IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
|
|
|
|
OpSize;
|
2008-02-09 23:46:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
|
|
|
|
defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
|
|
|
|
defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
|
|
|
|
defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
|
|
|
|
defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
|
|
|
|
defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
|
|
|
|
|
2008-09-24 23:27:55 +00:00
|
|
|
// Common patterns involving scalar load.
|
|
|
|
def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
|
|
|
|
(PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
|
|
|
|
def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
|
|
|
|
(PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
|
|
|
|
|
|
|
|
def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
|
|
|
|
(PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
|
|
|
|
def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
|
|
|
|
(PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
|
|
|
|
|
|
|
|
def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
|
|
|
|
(PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
|
|
|
|
def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
|
|
|
|
(PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
|
|
|
|
|
|
|
|
def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
|
|
|
|
(PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
|
|
|
|
def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
|
|
|
|
(PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
|
|
|
|
|
|
|
|
def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
|
|
|
|
(PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
|
|
|
|
def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
|
|
|
|
(PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
|
|
|
|
|
|
|
|
def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
|
|
|
|
(PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
|
|
|
|
def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
|
|
|
|
(PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
|
|
|
|
|
|
|
|
|
2008-02-09 23:46:37 +00:00
|
|
|
multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
|
|
|
|
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
|
|
|
|
|
|
|
|
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
2008-09-24 23:27:55 +00:00
|
|
|
[(set VR128:$dst,
|
|
|
|
(IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
|
|
|
|
OpSize;
|
2008-02-09 23:46:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
|
|
|
|
defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
|
|
|
|
defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
|
|
|
|
defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
|
|
|
|
|
2008-09-24 23:27:55 +00:00
|
|
|
// Common patterns involving scalar load
|
|
|
|
def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
|
2008-09-25 00:49:51 +00:00
|
|
|
(PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
|
2008-09-24 23:27:55 +00:00
|
|
|
def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
|
2008-09-25 00:49:51 +00:00
|
|
|
(PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
|
2008-09-24 23:27:55 +00:00
|
|
|
|
|
|
|
def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
|
2008-09-25 00:49:51 +00:00
|
|
|
(PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
|
2008-09-24 23:27:55 +00:00
|
|
|
def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
|
2008-09-25 00:49:51 +00:00
|
|
|
(PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
|
2008-09-24 23:27:55 +00:00
|
|
|
|
|
|
|
|
2008-02-09 23:46:37 +00:00
|
|
|
multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
|
|
|
|
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
|
|
|
[(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
|
|
|
|
|
2008-09-24 23:27:55 +00:00
|
|
|
// Expecting a i16 load any extended to i32 value.
|
2008-02-09 23:46:37 +00:00
|
|
|
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
2008-09-24 23:27:55 +00:00
|
|
|
[(set VR128:$dst, (IntId (bitconvert
|
|
|
|
(v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
|
|
|
|
OpSize;
|
2008-02-09 23:46:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
|
2009-06-06 05:55:37 +00:00
|
|
|
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
|
2008-02-09 23:46:37 +00:00
|
|
|
|
2008-09-24 23:27:55 +00:00
|
|
|
// Common patterns involving scalar load
|
|
|
|
def : Pat<(int_x86_sse41_pmovsxbq
|
|
|
|
(bitconvert (v4i32 (X86vzmovl
|
|
|
|
(v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
|
2008-09-25 00:49:51 +00:00
|
|
|
(PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
|
2008-09-24 23:27:55 +00:00
|
|
|
|
|
|
|
def : Pat<(int_x86_sse41_pmovzxbq
|
|
|
|
(bitconvert (v4i32 (X86vzmovl
|
|
|
|
(v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
|
2008-09-25 00:49:51 +00:00
|
|
|
(PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
|
2008-09-24 23:27:55 +00:00
|
|
|
|
2008-02-09 23:46:37 +00:00
|
|
|
|
2008-02-11 04:19:36 +00:00
|
|
|
/// SS41I_binop_ext8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
|
|
|
|
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
|
2008-03-26 08:11:49 +00:00
|
|
|
def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
|
2008-02-09 23:46:37 +00:00
|
|
|
(ins VR128:$src1, i32i8imm:$src2),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-02-09 23:46:37 +00:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2008-02-11 04:19:36 +00:00
|
|
|
[(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
|
|
|
|
OpSize;
|
2008-03-14 07:39:27 +00:00
|
|
|
def mr : SS4AIi8<opc, MRMDestMem, (outs),
|
2008-02-09 23:46:37 +00:00
|
|
|
(ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-02-09 23:46:37 +00:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2008-02-11 04:19:36 +00:00
|
|
|
[]>, OpSize;
|
|
|
|
// FIXME:
|
|
|
|
// There's an AssertZext in the way of writing the store pattern
|
|
|
|
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
|
|
|
|
}
|
|
|
|
|
|
|
|
defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
|
|
|
|
|
|
|
|
|
|
|
|
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
|
|
|
|
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
|
2008-03-14 07:39:27 +00:00
|
|
|
def mr : SS4AIi8<opc, MRMDestMem, (outs),
|
2008-02-11 04:19:36 +00:00
|
|
|
(ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-02-11 04:19:36 +00:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
|
|
|
[]>, OpSize;
|
|
|
|
// FIXME:
|
|
|
|
// There's an AssertZext in the way of writing the store pattern
|
|
|
|
// (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
|
2008-02-09 23:46:37 +00:00
|
|
|
}
|
|
|
|
|
2008-02-11 04:19:36 +00:00
|
|
|
defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
|
2008-02-09 23:46:37 +00:00
|
|
|
|
2008-02-11 04:19:36 +00:00
|
|
|
|
|
|
|
/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
|
|
|
|
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
|
2008-03-26 08:11:49 +00:00
|
|
|
def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
|
2008-02-09 23:46:37 +00:00
|
|
|
(ins VR128:$src1, i32i8imm:$src2),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-02-09 23:46:37 +00:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
|
|
|
[(set GR32:$dst,
|
|
|
|
(extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
|
2008-03-14 07:39:27 +00:00
|
|
|
def mr : SS4AIi8<opc, MRMDestMem, (outs),
|
2008-02-09 23:46:37 +00:00
|
|
|
(ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-02-09 23:46:37 +00:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
|
|
|
[(store (extractelt (v4i32 VR128:$src1), imm:$src2),
|
|
|
|
addr:$dst)]>, OpSize;
|
2008-02-09 01:38:08 +00:00
|
|
|
}
|
|
|
|
|
2008-02-11 04:19:36 +00:00
|
|
|
defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
|
|
|
|
|
2008-02-09 01:38:08 +00:00
|
|
|
|
2008-03-24 21:52:23 +00:00
|
|
|
/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
|
|
|
|
/// destination
|
2008-02-11 04:19:36 +00:00
|
|
|
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
|
2008-03-26 08:11:49 +00:00
|
|
|
def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
|
2008-02-09 23:46:37 +00:00
|
|
|
(ins VR128:$src1, i32i8imm:$src2),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-02-09 23:46:37 +00:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2008-04-16 02:32:24 +00:00
|
|
|
[(set GR32:$dst,
|
|
|
|
(extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
|
2008-03-24 21:52:23 +00:00
|
|
|
OpSize;
|
2009-07-31 20:07:27 +00:00
|
|
|
def mr : SS4AIi8<opc, MRMDestMem, (outs),
|
2008-02-09 23:46:37 +00:00
|
|
|
(ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-02-09 23:46:37 +00:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2008-03-24 21:52:23 +00:00
|
|
|
[(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
|
2008-02-09 23:46:37 +00:00
|
|
|
addr:$dst)]>, OpSize;
|
2008-02-09 01:38:08 +00:00
|
|
|
}
|
|
|
|
|
2008-02-11 04:19:36 +00:00
|
|
|
defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
|
|
|
|
|
2008-08-08 18:30:21 +00:00
|
|
|
// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
|
|
|
|
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
|
|
|
|
imm:$src2))),
|
|
|
|
addr:$dst),
|
|
|
|
(EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
|
|
|
|
Requires<[HasSSE41]>;
|
|
|
|
|
2008-03-05 08:19:16 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2008-02-11 04:19:36 +00:00
|
|
|
multiclass SS41I_insert8<bits<8> opc, string OpcodeStr> {
|
2008-03-14 07:39:27 +00:00
|
|
|
def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
|
2008-02-11 04:19:36 +00:00
|
|
|
(ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-02-11 04:19:36 +00:00
|
|
|
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst,
|
2008-02-11 04:19:36 +00:00
|
|
|
(X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
|
2008-03-14 07:39:27 +00:00
|
|
|
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
|
2008-02-11 04:19:36 +00:00
|
|
|
(ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst,
|
2008-02-11 04:19:36 +00:00
|
|
|
(X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
|
|
|
|
imm:$src3))]>, OpSize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
|
|
|
|
|
2008-03-05 08:19:16 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2008-02-11 04:19:36 +00:00
|
|
|
multiclass SS41I_insert32<bits<8> opc, string OpcodeStr> {
|
2008-03-14 07:39:27 +00:00
|
|
|
def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
|
2008-02-11 04:19:36 +00:00
|
|
|
(ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-02-11 04:19:36 +00:00
|
|
|
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst,
|
2008-02-11 04:19:36 +00:00
|
|
|
(v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
|
|
|
|
OpSize;
|
2008-03-14 07:39:27 +00:00
|
|
|
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
|
2008-02-11 04:19:36 +00:00
|
|
|
(ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst,
|
2008-02-11 04:19:36 +00:00
|
|
|
(v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
|
|
|
|
imm:$src3)))]>, OpSize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
|
|
|
|
|
2009-07-23 02:22:41 +00:00
|
|
|
// insertps has a few different modes, there's the first two here below which
|
|
|
|
// are optimized inserts that won't zero arbitrary elements in the destination
|
|
|
|
// vector. The next one matches the intrinsic and could zero arbitrary elements
|
|
|
|
// in the target vector.
|
2008-03-05 08:19:16 +00:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2008-02-11 04:19:36 +00:00
|
|
|
multiclass SS41I_insertf32<bits<8> opc, string OpcodeStr> {
|
2009-07-24 00:33:09 +00:00
|
|
|
def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
|
2009-07-31 20:07:27 +00:00
|
|
|
!strconcat(OpcodeStr,
|
2008-02-11 04:19:36 +00:00
|
|
|
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst,
|
|
|
|
(X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
OpSize;
|
2009-07-24 00:33:09 +00:00
|
|
|
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
|
2008-02-11 04:19:36 +00:00
|
|
|
(ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2009-07-31 20:07:27 +00:00
|
|
|
[(set VR128:$dst,
|
2009-07-24 00:33:09 +00:00
|
|
|
(X86insrtps VR128:$src1,
|
|
|
|
(v4f32 (scalar_to_vector (loadf32 addr:$src2))),
|
2008-02-11 04:19:36 +00:00
|
|
|
imm:$src3))]>, OpSize;
|
|
|
|
}
|
|
|
|
}
|
2008-02-09 23:46:37 +00:00
|
|
|
|
2008-03-26 08:11:49 +00:00
|
|
|
defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
|
2008-03-16 21:14:46 +00:00
|
|
|
|
2009-07-24 00:33:09 +00:00
|
|
|
def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
|
|
|
|
(INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>;
|
|
|
|
|
2009-07-29 00:28:05 +00:00
|
|
|
// ptest instruction we'll lower to this in X86ISelLowering primarily from
|
|
|
|
// the intel intrinsic that corresponds to this.
|
2008-03-16 21:14:46 +00:00
|
|
|
let Defs = [EFLAGS] in {
|
|
|
|
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
|
2009-07-29 00:28:05 +00:00
|
|
|
"ptest \t{$src2, $src1|$src1, $src2}",
|
2010-03-28 05:07:17 +00:00
|
|
|
[(set EFLAGS, (X86ptest VR128:$src1, VR128:$src2))]>,
|
|
|
|
OpSize;
|
2008-03-16 21:14:46 +00:00
|
|
|
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
|
2009-07-29 00:28:05 +00:00
|
|
|
"ptest \t{$src2, $src1|$src1, $src2}",
|
2010-03-28 05:07:17 +00:00
|
|
|
[(set EFLAGS, (X86ptest VR128:$src1, (load addr:$src2)))]>,
|
|
|
|
OpSize;
|
2008-03-16 21:14:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
|
|
|
|
"movntdqa\t{$src, $dst|$dst, $src}",
|
2010-02-10 00:10:31 +00:00
|
|
|
[(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
|
|
|
|
OpSize;
|
2008-07-17 16:51:19 +00:00
|
|
|
|
2009-08-18 22:50:32 +00:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SSE4.2 Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2008-07-17 16:51:19 +00:00
|
|
|
/// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
|
|
|
|
let Constraints = "$src1 = $dst" in {
|
|
|
|
multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
|
|
|
|
Intrinsic IntId128, bit Commutable = 0> {
|
|
|
|
def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, VR128:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
|
|
|
|
OpSize {
|
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
|
|
|
def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, i128mem:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst,
|
|
|
|
(IntId128 VR128:$src1,
|
|
|
|
(bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-07-17 17:04:58 +00:00
|
|
|
defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
|
2008-07-17 16:51:19 +00:00
|
|
|
|
|
|
|
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
|
|
|
|
(PCMPGTQrr VR128:$src1, VR128:$src2)>;
|
|
|
|
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
|
|
|
|
(PCMPGTQrm VR128:$src1, addr:$src2)>;
|
2009-08-08 21:55:08 +00:00
|
|
|
|
|
|
|
// crc intrinsic instruction
|
|
|
|
// This set of instructions are only rm, the only difference is the size
|
|
|
|
// of r and m.
|
|
|
|
let Constraints = "$src1 = $dst" in {
|
2009-08-10 21:48:58 +00:00
|
|
|
def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
|
2009-08-08 21:55:08 +00:00
|
|
|
(ins GR32:$src1, i8mem:$src2),
|
2010-03-19 20:04:42 +00:00
|
|
|
"crc32{b} \t{$src2, $src1|$src1, $src2}",
|
2009-08-08 21:55:08 +00:00
|
|
|
[(set GR32:$dst,
|
|
|
|
(int_x86_sse42_crc32_8 GR32:$src1,
|
2010-03-19 20:04:42 +00:00
|
|
|
(load addr:$src2)))]>;
|
2009-08-10 21:48:58 +00:00
|
|
|
def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
|
2009-08-08 21:55:08 +00:00
|
|
|
(ins GR32:$src1, GR8:$src2),
|
2010-03-19 20:04:42 +00:00
|
|
|
"crc32{b} \t{$src2, $src1|$src1, $src2}",
|
2009-08-08 21:55:08 +00:00
|
|
|
[(set GR32:$dst,
|
2010-03-19 20:04:42 +00:00
|
|
|
(int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
|
2009-08-10 21:48:58 +00:00
|
|
|
def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
|
2009-08-08 21:55:08 +00:00
|
|
|
(ins GR32:$src1, i16mem:$src2),
|
2010-03-19 20:04:42 +00:00
|
|
|
"crc32{w} \t{$src2, $src1|$src1, $src2}",
|
2009-08-08 21:55:08 +00:00
|
|
|
[(set GR32:$dst,
|
|
|
|
(int_x86_sse42_crc32_16 GR32:$src1,
|
|
|
|
(load addr:$src2)))]>,
|
|
|
|
OpSize;
|
2009-08-10 21:48:58 +00:00
|
|
|
def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
|
2009-08-08 21:55:08 +00:00
|
|
|
(ins GR32:$src1, GR16:$src2),
|
2010-03-19 20:04:42 +00:00
|
|
|
"crc32{w} \t{$src2, $src1|$src1, $src2}",
|
2009-08-08 21:55:08 +00:00
|
|
|
[(set GR32:$dst,
|
2009-08-10 21:48:58 +00:00
|
|
|
(int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
|
2009-08-08 21:55:08 +00:00
|
|
|
OpSize;
|
2009-08-10 21:48:58 +00:00
|
|
|
def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
|
2009-08-08 21:55:08 +00:00
|
|
|
(ins GR32:$src1, i32mem:$src2),
|
2010-03-19 20:04:42 +00:00
|
|
|
"crc32{l} \t{$src2, $src1|$src1, $src2}",
|
2009-08-08 21:55:08 +00:00
|
|
|
[(set GR32:$dst,
|
|
|
|
(int_x86_sse42_crc32_32 GR32:$src1,
|
2010-03-19 20:04:42 +00:00
|
|
|
(load addr:$src2)))]>;
|
2009-08-10 21:48:58 +00:00
|
|
|
def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
|
2009-08-08 21:55:08 +00:00
|
|
|
(ins GR32:$src1, GR32:$src2),
|
2010-03-19 20:04:42 +00:00
|
|
|
"crc32{l} \t{$src2, $src1|$src1, $src2}",
|
2009-08-08 21:55:08 +00:00
|
|
|
[(set GR32:$dst,
|
2010-03-19 20:04:42 +00:00
|
|
|
(int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
|
|
|
|
def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
|
|
|
|
(ins GR64:$src1, i8mem:$src2),
|
|
|
|
"crc32{b} \t{$src2, $src1|$src1, $src2}",
|
|
|
|
[(set GR64:$dst,
|
|
|
|
(int_x86_sse42_crc64_8 GR64:$src1,
|
|
|
|
(load addr:$src2)))]>,
|
|
|
|
REX_W;
|
|
|
|
def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
|
|
|
|
(ins GR64:$src1, GR8:$src2),
|
|
|
|
"crc32{b} \t{$src2, $src1|$src1, $src2}",
|
|
|
|
[(set GR64:$dst,
|
|
|
|
(int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
|
|
|
|
REX_W;
|
|
|
|
def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
|
2009-08-08 21:55:08 +00:00
|
|
|
(ins GR64:$src1, i64mem:$src2),
|
2010-03-19 20:04:42 +00:00
|
|
|
"crc32{q} \t{$src2, $src1|$src1, $src2}",
|
2009-08-08 21:55:08 +00:00
|
|
|
[(set GR64:$dst,
|
2010-03-19 20:04:42 +00:00
|
|
|
(int_x86_sse42_crc64_64 GR64:$src1,
|
2009-08-08 21:55:08 +00:00
|
|
|
(load addr:$src2)))]>,
|
2010-03-19 20:04:42 +00:00
|
|
|
REX_W;
|
|
|
|
def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
|
2009-08-08 21:55:08 +00:00
|
|
|
(ins GR64:$src1, GR64:$src2),
|
2010-03-19 20:04:42 +00:00
|
|
|
"crc32{q} \t{$src2, $src1|$src1, $src2}",
|
2009-08-08 21:55:08 +00:00
|
|
|
[(set GR64:$dst,
|
2010-03-19 20:04:42 +00:00
|
|
|
(int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
|
|
|
|
REX_W;
|
2009-08-08 21:55:08 +00:00
|
|
|
}
|
2009-08-18 22:50:32 +00:00
|
|
|
|
|
|
|
// String/text processing instructions.
|
2009-10-29 18:10:34 +00:00
|
|
|
let Defs = [EFLAGS], usesCustomInserter = 1 in {
|
2009-08-18 22:50:32 +00:00
|
|
|
def PCMPISTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
|
|
|
|
"#PCMPISTRM128rr PSEUDO!",
|
|
|
|
[(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
|
|
|
|
imm:$src3))]>, OpSize;
|
2009-08-18 22:50:32 +00:00
|
|
|
def PCMPISTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
|
|
|
|
"#PCMPISTRM128rm PSEUDO!",
|
|
|
|
[(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, (load addr:$src2),
|
|
|
|
imm:$src3))]>, OpSize;
|
2009-08-18 22:50:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
let Defs = [XMM0, EFLAGS] in {
|
|
|
|
def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
|
|
|
|
"pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
|
2009-08-18 22:50:32 +00:00
|
|
|
def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
|
|
|
|
"pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
|
2009-08-18 22:50:32 +00:00
|
|
|
}
|
|
|
|
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
|
2009-08-18 22:50:32 +00:00
|
|
|
def PCMPESTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, VR128:$src3, i8imm:$src5),
|
|
|
|
"#PCMPESTRM128rr PSEUDO!",
|
2010-05-25 17:33:22 +00:00
|
|
|
[(set VR128:$dst,
|
|
|
|
(int_x86_sse42_pcmpestrm128
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>, OpSize;
|
|
|
|
|
2009-08-18 22:50:32 +00:00
|
|
|
def PCMPESTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, i128mem:$src3, i8imm:$src5),
|
|
|
|
"#PCMPESTRM128rm PSEUDO!",
|
2010-05-25 17:33:22 +00:00
|
|
|
[(set VR128:$dst, (int_x86_sse42_pcmpestrm128
|
|
|
|
VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>,
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
OpSize;
|
2009-08-18 22:50:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
|
2009-08-20 18:24:27 +00:00
|
|
|
def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, VR128:$src3, i8imm:$src5),
|
|
|
|
"pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
|
2009-08-20 18:24:27 +00:00
|
|
|
def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, i128mem:$src3, i8imm:$src5),
|
|
|
|
"pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
|
2009-08-18 22:50:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
let Defs = [ECX, EFLAGS] in {
|
|
|
|
multiclass SS42AI_pcmpistri<Intrinsic IntId128> {
|
2010-05-25 17:33:22 +00:00
|
|
|
def rr : SS42AI<0x63, MRMSrcReg, (outs),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
|
|
|
|
"pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}",
|
|
|
|
[(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
|
|
|
|
(implicit EFLAGS)]>, OpSize;
|
2009-08-18 22:50:32 +00:00
|
|
|
def rm : SS42AI<0x63, MRMSrcMem, (outs),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
|
|
|
|
"pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}",
|
|
|
|
[(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
|
|
|
|
(implicit EFLAGS)]>, OpSize;
|
2009-08-18 22:50:32 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
|
|
|
|
defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
|
|
|
|
defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
|
|
|
|
defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
|
|
|
|
defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
|
|
|
|
defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
|
|
|
|
|
|
|
|
let Defs = [ECX, EFLAGS] in {
|
|
|
|
let Uses = [EAX, EDX] in {
|
|
|
|
multiclass SS42AI_pcmpestri<Intrinsic IntId128> {
|
|
|
|
def rr : SS42AI<0x61, MRMSrcReg, (outs),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, VR128:$src3, i8imm:$src5),
|
|
|
|
"pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}",
|
|
|
|
[(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
|
|
|
|
(implicit EFLAGS)]>, OpSize;
|
2009-08-18 22:50:32 +00:00
|
|
|
def rm : SS42AI<0x61, MRMSrcMem, (outs),
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(ins VR128:$src1, i128mem:$src3, i8imm:$src5),
|
|
|
|
"pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}",
|
2010-05-25 17:33:22 +00:00
|
|
|
[(set ECX,
|
Instruction fixes, added instructions, and AsmString changes in the
X86 instruction tables.
Also (while I was at it) cleaned up the X86 tables, removing tabs and
80-line violations.
This patch was reviewed by Chris Lattner, but please let me know if
there are any problems.
* X86*.td
Removed tabs and fixed 80-line violations
* X86Instr64bit.td
(IRET, POPCNT, BT_, LSL, SWPGS, PUSH_S, POP_S, L_S, SMSW)
Added
(CALL, CMOV) Added qualifiers
(JMP) Added PC-relative jump instruction
(POPFQ/PUSHFQ) Added qualifiers; renamed PUSHFQ to indicate
that it is 64-bit only (ambiguous since it has no
REX prefix)
(MOV) Added rr form going the other way, which is encoded
differently
(MOV) Changed immediates to offsets, which is more correct;
also fixed MOV64o64a to have to a 64-bit offset
(MOV) Fixed qualifiers
(MOV) Added debug-register and condition-register moves
(MOVZX) Added more forms
(ADC, SUB, SBB, AND, OR, XOR) Added reverse forms, which
(as with MOV) are encoded differently
(ROL) Made REX.W required
(BT) Uncommented mr form for disassembly only
(CVT__2__) Added several missing non-intrinsic forms
(LXADD, XCHG) Reordered operands to make more sense for
MRMSrcMem
(XCHG) Added register-to-register forms
(XADD, CMPXCHG, XCHG) Added non-locked forms
* X86InstrSSE.td
(CVTSS2SI, COMISS, CVTTPS2DQ, CVTPS2PD, CVTPD2PS, MOVQ)
Added
* X86InstrFPStack.td
(COM_FST0, COMP_FST0, COM_FI, COM_FIP, FFREE, FNCLEX, FNOP,
FXAM, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, F2XM1, FYL2X,
FPTAN, FPATAN, FXTRACT, FPREM1, FDECSTP, FINCSTP, FPREM,
FYL2XP1, FSINCOS, FRNDINT, FSCALE, FCOMPP, FXSAVE,
FXRSTOR)
Added
(FCOM, FCOMP) Added qualifiers
(FSTENV, FSAVE, FSTSW) Fixed opcode names
(FNSTSW) Added implicit register operand
* X86InstrInfo.td
(opaque512mem) Added for FXSAVE/FXRSTOR
(offset8, offset16, offset32, offset64) Added for MOV
(NOOPW, IRET, POPCNT, IN, BTC, BTR, BTS, LSL, INVLPG, STR,
LTR, PUSHFS, PUSHGS, POPFS, POPGS, LDS, LSS, LES, LFS,
LGS, VERR, VERW, SGDT, SIDT, SLDT, LGDT, LIDT, LLDT,
LODSD, OUTSB, OUTSW, OUTSD, HLT, RSM, FNINIT, CLC, STC,
CLI, STI, CLD, STD, CMC, CLTS, XLAT, WRMSR, RDMSR, RDPMC,
SMSW, LMSW, CPUID, INVD, WBINVD, INVEPT, INVVPID, VMCALL,
VMCLEAR, VMLAUNCH, VMRESUME, VMPTRLD, VMPTRST, VMREAD,
VMWRITE, VMXOFF, VMXON) Added
(NOOPL, POPF, POPFD, PUSHF, PUSHFD) Added qualifier
(JO, JNO, JB, JAE, JE, JNE, JBE, JA, JS, JNS, JP, JNP, JL,
JGE, JLE, JG, JCXZ) Added 32-bit forms
(MOV) Changed some immediate forms to offset forms
(MOV) Added reversed reg-reg forms, which are encoded
differently
(MOV) Added debug-register and condition-register moves
(CMOV) Added qualifiers
(AND, OR, XOR, ADC, SUB, SBB) Added reverse forms, like MOV
(BT) Uncommented memory-register forms for disassembler
(MOVSX, MOVZX) Added forms
(XCHG, LXADD) Made operand order make sense for MRMSrcMem
(XCHG) Added register-register forms
(XADD, CMPXCHG) Added unlocked forms
* X86InstrMMX.td
(MMX_MOVD, MMV_MOVQ) Added forms
* X86InstrInfo.cpp: Changed PUSHFQ to PUSHFQ64 to reflect table
change
* X86RegisterInfo.td: Added debug and condition register sets
* x86-64-pic-3.ll: Fixed testcase to reflect call qualifier
* peep-test-3.ll: Fixed testcase to reflect test qualifier
* cmov.ll: Fixed testcase to reflect cmov qualifier
* loop-blocks.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-11.ll: Fixed testcase to reflect call qualifier
* 2009-11-04-SubregCoalescingBug.ll: Fixed testcase to reflect call
qualifier
* x86-64-pic-2.ll: Fixed testcase to reflect call qualifier
* live-out-reg-info.ll: Fixed testcase to reflect test qualifier
* tail-opts.ll: Fixed testcase to reflect call qualifiers
* x86-64-pic-10.ll: Fixed testcase to reflect call qualifier
* bss-pagealigned.ll: Fixed testcase to reflect call qualifier
* x86-64-pic-1.ll: Fixed testcase to reflect call qualifier
* widen_load-1.ll: Fixed testcase to reflect call qualifier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@91638 91177308-0d34-0410-b5e6-96231b3b80d8
2009-12-18 00:01:26 +00:00
|
|
|
(IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
|
|
|
|
(implicit EFLAGS)]>, OpSize;
|
2009-08-18 22:50:32 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
|
|
|
|
defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
|
|
|
|
defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
|
|
|
|
defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
|
|
|
|
defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
|
|
|
|
defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
|
2010-04-02 21:54:27 +00:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AES-NI Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
let Constraints = "$src1 = $dst" in {
|
|
|
|
multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
|
|
|
|
Intrinsic IntId128, bit Commutable = 0> {
|
|
|
|
def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, VR128:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
|
|
|
|
OpSize {
|
|
|
|
let isCommutable = Commutable;
|
|
|
|
}
|
|
|
|
def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1, i128mem:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
|
|
|
|
[(set VR128:$dst,
|
|
|
|
(IntId128 VR128:$src1,
|
|
|
|
(bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
|
|
|
|
int_x86_aesni_aesenc>;
|
|
|
|
defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
|
|
|
|
int_x86_aesni_aesenclast>;
|
|
|
|
defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
|
|
|
|
int_x86_aesni_aesdec>;
|
|
|
|
defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
|
|
|
|
int_x86_aesni_aesdeclast>;
|
|
|
|
|
|
|
|
def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
|
|
|
|
(AESENCrr VR128:$src1, VR128:$src2)>;
|
|
|
|
def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
|
|
|
|
(AESENCrm VR128:$src1, addr:$src2)>;
|
|
|
|
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
|
|
|
|
(AESENCLASTrr VR128:$src1, VR128:$src2)>;
|
|
|
|
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
|
|
|
|
(AESENCLASTrm VR128:$src1, addr:$src2)>;
|
|
|
|
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
|
|
|
|
(AESDECrr VR128:$src1, VR128:$src2)>;
|
|
|
|
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
|
|
|
|
(AESDECrm VR128:$src1, addr:$src2)>;
|
|
|
|
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
|
|
|
|
(AESDECLASTrr VR128:$src1, VR128:$src2)>;
|
|
|
|
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
|
|
|
|
(AESDECLASTrm VR128:$src1, addr:$src2)>;
|
|
|
|
|
2010-04-02 23:48:33 +00:00
|
|
|
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
|
|
|
|
(ins VR128:$src1),
|
|
|
|
"aesimc\t{$src1, $dst|$dst, $src1}",
|
|
|
|
[(set VR128:$dst,
|
|
|
|
(int_x86_aesni_aesimc VR128:$src1))]>,
|
|
|
|
OpSize;
|
|
|
|
|
|
|
|
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
|
|
|
|
(ins i128mem:$src1),
|
|
|
|
"aesimc\t{$src1, $dst|$dst, $src1}",
|
|
|
|
[(set VR128:$dst,
|
|
|
|
(int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
|
|
|
|
OpSize;
|
|
|
|
|
2010-04-02 21:54:27 +00:00
|
|
|
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
|
2010-05-25 17:33:22 +00:00
|
|
|
(ins VR128:$src1, i8imm:$src2),
|
2010-04-02 21:54:27 +00:00
|
|
|
"aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
|
|
|
[(set VR128:$dst,
|
|
|
|
(int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
|
|
|
|
OpSize;
|
|
|
|
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
|
2010-05-25 17:33:22 +00:00
|
|
|
(ins i128mem:$src1, i8imm:$src2),
|
2010-04-02 21:54:27 +00:00
|
|
|
"aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
|
|
|
[(set VR128:$dst,
|
|
|
|
(int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
|
|
|
|
imm:$src2))]>,
|
|
|
|
OpSize;
|