Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-12-02 16:36:40 +00:00
Added encoding prefixes for KNL instructions (EVEX).
Added printing of 512-bit operands. Added instruction formats for KNL instructions. llvm-svn: 187324
This commit is contained in: parent d3834e5bef, commit 505373db43
@ -635,7 +635,7 @@ namespace ISD {
|
||||
/// which do not reference a specific memory location should be less than
|
||||
/// this value. Those that do must not be less than this value, and can
|
||||
/// be used with SelectionDAG::getMemIntrinsicNode.
|
||||
static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+150;
|
||||
static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+180;
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
/// MemIndexedMode enum - This enum defines the load / store indexed
|
||||
|
@ -831,6 +831,18 @@ struct X86Operand : public MCParsedAsmOperand {
|
||||
return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
|
||||
getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
|
||||
}
|
||||
bool isMemVZ32() const {
|
||||
return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
|
||||
getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
|
||||
}
|
||||
bool isMemVZ64() const {
|
||||
return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
|
||||
getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
|
||||
}
|
||||
|
||||
bool isMem512() const {
|
||||
return Kind == Memory && (!Mem.Size || Mem.Size == 512);
|
||||
}
|
||||
|
||||
bool isAbsMem() const {
|
||||
return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
|
||||
@ -891,6 +903,16 @@ struct X86Operand : public MCParsedAsmOperand {
|
||||
addMemOperands(Inst, N);
|
||||
}
|
||||
|
||||
void addMemVZ32Operands(MCInst &Inst, unsigned N) const {
|
||||
addMemOperands(Inst, N);
|
||||
}
|
||||
void addMemVZ64Operands(MCInst &Inst, unsigned N) const {
|
||||
addMemOperands(Inst, N);
|
||||
}
|
||||
void addMem512Operands(MCInst &Inst, unsigned N) const {
|
||||
addMemOperands(Inst, N);
|
||||
}
|
||||
|
||||
void addMemOperands(MCInst &Inst, unsigned N) const {
|
||||
assert((N == 5) && "Invalid number of operands!");
|
||||
Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
|
||||
|
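The isMemVZ32/isMemVZ64/isMem512 predicates added above classify a parsed memory operand by its access size and by the register class of its vector index, which is how the assembler matcher distinguishes a ZMM-indexed gather/scatter memory operand from an ordinary 512-bit memory reference. A minimal standalone sketch of the same idea; the register numbering and the MemOperand struct here are stand-ins, not the real LLVM X86Operand:

// Hypothetical register numbering; the real parser uses the X86:: register enums.
enum Reg : unsigned { NoReg = 0, ZMM0 = 100, ZMM31 = ZMM0 + 31 };

struct MemOperand {
  unsigned Size;     // access size in bits; 0 means "unspecified"
  unsigned IndexReg; // vector index register, or NoReg
};

// Mirrors the shape of X86Operand::isMemVZ64() above: size is unspecified or
// 64 bits, and the index register is one of ZMM0..ZMM31.
bool isMemVZ64(const MemOperand &M) {
  return (M.Size == 0 || M.Size == 64) &&
         M.IndexReg >= ZMM0 && M.IndexReg <= ZMM31;
}

// Mirrors X86Operand::isMem512(): any memory operand whose size is
// unspecified or exactly 512 bits.
bool isMem512(const MemOperand &M) {
  return M.Size == 0 || M.Size == 512;
}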
@ -286,6 +286,9 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
|
||||
case TYPE_XMM256:
|
||||
mcInst.addOperand(MCOperand::CreateReg(X86::YMM0 + (immediate >> 4)));
|
||||
return;
|
||||
case TYPE_XMM512:
|
||||
mcInst.addOperand(MCOperand::CreateReg(X86::ZMM0 + (immediate >> 4)));
|
||||
return;
|
||||
case TYPE_REL8:
|
||||
isBranch = true;
|
||||
pcrel = insn.startLocation + insn.immediateOffset + insn.immediateSize;
|
||||
@ -443,6 +446,7 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
|
||||
EA_BASES_64BIT
|
||||
REGS_XMM
|
||||
REGS_YMM
|
||||
REGS_ZMM
|
||||
#undef ENTRY
|
||||
}
|
||||
} else {
|
||||
@ -565,6 +569,7 @@ static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
|
||||
case TYPE_XMM64:
|
||||
case TYPE_XMM128:
|
||||
case TYPE_XMM256:
|
||||
case TYPE_XMM512:
|
||||
case TYPE_DEBUGREG:
|
||||
case TYPE_CONTROLREG:
|
||||
return translateRMRegister(mcInst, insn);
|
||||
|
@ -1255,6 +1255,8 @@ static int readModRM(struct InternalInstruction* insn) {
|
||||
return prefix##_EAX + index; \
|
||||
case TYPE_R64: \
|
||||
return prefix##_RAX + index; \
|
||||
case TYPE_XMM512: \
|
||||
return prefix##_ZMM0 + index; \
|
||||
case TYPE_XMM256: \
|
||||
return prefix##_YMM0 + index; \
|
||||
case TYPE_XMM128: \
|
||||
|
@ -219,7 +219,23 @@ extern "C" {
|
||||
ENTRY(XMM12) \
|
||||
ENTRY(XMM13) \
|
||||
ENTRY(XMM14) \
|
||||
ENTRY(XMM15)
|
||||
ENTRY(XMM15) \
|
||||
ENTRY(XMM16) \
|
||||
ENTRY(XMM17) \
|
||||
ENTRY(XMM18) \
|
||||
ENTRY(XMM19) \
|
||||
ENTRY(XMM20) \
|
||||
ENTRY(XMM21) \
|
||||
ENTRY(XMM22) \
|
||||
ENTRY(XMM23) \
|
||||
ENTRY(XMM24) \
|
||||
ENTRY(XMM25) \
|
||||
ENTRY(XMM26) \
|
||||
ENTRY(XMM27) \
|
||||
ENTRY(XMM28) \
|
||||
ENTRY(XMM29) \
|
||||
ENTRY(XMM30) \
|
||||
ENTRY(XMM31)
|
||||
|
||||
#define REGS_YMM \
|
||||
ENTRY(YMM0) \
|
||||
@ -237,7 +253,57 @@ extern "C" {
|
||||
ENTRY(YMM12) \
|
||||
ENTRY(YMM13) \
|
||||
ENTRY(YMM14) \
|
||||
ENTRY(YMM15)
|
||||
ENTRY(YMM15) \
|
||||
ENTRY(YMM16) \
|
||||
ENTRY(YMM17) \
|
||||
ENTRY(YMM18) \
|
||||
ENTRY(YMM19) \
|
||||
ENTRY(YMM20) \
|
||||
ENTRY(YMM21) \
|
||||
ENTRY(YMM22) \
|
||||
ENTRY(YMM23) \
|
||||
ENTRY(YMM24) \
|
||||
ENTRY(YMM25) \
|
||||
ENTRY(YMM26) \
|
||||
ENTRY(YMM27) \
|
||||
ENTRY(YMM28) \
|
||||
ENTRY(YMM29) \
|
||||
ENTRY(YMM30) \
|
||||
ENTRY(YMM31)
|
||||
|
||||
#define REGS_ZMM \
|
||||
ENTRY(ZMM0) \
|
||||
ENTRY(ZMM1) \
|
||||
ENTRY(ZMM2) \
|
||||
ENTRY(ZMM3) \
|
||||
ENTRY(ZMM4) \
|
||||
ENTRY(ZMM5) \
|
||||
ENTRY(ZMM6) \
|
||||
ENTRY(ZMM7) \
|
||||
ENTRY(ZMM8) \
|
||||
ENTRY(ZMM9) \
|
||||
ENTRY(ZMM10) \
|
||||
ENTRY(ZMM11) \
|
||||
ENTRY(ZMM12) \
|
||||
ENTRY(ZMM13) \
|
||||
ENTRY(ZMM14) \
|
||||
ENTRY(ZMM15) \
|
||||
ENTRY(ZMM16) \
|
||||
ENTRY(ZMM17) \
|
||||
ENTRY(ZMM18) \
|
||||
ENTRY(ZMM19) \
|
||||
ENTRY(ZMM20) \
|
||||
ENTRY(ZMM21) \
|
||||
ENTRY(ZMM22) \
|
||||
ENTRY(ZMM23) \
|
||||
ENTRY(ZMM24) \
|
||||
ENTRY(ZMM25) \
|
||||
ENTRY(ZMM26) \
|
||||
ENTRY(ZMM27) \
|
||||
ENTRY(ZMM28) \
|
||||
ENTRY(ZMM29) \
|
||||
ENTRY(ZMM30) \
|
||||
ENTRY(ZMM31)
|
||||
|
||||
#define REGS_SEGMENT \
|
||||
ENTRY(ES) \
|
||||
@ -285,6 +351,7 @@ extern "C" {
|
||||
REGS_MMX \
|
||||
REGS_XMM \
|
||||
REGS_YMM \
|
||||
REGS_ZMM \
|
||||
REGS_SEGMENT \
|
||||
REGS_DEBUG \
|
||||
REGS_CONTROL \
|
||||
@ -319,6 +386,7 @@ typedef enum {
|
||||
ALL_EA_BASES
|
||||
REGS_XMM
|
||||
REGS_YMM
|
||||
REGS_ZMM
|
||||
#undef ENTRY
|
||||
SIB_INDEX_max
|
||||
} SIBIndex;
|
||||
|
@ -116,8 +116,106 @@ enum attributeBits {
|
||||
ENUM_ENTRY(IC_VEX_L_XS, 4, "requires VEX and the L and XS prefix")\
|
||||
ENUM_ENTRY(IC_VEX_L_XD, 4, "requires VEX and the L and XD prefix")\
|
||||
ENUM_ENTRY(IC_VEX_L_OPSIZE, 4, "requires VEX, L, and OpSize") \
|
||||
ENUM_ENTRY(IC_VEX_L_W_OPSIZE, 5, "requires VEX, L, W and OpSize")
|
||||
|
||||
ENUM_ENTRY(IC_VEX_L_W, 3, "requires VEX, L and W") \
|
||||
ENUM_ENTRY(IC_VEX_L_W_XS, 4, "requires VEX, L, W and XS prefix") \
|
||||
ENUM_ENTRY(IC_VEX_L_W_XD, 4, "requires VEX, L, W and XD prefix") \
|
||||
ENUM_ENTRY(IC_VEX_L_W_OPSIZE, 4, "requires VEX, L, W and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX, 1, "requires an EVEX prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_XS, 2, "requires EVEX and the XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_XD, 2, "requires EVEX and the XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_OPSIZE, 2, "requires EVEX and the OpSize prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W, 3, "requires EVEX and the W prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_XS, 4, "requires EVEX, W, and XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_XD, 4, "requires EVEX, W, and XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_OPSIZE, 4, "requires EVEX, W, and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L, 3, "requires EVEX and the L prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L_XS, 4, "requires EVEX and the L and XS prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L_XD, 4, "requires EVEX and the L and XD prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L_OPSIZE, 4, "requires EVEX, L, and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W, 3, "requires EVEX, L and W") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_XS, 4, "requires EVEX, L, W and XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_XD, 4, "requires EVEX, L, W and XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_OPSIZE, 4, "requires EVEX, L, W and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L2, 3, "requires EVEX and the L2 prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_XS, 4, "requires EVEX and the L2 and XS prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L2_XD, 4, "requires EVEX and the L2 and XD prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L2_OPSIZE, 4, "requires EVEX, L2, and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W, 3, "requires EVEX, L2 and W") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_XS, 4, "requires EVEX, L2, W and XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_XD, 4, "requires EVEX, L2, W and XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE, 4, "requires EVEX, L2, W and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_K, 1, "requires an EVEX_K prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_XS_K, 2, "requires EVEX_K and the XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_XD_K, 2, "requires EVEX_K and the XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_OPSIZE_K, 2, "requires EVEX_K and the OpSize prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_K, 3, "requires EVEX_K and the W prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_XS_K, 4, "requires EVEX_K, W, and XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_XD_K, 4, "requires EVEX_K, W, and XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_OPSIZE_K, 4, "requires EVEX_K, W, and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L_K, 3, "requires EVEX_K and the L prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L_XS_K, 4, "requires EVEX_K and the L and XS prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L_XD_K, 4, "requires EVEX_K and the L and XD prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L_OPSIZE_K, 4, "requires EVEX_K, L, and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_K, 3, "requires EVEX_K, L and W") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_XS_K, 4, "requires EVEX_K, L, W and XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_XD_K, 4, "requires EVEX_K, L, W and XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K, 4, "requires EVEX_K, L, W and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_K, 3, "requires EVEX_K and the L2 prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_XS_K, 4, "requires EVEX_K and the L2 and XS prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L2_XD_K, 4, "requires EVEX_K and the L2 and XD prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K, 4, "requires EVEX_K, L2, and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_K, 3, "requires EVEX_K, L2 and W") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_XS_K, 4, "requires EVEX_K, L2, W and XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_XD_K, 4, "requires EVEX_K, L2, W and XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K, 4, "requires EVEX_K, L2, W and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_B, 1, "requires an EVEX_B prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_XS_B, 2, "requires EVEX_B and the XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_XD_B, 2, "requires EVEX_B and the XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_OPSIZE_B, 2, "requires EVEX_B and the OpSize prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_B, 3, "requires EVEX_B and the W prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_XS_B, 4, "requires EVEX_B, W, and XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_XD_B, 4, "requires EVEX_B, W, and XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_OPSIZE_B, 4, "requires EVEX_B, W, and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L_B, 3, "requires EVEX_B and the L prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L_XS_B, 4, "requires EVEX_B and the L and XS prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L_XD_B, 4, "requires EVEX_B and the L and XD prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L_OPSIZE_B, 4, "requires EVEX_B, L, and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_B, 3, "requires EVEX_B, L and W") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_XS_B, 4, "requires EVEX_B, L, W and XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_XD_B, 4, "requires EVEX_B, L, W and XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_B, 4, "requires EVEX_B, L, W and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_B, 3, "requires EVEX_B and the L2 prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_XS_B, 4, "requires EVEX_B and the L2 and XS prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L2_XD_B, 4, "requires EVEX_B and the L2 and XD prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L2_OPSIZE_B, 4, "requires EVEX_B, L2, and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_B, 3, "requires EVEX_B, L2 and W") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_XS_B, 4, "requires EVEX_B, L2, W and XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_XD_B, 4, "requires EVEX_B, L2, W and XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_B, 4, "requires EVEX_B, L2, W and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_K_B, 1, "requires EVEX_B and EVEX_K prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_XS_K_B, 2, "requires EVEX_B, EVEX_K and the XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_XD_K_B, 2, "requires EVEX_B, EVEX_K and the XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_OPSIZE_K_B, 2, "requires EVEX_B, EVEX_K and the OpSize prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_K_B, 3, "requires EVEX_B, EVEX_K and the W prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, W, and XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, W, and XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, W, and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L_K_B, 3, "requires EVEX_B, EVEX_K and the L prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L_XS_K_B, 4, "requires EVEX_B, EVEX_K and the L and XS prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L_XD_K_B, 4, "requires EVEX_B, EVEX_K and the L and XD prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L, and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_K_B, 3, "requires EVEX_B, EVEX_K, L and W") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, L, W and XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, L, W and XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L, W and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_K_B, 3, "requires EVEX_B, EVEX_K and the L2 prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_XS_K_B, 4, "requires EVEX_B, EVEX_K and the L2 and XS prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L2_XD_K_B, 4, "requires EVEX_B, EVEX_K and the L2 and XD prefix")\
|
||||
ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L2, and OpSize") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_K_B, 3, "requires EVEX_B, EVEX_K, L2 and W") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and XS prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and XD prefix") \
|
||||
ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and OpSize")
|
||||
|
||||
#define ENUM_ENTRY(n, r, d) n,
|
||||
typedef enum {
|
||||
@ -224,6 +322,7 @@ struct ContextDecision {
|
||||
ENUM_ENTRY(ENCODING_REG, "Register operand in ModR/M byte.") \
|
||||
ENUM_ENTRY(ENCODING_RM, "R/M operand in ModR/M byte.") \
|
||||
ENUM_ENTRY(ENCODING_VVVV, "Register operand in VEX.vvvv byte.") \
|
||||
ENUM_ENTRY(ENCODING_WRITEMASK, "Register operand in EVEX.aaa byte.") \
|
||||
ENUM_ENTRY(ENCODING_CB, "1-byte code offset (possible new CS value)") \
|
||||
ENUM_ENTRY(ENCODING_CW, "2-byte") \
|
||||
ENUM_ENTRY(ENCODING_CD, "4-byte") \
|
||||
@ -321,6 +420,9 @@ struct ContextDecision {
|
||||
ENUM_ENTRY(TYPE_XMM64, "8-byte") \
|
||||
ENUM_ENTRY(TYPE_XMM128, "16-byte") \
|
||||
ENUM_ENTRY(TYPE_XMM256, "32-byte") \
|
||||
ENUM_ENTRY(TYPE_XMM512, "64-byte") \
|
||||
ENUM_ENTRY(TYPE_VK8, "8-bit") \
|
||||
ENUM_ENTRY(TYPE_VK16, "16-bit") \
|
||||
ENUM_ENTRY(TYPE_XMM0, "Implicit use of XMM0") \
|
||||
ENUM_ENTRY(TYPE_SEGMENTREG, "Segment register operand") \
|
||||
ENUM_ENTRY(TYPE_DEBUGREG, "Debug register operand") \
|
||||
|
@ -65,6 +65,9 @@ public:
|
||||
void printi256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
|
||||
printMemReference(MI, OpNo, O);
|
||||
}
|
||||
void printi512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
|
||||
printMemReference(MI, OpNo, O);
|
||||
}
|
||||
void printf32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
|
||||
printMemReference(MI, OpNo, O);
|
||||
}
|
||||
@ -80,6 +83,9 @@ public:
|
||||
void printf256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
|
||||
printMemReference(MI, OpNo, O);
|
||||
}
|
||||
void printf512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
|
||||
printMemReference(MI, OpNo, O);
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -69,6 +69,10 @@ public:
|
||||
O << "YMMWORD PTR ";
|
||||
printMemReference(MI, OpNo, O);
|
||||
}
|
||||
void printi512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
|
||||
O << "ZMMWORD PTR ";
|
||||
printMemReference(MI, OpNo, O);
|
||||
}
|
||||
void printf32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
|
||||
O << "DWORD PTR ";
|
||||
printMemReference(MI, OpNo, O);
|
||||
@ -89,6 +93,10 @@ public:
|
||||
O << "YMMWORD PTR ";
|
||||
printMemReference(MI, OpNo, O);
|
||||
}
|
||||
void printf512mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
|
||||
O << "ZMMWORD PTR ";
|
||||
printMemReference(MI, OpNo, O);
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
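In Intel syntax every memory reference carries an explicit size directive, so the new printi512mem/printf512mem overloads above prefix the 512-bit reference with "ZMMWORD PTR". A small sketch of that mapping; intelSizeDirective is a hypothetical helper, not part of the LLVM printer:

#include <string>

// Map an SSE/AVX/AVX-512 memory-operand width (in bits) to the Intel-syntax
// size directive printed before the memory reference.
std::string intelSizeDirective(unsigned Bits) {
  switch (Bits) {
  case 128: return "XMMWORD PTR ";
  case 256: return "YMMWORD PTR ";
  case 512: return "ZMMWORD PTR ";  // new with the 512-bit operands added here
  default:  return "";              // BYTE/WORD/DWORD/... cases omitted
  }
}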
@ -462,20 +462,54 @@ namespace X86II {
|
||||
// prefix. Usually used for scalar instructions. Needed by disassembler.
|
||||
VEX_LIG = 1U << 6,
|
||||
|
||||
// TODO: we should combine VEX_L and VEX_LIG together to form a 2-bit field
|
||||
// with the following encoding:
|
||||
// - 00 V128
|
||||
// - 01 V256
|
||||
// - 10 V512
|
||||
// - 11 LIG (but, in the instruction encoding, leave VEX.L and EVEX.L as zeros;
// this would save one TSFlags bit)
|
||||
|
||||
// VEX_EVEX - Specifies that this instruction uses the EVEX form, which provides
// syntax support for up to 32 512-bit register operands and up to 7 16-bit
// mask operands, as well as source operand data swizzling/memory operand
// conversion, eviction hint, and rounding mode.
|
||||
EVEX = 1U << 7,
|
||||
|
||||
// EVEX_K - Set if this instruction requires masking
|
||||
EVEX_K = 1U << 8,
|
||||
|
||||
// EVEX_Z - Set if this instruction has EVEX.Z field set.
|
||||
EVEX_Z = 1U << 9,
|
||||
|
||||
// EVEX_L2 - Set if this instruction has EVEX.L' field set.
|
||||
EVEX_L2 = 1U << 10,
|
||||
|
||||
// EVEX_B - Set if this instruction has EVEX.B field set.
|
||||
EVEX_B = 1U << 11,
|
||||
|
||||
// EVEX_CD8E - compressed disp8 form, element-size
|
||||
EVEX_CD8EShift = VEXShift + 12,
|
||||
EVEX_CD8EMask = 3,
|
||||
|
||||
// EVEX_CD8V - compressed disp8 form, vector-width
|
||||
EVEX_CD8VShift = EVEX_CD8EShift + 2,
|
||||
EVEX_CD8VMask = 7,
|
||||
|
||||
/// Has3DNow0F0FOpcode - This flag indicates that the instruction uses the
|
||||
/// wacky 0x0F 0x0F prefix for 3DNow! instructions. The manual documents
|
||||
/// this as having a 0x0F prefix with a 0x0F opcode, and each instruction
|
||||
/// storing a classifier in the imm8 field. To simplify our implementation,
|
||||
/// we handle this by storing the classifier in the opcode field and using
|
||||
/// this flag to indicate that the encoder should do the wacky 3DNow! thing.
|
||||
Has3DNow0F0FOpcode = 1U << 7,
|
||||
Has3DNow0F0FOpcode = 1U << 17,
|
||||
|
||||
/// MemOp4 - Used to indicate swapping of operand 3 and 4 to be encoded in
|
||||
/// ModRM or I8IMM. This is used for FMA4 and XOP instructions.
|
||||
MemOp4 = 1U << 8,
|
||||
MemOp4 = 1U << 18,
|
||||
|
||||
/// XOP - Opcode prefix used by XOP instructions.
|
||||
XOP = 1U << 9
|
||||
XOP = 1U << 19
|
||||
|
||||
};
|
||||
|
||||
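The EVEX_CD8E/EVEX_CD8V fields declared in the hunk above pack the compressed-disp8 element size and vector form into TSFlags (they land in bits 45-46 and 47-49 in the X86InstrFormats.td layout further down). A standalone sketch of how those two fields could be read back out; the constants mirror the declarations here but are restated locally, as an assumption, so the snippet compiles on its own:

#include <cstdint>

// Absolute bit positions taken from the TSFlags layout in X86InstrFormats.td
// (TSFlags{46-45} = EVEX_CD8E, TSFlags{49-47} = EVEX_CD8V).
constexpr unsigned EVEX_CD8EShift = 45;
constexpr uint64_t EVEX_CD8EMask  = 0x3;
constexpr unsigned EVEX_CD8VShift = 47;
constexpr uint64_t EVEX_CD8VMask  = 0x7;

// EVEX_CD8E encodes the element size: 0b00 -> 8, 0b01 -> 16, 0b10 -> 32, 0b11 -> 64 bits.
unsigned cd8ElementSizeInBits(uint64_t TSFlags) {
  return 8u << ((TSFlags >> EVEX_CD8EShift) & EVEX_CD8EMask);
}

// EVEX_CD8V is the raw CD8VForm value (CD8VF, CD8VH, ..., CD8VT8).
unsigned cd8VectorForm(uint64_t TSFlags) {
  return unsigned((TSFlags >> EVEX_CD8VShift) & EVEX_CD8VMask);
}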
@ -533,12 +567,19 @@ namespace X86II {
|
||||
unsigned CurOp = 0;
|
||||
if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) == 0)
|
||||
++CurOp;
|
||||
else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0) {
|
||||
assert(Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1);
|
||||
else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
|
||||
Desc.getOperandConstraint(3, MCOI::TIED_TO) == 1)
|
||||
// Special case for AVX-512 GATHER with 2 TIED_TO operands
|
||||
// Skip the first 2 operands: dst, mask_wb
|
||||
CurOp += 2;
|
||||
else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
|
||||
Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1)
|
||||
// Special case for GATHER with 2 TIED_TO operands
|
||||
// Skip the first 2 operands: dst, mask_wb
|
||||
CurOp += 2;
|
||||
}
|
||||
else if (NumOps > 2 && Desc.getOperandConstraint(NumOps - 2, MCOI::TIED_TO) == 0)
|
||||
// SCATTER
|
||||
++CurOp;
|
||||
return CurOp;
|
||||
}
|
||||
|
||||
@ -569,12 +610,15 @@ namespace X86II {
|
||||
case X86II::MRMSrcMem: {
|
||||
bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
|
||||
bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
|
||||
bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
|
||||
bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
|
||||
unsigned FirstMemOp = 1;
|
||||
if (HasVEX_4V)
|
||||
++FirstMemOp;// Skip the register source (which is encoded in VEX_VVVV).
|
||||
if (HasMemOp4)
|
||||
++FirstMemOp;// Skip the register source (which is encoded in I8IMM).
|
||||
|
||||
if (HasEVEX_K)
|
||||
++FirstMemOp;// Skip the mask register
|
||||
// FIXME: Maybe lea should have its own form? This is a horrible hack.
|
||||
//if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
|
||||
// Opcode == X86::LEA16r || Opcode == X86::LEA32r)
|
||||
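The MRMSrcMem case above walks past every register operand that is encoded outside the ModRM memory field before it reaches the memory operand itself: the VEX.vvvv source, the MemOp4 register, and now the EVEX write-mask. A hedged sketch of that counting, with a worked case in the comments; the real logic lives in the X86II helper shown above:

// Returns the operand index of the first memory operand for an MRMSrcMem-style
// instruction, given which extra register operands precede it. Sketch only.
unsigned firstMemoryOperand(bool HasVEX_4V, bool HasMemOp4, bool HasEVEX_K) {
  unsigned FirstMemOp = 1;          // operand 0 is the destination register
  if (HasVEX_4V)  ++FirstMemOp;     // source register encoded in VEX.vvvv
  if (HasMemOp4)  ++FirstMemOp;     // source register encoded in the imm8 field
  if (HasEVEX_K)  ++FirstMemOp;     // write-mask register (EVEX.aaa)
  return FirstMemOp;
  // Example: a masked EVEX instruction with a VEX.vvvv source and no MemOp4
  // register gives FirstMemOp == 3 (destination, vvvv source, mask, then memory).
}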
@ -611,6 +655,14 @@ namespace X86II {
|
||||
/// isX86_64ExtendedReg - Is the MachineOperand an x86-64 extended (r8 or
|
||||
/// higher) register? e.g. r8, xmm8, xmm13, etc.
|
||||
inline bool isX86_64ExtendedReg(unsigned RegNo) {
|
||||
if ((RegNo > X86::XMM7 && RegNo <= X86::XMM15) ||
|
||||
(RegNo > X86::XMM23 && RegNo <= X86::XMM31) ||
|
||||
(RegNo > X86::YMM7 && RegNo <= X86::YMM15) ||
|
||||
(RegNo > X86::YMM23 && RegNo <= X86::YMM31) ||
|
||||
(RegNo > X86::ZMM7 && RegNo <= X86::ZMM15) ||
|
||||
(RegNo > X86::ZMM23 && RegNo <= X86::ZMM31))
|
||||
return true;
|
||||
|
||||
switch (RegNo) {
|
||||
default: break;
|
||||
case X86::R8: case X86::R9: case X86::R10: case X86::R11:
|
||||
@ -621,16 +673,21 @@ namespace X86II {
|
||||
case X86::R12W: case X86::R13W: case X86::R14W: case X86::R15W:
|
||||
case X86::R8B: case X86::R9B: case X86::R10B: case X86::R11B:
|
||||
case X86::R12B: case X86::R13B: case X86::R14B: case X86::R15B:
|
||||
case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
|
||||
case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
|
||||
case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11:
|
||||
case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
|
||||
case X86::CR8: case X86::CR9: case X86::CR10: case X86::CR11:
|
||||
case X86::CR12: case X86::CR13: case X86::CR14: case X86::CR15:
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/// is32ExtendedReg - Is the register one of the "32-extended" SIMD registers
/// (xmm16/ymm16/zmm16 or higher)? e.g. zmm21, etc.
|
||||
static inline bool is32ExtendedReg(unsigned RegNo) {
|
||||
return ((RegNo > X86::XMM15 && RegNo <= X86::XMM31) ||
|
||||
(RegNo > X86::YMM15 && RegNo <= X86::YMM31) ||
|
||||
(RegNo > X86::ZMM15 && RegNo <= X86::ZMM31));
|
||||
}
|
||||
|
||||
|
||||
inline bool isX86_64NonExtLowByteReg(unsigned reg) {
|
||||
return (reg == X86::SPL || reg == X86::BPL ||
|
||||
|
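For the xmm/ymm/zmm files, isX86_64ExtendedReg covers registers 8-15 and 24-31, i.e. those whose register number has bit 3 set, while the new is32ExtendedReg covers registers 16-31, those with bit 4 set; roughly, the first bit ends up in the classic VEX/REX extension bits and the second in the extra EVEX bits that make registers 16-31 addressable. A sketch in terms of plain register indices (0-31), which is all the two predicates compute for these ranges:

#include <cassert>

// Counterparts of the two predicates above, written over plain indices rather
// than the X86:: register enums.
bool isExtendedIndex(unsigned Idx) {   // like isX86_64ExtendedReg for SIMD regs
  assert(Idx < 32);
  return (Idx >> 3) & 1;               // indices 8-15 and 24-31
}

bool is32ExtendedIndex(unsigned Idx) { // like is32ExtendedReg
  assert(Idx < 32);
  return (Idx >> 4) & 1;               // indices 16-31
}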
@ -90,11 +90,14 @@ def FeatureAVX512 : SubtargetFeature<"avx-512", "X86SSELevel", "AVX512",
|
||||
"Enable AVX-512 instructions",
|
||||
[FeatureAVX2]>;
|
||||
def FeatureERI : SubtargetFeature<"avx-512-eri", "HasERI", "true",
|
||||
"Enable AVX-512 Exponential and Reciprocal Instructions">;
|
||||
"Enable AVX-512 Exponential and Reciprocal Instructions",
|
||||
[FeatureAVX512]>;
|
||||
def FeatureCDI : SubtargetFeature<"avx-512-cdi", "HasCDI", "true",
|
||||
"Enable AVX-512 Conflict Detection Instructions">;
|
||||
"Enable AVX-512 Conflict Detection Instructions",
|
||||
[FeatureAVX512]>;
|
||||
def FeaturePFI : SubtargetFeature<"avx-512-pfi", "HasPFI", "true",
|
||||
"Enable AVX-512 PreFetch Instructions">;
|
||||
"Enable AVX-512 PreFetch Instructions",
|
||||
[FeatureAVX512]>;
|
||||
|
||||
def FeaturePCLMUL : SubtargetFeature<"pclmul", "HasPCLMUL", "true",
|
||||
"Enable packed carry-less multiplication instructions",
|
||||
|
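The change above gives the ERI/CDI/PFI subtarget features an implied-features list of [FeatureAVX512], so requesting any of them transitively turns on AVX-512 (and, through it, AVX2 and the rest of the SSE chain). A minimal sketch of that transitive expansion, with illustrative feature names and a hypothetical implication map, not the real SubtargetFeature machinery:

#include <map>
#include <set>
#include <string>
#include <vector>

// Expand a requested feature list through an "implies" relation until no new
// features are added (e.g. avx-512-cdi -> avx-512 -> avx2 -> ...).
std::set<std::string> expandFeatures(
    const std::vector<std::string> &Requested,
    const std::map<std::string, std::vector<std::string>> &Implies) {
  std::set<std::string> Enabled;
  std::vector<std::string> Work(Requested.begin(), Requested.end());
  while (!Work.empty()) {
    std::string F = Work.back();
    Work.pop_back();
    if (!Enabled.insert(F).second)
      continue;                       // already processed
    auto It = Implies.find(F);
    if (It != Implies.end())
      Work.insert(Work.end(), It->second.begin(), It->second.end());
  }
  return Enabled;
}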
@ -96,6 +96,20 @@ def SSEPackedSingle : Domain<1>;
|
||||
def SSEPackedDouble : Domain<2>;
|
||||
def SSEPackedInt : Domain<3>;
|
||||
|
||||
// Class specifying the vector form used when decompressing the EVEX
// compressed (8-bit) displacement.
|
||||
class CD8VForm<bits<3> val> {
|
||||
bits<3> Value = val;
|
||||
}
|
||||
def CD8VF : CD8VForm<0>; // v := VL
|
||||
def CD8VH : CD8VForm<1>; // v := VL/2
|
||||
def CD8VQ : CD8VForm<2>; // v := VL/4
|
||||
def CD8VO : CD8VForm<3>; // v := VL/8
|
||||
def CD8VT1 : CD8VForm<4>; // v := 1
|
||||
def CD8VT2 : CD8VForm<5>; // v := 2
|
||||
def CD8VT4 : CD8VForm<6>; // v := 4
|
||||
def CD8VT8 : CD8VForm<7>; // v := 8
|
||||
|
||||
// Prefix byte classes which are used to indicate to the ad-hoc machine code
|
||||
// emitter that various prefix bytes are required.
|
||||
class OpSize { bit hasOpSizePrefix = 1; }
|
||||
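The CD8VForm values above describe, per instruction, how many elements ("v" in the comments) an EVEX compressed 8-bit displacement covers: the full vector (CD8VF), a fraction of it (CD8VH/VQ/VO), or a fixed tuple of 1/2/4/8 elements. One plausible way to turn that into the disp8 scale factor, assuming the scale is v times the element size in bytes; this derivation is an assumption drawn from the comments, not code from the commit:

#include <cassert>

// CD8V form values as defined above (CD8VF = 0 ... CD8VT8 = 7).
enum CD8VForm { CD8VF = 0, CD8VH, CD8VQ, CD8VO, CD8VT1, CD8VT2, CD8VT4, CD8VT8 };

// VLElems: vector length in elements (e.g. 16 for a 512-bit vector of 32-bit
// elements). ElemBytes: element size in bytes, from EVEX_CD8E.
unsigned disp8Scale(CD8VForm Form, unsigned VLElems, unsigned ElemBytes) {
  unsigned V = 0;
  switch (Form) {
  case CD8VF:  V = VLElems;     break; // v := VL
  case CD8VH:  V = VLElems / 2; break; // v := VL/2
  case CD8VQ:  V = VLElems / 4; break; // v := VL/4
  case CD8VO:  V = VLElems / 8; break; // v := VL/8
  case CD8VT1: V = 1;           break; // v := 1
  case CD8VT2: V = 2;           break; // v := 2
  case CD8VT4: V = 4;           break; // v := 4
  case CD8VT8: V = 8;           break; // v := 8
  }
  return V * ElemBytes; // e.g. CD8VF with 16 x 32-bit elements -> scale of 64 bytes
}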
@ -132,6 +146,19 @@ class VEX_4VOp3 : VEX { bit hasVEX_4VOp3Prefix = 1; }
|
||||
class VEX_I8IMM { bit hasVEX_i8ImmReg = 1; }
|
||||
class VEX_L { bit hasVEX_L = 1; }
|
||||
class VEX_LIG { bit ignoresVEX_L = 1; }
|
||||
class EVEX : VEX { bit hasEVEXPrefix = 1; }
|
||||
class EVEX_4V : VEX_4V { bit hasEVEXPrefix = 1; }
|
||||
class EVEX_K { bit hasEVEX_K = 1; }
|
||||
class EVEX_KZ : EVEX_K { bit hasEVEX_Z = 1; }
|
||||
class EVEX_B { bit hasEVEX_B = 1; }
|
||||
class EVEX_V512 { bit hasEVEX_L2 = 1; bit hasVEX_L = 0; }
|
||||
class EVEX_CD8<int esize, CD8VForm form> {
|
||||
bits<2> EVEX_CD8E = !if(!eq(esize, 8), 0b00,
|
||||
!if(!eq(esize, 16), 0b01,
|
||||
!if(!eq(esize, 32), 0b10,
|
||||
!if(!eq(esize, 64), 0b11, ?))));
|
||||
bits<3> EVEX_CD8V = form.Value;
|
||||
}
|
||||
class Has3DNow0F0FOpcode { bit has3DNow0F0FOpcode = 1; }
|
||||
class MemOp4 { bit hasMemOp4Prefix = 1; }
|
||||
class XOP { bit hasXOP_Prefix = 1; }
|
||||
@ -177,6 +204,13 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
|
||||
// to be encoded in an immediate field?
|
||||
bit hasVEX_L = 0; // Does this inst use large (256-bit) registers?
|
||||
bit ignoresVEX_L = 0; // Does this instruction ignore the L-bit?
|
||||
bit hasEVEXPrefix = 0; // Does this inst require EVEX form?
|
||||
bit hasEVEX_K = 0; // Does this inst require masking?
|
||||
bit hasEVEX_Z = 0; // Does this inst set the EVEX_Z field?
|
||||
bit hasEVEX_L2 = 0; // Does this inst set the EVEX_L2 field?
|
||||
bit hasEVEX_B = 0; // Does this inst set the EVEX_B field?
|
||||
bits<2> EVEX_CD8E = 0; // Compressed disp8 form - element-size.
|
||||
bits<3> EVEX_CD8V = 0; // Compressed disp8 form - vector-width.
|
||||
bit has3DNow0F0FOpcode = 0; // Wacky 3DNow! encoding?
|
||||
bit hasMemOp4Prefix = 0; // Same bit as VEX_W, but used for swapping operands
|
||||
bit hasXOP_Prefix = 0; // Does this inst require an XOP prefix?
|
||||
@ -200,9 +234,16 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
|
||||
let TSFlags{37} = hasVEX_i8ImmReg;
|
||||
let TSFlags{38} = hasVEX_L;
|
||||
let TSFlags{39} = ignoresVEX_L;
|
||||
let TSFlags{40} = has3DNow0F0FOpcode;
|
||||
let TSFlags{41} = hasMemOp4Prefix;
|
||||
let TSFlags{42} = hasXOP_Prefix;
|
||||
let TSFlags{40} = hasEVEXPrefix;
|
||||
let TSFlags{41} = hasEVEX_K;
|
||||
let TSFlags{42} = hasEVEX_Z;
|
||||
let TSFlags{43} = hasEVEX_L2;
|
||||
let TSFlags{44} = hasEVEX_B;
|
||||
let TSFlags{46-45} = EVEX_CD8E;
|
||||
let TSFlags{49-47} = EVEX_CD8V;
|
||||
let TSFlags{50} = has3DNow0F0FOpcode;
|
||||
let TSFlags{51} = hasMemOp4Prefix;
|
||||
let TSFlags{52} = hasXOP_Prefix;
|
||||
}
|
||||
|
||||
class PseudoI<dag oops, dag iops, list<dag> pattern>
|
||||
@ -553,6 +594,74 @@ class AVX2AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize,
|
||||
Requires<[HasAVX2]>;
|
||||
|
||||
|
||||
// AVX-512 Instruction Templates:
|
||||
// Instructions introduced in AVX-512 (no SSE equivalent forms)
|
||||
//
|
||||
// AVX5128I - AVX-512 instructions with T8 and OpSize prefix.
|
||||
// AVX512AIi8 - AVX-512 instructions with TA, OpSize prefix and ImmT = Imm8.
|
||||
// AVX512PDI - AVX-512 instructions with TB, OpSize, double packed.
|
||||
// AVX512PSI - AVX-512 instructions with TB, single packed.
|
||||
// AVX512XS8I - AVX-512 instructions with T8 and XS prefixes.
|
||||
// AVX512XSI - AVX-512 instructions with XS prefix, generic domain.
|
||||
// AVX512BI - AVX-512 instructions with TB, OpSize, int packed domain.
|
||||
// AVX512SI - AVX-512 scalar instructions with TB and OpSize prefixes.
|
||||
|
||||
class AVX5128I<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
||||
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, OpSize,
|
||||
Requires<[HasAVX512]>;
|
||||
class AVX512XS8I<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
||||
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8XS,
|
||||
Requires<[HasAVX512]>;
|
||||
class AVX512XSI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
||||
: I<o, F, outs, ins, asm, pattern, itin>, XS,
|
||||
Requires<[HasAVX512]>;
|
||||
class AVX512XDI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
||||
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, XD,
|
||||
Requires<[HasAVX512]>;
|
||||
class AVX512BI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
||||
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB, OpSize,
|
||||
Requires<[HasAVX512]>;
|
||||
class AVX512BIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
||||
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB, OpSize,
|
||||
Requires<[HasAVX512]>;
|
||||
class AVX512SI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
||||
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB, OpSize,
|
||||
Requires<[HasAVX512]>;
|
||||
class AVX512AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
||||
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize,
|
||||
Requires<[HasAVX512]>;
|
||||
class AVX512Ii8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
||||
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>,
|
||||
Requires<[HasAVX512]>;
|
||||
class AVX512PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
||||
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB,
|
||||
OpSize, Requires<[HasAVX512]>;
|
||||
class AVX512PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
||||
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB,
|
||||
Requires<[HasAVX512]>;
|
||||
class AVX512PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, Domain d, InstrItinClass itin = NoItinerary>
|
||||
: Ii8<o, F, outs, ins, asm, pattern, itin, d>, Requires<[HasAVX512]>;
|
||||
class AVX512PI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag> pattern, Domain d, InstrItinClass itin = NoItinerary>
|
||||
: I<o, F, outs, ins, asm, pattern, itin, d>, Requires<[HasAVX512]>;
|
||||
class AVX512FMA3<bits<8> o, Format F, dag outs, dag ins, string asm,
|
||||
list<dag>pattern, InstrItinClass itin = NoItinerary>
|
||||
: I<o, F, outs, ins, asm, pattern, itin>, T8,
|
||||
OpSize, EVEX_4V, Requires<[HasAVX512]>;
|
||||
|
||||
// AES Instruction Templates:
|
||||
//
|
||||
// AES8I
|
||||
|
@ -317,6 +317,16 @@ def X86MemVY64Operand : AsmOperandClass {
|
||||
let Name = "MemVY64"; let PredicateMethod = "isMemVY64";
|
||||
}
|
||||
|
||||
def X86MemVZ64Operand : AsmOperandClass {
|
||||
let Name = "MemVZ64"; let PredicateMethod = "isMemVZ64";
|
||||
}
|
||||
def X86MemVZ32Operand : AsmOperandClass {
|
||||
let Name = "MemVZ32"; let PredicateMethod = "isMemVZ32";
|
||||
}
|
||||
def X86Mem512AsmOperand : AsmOperandClass {
|
||||
let Name = "Mem512"; let PredicateMethod = "isMem512";
|
||||
}
|
||||
|
||||
def X86AbsMemAsmOperand : AsmOperandClass {
|
||||
let Name = "AbsMem";
|
||||
let SuperClasses = [X86MemAsmOperand];
|
||||
@ -345,6 +355,8 @@ def i128mem : X86MemOperand<"printi128mem"> {
|
||||
let ParserMatchClass = X86Mem128AsmOperand; }
|
||||
def i256mem : X86MemOperand<"printi256mem"> {
|
||||
let ParserMatchClass = X86Mem256AsmOperand; }
|
||||
def i512mem : X86MemOperand<"printi512mem"> {
|
||||
let ParserMatchClass = X86Mem512AsmOperand; }
|
||||
def f32mem : X86MemOperand<"printf32mem"> {
|
||||
let ParserMatchClass = X86Mem32AsmOperand; }
|
||||
def f64mem : X86MemOperand<"printf64mem"> {
|
||||
@ -355,6 +367,12 @@ def f128mem : X86MemOperand<"printf128mem"> {
|
||||
let ParserMatchClass = X86Mem128AsmOperand; }
|
||||
def f256mem : X86MemOperand<"printf256mem">{
|
||||
let ParserMatchClass = X86Mem256AsmOperand; }
|
||||
def f512mem : X86MemOperand<"printf512mem">{
|
||||
let ParserMatchClass = X86Mem512AsmOperand; }
|
||||
def v512mem : Operand<iPTR> {
|
||||
let PrintMethod = "printf512mem";
|
||||
let MIOperandInfo = (ops ptr_rc, i8imm, VR512, i32imm, i8imm);
|
||||
let ParserMatchClass = X86Mem512AsmOperand; }
|
||||
|
||||
// Gather mem operands
|
||||
def vx32mem : X86MemOperand<"printi32mem">{
|
||||
@ -369,6 +387,15 @@ def vx64mem : X86MemOperand<"printi64mem">{
|
||||
def vy64mem : X86MemOperand<"printi64mem">{
|
||||
let MIOperandInfo = (ops ptr_rc, i8imm, VR256, i32imm, i8imm);
|
||||
let ParserMatchClass = X86MemVY64Operand; }
|
||||
def vy64xmem : X86MemOperand<"printi64mem">{
|
||||
let MIOperandInfo = (ops ptr_rc, i8imm, VR256X, i32imm, i8imm);
|
||||
let ParserMatchClass = X86MemVY64Operand; }
|
||||
def vz32mem : X86MemOperand<"printi32mem">{
|
||||
let MIOperandInfo = (ops ptr_rc, i16imm, VR512, i32imm, i8imm);
|
||||
let ParserMatchClass = X86MemVZ32Operand; }
|
||||
def vz64mem : X86MemOperand<"printi64mem">{
|
||||
let MIOperandInfo = (ops ptr_rc, i8imm, VR512, i32imm, i8imm);
|
||||
let ParserMatchClass = X86MemVZ64Operand; }
|
||||
}
|
||||
|
||||
// A version of i8mem for use on x86-64 that uses GR64_NOREX instead of
|
||||
@ -590,11 +617,19 @@ def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
|
||||
def HasAVX : Predicate<"Subtarget->hasAVX()">;
|
||||
def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
|
||||
def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
|
||||
def HasAVX512 : Predicate<"Subtarget->hasAVX512()">;
|
||||
def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
|
||||
def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
|
||||
def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
|
||||
def HasCDI : Predicate<"Subtarget->hasCDI()">;
|
||||
def HasPFI : Predicate<"Subtarget->hasPFI()">;
|
||||
def HasEMI : Predicate<"Subtarget->hasERI()">;
|
||||
|
||||
def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
|
||||
def HasAES : Predicate<"Subtarget->hasAES()">;
|
||||
def HasPCLMUL : Predicate<"Subtarget->hasPCLMUL()">;
|
||||
def HasFMA : Predicate<"Subtarget->hasFMA()">;
|
||||
def UseFMAOnAVX : Predicate<"Subtarget->hasFMA() && !Subtarget->hasAVX512()">;
|
||||
def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
|
||||
def HasXOP : Predicate<"Subtarget->hasXOP()">;
|
||||
def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">;
|
||||
|
@ -701,7 +701,6 @@ unsigned get512BitSuperRegister(unsigned Reg) {
|
||||
if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
|
||||
return Reg;
|
||||
llvm_unreachable("Unexpected SIMD register");
|
||||
return 0;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -477,6 +477,9 @@ void X86Subtarget::initializeEnvironment() {
|
||||
HasBMI2 = false;
|
||||
HasRTM = false;
|
||||
HasHLE = false;
|
||||
HasERI = false;
|
||||
HasCDI = false;
|
||||
HasPFI = false;
|
||||
HasADX = false;
|
||||
HasPRFCHW = false;
|
||||
HasRDSEED = false;
|
||||
|
@ -81,16 +81,20 @@ static inline bool inheritsFrom(InstructionContext child,
|
||||
case IC_64BIT_REXW_OPSIZE:
|
||||
return false;
|
||||
case IC_VEX:
|
||||
return inheritsFrom(child, IC_VEX_W) ||
|
||||
return inheritsFrom(child, IC_VEX_L_W) ||
|
||||
inheritsFrom(child, IC_VEX_W) ||
|
||||
(VEX_LIG && inheritsFrom(child, IC_VEX_L));
|
||||
case IC_VEX_XS:
|
||||
return inheritsFrom(child, IC_VEX_W_XS) ||
|
||||
return inheritsFrom(child, IC_VEX_L_W_XS) ||
|
||||
inheritsFrom(child, IC_VEX_W_XS) ||
|
||||
(VEX_LIG && inheritsFrom(child, IC_VEX_L_XS));
|
||||
case IC_VEX_XD:
|
||||
return inheritsFrom(child, IC_VEX_W_XD) ||
|
||||
return inheritsFrom(child, IC_VEX_L_W_XD) ||
|
||||
inheritsFrom(child, IC_VEX_W_XD) ||
|
||||
(VEX_LIG && inheritsFrom(child, IC_VEX_L_XD));
|
||||
case IC_VEX_OPSIZE:
|
||||
return inheritsFrom(child, IC_VEX_W_OPSIZE) ||
|
||||
return inheritsFrom(child, IC_VEX_L_W_OPSIZE) ||
|
||||
inheritsFrom(child, IC_VEX_W_OPSIZE) ||
|
||||
(VEX_LIG && inheritsFrom(child, IC_VEX_L_OPSIZE));
|
||||
case IC_VEX_W:
|
||||
case IC_VEX_W_XS:
|
||||
@ -100,11 +104,90 @@ static inline bool inheritsFrom(InstructionContext child,
|
||||
case IC_VEX_L:
|
||||
case IC_VEX_L_XS:
|
||||
case IC_VEX_L_XD:
|
||||
return false;
|
||||
case IC_VEX_L_OPSIZE:
|
||||
return inheritsFrom(child, IC_VEX_L_W_OPSIZE);
|
||||
return false;
|
||||
case IC_VEX_L_W:
|
||||
case IC_VEX_L_W_XS:
|
||||
case IC_VEX_L_W_XD:
|
||||
case IC_VEX_L_W_OPSIZE:
|
||||
return false;
|
||||
case IC_EVEX:
|
||||
return inheritsFrom(child, IC_EVEX_W) ||
|
||||
inheritsFrom(child, IC_EVEX_L_W);
|
||||
case IC_EVEX_XS:
|
||||
return inheritsFrom(child, IC_EVEX_W_XS) ||
|
||||
inheritsFrom(child, IC_EVEX_L_W_XS);
|
||||
case IC_EVEX_XD:
|
||||
return inheritsFrom(child, IC_EVEX_W_XD) ||
|
||||
inheritsFrom(child, IC_EVEX_L_W_XD);
|
||||
case IC_EVEX_OPSIZE:
|
||||
return inheritsFrom(child, IC_EVEX_W_OPSIZE) ||
|
||||
inheritsFrom(child, IC_EVEX_W_OPSIZE);
|
||||
case IC_EVEX_W:
|
||||
case IC_EVEX_W_XS:
|
||||
case IC_EVEX_W_XD:
|
||||
case IC_EVEX_W_OPSIZE:
|
||||
return false;
|
||||
case IC_EVEX_L:
|
||||
case IC_EVEX_L_XS:
|
||||
case IC_EVEX_L_XD:
|
||||
case IC_EVEX_L_OPSIZE:
|
||||
return false;
|
||||
case IC_EVEX_L_W:
|
||||
case IC_EVEX_L_W_XS:
|
||||
case IC_EVEX_L_W_XD:
|
||||
case IC_EVEX_L_W_OPSIZE:
|
||||
return false;
|
||||
case IC_EVEX_L2:
|
||||
case IC_EVEX_L2_XS:
|
||||
case IC_EVEX_L2_XD:
|
||||
case IC_EVEX_L2_OPSIZE:
|
||||
return false;
|
||||
case IC_EVEX_L2_W:
|
||||
case IC_EVEX_L2_W_XS:
|
||||
case IC_EVEX_L2_W_XD:
|
||||
case IC_EVEX_L2_W_OPSIZE:
|
||||
return false;
|
||||
case IC_EVEX_K:
|
||||
return inheritsFrom(child, IC_EVEX_W_K) ||
|
||||
inheritsFrom(child, IC_EVEX_L_W_K);
|
||||
case IC_EVEX_XS_K:
|
||||
return inheritsFrom(child, IC_EVEX_W_XS_K) ||
|
||||
inheritsFrom(child, IC_EVEX_L_W_XS_K);
|
||||
case IC_EVEX_XD_K:
|
||||
return inheritsFrom(child, IC_EVEX_W_XD_K) ||
|
||||
inheritsFrom(child, IC_EVEX_L_W_XD_K);
|
||||
case IC_EVEX_OPSIZE_K:
|
||||
return inheritsFrom(child, IC_EVEX_W_OPSIZE_K) ||
|
||||
inheritsFrom(child, IC_EVEX_W_OPSIZE_K);
|
||||
case IC_EVEX_W_K:
|
||||
case IC_EVEX_W_XS_K:
|
||||
case IC_EVEX_W_XD_K:
|
||||
case IC_EVEX_W_OPSIZE_K:
|
||||
return false;
|
||||
case IC_EVEX_L_K:
|
||||
case IC_EVEX_L_XS_K:
|
||||
case IC_EVEX_L_XD_K:
|
||||
case IC_EVEX_L_OPSIZE_K:
|
||||
return false;
|
||||
case IC_EVEX_L_W_K:
|
||||
case IC_EVEX_L_W_XS_K:
|
||||
case IC_EVEX_L_W_XD_K:
|
||||
case IC_EVEX_L_W_OPSIZE_K:
|
||||
return false;
|
||||
case IC_EVEX_L2_K:
|
||||
case IC_EVEX_L2_B:
|
||||
case IC_EVEX_L2_XS_K:
|
||||
case IC_EVEX_L2_XD_K:
|
||||
case IC_EVEX_L2_OPSIZE_K:
|
||||
case IC_EVEX_L2_OPSIZE_B:
|
||||
return false;
|
||||
case IC_EVEX_L2_W_K:
|
||||
case IC_EVEX_L2_W_XS_K:
|
||||
case IC_EVEX_L2_W_XD_K:
|
||||
case IC_EVEX_L2_W_OPSIZE_K:
|
||||
case IC_EVEX_L2_W_OPSIZE_B:
|
||||
return false;
|
||||
default:
|
||||
llvm_unreachable("Unknown instruction class");
|
||||
}
|
||||
@ -123,10 +206,13 @@ static inline bool outranks(InstructionContext upper,
|
||||
assert(lower < IC_max);
|
||||
|
||||
#define ENUM_ENTRY(n, r, d) r,
|
||||
#define ENUM_ENTRY_K_B(n, r, d) ENUM_ENTRY(n, r, d) \
|
||||
ENUM_ENTRY(n##_K_B, r, d) ENUM_ENTRY(n##_K, r, d) ENUM_ENTRY(n##_B, r, d)
|
||||
static int ranks[IC_max] = {
|
||||
INSTRUCTION_CONTEXTS
|
||||
};
|
||||
#undef ENUM_ENTRY
|
||||
#undef ENUM_ENTRY_K_B
|
||||
|
||||
return (ranks[upper] > ranks[lower]);
|
||||
}
|
||||
@ -142,8 +228,11 @@ static inline const char* stringForContext(InstructionContext insnContext) {
|
||||
default:
|
||||
llvm_unreachable("Unhandled instruction class");
|
||||
#define ENUM_ENTRY(n, r, d) case n: return #n; break;
|
||||
#define ENUM_ENTRY_K_B(n, r, d) ENUM_ENTRY(n, r, d) ENUM_ENTRY(n##_K_B, r, d)\
|
||||
ENUM_ENTRY(n##_K, r, d) ENUM_ENTRY(n##_B, r, d)
|
||||
INSTRUCTION_CONTEXTS
|
||||
#undef ENUM_ENTRY
|
||||
#undef ENUM_ENTRY_K_B
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -236,6 +236,10 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
|
||||
HasVEX_WPrefix = Rec->getValueAsBit("hasVEX_WPrefix");
|
||||
HasMemOp4Prefix = Rec->getValueAsBit("hasMemOp4Prefix");
|
||||
IgnoresVEX_L = Rec->getValueAsBit("ignoresVEX_L");
|
||||
HasEVEXPrefix = Rec->getValueAsBit("hasEVEXPrefix");
|
||||
HasEVEX_L2Prefix = Rec->getValueAsBit("hasEVEX_L2");
|
||||
HasEVEX_K = Rec->getValueAsBit("hasEVEX_K");
|
||||
HasEVEX_B = Rec->getValueAsBit("hasEVEX_B");
|
||||
HasLockPrefix = Rec->getValueAsBit("hasLockPrefix");
|
||||
IsCodeGenOnly = Rec->getValueAsBit("isCodeGenOnly");
|
||||
|
||||
@ -295,15 +299,98 @@ void RecognizableInstr::processInstr(DisassemblerTables &tables,
|
||||
recogInstr.emitDecodePath(tables);
|
||||
}
|
||||
|
||||
#define EVEX_KB(n) (HasEVEX_K && HasEVEX_B? n##_K_B : \
|
||||
(HasEVEX_K? n##_K : (HasEVEX_B ? n##_B : n)))
|
||||
|
||||
InstructionContext RecognizableInstr::insnContext() const {
|
||||
InstructionContext insnContext;
|
||||
|
||||
if (HasVEX_4VPrefix || HasVEX_4VOp3Prefix|| HasVEXPrefix) {
|
||||
if (HasEVEXPrefix) {
|
||||
if (HasVEX_LPrefix && HasEVEX_L2Prefix) {
|
||||
char msg[200];
|
||||
sprintf(msg, "Don't support VEX.L if EVEX_L2 is enabled: %s", Name.c_str());
|
||||
llvm_unreachable(msg);
|
||||
}
|
||||
// VEX_L & VEX_W
|
||||
if (HasVEX_LPrefix && HasVEX_WPrefix) {
|
||||
if (HasOpSizePrefix)
|
||||
insnContext = EVEX_KB(IC_EVEX_L_W_OPSIZE);
|
||||
else if (Prefix == X86Local::XS || Prefix == X86Local::T8XS)
|
||||
insnContext = EVEX_KB(IC_EVEX_L_W_XS);
|
||||
else if (Prefix == X86Local::XD || Prefix == X86Local::T8XD ||
|
||||
Prefix == X86Local::TAXD)
|
||||
insnContext = EVEX_KB(IC_EVEX_L_W_XD);
|
||||
else
|
||||
insnContext = EVEX_KB(IC_EVEX_L_W);
|
||||
} else if (HasVEX_LPrefix) {
|
||||
// VEX_L
|
||||
if (HasOpSizePrefix)
|
||||
insnContext = EVEX_KB(IC_EVEX_L_OPSIZE);
|
||||
else if (Prefix == X86Local::XS || Prefix == X86Local::T8XS)
|
||||
insnContext = EVEX_KB(IC_EVEX_L_XS);
|
||||
else if (Prefix == X86Local::XD || Prefix == X86Local::T8XD ||
|
||||
Prefix == X86Local::TAXD)
|
||||
insnContext = EVEX_KB(IC_EVEX_L_XD);
|
||||
else
|
||||
insnContext = EVEX_KB(IC_EVEX_L);
|
||||
}
|
||||
else if (HasEVEX_L2Prefix && HasVEX_WPrefix) {
|
||||
// EVEX_L2 & VEX_W
|
||||
if (HasOpSizePrefix)
|
||||
insnContext = EVEX_KB(IC_EVEX_L2_W_OPSIZE);
|
||||
else if (Prefix == X86Local::XS || Prefix == X86Local::T8XS)
|
||||
insnContext = EVEX_KB(IC_EVEX_L2_W_XS);
|
||||
else if (Prefix == X86Local::XD || Prefix == X86Local::T8XD ||
|
||||
Prefix == X86Local::TAXD)
|
||||
insnContext = EVEX_KB(IC_EVEX_L2_W_XD);
|
||||
else
|
||||
insnContext = EVEX_KB(IC_EVEX_L2_W);
|
||||
} else if (HasEVEX_L2Prefix) {
|
||||
// EVEX_L2
|
||||
if (HasOpSizePrefix)
|
||||
insnContext = EVEX_KB(IC_EVEX_L2_OPSIZE);
|
||||
else if (Prefix == X86Local::XD || Prefix == X86Local::T8XD ||
|
||||
Prefix == X86Local::TAXD)
|
||||
insnContext = EVEX_KB(IC_EVEX_L2_XD);
|
||||
else if (Prefix == X86Local::XS || Prefix == X86Local::T8XS)
|
||||
insnContext = EVEX_KB(IC_EVEX_L2_XS);
|
||||
else
|
||||
insnContext = EVEX_KB(IC_EVEX_L2);
|
||||
}
|
||||
else if (HasVEX_WPrefix) {
|
||||
// VEX_W
|
||||
if (HasOpSizePrefix)
|
||||
insnContext = EVEX_KB(IC_EVEX_W_OPSIZE);
|
||||
else if (Prefix == X86Local::XS || Prefix == X86Local::T8XS)
|
||||
insnContext = EVEX_KB(IC_EVEX_W_XS);
|
||||
else if (Prefix == X86Local::XD || Prefix == X86Local::T8XD ||
|
||||
Prefix == X86Local::TAXD)
|
||||
insnContext = EVEX_KB(IC_EVEX_W_XD);
|
||||
else
|
||||
insnContext = EVEX_KB(IC_EVEX_W);
|
||||
}
|
||||
// No L, no W
|
||||
else if (HasOpSizePrefix)
|
||||
insnContext = EVEX_KB(IC_EVEX_OPSIZE);
|
||||
else if (Prefix == X86Local::XD || Prefix == X86Local::T8XD ||
|
||||
Prefix == X86Local::TAXD)
|
||||
insnContext = EVEX_KB(IC_EVEX_XD);
|
||||
else if (Prefix == X86Local::XS || Prefix == X86Local::T8XS)
|
||||
insnContext = EVEX_KB(IC_EVEX_XS);
|
||||
else
|
||||
insnContext = EVEX_KB(IC_EVEX);
|
||||
/// eof EVEX
|
||||
} else if (HasVEX_4VPrefix || HasVEX_4VOp3Prefix|| HasVEXPrefix) {
|
||||
if (HasVEX_LPrefix && HasVEX_WPrefix) {
|
||||
if (HasOpSizePrefix)
|
||||
insnContext = IC_VEX_L_W_OPSIZE;
|
||||
else if (Prefix == X86Local::XS || Prefix == X86Local::T8XS)
|
||||
insnContext = IC_VEX_L_W_XS;
|
||||
else if (Prefix == X86Local::XD || Prefix == X86Local::T8XD ||
|
||||
Prefix == X86Local::TAXD)
|
||||
insnContext = IC_VEX_L_W_XD;
|
||||
else
|
||||
llvm_unreachable("Don't support VEX.L and VEX.W together");
|
||||
insnContext = IC_VEX_L_W;
|
||||
} else if (HasOpSizePrefix && HasVEX_LPrefix)
|
||||
insnContext = IC_VEX_L_OPSIZE;
|
||||
else if (HasOpSizePrefix && HasVEX_WPrefix)
|
||||
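The EVEX_KB(n) macro above picks the _K_B, _K, _B, or plain variant of an instruction context depending on whether the instruction carries a write-mask (EVEX_K) and/or the EVEX.B bit. The same selection written out as a plain function over a hypothetical four-variant enum, to make the macro's nesting explicit:

// Hypothetical stand-in for one family of contexts; the real table defines a
// _K_B/_K/_B/plain variant for every EVEX context.
enum EvexContext { IC_EVEX_OPSIZE, IC_EVEX_OPSIZE_K, IC_EVEX_OPSIZE_B, IC_EVEX_OPSIZE_K_B };

// Same decision tree as EVEX_KB(n): K and B -> _K_B, K alone -> _K,
// B alone -> _B, otherwise the base context.
EvexContext pickOpSizeContext(bool HasEVEX_K, bool HasEVEX_B) {
  if (HasEVEX_K && HasEVEX_B) return IC_EVEX_OPSIZE_K_B;
  if (HasEVEX_K)              return IC_EVEX_OPSIZE_K;
  if (HasEVEX_B)              return IC_EVEX_OPSIZE_B;
  return IC_EVEX_OPSIZE;
}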
@ -641,6 +728,9 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
|
||||
"Unexpected number of operands for MRMDestMemFrm");
|
||||
HANDLE_OPERAND(memory)
|
||||
|
||||
if (HasEVEX_K)
|
||||
HANDLE_OPERAND(writemaskRegister)
|
||||
|
||||
if (HasVEX_4VPrefix)
|
||||
// FIXME: In AVX, the register below becomes the one encoded
|
||||
// in ModRMVEX and the one above the one in the VEX.VVVV field
|
||||
@ -665,6 +755,9 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
|
||||
|
||||
HANDLE_OPERAND(roRegister)
|
||||
|
||||
if (HasEVEX_K)
|
||||
HANDLE_OPERAND(writemaskRegister)
|
||||
|
||||
if (HasVEX_4VPrefix)
|
||||
// FIXME: In AVX, the register below becomes the one encoded
|
||||
// in ModRMVEX and the one above the one in the VEX.VVVV field
|
||||
@ -698,6 +791,9 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
|
||||
|
||||
HANDLE_OPERAND(roRegister)
|
||||
|
||||
if (HasEVEX_K)
|
||||
HANDLE_OPERAND(writemaskRegister)
|
||||
|
||||
if (HasVEX_4VPrefix)
|
||||
// FIXME: In AVX, the register below becomes the one encoded
|
||||
// in ModRMVEX and the one above the one in the VEX.VVVV field
|
||||
@ -1079,17 +1175,22 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
|
||||
TYPE("i8imm", TYPE_IMM8)
|
||||
TYPE("GR8", TYPE_R8)
|
||||
TYPE("VR128", TYPE_XMM128)
|
||||
TYPE("VR128X", TYPE_XMM128)
|
||||
TYPE("f128mem", TYPE_M128)
|
||||
TYPE("f256mem", TYPE_M256)
|
||||
TYPE("f512mem", TYPE_M512)
|
||||
TYPE("FR64", TYPE_XMM64)
|
||||
TYPE("FR64X", TYPE_XMM64)
|
||||
TYPE("f64mem", TYPE_M64FP)
|
||||
TYPE("sdmem", TYPE_M64FP)
|
||||
TYPE("FR32", TYPE_XMM32)
|
||||
TYPE("FR32X", TYPE_XMM32)
|
||||
TYPE("f32mem", TYPE_M32FP)
|
||||
TYPE("ssmem", TYPE_M32FP)
|
||||
TYPE("RST", TYPE_ST)
|
||||
TYPE("i128mem", TYPE_M128)
|
||||
TYPE("i256mem", TYPE_M256)
|
||||
TYPE("i512mem", TYPE_M512)
|
||||
TYPE("i64i32imm_pcrel", TYPE_REL64)
|
||||
TYPE("i16imm_pcrel", TYPE_REL16)
|
||||
TYPE("i32imm_pcrel", TYPE_REL32)
|
||||
@ -1116,13 +1217,22 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
|
||||
TYPE("offset32", TYPE_MOFFS32)
|
||||
TYPE("offset64", TYPE_MOFFS64)
|
||||
TYPE("VR256", TYPE_XMM256)
|
||||
TYPE("VR256X", TYPE_XMM256)
|
||||
TYPE("VR512", TYPE_XMM512)
|
||||
TYPE("VK8", TYPE_VK8)
|
||||
TYPE("VK8WM", TYPE_VK8)
|
||||
TYPE("VK16", TYPE_VK16)
|
||||
TYPE("VK16WM", TYPE_VK16)
|
||||
TYPE("GR16_NOAX", TYPE_Rv)
|
||||
TYPE("GR32_NOAX", TYPE_Rv)
|
||||
TYPE("GR64_NOAX", TYPE_R64)
|
||||
TYPE("vx32mem", TYPE_M32)
|
||||
TYPE("vy32mem", TYPE_M32)
|
||||
TYPE("vz32mem", TYPE_M32)
|
||||
TYPE("vx64mem", TYPE_M64)
|
||||
TYPE("vy64mem", TYPE_M64)
|
||||
TYPE("vy64xmem", TYPE_M64)
|
||||
TYPE("vz64mem", TYPE_M64)
|
||||
errs() << "Unhandled type string " << s << "\n";
|
||||
llvm_unreachable("Unhandled type string");
|
||||
}
|
||||
@ -1149,10 +1259,15 @@ OperandEncoding RecognizableInstr::immediateEncodingFromString
|
||||
ENCODING("i8imm", ENCODING_IB)
|
||||
// This is not a typo. Instructions like BLENDVPD put
|
||||
// register IDs in 8-bit immediates nowadays.
|
||||
ENCODING("VR256", ENCODING_IB)
|
||||
ENCODING("VR128", ENCODING_IB)
|
||||
ENCODING("FR32", ENCODING_IB)
|
||||
ENCODING("FR64", ENCODING_IB)
|
||||
ENCODING("VR128", ENCODING_IB)
|
||||
ENCODING("VR256", ENCODING_IB)
|
||||
ENCODING("FR32X", ENCODING_IB)
|
||||
ENCODING("FR64X", ENCODING_IB)
|
||||
ENCODING("VR128X", ENCODING_IB)
|
||||
ENCODING("VR256X", ENCODING_IB)
|
||||
ENCODING("VR512", ENCODING_IB)
|
||||
errs() << "Unhandled immediate encoding " << s << "\n";
|
||||
llvm_unreachable("Unhandled immediate encoding");
|
||||
}
|
||||
@ -1165,10 +1280,17 @@ OperandEncoding RecognizableInstr::rmRegisterEncodingFromString
|
||||
ENCODING("GR64", ENCODING_RM)
|
||||
ENCODING("GR8", ENCODING_RM)
|
||||
ENCODING("VR128", ENCODING_RM)
|
||||
ENCODING("VR128X", ENCODING_RM)
|
||||
ENCODING("FR64", ENCODING_RM)
|
||||
ENCODING("FR32", ENCODING_RM)
|
||||
ENCODING("FR64X", ENCODING_RM)
|
||||
ENCODING("FR32X", ENCODING_RM)
|
||||
ENCODING("VR64", ENCODING_RM)
|
||||
ENCODING("VR256", ENCODING_RM)
|
||||
ENCODING("VR256X", ENCODING_RM)
|
||||
ENCODING("VR512", ENCODING_RM)
|
||||
ENCODING("VK8", ENCODING_RM)
|
||||
ENCODING("VK16", ENCODING_RM)
|
||||
errs() << "Unhandled R/M register encoding " << s << "\n";
|
||||
llvm_unreachable("Unhandled R/M register encoding");
|
||||
}
|
||||
@ -1188,6 +1310,15 @@ OperandEncoding RecognizableInstr::roRegisterEncodingFromString
|
||||
ENCODING("DEBUG_REG", ENCODING_REG)
|
||||
ENCODING("CONTROL_REG", ENCODING_REG)
|
||||
ENCODING("VR256", ENCODING_REG)
|
||||
ENCODING("VR256X", ENCODING_REG)
|
||||
ENCODING("VR128X", ENCODING_REG)
|
||||
ENCODING("FR64X", ENCODING_REG)
|
||||
ENCODING("FR32X", ENCODING_REG)
|
||||
ENCODING("VR512", ENCODING_REG)
|
||||
ENCODING("VK8", ENCODING_REG)
|
||||
ENCODING("VK16", ENCODING_REG)
|
||||
ENCODING("VK8WM", ENCODING_REG)
|
||||
ENCODING("VK16WM", ENCODING_REG)
|
||||
errs() << "Unhandled reg/opcode register encoding " << s << "\n";
|
||||
llvm_unreachable("Unhandled reg/opcode register encoding");
|
||||
}
|
||||
@ -1201,10 +1332,26 @@ OperandEncoding RecognizableInstr::vvvvRegisterEncodingFromString
|
||||
ENCODING("FR64", ENCODING_VVVV)
|
||||
ENCODING("VR128", ENCODING_VVVV)
|
||||
ENCODING("VR256", ENCODING_VVVV)
|
||||
ENCODING("FR32X", ENCODING_VVVV)
|
||||
ENCODING("FR64X", ENCODING_VVVV)
|
||||
ENCODING("VR128X", ENCODING_VVVV)
|
||||
ENCODING("VR256X", ENCODING_VVVV)
|
||||
ENCODING("VR512", ENCODING_VVVV)
|
||||
ENCODING("VK8", ENCODING_VVVV)
|
||||
ENCODING("VK16", ENCODING_VVVV)
|
||||
errs() << "Unhandled VEX.vvvv register encoding " << s << "\n";
|
||||
llvm_unreachable("Unhandled VEX.vvvv register encoding");
|
||||
}
|
||||
|
||||
OperandEncoding RecognizableInstr::writemaskRegisterEncodingFromString
|
||||
(const std::string &s,
|
||||
bool hasOpSizePrefix) {
|
||||
ENCODING("VK8WM", ENCODING_WRITEMASK)
|
||||
ENCODING("VK16WM", ENCODING_WRITEMASK)
|
||||
errs() << "Unhandled mask register encoding " << s << "\n";
|
||||
llvm_unreachable("Unhandled mask register encoding");
|
||||
}
|
||||
|
||||
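writemaskRegisterEncodingFromString above maps the VK8WM/VK16WM operand classes to ENCODING_WRITEMASK, i.e. the operand is a mask register k0-k7 carried in the 3-bit EVEX.aaa field (with aaa = 0 conventionally meaning "no write mask"). A tiny sketch of that field, with a hypothetical register-index argument:

#include <cassert>
#include <cstdint>

// Given the index of a k-register (0-7), return the 3-bit value that would go
// into the EVEX.aaa field.
uint8_t evexAAAField(unsigned KRegIndex) {
  assert(KRegIndex < 8 && "only k0-k7 exist");
  return static_cast<uint8_t>(KRegIndex & 0x7);
}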
OperandEncoding RecognizableInstr::memoryEncodingFromString
|
||||
(const std::string &s,
|
||||
bool hasOpSizePrefix) {
|
||||
@ -1216,10 +1363,12 @@ OperandEncoding RecognizableInstr::memoryEncodingFromString
|
||||
ENCODING("sdmem", ENCODING_RM)
|
||||
ENCODING("f128mem", ENCODING_RM)
|
||||
ENCODING("f256mem", ENCODING_RM)
|
||||
ENCODING("f512mem", ENCODING_RM)
|
||||
ENCODING("f64mem", ENCODING_RM)
|
||||
ENCODING("f32mem", ENCODING_RM)
|
||||
ENCODING("i128mem", ENCODING_RM)
|
||||
ENCODING("i256mem", ENCODING_RM)
|
||||
ENCODING("i512mem", ENCODING_RM)
|
||||
ENCODING("f80mem", ENCODING_RM)
|
||||
ENCODING("lea32mem", ENCODING_RM)
|
||||
ENCODING("lea64_32mem", ENCODING_RM)
|
||||
@ -1230,8 +1379,11 @@ OperandEncoding RecognizableInstr::memoryEncodingFromString
|
||||
ENCODING("opaque512mem", ENCODING_RM)
|
||||
ENCODING("vx32mem", ENCODING_RM)
|
||||
ENCODING("vy32mem", ENCODING_RM)
|
||||
ENCODING("vz32mem", ENCODING_RM)
|
||||
ENCODING("vx64mem", ENCODING_RM)
|
||||
ENCODING("vy64mem", ENCODING_RM)
|
||||
ENCODING("vy64xmem", ENCODING_RM)
|
||||
ENCODING("vz64mem", ENCODING_RM)
|
||||
errs() << "Unhandled memory encoding " << s << "\n";
|
||||
llvm_unreachable("Unhandled memory encoding");
|
||||
}
|
||||
|
@ -66,6 +66,14 @@ private:
|
||||
bool HasMemOp4Prefix;
|
||||
/// The ignoresVEX_L field from the record
|
||||
bool IgnoresVEX_L;
|
||||
/// The hasEVEXPrefix field from the record
|
||||
bool HasEVEXPrefix;
|
||||
/// The hasEVEX_L2Prefix field from the record
|
||||
bool HasEVEX_L2Prefix;
|
||||
/// The hasEVEX_K field from the record
|
||||
bool HasEVEX_K;
|
||||
/// The hasEVEX_B field from the record
|
||||
bool HasEVEX_B;
|
||||
/// The hasLockPrefix field from the record
|
||||
bool HasLockPrefix;
|
||||
/// The isCodeGenOnly field from the record
|
||||
@ -176,6 +184,8 @@ private:
|
||||
bool hasOpSizePrefix);
|
||||
static OperandEncoding vvvvRegisterEncodingFromString(const std::string &s,
|
||||
bool HasOpSizePrefix);
|
||||
static OperandEncoding writemaskRegisterEncodingFromString(const std::string &s,
|
||||
bool HasOpSizePrefix);
|
||||
|
||||
/// handleOperand - Converts a single operand from the LLVM table format to
|
||||
/// the emitted table format, handling any duplicate operands it encounters
|
||||
|