[X86] Add support for {vex2}, {vex3}, and {evex} to the assembler to match gas. Use {evex} to improve one of our 32-bit AVX512 tests.
These pseudo prefixes can be used to force the encoding of an instruction. {vex2} fails if the instruction is not VEX encoded; otherwise it does nothing, since the 2-byte VEX form is already preferred whenever possible. (We might also need to skip the _REV MOV instructions for this, but that is not done yet.) {vex3} forces the 3-byte VEX encoding, or fails if the instruction has no VEX form. {evex} forces the EVEX encoding, or fails if the instruction has no EVEX version.

Differential Revision: https://reviews.llvm.org/D59266

llvm-svn: 358029
Commit c23aaf49ae, parent de9d5755d7.
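For illustration, a minimal sketch of the new syntax (the instruction and operands here are arbitrary examples, not taken from the changed tests; exact encodings are pinned down by the tests further down):

// {vex2}: accepted only if the instruction has a VEX form; the encoding is
// unchanged because the 2-byte VEX prefix is already preferred when it fits.
{vex2} vaddpd %xmm2, %xmm1, %xmm0
// {vex3}: force the 3-byte VEX prefix (0xC4) even where the 2-byte form (0xC5) would do.
{vex3} vaddpd %xmm2, %xmm1, %xmm0
// {evex}: force the EVEX encoding; fails if no EVEX form exists
// (for an xmm vaddpd this assumes AVX512VL is available).
{evex} vaddpd %xmm2, %xmm1, %xmm0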
@@ -71,6 +71,15 @@ class X86AsmParser : public MCTargetAsmParser {
   ParseInstructionInfo *InstInfo;
   bool Code16GCC;
 
+  enum VEXEncoding {
+    VEXEncoding_Default,
+    VEXEncoding_VEX2,
+    VEXEncoding_VEX3,
+    VEXEncoding_EVEX,
+  };
+
+  VEXEncoding ForcedVEXEncoding = VEXEncoding_Default;
+
 private:
   SMLoc consumeToken() {
     MCAsmParser &Parser = getParser();

@@ -858,6 +867,8 @@ private:
   bool parseDirectiveFPOEndProc(SMLoc L);
   bool parseDirectiveFPOData(SMLoc L);
 
+  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
+
   bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
   bool processInstruction(MCInst &Inst, const OperandVector &Ops);

@@ -939,6 +950,9 @@ private:
   /// }
 
 public:
+  enum X86MatchResultTy {
+    Match_Unsupported = FIRST_TARGET_MATCH_RESULT_TY,
+  };
 
   X86AsmParser(const MCSubtargetInfo &sti, MCAsmParser &Parser,
                const MCInstrInfo &mii, const MCTargetOptions &Options)
@@ -2296,6 +2310,48 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                                     SMLoc NameLoc, OperandVector &Operands) {
   MCAsmParser &Parser = getParser();
   InstInfo = &Info;
+  std::string TempName; // Used when we parse a pseudo prefix.
+
+  // Reset the forced VEX encoding.
+  ForcedVEXEncoding = VEXEncoding_Default;
+
+  // Parse pseudo prefixes.
+  while (1) {
+    if (Name == "{") {
+      if (getLexer().isNot(AsmToken::Identifier))
+        return Error(Parser.getTok().getLoc(), "Unexpected token after '{'");
+      std::string Prefix = Parser.getTok().getString().lower();
+      Parser.Lex(); // Eat identifier.
+      if (getLexer().isNot(AsmToken::RCurly))
+        return Error(Parser.getTok().getLoc(), "Expected '}'");
+      Parser.Lex(); // Eat curly.
+
+      if (Prefix == "vex2")
+        ForcedVEXEncoding = VEXEncoding_VEX2;
+      else if (Prefix == "vex3")
+        ForcedVEXEncoding = VEXEncoding_VEX3;
+      else if (Prefix == "evex")
+        ForcedVEXEncoding = VEXEncoding_EVEX;
+      else
+        return Error(NameLoc, "unknown prefix");
+
+      NameLoc = Parser.getTok().getLoc();
+      if (getLexer().is(AsmToken::LCurly)) {
+        Parser.Lex();
+        Name = "{";
+      } else {
+        if (getLexer().isNot(AsmToken::Identifier))
+          return Error(Parser.getTok().getLoc(), "Expected identifier");
+        TempName = Parser.getTok().getString().lower();
+        Name = TempName;
+        Parser.Lex();
+      }
+      continue;
+    }
+
+    break;
+  }
+
   StringRef PatchedName = Name;
 
   if ((Name.equals("jmp") || Name.equals("jc") || Name.equals("jz")) &&
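As a sketch of how the loop above behaves (the misspelled prefix is a hypothetical example, chosen only to reach the "unknown prefix" diagnostic):

// A recognized pseudo prefix is consumed and applied to the mnemonic that follows.
{vex3} vaddpd %xmm2, %xmm1, %xmm0
// Any other identifier inside the braces falls through to the final 'else':
// error: unknown prefix
{vex1} vaddpd %xmm2, %xmm1, %xmm0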
@@ -2943,6 +2999,22 @@ static unsigned getPrefixes(OperandVector &Operands) {
   return Result;
 }
 
+unsigned X86AsmParser::checkTargetMatchPredicate(MCInst &Inst) {
+  unsigned Opc = Inst.getOpcode();
+  const MCInstrDesc &MCID = MII.get(Opc);
+
+  if (ForcedVEXEncoding == VEXEncoding_EVEX &&
+      (MCID.TSFlags & X86II::EncodingMask) != X86II::EVEX)
+    return Match_Unsupported;
+
+  if ((ForcedVEXEncoding == VEXEncoding_VEX2 ||
+       ForcedVEXEncoding == VEXEncoding_VEX3) &&
+      (MCID.TSFlags & X86II::EncodingMask) != X86II::VEX)
+    return Match_Unsupported;
+
+  return Match_Success;
+}
+
 bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
                                               OperandVector &Operands,
                                               MCStreamer &Out,
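In user-visible terms, a forced encoding that the matched instruction form cannot provide is reported as unsupported. The error tests at the bottom of this commit exercise exactly this path, for example:

// vmovdqu32 only has an EVEX form, so a forced VEX encoding cannot be honored.
// error: unsupported instruction
{vex2} vmovdqu32 %xmm0, %xmm0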
@@ -2956,18 +3028,24 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
   MatchFPUWaitAlias(IDLoc, static_cast<X86Operand &>(*Operands[0]), Operands,
                     Out, MatchingInlineAsm);
   X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
-  bool WasOriginallyInvalidOperand = false;
   unsigned Prefixes = getPrefixes(Operands);
 
   MCInst Inst;
 
+  // If VEX3 encoding is forced, we need to pass the USE_VEX3 flag to the
+  // encoder.
+  if (ForcedVEXEncoding == VEXEncoding_VEX3)
+    Prefixes |= X86::IP_USE_VEX3;
+
   if (Prefixes)
     Inst.setFlags(Prefixes);
 
   // First, try a direct match.
   FeatureBitset MissingFeatures;
-  switch (MatchInstruction(Operands, Inst, ErrorInfo, MissingFeatures,
-                           MatchingInlineAsm, isParsingIntelSyntax())) {
+  unsigned OriginalError = MatchInstruction(Operands, Inst, ErrorInfo,
+                                            MissingFeatures, MatchingInlineAsm,
+                                            isParsingIntelSyntax());
+  switch (OriginalError) {
   default: llvm_unreachable("Unexpected match result!");
   case Match_Success:
     if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
@@ -2987,9 +3065,8 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
   case Match_MissingFeature:
     return ErrorMissingFeature(IDLoc, MissingFeatures, MatchingInlineAsm);
   case Match_InvalidOperand:
-    WasOriginallyInvalidOperand = true;
-    break;
   case Match_MnemonicFail:
+  case Match_Unsupported:
     break;
   }
   if (Op.getToken().empty()) {
@@ -3080,11 +3157,15 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
   // If all of the instructions reported an invalid mnemonic, then the original
   // mnemonic was invalid.
   if (std::count(std::begin(Match), std::end(Match), Match_MnemonicFail) == 4) {
-    if (!WasOriginallyInvalidOperand) {
+    if (OriginalError == Match_MnemonicFail)
       return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
                    Op.getLocRange(), MatchingInlineAsm);
-    }
 
+    if (OriginalError == Match_Unsupported)
+      return Error(IDLoc, "unsupported instruction", EmptyRange,
+                   MatchingInlineAsm);
+
+    assert(OriginalError == Match_InvalidOperand && "Unexpected error");
     // Recover location info for the operand if we know which was the problem.
     if (ErrorInfo != ~0ULL) {
       if (ErrorInfo >= Operands.size())
@@ -3103,6 +3184,13 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
                  MatchingInlineAsm);
   }
 
+  // If one instruction matched as unsupported, report this as unsupported.
+  if (std::count(std::begin(Match), std::end(Match),
+                 Match_Unsupported) == 1) {
+    return Error(IDLoc, "unsupported instruction", EmptyRange,
+                 MatchingInlineAsm);
+  }
+
   // If one instruction matched with a missing feature, report this as a
   // missing feature.
   if (std::count(std::begin(Match), std::end(Match),
@@ -3144,6 +3232,11 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
 
   MCInst Inst;
 
+  // If VEX3 encoding is forced, we need to pass the USE_VEX3 flag to the
+  // encoder.
+  if (ForcedVEXEncoding == VEXEncoding_VEX3)
+    Prefixes |= X86::IP_USE_VEX3;
+
   if (Prefixes)
     Inst.setFlags(Prefixes);
 
@@ -3292,6 +3385,13 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
                  UnsizedMemOp->getLocRange());
   }
 
+  // If one instruction matched as unsupported, report this as unsupported.
+  if (std::count(std::begin(Match), std::end(Match),
+                 Match_Unsupported) == 1) {
+    return Error(IDLoc, "unsupported instruction", EmptyRange,
+                 MatchingInlineAsm);
+  }
+
   // If one instruction matched with a missing feature, report this as a
   // missing feature.
   if (std::count(std::begin(Match), std::end(Match),
@@ -60,7 +60,8 @@ namespace X86 {
     IP_HAS_REPEAT_NE = 4,
     IP_HAS_REPEAT = 8,
     IP_HAS_LOCK = 16,
-    IP_HAS_NOTRACK = 32
+    IP_HAS_NOTRACK = 32,
+    IP_USE_VEX3 = 64,
   };
 
   enum OperandType : unsigned {
@@ -979,7 +979,8 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
   uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
 
   // Can we use the 2 byte VEX prefix?
-  if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
+  if (!(MI.getFlags() & X86::IP_USE_VEX3) &&
+      Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
     EmitByte(0xC5, CurByte, OS);
     EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
     return;
@@ -4,6 +4,10 @@
 // CHECK: encoding: [0xc5,0xf1,0x58,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
 vaddpd -485498096(%edx,%eax,4), %xmm1, %xmm1
 
+// CHECK: vaddpd -485498096(%edx,%eax,4), %xmm1, %xmm1
+// CHECK: encoding: [0xc4,0xe1,0x71,0x58,0x8c,0x82,0x10,0xe3,0x0f,0xe3]
+{vex3} vaddpd -485498096(%edx,%eax,4), %xmm1, %xmm1
+
 // CHECK: vaddpd 485498096(%edx,%eax,4), %xmm1, %xmm1
 // CHECK: encoding: [0xc5,0xf1,0x58,0x8c,0x82,0xf0,0x1c,0xf0,0x1c]
 vaddpd 485498096(%edx,%eax,4), %xmm1, %xmm1
(File diff suppressed because it is too large.)
@@ -167,3 +167,15 @@ cltq
 
 // 32: error: instruction requires: 64-bit mode
 cmpxchg16b (%eax)
+
+// 32: error: unsupported instruction
+// 64: error: unsupported instruction
+{vex2} vmovdqu32 %xmm0, %xmm0
+
+// 32: error: unsupported instruction
+// 64: error: unsupported instruction
+{vex3} vmovdqu32 %xmm0, %xmm0
+
+// 32: error: unsupported instruction
+// 64: error: unsupported instruction
+{evex} vmovdqu %xmm0, %xmm0
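For contrast with the failures above, a sketch of the success path (assuming a target with AVX enabled, unlike the error test, so vaddpd has a VEX form): {vex2} leaves the preferred 2-byte 0xC5 prefix in place, while {vex3} switches the same instruction to the 3-byte 0xC4 form, as the encoding test earlier in this commit verifies.

// Both requests can be satisfied because vaddpd is VEX encoded.
{vex2} vaddpd %xmm1, %xmm1, %xmm1
{vex3} vaddpd %xmm1, %xmm1, %xmm1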