[AMDGPU] Assembler: rework parsing of optional operands.

Summary:
Rework how optional operands are parsed: all optional operands now go through a single parsing method, parseOptionalOperand().
Default values are no longer added to the OperandVector.
Get rid of WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS.
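For illustration only (not part of the patch; the operand values below are arbitrary), an instruction's
optional operands may be omitted or spelled out explicitly, and both forms now take the same path:

ds_add_u32 v2, v4                  // no optional operands present
ds_add_u32 v2, v4 offset:16 gds    // offset and gds handled by parseOptionalOperand()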

Reviewers: tstellarAMD, vpykhtin, artem.tamazov, nhaustov

Subscribers: arsenm, kzhuravl

Differential Revision: http://reviews.llvm.org/D20527

llvm-svn: 270556
Sam Kolton 2016-05-24 12:38:33 +00:00
parent 068739d10c
commit 5c1a0d6afe
3 changed files with 88 additions and 284 deletions


@@ -87,17 +87,6 @@ const char* const OpGsSymbolic[] = {
using namespace llvm;
// In some cases (e.g. buffer atomic instructions) MatchOperandParserImpl()
// may invoke tryCustomParseOperand() multiple times with the same MCK value.
// That leads to the same "default" operand being added multiple times in a
// row, which is wrong. The workaround adds only the 1st default operand and
// "dummy" operands for the rest. The reason for the dummies is that if we
// simply skipped adding an operand, the parser would get stuck in an endless
// loop. Dummies must be removed prior to matching & emitting MCInsts.
//
// Comment out this macro to disable the workaround.
#define WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
namespace {
struct OptionalOperand;
@@ -110,9 +99,6 @@ class AMDGPUOperand : public MCParsedAsmOperand {
Immediate,
Register,
Expression
#ifdef WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
,Dummy
#endif
} Kind;
SMLoc StartLoc, EndLoc;
@@ -239,12 +225,6 @@ public:
}
}
#ifdef WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
bool isDummy() const {
return Kind == Dummy;
}
#endif
bool isToken() const override {
return Kind == Token;
}
@@ -481,11 +461,6 @@ public:
case Expression:
OS << "<expr " << *Expr << '>';
break;
#ifdef WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
case Dummy:
OS << "<dummy>";
break;
#endif
}
}
@@ -536,15 +511,6 @@ public:
return Op;
}
#ifdef WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
static AMDGPUOperand::Ptr CreateDummy(SMLoc S) {
auto Op = llvm::make_unique<AMDGPUOperand>(Dummy);
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
#endif
bool isSWaitCnt() const;
bool isHwreg() const;
bool isSendMsg() const;
@@ -642,6 +608,8 @@ public:
std::unique_ptr<AMDGPUOperand> parseRegister();
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
unsigned checkTargetMatchPredicate(MCInst &Inst) override;
unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
unsigned Kind) override;
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands, MCStreamer &Out,
uint64_t &ErrorInfo,
@@ -651,29 +619,19 @@ public:
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) override;
OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
int64_t Default = 0, bool AddDefault = false);
OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
OperandVector &Operands,
enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
int64_t Default = 0, bool AddDefault = false,
bool (*ConvertResult)(int64_t&) = 0);
OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
enum AMDGPUOperand::ImmTy ImmTy =
AMDGPUOperand::ImmTyNone,
bool AddDefault = false);
OperandMatchResultTy parseOptionalOps(
const ArrayRef<OptionalOperand> &OptionalOps,
OperandVector &Operands);
enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
OperandMatchResultTy parseStringWithPrefix(const char *Prefix, StringRef &Value);
OperandMatchResultTy parseImm(OperandVector &Operands);
OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
OperandMatchResultTy parseRegOrImmWithInputMods(OperandVector &Operands);
OperandMatchResultTy parseOptionalOperand(OperandVector &Operands, const OptionalOperand& Op, bool AddDefault);
OperandMatchResultTy parseAMDGPUOperand(OperandVector &Operands, StringRef Name);
void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
void cvtDS(MCInst &Inst, const OperandVector &Operands);
@@ -681,14 +639,19 @@ public:
OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
bool parseHwregOperand(int64_t &HwRegCode, int64_t &Offset, int64_t &Width, bool &IsIdentifier);
OperandMatchResultTy parseHwreg(OperandVector &Operands);
private:
struct OperandInfoTy {
int64_t Id;
bool IsSymbolic;
OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
};
bool parseSendMsg(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
public:
OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
AMDGPUOperand::Ptr defaultHwreg() const;
@@ -701,30 +664,6 @@ public:
AMDGPUOperand::Ptr defaultSLC() const;
AMDGPUOperand::Ptr defaultTFE() const;
OperandMatchResultTy parseOModSI(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "omod"); }
OperandMatchResultTy parseClampSI(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "clamp"); }
OperandMatchResultTy parseSMRDOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "smrd_offset"); }
OperandMatchResultTy parseSMRDLiteralOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "smrd_literal_offset"); }
OperandMatchResultTy parseDPPCtrl(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "dpp_ctrl"); }
OperandMatchResultTy parseRowMask(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "row_mask"); }
OperandMatchResultTy parseBankMask(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "bank_mask"); }
OperandMatchResultTy parseBoundCtrl(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "bound_ctrl"); }
OperandMatchResultTy parseOffen(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offen"); }
OperandMatchResultTy parseIdxen(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "idxen"); }
OperandMatchResultTy parseAddr64(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "addr64"); }
OperandMatchResultTy parseOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offset"); }
OperandMatchResultTy parseOffset0(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offset0"); }
OperandMatchResultTy parseOffset1(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offset1"); }
OperandMatchResultTy parseGLC(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "glc"); }
OperandMatchResultTy parseSLC(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "slc"); }
OperandMatchResultTy parseTFE(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "tfe"); }
OperandMatchResultTy parseGDS(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "gds"); }
OperandMatchResultTy parseDMask(OperandVector &Operands);
OperandMatchResultTy parseUNorm(OperandVector &Operands);
OperandMatchResultTy parseDA(OperandVector &Operands);
OperandMatchResultTy parseR128(OperandVector &Operands);
OperandMatchResultTy parseLWE(OperandVector &Operands);
AMDGPUOperand::Ptr defaultDMask() const;
AMDGPUOperand::Ptr defaultUNorm() const;
AMDGPUOperand::Ptr defaultDA() const;
@@ -745,7 +684,7 @@ public:
void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands, bool AddDefault);
OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
AMDGPUOperand::Ptr defaultRowMask() const;
AMDGPUOperand::Ptr defaultBankMask() const;
AMDGPUOperand::Ptr defaultBoundCtrl() const;
@@ -761,7 +700,6 @@ struct OptionalOperand {
const char *Name;
AMDGPUOperand::ImmTy Type;
bool IsBit;
int64_t Default;
bool (*ConvertResult)(int64_t&);
};
@@ -1104,7 +1042,6 @@ unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
return Match_Success;
}
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
@@ -1112,17 +1049,6 @@ bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
bool MatchingInlineAsm) {
MCInst Inst;
#ifdef WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
// Remove dummies prior to matching. Iterate backwards because vector::erase()
// invalidates all iterators that refer past the erase point.
for (auto I = Operands.rbegin(), E = Operands.rend(); I != E; ) {
auto X = I++;
if (static_cast<AMDGPUOperand*>(X->get())->isDummy()) {
Operands.erase(X.base() -1);
}
}
#endif
switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
default: break;
case Match_Success:
@@ -1491,24 +1417,12 @@ bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
//===----------------------------------------------------------------------===//
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
int64_t Default, bool AddDefault) {
// We are at the end of the statement, and this is a default argument, so
// use a default value.
if (getLexer().is(AsmToken::EndOfStatement)) {
Int = Default;
return MatchOperand_Success;
}
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
switch(getLexer().getKind()) {
default: return MatchOperand_NoMatch;
case AsmToken::Identifier: {
StringRef Name = Parser.getTok().getString();
if (!Name.equals(Prefix)) {
if (AddDefault) {
Int = Default;
return MatchOperand_Success;
}
return MatchOperand_NoMatch;
}
@@ -1531,13 +1445,12 @@ AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
enum AMDGPUOperand::ImmTy ImmTy,
int64_t Default, bool AddDefault,
bool (*ConvertResult)(int64_t&)) {
SMLoc S = Parser.getTok().getLoc();
int64_t Value = 0;
AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value, Default, AddDefault);
AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
if (Res != MatchOperand_Success)
return Res;
@@ -1546,32 +1459,12 @@ AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
}
Operands.push_back(AMDGPUOperand::CreateImm(Value, S, ImmTy));
#ifdef WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
if (Value == Default && AddDefault) {
// Search backwards through previously added operands (skipping the one just
// added) for the first non-dummy operand. If it has the same type, replace
// the just-added default operand with a dummy.
for (auto I = Operands.rbegin(), E = Operands.rend(); I != E; ++I) {
if (I == Operands.rbegin())
continue;
if (static_cast<AMDGPUOperand*>(I->get())->isDummy())
continue;
if (static_cast<AMDGPUOperand*>(I->get())->isImmTy(ImmTy)) {
Operands.pop_back();
Operands.push_back(AMDGPUOperand::CreateDummy(S)); // invalidates iterators
break;
}
}
}
#endif
return MatchOperand_Success;
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
enum AMDGPUOperand::ImmTy ImmTy,
bool AddDefault) {
enum AMDGPUOperand::ImmTy ImmTy) {
int64_t Bit = 0;
SMLoc S = Parser.getTok().getLoc();
@@ -1588,11 +1481,7 @@ AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Bit = 0;
Parser.Lex();
} else {
if (AddDefault) {
Bit = 0;
} else {
return MatchOperand_NoMatch;
}
return MatchOperand_NoMatch;
}
break;
}
@@ -1619,56 +1508,6 @@ void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
}
}
static bool operandsHasOptionalOp(const OperandVector &Operands,
const OptionalOperand &OOp) {
for (unsigned i = 0; i < Operands.size(); i++) {
const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
(ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
return true;
}
return false;
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
for (const OptionalOperand &Op : OptionalOps) {
if (operandsHasOptionalOp(Operands, Op))
continue;
AMDGPUAsmParser::OperandMatchResultTy Res;
int64_t Value;
if (Op.IsBit) {
Res = parseNamedBit(Op.Name, Operands, Op.Type);
if (Res == MatchOperand_NoMatch)
continue;
return Res;
}
Res = parseIntWithPrefix(Op.Name, Value, Op.Default);
if (Res == MatchOperand_NoMatch)
continue;
if (Res != MatchOperand_Success)
return Res;
bool DefaultValue = (Value == Op.Default);
if (Op.ConvertResult && !Op.ConvertResult(Value)) {
return MatchOperand_ParseFail;
}
if (!DefaultValue) {
Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
}
return MatchOperand_Success;
}
return MatchOperand_NoMatch;
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseStringWithPrefix(const char *Prefix, StringRef &Value) {
if (getLexer().isNot(AsmToken::Identifier)) {
@@ -1896,7 +1735,7 @@ AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
switch(getLexer().getKind()) {
default: return MatchOperand_ParseFail;
default: return MatchOperand_NoMatch;
case AsmToken::Integer:
// The operand can be an integer value.
if (getParser().parseAbsoluteExpression(Imm16Val))
@@ -2154,10 +1993,6 @@ AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
}
}
//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//
@@ -2232,31 +2067,6 @@ void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
// mimg
//===----------------------------------------------------------------------===//
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDA(OperandVector &Operands) {
return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
unsigned I = 1;
const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
@@ -2409,74 +2219,48 @@ static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
}
// Note: the order in this table matches the order of operands in AsmString.
static const OptionalOperand AMDGPUOperandTable[] = {
{"offen", AMDGPUOperand::ImmTyOffen, true, 0, nullptr},
{"offset0", AMDGPUOperand::ImmTyOffset0, false, 0, nullptr},
{"offset1", AMDGPUOperand::ImmTyOffset1, false, 0, nullptr},
{"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr},
{"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
{"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
{"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
{"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr},
{"clamp", AMDGPUOperand::ImmTyClampSI, true, 0, nullptr},
{"omod", AMDGPUOperand::ImmTyOModSI, false, 1, ConvertOmodMul},
{"unorm", AMDGPUOperand::ImmTyUNorm, true, 0, nullptr},
{"da", AMDGPUOperand::ImmTyDA, true, 0, nullptr},
{"r128", AMDGPUOperand::ImmTyR128, true, 0, nullptr},
{"lwe", AMDGPUOperand::ImmTyLWE, true, 0, nullptr},
{"dmask", AMDGPUOperand::ImmTyDMask, false, 0, nullptr},
{"dpp_ctrl", AMDGPUOperand::ImmTyDppCtrl, false, -1, nullptr},
{"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, 0xf, nullptr},
{"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, 0xf, nullptr},
{"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, -1, ConvertBoundCtrl},
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
{"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
{"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
{"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
{"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
{"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
{"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
{"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
{"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
{"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
{"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
{"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
{"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
{"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
{"da", AMDGPUOperand::ImmTyDA, true, nullptr},
{"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
{"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
{"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
{"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
{"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
{"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
{"sdwa_sel", AMDGPUOperand::ImmTySdwaSel, false, nullptr},
{"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
};
AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands, const OptionalOperand& Op, bool AddDefault)
{
if (Op.IsBit) {
return parseNamedBit(Op.Name, Operands, Op.Type, AddDefault);
} else if (Op.Type == AMDGPUOperand::ImmTyDppCtrl) {
return parseDPPCtrlOps(Operands, AddDefault);
} else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
return parseOModOperand(Operands);
} else {
return parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.Default, AddDefault, Op.ConvertResult);
}
}
AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseAMDGPUOperand(OperandVector &Operands, StringRef Name)
{
StringRef Tok;
if (getLexer().is(AsmToken::Identifier)) {
Tok = Parser.getTok().getString();
}
bool optional = false;
if (Tok == "mul" || Tok == "div") { optional = true; }
for (const OptionalOperand &Op1 : AMDGPUOperandTable) {
if (Op1.Name == Tok) { optional = true; break; }
}
// Attempt to parse the current optional operand.
for (const OptionalOperand &Op : AMDGPUOperandTable) {
// TODO: For now, omod is handled separately because
// token name does not match name in table.
bool parseThis =
Name == "" ||
(Op.Name == Name) ||
(Name == "omod" && Op.Type == AMDGPUOperand::ImmTyOModSI);
if (parseThis && Tok == Name) {
// Exactly the expected token for optional operand.
// Parse it and add operand normally.
return parseOptionalOperand(Operands, Op, true);
} else if (parseThis) {
// Token for optional operand which is later in the table
// than the one we expect. If needed, add default value
// for the operand we expect, do not consume anything
// and return MatchOperand_NoMatch. Parsing will continue.
return parseOptionalOperand(Operands, Op, optional);
} else if (Op.Name == Tok) {
// This looks like optional operand, but we do not expect it.
// This is the case when AsmString has token in it.
return MatchOperand_NoMatch;
AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
OperandMatchResultTy res;
for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
// try to parse any optional operand here
if (Op.IsBit) {
res = parseNamedBit(Op.Name, Operands, Op.Type);
} else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
res = parseOModOperand(Operands);
} else if (Op.Type == AMDGPUOperand::ImmTySdwaSel) {
res = parseSDWASel(Operands);
} else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
res = parseSDWADstUnused(Operands);
} else {
res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
}
if (res != MatchOperand_NoMatch) {
return res;
}
}
return MatchOperand_NoMatch;
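As a rough illustration of what the loop above accepts (these lines mirror the style of the existing MC
tests rather than coming from this patch; the exact operand values are placeholders), the same code now
picks up whichever optional operands actually appear:

ds_write2_b32 v2, v4, v6 offset0:4 offset1:8
buffer_load_dword v1, v2, s[4:7], s1 offen offset:4 glc slc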
@@ -2486,9 +2270,9 @@ AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandV
{
StringRef Name = Parser.getTok().getString();
if (Name == "mul") {
return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, 0, false, ConvertOmodMul);
return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
} else if (Name == "div") {
return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, 0, false, ConvertOmodDiv);
return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
} else {
return MatchOperand_NoMatch;
}
@@ -2569,7 +2353,7 @@ bool AMDGPUOperand::isDPPCtrl() const {
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrlOps(OperandVector &Operands, bool AddDefault) {
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
StringRef Prefix;
int64_t Int;
@@ -2595,12 +2379,7 @@ AMDGPUAsmParser::parseDPPCtrlOps(OperandVector &Operands, bool AddDefault) {
&& Prefix != "wave_shr"
&& Prefix != "wave_ror"
&& Prefix != "row_bcast") {
if (AddDefault) {
Operands.push_back(AMDGPUOperand::CreateImm(0, S, AMDGPUOperand::ImmTyDppCtrl));
return MatchOperand_Success;
} else {
return MatchOperand_NoMatch;
}
return MatchOperand_NoMatch;
}
Parser.Lex();
@@ -2820,3 +2599,28 @@ extern "C" void LLVMInitializeAMDGPUAsmParser() {
#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"
// This function should be defined after the auto-generated include so that
// the MatchClassKind enum is defined.
unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
unsigned Kind) {
// Tokens like "glc" would be parsed as immediate operands in ParseOperand().
// But MatchInstructionImpl() expects to meet token and fails to validate
// operand. This method checks if we are given immediate operand but expect to
// get corresponding token.
AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
switch (Kind) {
case MCK_addr64:
return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
case MCK_gds:
return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
case MCK_glc:
return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
case MCK_idxen:
return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
case MCK_offen:
return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
default: return Match_InvalidOperand;
}
}
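For example (hedged: the lines below are patterned after the existing mubuf tests, not taken from this
change), the buffer atomic forms are where words like "addr64" and "glc" must be reconciled with the
immediate operands the parser now produces:

buffer_atomic_add v1, v[2:3], s[4:7], s1 addr64        // 'addr64' arrives as an immediate operand
buffer_atomic_add v1, v[2:3], s[4:7], s1 addr64 glc    // 'glc' selects the return variant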


@@ -486,7 +486,7 @@ include "VIInstrFormats.td"
class NamedMatchClass<string CName, bit Optional = 1> : AsmOperandClass {
let Name = "Imm"#CName;
let PredicateMethod = "is"#CName;
let ParserMethod = "parse"#CName;
let ParserMethod = !if(Optional, "parseOptionalOperand", "parse"#CName);
let RenderMethod = "addImmOperands";
let IsOptional = Optional;
let DefaultMethod = "default"#CName;
@@ -552,7 +552,7 @@ def src0_sel : NamedOperandU32<"SDWASrc0Sel", sdwa_sel>;
def src1_sel : NamedOperandU32<"SDWASrc1Sel", sdwa_sel>;
def dst_unused : NamedOperandU32<"SDWADstUnused", NamedMatchClass<"SDWADstUnused">>;
def hwreg : NamedOperandU16<"Hwreg", NamedMatchClass<"Hwreg">>;
def hwreg : NamedOperandU16<"Hwreg", NamedMatchClass<"Hwreg", 0>>;
} // End OperandType = "OPERAND_IMMEDIATE"
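Making Hwreg non-optional matters because s_setreg*/s_getreg* require the hwreg(...) operand. A sketch
(the symbolic register names and field values are recalled from the ISA documentation, not from this
patch; treat them as placeholders):

s_setreg_b32 hwreg(HW_REG_HW_ID), s2
s_getreg_b32 s2, hwreg(HW_REG_LDS_ALLOC, 8, 23)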


@@ -6,11 +6,11 @@
ds_add_u32 v2, v4 offset:1000000000
// offset0 twice
// CHECK: error: not a valid operand.
// CHECK: error: invalid operand for instruction
ds_write2_b32 v2, v4, v6 offset0:4 offset0:8
// offset1 twice
// CHECK: error: not a valid operand.
// CHECK: error: invalid operand for instruction
ds_write2_b32 v2, v4, v6 offset1:4 offset1:8
// offset0 too big