[TableGen] AsmMatcher: support for default values for optional operands
Summary:
This change allows specifying a "DefaultMethod" for an optional operand (IsOptional = 1) in AsmOperandClass; the method returns the default value for that operand. convertToMCInst uses it to fill in default values in the MCInst. Previously, setting a default value for an operand required writing a custom converter method; with this change, the standard converters can be used even when optional operands are present.

Reviewers: tstellarAMD, ab, craig.topper

Subscribers: jyknight, dsanders, arsenm, nhaustov, llvm-commits

Differential Revision: http://reviews.llvm.org/D18242

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@268726 91177308-0d34-0410-b5e6-96231b3b80d8
parent ffe6f6b6be
commit f117ec1a64
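Editor's note: the sketch below is illustrative only and is not part of the commit. It shows, with hypothetical names, how a target could opt an optional operand class into the new hook; the generated converter calls the DefaultMethod whenever the operand was omitted in the assembly source.

def HypotheticalGLC : AsmOperandClass {
  let Name = "GLC";                  // hypothetical operand class name
  let ParserMethod = "parseGLC";     // assumed to exist on the target asm parser
  let RenderMethod = "addImmOperands";
  let IsOptional = 1;                // operand may be omitted in the asm string
  let DefaultMethod = "defaultGLC";  // returns the operand to render when omitted;
                                     // if unset, falls back to "defaultGLCOperands"
}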
@@ -619,6 +619,13 @@ class AsmOperandClass {
  ///
  /// Optional arguments must be at the end of the operand list.
  bit IsOptional = 0;

  /// The name of the method on the target specific asm parser that returns the
  /// default operand for this optional operand. This method is only used if
  /// IsOptional == 1. If not set, this will default to "defaultFooOperands",
  /// where Foo is the AsmOperandClass name. The method signature should be:
  ///   std::unique_ptr<MCParsedAsmOperand> defaultFooOperands() const;
  string DefaultMethod = ?;
}

def ImmAsmOperand : AsmOperandClass {
@@ -15,6 +15,7 @@
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
@@ -59,6 +60,8 @@ public:

  MCContext *Ctx;

  typedef std::unique_ptr<AMDGPUOperand> Ptr;

  enum ImmTy {
    ImmTyNone,
    ImmTyGDS,
@@ -390,9 +393,9 @@ public:
    }
  }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
  static AMDGPUOperand::Ptr CreateImm(int64_t Val, SMLoc Loc,
                                      enum ImmTy Type = ImmTyNone,
                                      bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
@@ -403,8 +406,8 @@ public:
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                                    bool HasExplicitEncodingSize = true) {
  static AMDGPUOperand::Ptr CreateToken(StringRef Str, SMLoc Loc,
                                        bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
@@ -413,11 +416,11 @@ public:
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI,
                                                  const MCSubtargetInfo *STI,
                                                  bool ForceVOP3) {
  static AMDGPUOperand::Ptr CreateReg(unsigned RegNo, SMLoc S,
                                      SMLoc E,
                                      const MCRegisterInfo *TRI,
                                      const MCSubtargetInfo *STI,
                                      bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
@@ -429,7 +432,7 @@ public:
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
  static AMDGPUOperand::Ptr CreateExpr(const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
@@ -568,11 +571,15 @@ public:
  bool parseHwregOperand(int64_t &HwRegCode, int64_t &Offset, int64_t &Width, bool &IsIdentifier);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultHwreg() const;

  void cvtFlat(MCInst &Inst, const OperandVector &Operands);
  void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  AMDGPUOperand::Ptr defaultMubufOffset() const;
  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;
  AMDGPUOperand::Ptr defaultTFE() const;

  OperandMatchResultTy parseOModSI(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "omod"); }
  OperandMatchResultTy parseClampSI(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "clamp"); }
  OperandMatchResultTy parseSMRDOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "smrd_offset"); }
@@ -597,25 +604,36 @@ public:
  OperandMatchResultTy parseDA(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);
  OperandMatchResultTy parseLWE(OperandVector &Operands);

  AMDGPUOperand::Ptr defaultDMask() const;
  AMDGPUOperand::Ptr defaultUNorm() const;
  AMDGPUOperand::Ptr defaultDA() const;
  AMDGPUOperand::Ptr defaultR128() const;
  AMDGPUOperand::Ptr defaultLWE() const;
  AMDGPUOperand::Ptr defaultSMRDOffset() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;

  AMDGPUOperand::Ptr defaultClampSI() const;
  AMDGPUOperand::Ptr defaultOModSI() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands, bool AddDefault);
  void cvtDPP_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtDPP(MCInst &Inst, const OperandVector &Operands, bool HasMods);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseSDWASel(OperandVector &Operands);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultSDWASel() const;
  AMDGPUOperand::Ptr defaultSDWADstUnused() const;
};

struct OptionalOperand {
@@ -1135,21 +1153,6 @@ bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
  return true;
}

static bool operandsHaveModifiers(const OperandVector &Operands) {

  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOModSI ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClampSI))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

@@ -1746,6 +1749,10 @@ bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultHwreg() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyHwreg);
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//
@@ -1777,52 +1784,6 @@ AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
// flat
//===----------------------------------------------------------------------===//

void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}


void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle 'glc' token for flat atomics.
    if (Op.isToken()) {
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//
@@ -1831,6 +1792,22 @@ bool AMDGPUOperand::isMubufOffset() const {
  return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultMubufOffset() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySLC);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}

void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
@@ -1896,6 +1873,26 @@ AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
  return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDMask);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDA);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyR128);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyLWE);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//
@@ -1913,6 +1910,14 @@ bool AMDGPUOperand::isSMRDLiteralOffset() const {
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//
@@ -2036,6 +2041,14 @@ AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandV
  }
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultClampSI() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyClampSI);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOModSI() const {
  return AMDGPUOperand::CreateImm(1, SMLoc(), AMDGPUOperand::ImmTyOModSI);
}

void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
@@ -2055,18 +2068,6 @@ void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
  }
}

void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
  if (operandsHaveModifiers(Operands)) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3(Inst, Operands);
}

void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1;
@@ -2300,16 +2301,19 @@ AMDGPUAsmParser::parseDPPCtrlOps(OperandVector &Operands, bool AddDefault) {
  return MatchOperand_Success;
}

void AMDGPUAsmParser::cvtDPP_mod(MCInst &Inst, const OperandVector &Operands) {
  cvtDPP(Inst, Operands, true);
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
  return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
}

void AMDGPUAsmParser::cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands) {
  cvtDPP(Inst, Operands, false);
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
  return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
}

void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands,
                             bool HasMods) {
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
}

void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1;
@@ -2321,9 +2325,8 @@ void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands,
  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (!HasMods && Op.isReg()) {
      Op.addRegOperands(Inst, 1);
    } else if (HasMods && Op.isRegOrImmWithInputMods()) {
    if (Op.isRegOrImmWithInputMods()) {
      // We convert only instructions with modifiers
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isDPPCtrl()) {
      Op.addImmOperands(Inst, 1);
@@ -2415,6 +2418,14 @@ AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
  return MatchOperand_Success;
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWASel() const {
  return AMDGPUOperand::CreateImm(6, SMLoc(), AMDGPUOperand::ImmTySdwaSel);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWADstUnused() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySdwaDstUnused);
}


/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
@@ -147,8 +147,9 @@ class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern, bit HasMods

  let AsmMatchConverter =
    !if(!eq(VOP3Only,1),
        "cvtVOP3_only",
        !if(!eq(HasMods,1), "cvtVOP3_2_mod", "cvtVOP3_2_nomod"));
        "cvtVOP3",
        !if(!eq(HasMods,1), "cvtVOP3_2_mod", ""));

  let isCodeGenOnly = 0;

  int Size = 8;
@@ -710,7 +711,6 @@ class FLAT <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :

  let UseNamedOperandTable = 1;
  let hasSideEffects = 0;
  let AsmMatchConverter = "cvtFlat";
  let SchedRW = [WriteVMEM];
}

@@ -467,6 +467,7 @@ class NamedMatchClass<string CName, bit Optional = 1> : AsmOperandClass {
  let ParserMethod = "parse"#CName;
  let RenderMethod = "addImmOperands";
  let IsOptional = Optional;
  let DefaultMethod = "default"#CName;
}

def sdwa_sel : NamedMatchClass<"SDWASel">;
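Editorial note, not part of the patch: with the string concatenations above, the def shown in this hunk resolves its hooks directly from the class-name argument, so (for the fields visible here) it is equivalent to:

// Illustrative expansion of NamedMatchClass<"SDWASel">:
//   let ParserMethod  = "parseSDWASel";
//   let RenderMethod  = "addImmOperands";
//   let IsOptional    = 1;                  // Optional defaults to 1
//   let DefaultMethod = "defaultSDWASel";   // implemented above as AMDGPUAsmParser::defaultSDWASel()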
@@ -3118,7 +3119,7 @@ multiclass FLAT_ATOMIC <flat op, string asm_name, RegisterClass vdst_rc,
                        dag outs_noret = (outs),
                        string asm_noret = asm_name#" $addr, $data"#"$slc"#"$tfe"> {

  let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0, AsmMatchConverter = "cvtFlatAtomic" in {
  let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0 in {
    def "" : FLAT_Pseudo <NAME, outs_noret,
                          (ins VReg_64:$addr, data_rc:$data,
                               slc:$slc, tfe:$tfe), []>,
@@ -3135,7 +3136,7 @@ multiclass FLAT_ATOMIC <flat op, string asm_name, RegisterClass vdst_rc,
                        asm_noret>;
  }

  let glc = 1, hasPostISelHook = 1, AsmMatchConverter = "cvtFlatAtomic" in {
  let glc = 1, hasPostISelHook = 1 in {
    defm _RTN : FLAT_AtomicRet_m <op, (outs vdst_rc:$vdst),
                                  (ins VReg_64:$addr, data_rc:$data, slc:$slc,
                                       tfe:$tfe),
@@ -182,7 +182,7 @@ class VOP_DPP <dag outs, dag ins, string asm, list<dag> pattern, bit HasMods = 0
  let DPP = 1;
  let Size = 8;

  let AsmMatchConverter = !if(!eq(HasMods,1), "cvtDPP_mod", "cvtDPP_nomod");
  let AsmMatchConverter = !if(!eq(HasMods,1), "cvtDPP", "");
}

class VOP_DPPe : Enc64 {
@@ -203,6 +203,10 @@ struct ClassInfo {
  /// Is this operand optional and not always required.
  bool IsOptional;

  /// DefaultMethod - The name of the method that returns the default operand
  /// for optional operand
  std::string DefaultMethod;

public:
  /// isRegisterClass() - Check if this is a register class.
  bool isRegisterClass() const {
@@ -768,6 +772,12 @@ public:
  RecordKeeper &getRecords() const {
    return Records;
  }

  bool hasOptionalOperands() const {
    return std::find_if(Classes.begin(), Classes.end(),
                        [](const ClassInfo& Class){ return Class.IsOptional; })
           != Classes.end();
  }
};

} // end anonymous namespace
@@ -1119,6 +1129,7 @@ ClassInfo *AsmMatcherInfo::getTokenClass(StringRef Token) {
    Entry->ParserMethod = "";
    Entry->DiagnosticType = "";
    Entry->IsOptional = false;
    Entry->DefaultMethod = "<invalid>";
  }

  return Entry;
@@ -1254,6 +1265,7 @@ buildRegisterClasses(SmallPtrSetImpl<Record*> &SingletonRegisters) {
    // FIXME: diagnostic type.
    CI->DiagnosticType = "";
    CI->IsOptional = false;
    CI->DefaultMethod = ""; // unused
    RegisterSetClasses.insert(std::make_pair(RS, CI));
    ++Index;
  }
@@ -1372,6 +1384,15 @@ void AsmMatcherInfo::buildOperandClasses() {
    if (BitInit *BI = dyn_cast<BitInit>(IsOptional))
      CI->IsOptional = BI->getValue();

    // Get or construct the default method name.
    Init *DMName = Rec->getValueInit("DefaultMethod");
    if (StringInit *SI = dyn_cast<StringInit>(DMName)) {
      CI->DefaultMethod = SI->getValue();
    } else {
      assert(isa<UnsetInit>(DMName) && "Unexpected DefaultMethod field!");
      CI->DefaultMethod = "default" + CI->ClassName + "Operands";
    }

    ++Index;
  }
}
@@ -1808,7 +1829,8 @@ static unsigned getConverterOperandID(const std::string &Name,

static void emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
                             std::vector<std::unique_ptr<MatchableInfo>> &Infos,
                             bool HasMnemonicFirst, raw_ostream &OS) {
                             bool HasMnemonicFirst, bool HasOptionalOperands,
                             raw_ostream &OS) {
  SmallSetVector<std::string, 16> OperandConversionKinds;
  SmallSetVector<std::string, 16> InstructionConversionKinds;
  std::vector<std::vector<uint8_t> > ConversionTable;
@@ -1823,24 +1845,40 @@ static void emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
  std::string ConvertFnBody;
  raw_string_ostream CvtOS(ConvertFnBody);
  // Start the unified conversion function.
  CvtOS << "void " << Target.getName() << ClassName << "::\n"
        << "convertToMCInst(unsigned Kind, MCInst &Inst, "
        << "unsigned Opcode,\n"
        << " const OperandVector"
        << " &Operands) {\n"
        << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n"
        << " const uint8_t *Converter = ConversionTable[Kind];\n"
        << " Inst.setOpcode(Opcode);\n"
        << " for (const uint8_t *p = Converter; *p; p+= 2) {\n"
        << " switch (*p) {\n"
        << " default: llvm_unreachable(\"invalid conversion entry!\");\n"
        << " case CVT_Reg:\n"
        << " static_cast<" << TargetOperandClass
        << "&>(*Operands[*(p + 1)]).addRegOperands(Inst, 1);\n"
        << " break;\n"
        << " case CVT_Tied:\n"
        << " Inst.addOperand(Inst.getOperand(*(p + 1)));\n"
        << " break;\n";
  if (HasOptionalOperands) {
    CvtOS << "void " << Target.getName() << ClassName << "::\n"
          << "convertToMCInst(unsigned Kind, MCInst &Inst, "
          << "unsigned Opcode,\n"
          << " const OperandVector &Operands,\n"
          << " const SmallBitVector &OptionalOperandsMask) {\n";
  } else {
    CvtOS << "void " << Target.getName() << ClassName << "::\n"
          << "convertToMCInst(unsigned Kind, MCInst &Inst, "
          << "unsigned Opcode,\n"
          << " const OperandVector &Operands) {\n";
  }
  CvtOS << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n";
  CvtOS << " const uint8_t *Converter = ConversionTable[Kind];\n";
  if (HasOptionalOperands) {
    CvtOS << " unsigned NumDefaults = 0;\n";
  }
  CvtOS << " unsigned OpIdx;\n";
  CvtOS << " Inst.setOpcode(Opcode);\n";
  CvtOS << " for (const uint8_t *p = Converter; *p; p+= 2) {\n";
  if (HasOptionalOperands) {
    CvtOS << " OpIdx = *(p + 1) - NumDefaults;\n";
  } else {
    CvtOS << " OpIdx = *(p + 1);\n";
  }
  CvtOS << " switch (*p) {\n";
  CvtOS << " default: llvm_unreachable(\"invalid conversion entry!\");\n";
  CvtOS << " case CVT_Reg:\n";
  CvtOS << " static_cast<" << TargetOperandClass
        << "&>(*Operands[OpIdx]).addRegOperands(Inst, 1);\n";
  CvtOS << " break;\n";
  CvtOS << " case CVT_Tied:\n";
  CvtOS << " Inst.addOperand(Inst.getOperand(OpIdx));\n";
  CvtOS << " break;\n";

  std::string OperandFnBody;
  raw_string_ostream OpOS(OperandFnBody);
@@ -1934,6 +1972,11 @@ static void emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
      // the index of its entry in the vector).
      std::string Name = "CVT_" + (Op.Class->isRegisterClass() ? "Reg" :
                                   Op.Class->RenderMethod);
      if (Op.Class->IsOptional) {
        // For optional operands we must also care about DefaultMethod
        assert(HasOptionalOperands);
        Name += "_" + Op.Class->DefaultMethod;
      }
      Name = getEnumNameForToken(Name);

      bool IsNewConverter = false;
@@ -1949,11 +1992,27 @@ static void emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,

      // This is a new operand kind. Add a handler for it to the
      // converter driver.
      CvtOS << " case " << Name << ":\n"
            << " static_cast<" << TargetOperandClass
            << "&>(*Operands[*(p + 1)])." << Op.Class->RenderMethod
            << "(Inst, " << OpInfo.MINumOperands << ");\n"
            << " break;\n";
      CvtOS << " case " << Name << ":\n";
      if (Op.Class->IsOptional) {
        // If optional operand is not present in actual instruction then we
        // should call its DefaultMethod before RenderMethod
        assert(HasOptionalOperands);
        CvtOS << " if (OptionalOperandsMask[*(p + 1) - 1]) {\n"
              << " " << Op.Class->DefaultMethod << "()"
              << "->" << Op.Class->RenderMethod << "(Inst, "
              << OpInfo.MINumOperands << ");\n"
              << " ++NumDefaults;\n"
              << " } else {\n"
              << " static_cast<" << TargetOperandClass
              << "&>(*Operands[OpIdx])." << Op.Class->RenderMethod
              << "(Inst, " << OpInfo.MINumOperands << ");\n"
              << " }\n";
      } else {
        CvtOS << " static_cast<" << TargetOperandClass
              << "&>(*Operands[OpIdx])." << Op.Class->RenderMethod
              << "(Inst, " << OpInfo.MINumOperands << ");\n";
      }
      CvtOS << " break;\n";

      // Add a handler for the operand number lookup.
      OpOS << " case " << Name << ":\n"
@@ -2806,6 +2865,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
  Info.buildOperandMatchInfo();

  bool HasMnemonicFirst = AsmParser->getValueAsBit("HasMnemonicFirst");
  bool HasOptionalOperands = Info.hasOptionalOperands();

  // Write the output.

@@ -2815,10 +2875,16 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
  OS << " // This should be included into the middle of the declaration of\n";
  OS << " // your subclasses implementation of MCTargetAsmParser.\n";
  OS << " uint64_t ComputeAvailableFeatures(const FeatureBitset& FB) const;\n";
  OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, "
     << "unsigned Opcode,\n"
     << " const OperandVector "
     << "&Operands);\n";
  if (HasOptionalOperands) {
    OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, "
       << "unsigned Opcode,\n"
       << " const OperandVector &Operands,\n"
       << " const SmallBitVector &OptionalOperandsMask);\n";
  } else {
    OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, "
       << "unsigned Opcode,\n"
       << " const OperandVector &Operands);\n";
  }
  OS << " void convertToMapAndConstraints(unsigned Kind,\n ";
  OS << " const OperandVector &Operands) override;\n";
  if (HasMnemonicFirst)
@@ -2885,7 +2951,8 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
  // Generate the convertToMCInst function to convert operands into an MCInst.
  // Also, generate the convertToMapAndConstraints function for MS-style inline
  // assembly. The latter doesn't actually generate a MCInst.
  emitConvertFuncs(Target, ClassName, Info.Matchables, HasMnemonicFirst, OS);
  emitConvertFuncs(Target, ClassName, Info.Matchables, HasMnemonicFirst,
                   HasOptionalOperands, OS);

  // Emit the enumeration for classes which participate in matching.
  emitMatchClassEnumeration(Target, Info.Classes, OS);
@@ -3067,6 +3134,9 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
  OS << " bool HadMatchOtherThanPredicate = false;\n";
  OS << " unsigned RetCode = Match_InvalidOperand;\n";
  OS << " uint64_t MissingFeatures = ~0ULL;\n";
  if (HasOptionalOperands) {
    OS << " SmallBitVector OptionalOperandsMask(" << MaxNumOperands << ");\n";
  }
  OS << " // Set ErrorInfo to the operand that mismatches if it is\n";
  OS << " // wrong for all instances of the instruction.\n";
  OS << " ErrorInfo = ~0ULL;\n";
@@ -3111,6 +3181,9 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {

  // Emit check that the subclasses match.
  OS << " bool OperandsValid = true;\n";
  if (HasOptionalOperands) {
    OS << " OptionalOperandsMask.reset(0, " << MaxNumOperands << ");\n";
  }
  OS << " for (unsigned FormalIdx = " << (HasMnemonicFirst ? "0" : "SIndex")
     << ", ActualIdx = " << (HasMnemonicFirst ? "1" : "SIndex")
     << "; FormalIdx != " << MaxNumOperands << "; ++FormalIdx) {\n";
@@ -3120,6 +3193,10 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
  OS << " OperandsValid = (Formal == " << "InvalidMatchClass) || "
        "isSubclass(Formal, OptionalMatchClass);\n";
  OS << " if (!OperandsValid) ErrorInfo = ActualIdx;\n";
  if (HasOptionalOperands) {
    OS << " OptionalOperandsMask.set(FormalIdx, " << MaxNumOperands
       << ");\n";
  }
  OS << " break;\n";
  OS << " }\n";
  OS << " MCParsedAsmOperand &Actual = *Operands[ActualIdx];\n";
@@ -3140,8 +3217,12 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
  OS << " // If current formal operand wasn't matched and it is optional\n"
     << " // then try to match next formal operand\n";
  OS << " if (Diag == Match_InvalidOperand "
     << "&& isSubclass(Formal, OptionalMatchClass))\n";
     << "&& isSubclass(Formal, OptionalMatchClass)) {\n";
  if (HasOptionalOperands) {
    OS << " OptionalOperandsMask.set(FormalIdx);\n";
  }
  OS << " continue;\n";
  OS << " }\n";
  OS << " // If this operand is broken for all of the instances of this\n";
  OS << " // mnemonic, keep track of it so we can report loc info.\n";
  OS << " // If we already had a match that only failed due to a\n";
@@ -3180,7 +3261,12 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
  OS << " }\n\n";
  OS << " // We have selected a definite instruction, convert the parsed\n"
     << " // operands into the appropriate MCInst.\n";
  OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n";
  if (HasOptionalOperands) {
    OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands,\n"
       << " OptionalOperandsMask);\n";
  } else {
    OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n";
  }
  OS << "\n";

  // Verify the instruction with the target-specific match predicate function.