R600/SI: Improve AsmParser support for forced e64 encoding

We can now force e64 encoding even when the operands would be legal
for e32 encoding.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@235626 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Tom Stellard 2015-04-23 19:33:48 +00:00
parent 71ed98da5c
commit 95081f5241
3 changed files with 61 additions and 5 deletions

View File

@@ -80,6 +80,7 @@ public:
unsigned RegNo;
int Modifiers;
const MCRegisterInfo *TRI;
bool IsForcedVOP3;
};
union {
@@ -109,7 +110,8 @@ public:
}
void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
Inst.addOperand(MCOperand::CreateImm(Reg.Modifiers));
Inst.addOperand(MCOperand::CreateImm(
Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
addRegOperands(Inst, N);
}
@@ -163,12 +165,16 @@ public:
return Imm.Type;
}
bool isRegKind() const {
return Kind == Register;
}
bool isReg() const override {
return Kind == Register && Reg.Modifiers == -1;
}
bool isRegWithInputMods() const {
return Kind == Register && Reg.Modifiers != -1;
return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
}
void setModifiers(unsigned Mods) {
@@ -176,6 +182,11 @@ public:
Reg.Modifiers = Mods;
}
bool hasModifiers() const {
assert(isRegKind());
return Reg.Modifiers != -1;
}
unsigned getReg() const override {
return Reg.RegNo;
}
@@ -263,11 +274,13 @@ public:
static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
SMLoc E,
const MCRegisterInfo *TRI) {
const MCRegisterInfo *TRI,
bool ForceVOP3) {
auto Op = llvm::make_unique<AMDGPUOperand>(Register);
Op->Reg.RegNo = RegNo;
Op->Reg.TRI = TRI;
Op->Reg.Modifiers = -1;
Op->Reg.IsForcedVOP3 = ForceVOP3;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
@@ -324,6 +337,10 @@ public:
ForcedEncodingSize = Size;
}
bool isForcedVOP3() const {
return ForcedEncodingSize == 64;
}
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
unsigned checkTargetMatchPredicate(MCInst &Inst) override;
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
@@ -525,6 +542,28 @@ bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SMLoc ErrorLoc = IDLoc;
if (ErrorInfo != ~0ULL) {
if (ErrorInfo >= Operands.size()) {
if (isForcedVOP3()) {
// If 64-bit encoding has been forced we can end up with no
// clamp or omod operands if none of the registers have modifiers,
// so we need to add these to the operand list.
AMDGPUOperand &LastOp =
((AMDGPUOperand &)*Operands[Operands.size() - 1]);
if (LastOp.isRegKind() ||
(LastOp.isImm() &&
LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
SMLoc S = Parser.getTok().getLoc();
Operands.push_back(AMDGPUOperand::CreateImm(0, S,
AMDGPUOperand::ImmTyClamp));
Operands.push_back(AMDGPUOperand::CreateImm(0, S,
AMDGPUOperand::ImmTyOMod));
bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
Out, ErrorInfo,
MatchingInlineAsm);
if (!Res)
return Res;
}
}
return Error(IDLoc, "too few operands for instruction");
}
@@ -546,7 +585,7 @@ static bool operandsHaveModifiers(const OperandVector &Operands) {
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
if (Op.isRegWithInputMods())
if (Op.isRegKind() && Op.hasModifiers())
return true;
if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
@@ -647,7 +686,8 @@ AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
Operands.push_back(AMDGPUOperand::CreateReg(
RegNo, S, E, getContext().getRegisterInfo()));
RegNo, S, E, getContext().getRegisterInfo(),
isForcedVOP3()));
if (HasModifiers || Modifiers) {
AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);

5
test/MC/R600/vop3-errs.s Normal file
View File

@@ -0,0 +1,5 @@
// RUN: not llvm-mc -arch=amdgcn -show-encoding %s 2>&1 | FileCheck %s
// RUN: not llvm-mc -arch=amdgcn -mcpu=SI -show-encoding %s 2>&1 | FileCheck %s
v_add_f32_e64 v0, v1
// CHECK: error: too few operands for instruction

View File

@@ -5,6 +5,11 @@
// VOPC Instructions
//===----------------------------------------------------------------------===//
// Test forced e64 encoding
v_cmp_lt_f32_e64 s[2:3], v4, -v6
// CHECK: v_cmp_lt_f32_e64 s[2:3], v4, -v6 ; encoding: [0x02,0x00,0x02,0xd0,0x04,0x0d,0x02,0x40]
//
// Modifier tests:
//
@@ -95,6 +100,12 @@ v_fract_f32 v1, v2, div:2 clamp
// VOP2 Instructions
///===---------------------------------------------------------------------===//
// Test forced e64 encoding with e32 operands
v_ldexp_f32_e64 v1, v3, v5
// CHECK: v_ldexp_f32_e64 v1, v3, v5 ; encoding: [0x01,0x00,0x56,0xd2,0x03,0x0b,0x02,0x00]
// TODO: Modifier tests
v_cndmask_b32 v1, v3, v5, s[4:5]