[AMDGPU] Assembler: Fix VOP3 only instructions

Separate methods to convert parsed instructions to MCInst:

  - VOP3 only instructions (always create modifiers as operands in MCInst)
  - VOP2 instructions with modifiers (create modifiers as operands
    in MCInst when e64 encoding is forced or modifiers are parsed)
  - VOP2 instructions without modifiers (do not create modifiers
    as operands in MCInst)
  - Add VOP3Only flag. Pass HasMods flag to VOP3Common.
  - Simplify code that deals with modifiers (-1 is now treated the
    same as 0, so the special case for -1 is no longer needed).
  - Add a few tests (more will be added separately).
    Update the error message so it is now correct.

Patch By: Nikolay Haustov

Differential Revision: http://reviews.llvm.org/D16778

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@260483 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Tom Stellard 2016-02-11 03:28:15 +00:00
parent e48f9ae77d
commit 4ea8ee7c76
5 changed files with 193 additions and 97 deletions

View File

@ -116,8 +116,7 @@ public:
}
void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
Inst.addOperand(MCOperand::createImm(
Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
addRegOperands(Inst, N);
}
@ -176,11 +175,23 @@ public:
}
bool isReg() const override {
return Kind == Register && Reg.Modifiers == -1;
return Kind == Register && Reg.Modifiers == 0;
}
bool isRegWithInputMods() const {
return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
return Kind == Register;
}
bool isClamp() const {
return isImm() && Imm.Type == ImmTyClamp;
}
bool isOMod() const {
return isImm() && Imm.Type == ImmTyOMod;
}
bool isMod() const {
return isClamp() || isOMod();
}
void setModifiers(unsigned Mods) {
@ -190,7 +201,7 @@ public:
bool hasModifiers() const {
assert(isRegKind());
return Reg.Modifiers != -1;
return Reg.Modifiers != 0;
}
unsigned getReg() const override {
@ -202,7 +213,7 @@ public:
}
bool isRegClass(unsigned RCID) const {
return Reg.TRI->getRegClass(RCID).contains(getReg());
return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
}
bool isSCSrc32() const {
@ -306,7 +317,7 @@ public:
Op->Reg.RegNo = RegNo;
Op->Reg.TRI = TRI;
Op->Reg.STI = STI;
Op->Reg.Modifiers = -1;
Op->Reg.Modifiers = 0;
Op->Reg.IsForcedVOP3 = ForceVOP3;
Op->StartLoc = S;
Op->EndLoc = E;
@ -462,6 +473,10 @@ public:
OperandMatchResultTy parseUNorm(OperandVector &Operands);
OperandMatchResultTy parseR128(OperandVector &Operands);
void cvtId(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};
@ -1103,7 +1118,7 @@ AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
// If we are parsing after we reach EndOfStatement then this means we
// are appending default values to the Operands list. This is only done
// by custom parser, so we shouldn't continue on to the generic parsing.
if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail||
getLexer().is(AsmToken::EndOfStatement))
return ResTy;
@ -1153,8 +1168,6 @@ AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
SMLoc S, E;
unsigned RegNo;
if (!ParseRegister(RegNo, S, E)) {
bool HasModifiers = operandsHaveModifiers(Operands);
unsigned Modifiers = 0;
if (Negate)
@ -1167,34 +1180,23 @@ AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
Modifiers |= 0x2;
}
if (Modifiers && !HasModifiers) {
// We are adding a modifier to src1 or src2 and previous sources
// don't have modifiers, so we need to go back and set empty modifiers
// for each previous source.
for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
--PrevRegIdx) {
AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
RegOp.setModifiers(0);
}
}
Operands.push_back(AMDGPUOperand::CreateReg(
RegNo, S, E, getContext().getRegisterInfo(), &getSTI(),
isForcedVOP3()));
if (HasModifiers || Modifiers) {
if (Modifiers) {
AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
RegOp.setModifiers(Modifiers);
}
} else {
Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
S));
Parser.Lex();
}
return MatchOperand_Success;
} else {
ResTy = parseVOP3OptionalOps(Operands);
if (ResTy == MatchOperand_NoMatch) {
Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
S));
Parser.Lex();
}
}
return MatchOperand_Success;
}
default:
return MatchOperand_NoMatch;
@ -1802,10 +1804,12 @@ static bool isVOP3(OperandVector &Operands) {
if (operandsHaveModifiers(Operands))
return true;
AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
if (Operands.size() >= 2) {
AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
return true;
if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
return true;
}
if (Operands.size() >= 5)
return true;
@ -1848,35 +1852,70 @@ AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
return MatchOperand_NoMatch;
}
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
unsigned i = 1;
void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
unsigned I = 1;
const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
if (Desc.getNumDefs() > 0) {
((AMDGPUOperand &)*Operands[i++]).addRegOperands(Inst, 1);
((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
}
for (unsigned E = Operands.size(); I != E; ++I)
((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
}
// Convert a VOP2 instruction that supports source modifiers: use the
// modifier-carrying VOP3 operand layout when modifiers were actually parsed
// or the e64 encoding is forced; otherwise emit the plain operand layout.
void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
  const bool NeedModifierOperands =
      operandsHaveModifiers(Operands) || isForcedVOP3();
  if (!NeedModifierOperands) {
    cvtId(Inst, Operands);
    return;
  }
  cvtVOP3(Inst, Operands);
}
// Convert a VOP2 instruction that does not support source modifiers: emit
// the plain operand layout unless the parser nevertheless produced modifier
// operands, in which case fall back to the VOP3 conversion.
void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
  if (!operandsHaveModifiers(Operands)) {
    cvtId(Inst, Operands);
    return;
  }
  cvtVOP3(Inst, Operands);
}
// VOP3-only instructions always use the modifier-carrying MCInst operand
// layout, regardless of whether any modifiers were parsed.
void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
cvtVOP3(Inst, Operands);
}
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
unsigned I = 1;
const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
if (Desc.getNumDefs() > 0) {
((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
}
std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
if (operandsHaveModifiers(Operands)) {
for (unsigned e = Operands.size(); i != e; ++i) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
if (Op.isRegWithInputMods()) {
((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
continue;
}
OptionalIdx[Op.getImmTy()] = i;
unsigned ClampIdx = 0, OModIdx = 0;
for (unsigned E = Operands.size(); I != E; ++I) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
if (Op.isRegWithInputMods()) {
Op.addRegWithInputModsOperands(Inst, 2);
} else if (Op.isClamp()) {
ClampIdx = I;
} else if (Op.isOMod()) {
OModIdx = I;
} else if (Op.isImm()) {
Op.addImmOperands(Inst, 1);
} else {
assert(false);
}
}
unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];
((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
if (ClampIdx) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[ClampIdx]);
Op.addImmOperands(Inst, 1);
} else {
for (unsigned e = Operands.size(); i != e; ++i)
((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
Inst.addOperand(MCOperand::createImm(0));
}
if (OModIdx) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[OModIdx]);
Op.addImmOperands(Inst, 1);
} else {
Inst.addOperand(MCOperand::createImm(0));
}
}

View File

@ -123,7 +123,7 @@ class VOP2Common <dag outs, dag ins, string asm, list<dag> pattern> :
let Size = 4;
}
class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern, bit HasMods = 0, bit VOP3Only = 0> :
VOPAnyCommon <outs, ins, asm, pattern> {
// Using complex patterns gives VOP3 patterns a very high complexity rating,
@ -135,7 +135,10 @@ class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
let VOP3 = 1;
let VALU = 1;
let AsmMatchConverter = "cvtVOP3";
let AsmMatchConverter =
!if(!eq(VOP3Only,1),
"cvtVOP3_only",
!if(!eq(HasMods,1), "cvtVOP3_2_mod", "cvtVOP3_2_nomod"));
let isCodeGenOnly = 0;
int Size = 8;

View File

@ -477,6 +477,7 @@ def OModMatchClass : AsmOperandClass {
let PredicateMethod = "isImm";
let ParserMethod = "parseVOP3OptionalOps";
let RenderMethod = "addImmOperands";
let IsOptional = 1;
}
def ClampMatchClass : AsmOperandClass {
@ -484,6 +485,7 @@ def ClampMatchClass : AsmOperandClass {
let PredicateMethod = "isImm";
let ParserMethod = "parseVOP3OptionalOps";
let RenderMethod = "addImmOperands";
let IsOptional = 1;
}
class SMRDOffsetBaseMatchClass <string predicate> : AsmOperandClass {
@ -1072,8 +1074,10 @@ class getVOP3SrcForVT<ValueType VT> {
// Returns 1 if the source arguments have modifiers, 0 if they do not.
// XXX - do f16 instructions?
class hasModifiers<ValueType SrcVT> {
bit ret = !if(!eq(SrcVT.Value, f32.Value), 1,
!if(!eq(SrcVT.Value, f64.Value), 1, 0));
bit ret =
!if(!eq(SrcVT.Value, f32.Value), 1,
!if(!eq(SrcVT.Value, f64.Value), 1,
0));
}
// Returns the input arguments for VOP[12C] instructions for the given SrcVT.
@ -1471,8 +1475,9 @@ class VOP3DisableModFields <bit HasSrc0Mods,
bits<1> clamp = !if(HasOutputMods, ?, 0);
}
class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
VOP3Common <outs, ins, "", pattern>,
class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName,
bit HasMods = 0, bit VOP3Only = 0> :
VOP3Common <outs, ins, "", pattern, HasMods, VOP3Only>,
VOP <opName>,
SIMCInstr<opName#"_e64", SISubtarget.NONE>,
MnemonicAlias<opName#"_e64", opName> {
@ -1483,44 +1488,48 @@ class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
field bit src0;
}
class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
VOP3Common <outs, ins, asm, []>,
class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName,
bit HasMods = 0, bit VOP3Only = 0> :
VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
VOP3e <op>,
SIMCInstr<opName#"_e64", SISubtarget.SI> {
let AssemblerPredicates = [isSICI];
}
class VOP3_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
VOP3Common <outs, ins, asm, []>,
class VOP3_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName,
bit HasMods = 0, bit VOP3Only = 0> :
VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
VOP3e_vi <op>,
SIMCInstr <opName#"_e64", SISubtarget.VI> {
let AssemblerPredicates = [isVI];
}
class VOP3b_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
VOP3Common <outs, ins, asm, []>,
class VOP3b_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName,
bit HasMods = 0, bit VOP3Only = 0> :
VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
VOP3be <op>,
SIMCInstr<opName#"_e64", SISubtarget.SI> {
let AssemblerPredicates = [isSICI];
}
class VOP3b_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
VOP3Common <outs, ins, asm, []>,
class VOP3b_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName,
bit HasMods = 0, bit VOP3Only = 0> :
VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
VOP3be_vi <op>,
SIMCInstr <opName#"_e64", SISubtarget.VI> {
let AssemblerPredicates = [isVI];
}
multiclass VOP3_m <vop op, dag outs, dag ins, string asm, list<dag> pattern,
string opName, int NumSrcArgs, bit HasMods = 1> {
string opName, int NumSrcArgs, bit HasMods = 1, bit VOP3Only = 0> {
def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods, VOP3Only>,
VOP3DisableFields<!if(!eq(NumSrcArgs, 1), 0, 1),
!if(!eq(NumSrcArgs, 2), 0, 1),
HasMods>;
def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods, VOP3Only>,
VOP3DisableFields<!if(!eq(NumSrcArgs, 1), 0, 1),
!if(!eq(NumSrcArgs, 2), 0, 1),
HasMods>;
@ -1529,21 +1538,21 @@ multiclass VOP3_m <vop op, dag outs, dag ins, string asm, list<dag> pattern,
multiclass VOP3_1_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName, bit HasMods = 1> {
def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>;
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<0, 0, HasMods>;
def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<0, 0, HasMods>;
}
multiclass VOP3SI_1_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName, bit HasMods = 1> {
def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>;
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<0, 0, HasMods>;
// No VI instruction. This class is for SI only.
}
@ -1552,13 +1561,13 @@ multiclass VOP3_2_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName, string revOp,
bit HasMods = 1> {
def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods>;
def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods>;
}
@ -1566,10 +1575,10 @@ multiclass VOP3SI_2_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName, string revOp,
bit HasMods = 1> {
def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods>;
// No VI instruction. This class is for SI only.
@ -1594,19 +1603,19 @@ multiclass VOP3_C_m <vop op, dag outs, dag ins, string asm,
bit HasMods, bit defExec,
string revOp, list<SchedReadWrite> sched> {
def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
VOP2_REV<revOp#"_e64", !eq(revOp, opName)> {
let Defs = !if(defExec, [EXEC], []);
let SchedRW = sched;
}
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods> {
let Defs = !if(defExec, [EXEC], []);
let SchedRW = sched;
}
def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods> {
let Defs = !if(defExec, [EXEC], []);
let SchedRW = sched;
@ -1900,8 +1909,9 @@ multiclass VOPCX_I64 <vopc op, string opName, string revOp = opName> :
VOPCX <op, opName, VOPC_I1_I64_I64, COND_NULL, [Write64Bit], revOp>;
multiclass VOP3_Helper <vop3 op, string opName, dag outs, dag ins, string asm,
list<dag> pat, int NumSrcArgs, bit HasMods> : VOP3_m <
op, outs, ins, opName#" "#asm, pat, opName, NumSrcArgs, HasMods
list<dag> pat, int NumSrcArgs, bit HasMods,
bit VOP3Only = 0> : VOP3_m <
op, outs, ins, opName#" "#asm, pat, opName, NumSrcArgs, HasMods, VOP3Only
>;
multiclass VOPC_CLASS_F32 <vopc op, string opName> :
@ -1917,7 +1927,8 @@ multiclass VOPCX_CLASS_F64 <vopc op, string opName> :
VOPCClassInst <op, opName, VOPC_I1_F64_I32, 1, [WriteDoubleAdd]>;
multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
SDPatternOperator node = null_frag> : VOP3_Helper <
SDPatternOperator node = null_frag, bit VOP3Only = 0> :
VOP3_Helper <
op, opName, (outs P.DstRC.RegClass:$dst), P.Ins64, P.Asm64,
!if(!eq(P.NumSrcArgs, 3),
!if(P.HasModifiers,
@ -1941,7 +1952,7 @@ multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
(node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
i1:$clamp, i32:$omod))))],
[(set P.DstVT:$dst, (node P.Src0VT:$src0))]))),
P.NumSrcArgs, P.HasModifiers
P.NumSrcArgs, P.HasModifiers, VOP3Only
>;
// Special case for v_div_fmas_{f32|f64}, since it seems to be the

View File

@ -1727,23 +1727,23 @@ let SchedRW = [WriteDoubleAdd] in {
let isCommutable = 1 in {
defm V_ADD_F64 : VOP3Inst <vop3<0x164, 0x280>, "v_add_f64",
VOP_F64_F64_F64, fadd
VOP_F64_F64_F64, fadd, 1
>;
defm V_MUL_F64 : VOP3Inst <vop3<0x165, 0x281>, "v_mul_f64",
VOP_F64_F64_F64, fmul
VOP_F64_F64_F64, fmul, 1
>;
defm V_MIN_F64 : VOP3Inst <vop3<0x166, 0x282>, "v_min_f64",
VOP_F64_F64_F64, fminnum
VOP_F64_F64_F64, fminnum, 1
>;
defm V_MAX_F64 : VOP3Inst <vop3<0x167, 0x283>, "v_max_f64",
VOP_F64_F64_F64, fmaxnum
VOP_F64_F64_F64, fmaxnum, 1
>;
} // End isCommutable = 1
defm V_LDEXP_F64 : VOP3Inst <vop3<0x168, 0x284>, "v_ldexp_f64",
VOP_F64_F64_I32, AMDGPUldexp
VOP_F64_F64_I32, AMDGPUldexp, 1
>;
} // End let SchedRW = [WriteDoubleAdd]

View File

@ -198,8 +198,7 @@ v_subrev_f32 v1, v3, s5
v_mac_legacy_f32 v1, v3, s5
// SICI: v_mac_legacy_f32_e64 v1, v3, s5 ; encoding: [0x01,0x00,0x0c,0xd2,0x03,0x0b,0x00,0x00]
// FIXME: The error message should be: error: instruction not supported on this GPU
// NOVI: error: invalid operand for instruction
// NOVI: error: instruction not supported on this GPU
v_mul_legacy_f32 v1, v3, s5
// SICI: v_mul_legacy_f32_e64 v1, v3, s5 ; encoding: [0x01,0x00,0x0e,0xd2,0x03,0x0b,0x00,0x00]
@ -223,7 +222,51 @@ v_mad_legacy_f32 v2, v4, v6, v8
// SICI: v_mad_legacy_f32 v2, v4, v6, v8 ; encoding: [0x02,0x00,0x80,0xd2,0x04,0x0d,0x22,0x04]
// VI: v_mad_legacy_f32 v2, v4, v6, v8 ; encoding: [0x02,0x00,0xc0,0xd1,0x04,0x0d,0x22,0x04]
v_add_f64 v[0:1], v[2:3], v[5:6]
// SICI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x00]
// VI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x00]
v_add_f64_e64 v[0:1], v[2:3], v[5:6]
// SICI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x00]
// VI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x00]
v_add_f64 v[0:1], -v[2:3], v[5:6]
// SICI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x20]
// VI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x20]
v_add_f64_e64 v[0:1], -v[2:3], v[5:6]
// SICI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x20]
// VI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x20]
v_add_f64 v[0:1], v[2:3], -v[5:6]
// SICI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x40]
// VI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x40]
v_add_f64_e64 v[0:1], v[2:3], -v[5:6]
// SICI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x40]
// VI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x40]
v_add_f64 v[0:1], |v[2:3]|, v[5:6]
// SICI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0xc8,0xd2,0x02,0x0b,0x02,0x00]
// VI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0x80,0xd2,0x02,0x0b,0x02,0x00]
v_add_f64_e64 v[0:1], |v[2:3]|, v[5:6]
// SICI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0xc8,0xd2,0x02,0x0b,0x02,0x00]
// VI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0x80,0xd2,0x02,0x0b,0x02,0x00]
v_add_f64 v[0:1], v[2:3], |v[5:6]|
// SICI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0xc8,0xd2,0x02,0x0b,0x02,0x00]
// VI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0x80,0xd2,0x02,0x0b,0x02,0x00]
v_add_f64_e64 v[0:1], v[2:3], |v[5:6]|
// SICI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0xc8,0xd2,0x02,0x0b,0x02,0x00]
// VI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0x80,0xd2,0x02,0x0b,0x02,0x00]
v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4
// SICI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x0a,0xc8,0xd2,0x02,0x0b,0x02,0x30]
// VI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x82,0x80,0xd2,0x02,0x0b,0x02,0x30]
v_add_f64_e64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4
// SICI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x0a,0xc8,0xd2,0x02,0x0b,0x02,0x30]
// VI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x82,0x80,0xd2,0x02,0x0b,0x02,0x30]