Index: llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -116,8 +116,7 @@
   }

   void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
-    Inst.addOperand(MCOperand::createImm(
-        Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
+    Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
     addRegOperands(Inst, N);
   }

@@ -176,11 +175,23 @@
   }

   bool isReg() const override {
-    return Kind == Register && Reg.Modifiers == -1;
+    return Kind == Register && Reg.Modifiers == 0;
   }

   bool isRegWithInputMods() const {
-    return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
+    return Kind == Register;
+  }
+
+  bool isClamp() const {
+    return isImm() && Imm.Type == ImmTyClamp;
+  }
+
+  bool isOMod() const {
+    return isImm() && Imm.Type == ImmTyOMod;
+  }
+
+  bool isMod() const {
+    return isClamp() || isOMod();
   }

   void setModifiers(unsigned Mods) {
@@ -190,7 +201,7 @@

   bool hasModifiers() const {
     assert(isRegKind());
-    return Reg.Modifiers != -1;
+    return Reg.Modifiers != 0;
   }

   unsigned getReg() const override {
@@ -202,7 +213,7 @@
   }

   bool isRegClass(unsigned RCID) const {
-    return Reg.TRI->getRegClass(RCID).contains(getReg());
+    return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
   }

   bool isSCSrc32() const {
@@ -306,7 +317,7 @@
     Op->Reg.RegNo = RegNo;
     Op->Reg.TRI = TRI;
     Op->Reg.STI = STI;
-    Op->Reg.Modifiers = -1;
+    Op->Reg.Modifiers = 0;
     Op->Reg.IsForcedVOP3 = ForceVOP3;
     Op->StartLoc = S;
     Op->EndLoc = E;
@@ -462,6 +473,10 @@
   OperandMatchResultTy parseUNorm(OperandVector &Operands);
   OperandMatchResultTy parseR128(OperandVector &Operands);

+  void cvtId(MCInst &Inst, const OperandVector &Operands);
+  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
+  void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
+  void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
   void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
   OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
 };
@@ -1103,7 +1118,7 @@
   // If we are parsing after we reach EndOfStatement then this means we
   // are appending default values to the Operands list. This is only done
   // by custom parser, so we shouldn't continue on to the generic parsing.
-  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
+  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail||
       getLexer().is(AsmToken::EndOfStatement))
     return ResTy;
@@ -1153,8 +1168,6 @@
   SMLoc S, E;
   unsigned RegNo;
   if (!ParseRegister(RegNo, S, E)) {
-
-    bool HasModifiers = operandsHaveModifiers(Operands);
     unsigned Modifiers = 0;

     if (Negate)
@@ -1167,34 +1180,23 @@
       Modifiers |= 0x2;
     }

-    if (Modifiers && !HasModifiers) {
-      // We are adding a modifier to src1 or src2 and previous sources
-      // don't have modifiers, so we need to go back and empty modifers
-      // for each previous source.
-      for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
-           --PrevRegIdx) {
-
-        AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
-        RegOp.setModifiers(0);
-      }
-    }
-
     Operands.push_back(AMDGPUOperand::CreateReg(
         RegNo, S, E, getContext().getRegisterInfo(), &getSTI(),
         isForcedVOP3()));

-    if (HasModifiers || Modifiers) {
+    if (Modifiers) {
       AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
       RegOp.setModifiers(Modifiers);
-    }
-  } else {
-    Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
-                                                  S));
-    Parser.Lex();
-  }
-  return MatchOperand_Success;
+    }
+  } else {
+    ResTy = parseVOP3OptionalOps(Operands);
+    if (ResTy == MatchOperand_NoMatch) {
+      Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
+                                                    S));
+      Parser.Lex();
+    }
+  }
+  return MatchOperand_Success;
 }
 default:
   return MatchOperand_NoMatch;
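Note: the hunks above drop the old "-1 means no modifiers" sentinel in favor of a plain bitmask where 0 means "no modifiers". A minimal sketch of the convention the parser relies on; the 0x1 value for negation is set in code elided between the two hunks shown, so treat that flag as an assumption:

    // Sketch: how parseOperand() packs source modifiers for one operand.
    // 0x1 = negate ("-v[2:3]"), 0x2 = absolute value ("|v[2:3]|"), so
    // "-|v[2:3]|" would carry 0x3. Because 0 now doubles as "no
    // modifiers", addRegWithInputModsOperands() can emit Reg.Modifiers
    // directly, and isReg()/hasModifiers() simply compare against 0.
    unsigned encodeSrcMods(bool Negate, bool Abs) {
      unsigned Modifiers = 0;
      if (Negate)
        Modifiers |= 0x1; // assumption: set between the hunks above
      if (Abs)
        Modifiers |= 0x2; // visible in the hunk above
      return Modifiers;
    }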
@@ -1802,10 +1804,12 @@
   if (operandsHaveModifiers(Operands))
     return true;

-  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
+  if (Operands.size() >= 2) {
+    AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

-  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
-    return true;
+    if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
+      return true;
+  }

   if (Operands.size() >= 5)
     return true;
@@ -1848,35 +1852,70 @@
   return MatchOperand_NoMatch;
 }

-void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
-
-  unsigned i = 1;
+void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
+  unsigned I = 1;
   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
   if (Desc.getNumDefs() > 0) {
-    ((AMDGPUOperand &)*Operands[i++]).addRegOperands(Inst, 1);
+    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
   }
+  for (unsigned E = Operands.size(); I != E; ++I)
+    ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
+}

-  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
+  if (operandsHaveModifiers(Operands) || isForcedVOP3()) {
+    cvtVOP3(Inst, Operands);
+  } else {
+    cvtId(Inst, Operands);
+  }
+}

+void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
   if (operandsHaveModifiers(Operands)) {
-    for (unsigned e = Operands.size(); i != e; ++i) {
-      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
+    cvtVOP3(Inst, Operands);
+  } else {
+    cvtId(Inst, Operands);
+  }
+}

-      if (Op.isRegWithInputMods()) {
-        ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
-        continue;
-      }
-      OptionalIdx[Op.getImmTy()] = i;
-    }
+void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
+  cvtVOP3(Inst, Operands);
+}

-    unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
-    unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];
+void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
+  unsigned I = 1;
+  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+  if (Desc.getNumDefs() > 0) {
+    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
+  }

-    ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
-    ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
+  unsigned ClampIdx = 0, OModIdx = 0;
+  for (unsigned E = Operands.size(); I != E; ++I) {
+    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
+    if (Op.isRegWithInputMods()) {
+      Op.addRegWithInputModsOperands(Inst, 2);
+    } else if (Op.isClamp()) {
+      ClampIdx = I;
+    } else if (Op.isOMod()) {
+      OModIdx = I;
+    } else if (Op.isImm()) {
+      Op.addImmOperands(Inst, 1);
+    } else {
+      assert(false);
+    }
+  }
+
+  if (ClampIdx) {
+    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[ClampIdx]);
+    Op.addImmOperands(Inst, 1);
+  } else {
+    Inst.addOperand(MCOperand::createImm(0));
+  }
+  if (OModIdx) {
+    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[OModIdx]);
+    Op.addImmOperands(Inst, 1);
   } else {
-    for (unsigned e = Operands.size(); i != e; ++i)
-      ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
+    Inst.addOperand(MCOperand::createImm(0));
   }
 }
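Note: cvtVOP3 above now scans the parsed operands once, remembers where clamp and omod occurred (in either order), and appends them, or an explicit 0, at the end, so the VOP3 MCInst always carries the full operand list. A factored restatement of the two identical if/else tails, using the same types the patch uses; the helper name is hypothetical:

    // Append an optional immediate operand, or a default 0 if the user
    // never wrote it. Idx == 0 means "not seen": operand 0 is always
    // the mnemonic token, so 0 is a safe sentinel here.
    static void addOptionalImm(MCInst &Inst, const OperandVector &Operands,
                               unsigned Idx) {
      if (Idx)
        ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
      else
        Inst.addOperand(MCOperand::createImm(0));
    }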
Index: llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td
@@ -123,7 +123,7 @@
   let Size = 4;
 }

-class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
+class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern, bit HasMods = 0, bit VOP3Only = 0> :
   VOPAnyCommon <outs, ins, asm, pattern> {

   // Using complex patterns gives VOP3 patterns a very high complexity rating,
@@ -135,7 +135,10 @@
   let VOP3 = 1;
   let VALU = 1;

-  let AsmMatchConverter = "cvtVOP3";
+  let AsmMatchConverter =
+    !if(!eq(VOP3Only,1),
+        "cvtVOP3_only",
+        !if(!eq(HasMods,1), "cvtVOP3_2_mod", "cvtVOP3_2_nomod"));
   let isCodeGenOnly = 0;

   int Size = 8;
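Note: the !if chain above is the TableGen side of the new scheme: every VOP3 flavor names one of the converters added in the AMDGPUAsmParser.cpp hunks. The same decision restated as C++ for readability; the converter names are the patch's own, and the boolean inputs mirror the TableGen bits and the cvtVOP3_2_* bodies earlier:

    enum class Converter { Id, VOP3 };

    // Which conversion a matched instruction gets, per the
    // AsmMatchConverter selection above.
    Converter pickConverter(bool VOP3Only, bool HasMods,
                            bool ParsedAnyModifiers, bool ForcedE64) {
      if (VOP3Only)
        return Converter::VOP3;                       // "cvtVOP3_only"
      if (HasMods)                                    // "cvtVOP3_2_mod"
        return (ParsedAnyModifiers || ForcedE64) ? Converter::VOP3
                                                 : Converter::Id;
      // "cvtVOP3_2_nomod"
      return ParsedAnyModifiers ? Converter::VOP3 : Converter::Id;
    }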
Index: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
@@ -477,6 +477,7 @@
   let PredicateMethod = "isImm";
   let ParserMethod = "parseVOP3OptionalOps";
   let RenderMethod = "addImmOperands";
+  let IsOptional = 1;
 }

 def ClampMatchClass : AsmOperandClass {
@@ -484,6 +485,7 @@
   let PredicateMethod = "isImm";
   let ParserMethod = "parseVOP3OptionalOps";
   let RenderMethod = "addImmOperands";
+  let IsOptional = 1;
 }

 class SMRDOffsetBaseMatchClass <string predicate> : AsmOperandClass {
@@ -1072,8 +1074,10 @@
 // Returns 1 if the source arguments have modifiers, 0 if they do not.
 // XXX - do f16 instructions?
 class hasModifiers<ValueType SrcVT> {
-  bit ret = !if(!eq(SrcVT.Value, f32.Value), 1,
-                !if(!eq(SrcVT.Value, f64.Value), 1, 0));
+  bit ret =
+    !if(!eq(SrcVT.Value, f32.Value), 1,
+    !if(!eq(SrcVT.Value, f64.Value), 1,
+    0));
 }

 // Returns the input arguments for VOP[12C] instructions for the given SrcVT.
@@ -1471,8 +1475,9 @@
   bits<1> clamp = !if(HasOutputMods, ?, 0);
 }

-class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
-  VOP3Common <outs, ins, "", pattern>,
+class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName,
+                   bit HasMods = 0, bit VOP3Only = 0> :
+  VOP3Common <outs, ins, "", pattern, HasMods, VOP3Only>,
   VOP <opName>,
   SIMCInstr<opName#"_e64", SISubtarget.NONE>,
   MnemonicAlias<opName#"_e64", opName> {
@@ -1483,44 +1488,48 @@
   field bit src0;
 }

-class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
-  VOP3Common <outs, ins, asm, []>,
+class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName,
+                    bit HasMods = 0, bit VOP3Only = 0> :
+  VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
   VOP3e <op>,
   SIMCInstr<opName#"_e64", SISubtarget.SI> {
   let AssemblerPredicates = [isSICI];
 }

-class VOP3_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
-  VOP3Common <outs, ins, asm, []>,
+class VOP3_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName,
+                    bit HasMods = 0, bit VOP3Only = 0> :
+  VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
   VOP3e_vi <op>,
   SIMCInstr <opName#"_e64", SISubtarget.VI> {
   let AssemblerPredicates = [isVI];
 }

-class VOP3b_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
-  VOP3Common <outs, ins, asm, []>,
+class VOP3b_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName,
+                     bit HasMods = 0, bit VOP3Only = 0> :
+  VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
   VOP3be <op>,
   SIMCInstr<opName#"_e64", SISubtarget.SI> {
   let AssemblerPredicates = [isSICI];
 }

-class VOP3b_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
-  VOP3Common <outs, ins, asm, []>,
+class VOP3b_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName,
+                     bit HasMods = 0, bit VOP3Only = 0> :
+  VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
   VOP3be_vi <op>,
   SIMCInstr <opName#"_e64", SISubtarget.VI> {
   let AssemblerPredicates = [isVI];
 }

 multiclass VOP3_m <vop op, dag outs, dag ins, string asm, list<dag> pattern,
-                   string opName, int NumSrcArgs, bit HasMods = 1> {
+                   string opName, int NumSrcArgs, bit HasMods = 1, bit VOP3Only = 0> {

   def "" : VOP3_Pseudo <outs, ins, pattern, opName>;

-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods, VOP3Only>,
             VOP3DisableFields<!if(!eq(NumSrcArgs, 3), 1, 0),
                               !if(!eq(NumSrcArgs, 2), 1, 0), HasMods>;
-  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods, VOP3Only>,
             VOP3DisableFields<!if(!eq(NumSrcArgs, 3), 1, 0),
                               !if(!eq(NumSrcArgs, 2), 1, 0), HasMods>;
 }
@@ -1529,21 +1538,21 @@

 multiclass VOP3_1_m <vop op, dag outs, dag ins, string asm,
                      list<dag> pattern, string opName, bit HasMods = 1> {

-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+  def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>;

-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<0, 0, HasMods>;

-  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<0, 0, HasMods>;
 }

 multiclass VOP3SI_1_m <vop op, dag outs, dag ins, string asm,
                        list<dag> pattern, string opName, bit HasMods = 1> {

-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+  def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>;

-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<0, 0, HasMods>;
   // No VI instruction. This class is for SI only.
 }
@@ -1552,13 +1561,13 @@
                      list<dag> pattern, string opName, string revOp,
                      bit HasMods = 1> {

-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+  def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
            VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;

-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<1, 0, HasMods>;

-  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<1, 0, HasMods>;
 }
@@ -1566,10 +1575,10 @@
                        list<dag> pattern, string opName, string revOp,
                        bit HasMods = 1> {

-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+  def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
            VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;

-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<1, 0, HasMods>;

   // No VI instruction. This class is for SI only.
@@ -1594,19 +1603,19 @@
                      bit HasMods, bit defExec, string revOp,
                      list<SchedReadWrite> sched> {

-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+  def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
            VOP2_REV<revOp#"_e64", !eq(revOp, opName)> {
     let Defs = !if(defExec, [EXEC], []);
     let SchedRW = sched;
   }

-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<1, 0, HasMods> {
     let Defs = !if(defExec, [EXEC], []);
     let SchedRW = sched;
   }

-  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<1, 0, HasMods> {
     let Defs = !if(defExec, [EXEC], []);
     let SchedRW = sched;
@@ -1900,8 +1909,9 @@
   VOPCX <...>;

 multiclass VOP3_Helper <vop3 op, string opName, dag outs, dag ins, string asm,
-                        list<dag> pat, int NumSrcArgs, bit HasMods> : VOP3_m <
-  op, outs, ins, opName#" "#asm, pat, opName, NumSrcArgs, HasMods
+                        list<dag> pat, int NumSrcArgs, bit HasMods,
+                        bit VOP3Only = 0> : VOP3_m <
+  op, outs, ins, opName#" "#asm, pat, opName, NumSrcArgs, HasMods, VOP3Only
 >;

 multiclass VOPC_CLASS_F32 <vopc op, string opName> :
@@ -1917,7 +1927,8 @@
   VOPCClassInst <op, opName, VOPC_I1_F32_I32, [WriteFloatCmp]>;

 multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
-                     SDPatternOperator node = null_frag> : VOP3_Helper <
+                     SDPatternOperator node = null_frag, bit VOP3Only = 0> :
+  VOP3_Helper <
   op, opName, (outs P.DstRC.RegClass:$dst), P.Ins64, P.Asm64,
   !if(!eq(P.NumSrcArgs, 3),
     !if(P.HasModifiers,
@@ -1941,7 +1952,7 @@
         (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
                                    i1:$clamp, i32:$omod))))],
       [(set P.DstVT:$dst, (node P.Src0VT:$src0))]))),
-  P.NumSrcArgs, P.HasModifiers
+  P.NumSrcArgs, P.HasModifiers, VOP3Only
 >;

 // Special case for v_div_fmas_{f32|f64}, since it seems to be the
Index: llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
+++ llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
@@ -1727,23 +1727,23 @@

 let isCommutable = 1 in {

 defm V_ADD_F64 : VOP3Inst <vop3<0x164, 0x280>, "v_add_f64",
-  VOP_F64_F64_F64, fadd
+  VOP_F64_F64_F64, fadd, 1
 >;
 defm V_MUL_F64 : VOP3Inst <vop3<0x165, 0x281>, "v_mul_f64",
-  VOP_F64_F64_F64, fmul
+  VOP_F64_F64_F64, fmul, 1
 >;

 defm V_MIN_F64 : VOP3Inst <vop3<0x166, 0x282>, "v_min_f64",
-  VOP_F64_F64_F64, fminnum
+  VOP_F64_F64_F64, fminnum, 1
 >;
 defm V_MAX_F64 : VOP3Inst <vop3<0x167, 0x283>, "v_max_f64",
-  VOP_F64_F64_F64, fmaxnum
+  VOP_F64_F64_F64, fmaxnum, 1
 >;

 } // End isCommutable = 1

 defm V_LDEXP_F64 : VOP3Inst <vop3<0x168, 0x284>, "v_ldexp_f64",
-  VOP_F64_F64_I32, AMDGPUldexp
+  VOP_F64_F64_I32, AMDGPUldexp, 1
 >;

 } // End let SchedRW = [WriteDoubleAdd]
Index: llvm/trunk/test/MC/AMDGPU/vop3.s
===================================================================
--- llvm/trunk/test/MC/AMDGPU/vop3.s
+++ llvm/trunk/test/MC/AMDGPU/vop3.s
@@ -198,8 +198,7 @@
 v_mac_legacy_f32 v1, v3, s5
 // SICI: v_mac_legacy_f32_e64 v1, v3, s5 ; encoding: [0x01,0x00,0x0c,0xd2,0x03,0x0b,0x00,0x00]

-// FIXME: The error message should be: error: instruction not supported on this GPU
-// NOVI: error: invalid operand for instruction
+// NOVI: error: instruction not supported on this GPU
 v_mul_legacy_f32 v1, v3, s5
 // SICI: v_mul_legacy_f32_e64 v1, v3, s5 ; encoding: [0x01,0x00,0x0e,0xd2,0x03,0x0b,0x00,0x00]
@@ -223,7 +222,51 @@
 // SICI: v_mad_legacy_f32 v2, v4, v6, v8 ; encoding: [0x02,0x00,0x80,0xd2,0x04,0x0d,0x22,0x04]
 // VI: v_mad_legacy_f32 v2, v4, v6, v8 ; encoding: [0x02,0x00,0xc0,0xd1,0x04,0x0d,0x22,0x04]

-
-
-
+v_add_f64 v[0:1], v[2:3], v[5:6]
+// SICI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64_e64 v[0:1], v[2:3], v[5:6]
+// SICI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64 v[0:1], -v[2:3], v[5:6]
+// SICI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x20]
+// VI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x20]
+
+v_add_f64_e64 v[0:1], -v[2:3], v[5:6]
+// SICI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x20]
+// VI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x20]
+
+v_add_f64 v[0:1], v[2:3], -v[5:6]
+// SICI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x40]
+// VI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x40]
+
+v_add_f64_e64 v[0:1], v[2:3], -v[5:6]
+// SICI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x40]
+// VI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x40]
+
+v_add_f64 v[0:1], |v[2:3]|, v[5:6]
+// SICI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64_e64 v[0:1], |v[2:3]|, v[5:6]
+// SICI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64 v[0:1], v[2:3], |v[5:6]|
+// SICI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64_e64 v[0:1], v[2:3], |v[5:6]|
+// SICI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4
+// SICI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x0a,0xc8,0xd2,0x02,0x0b,0x02,0x30]
+// VI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x82,0x80,0xd2,0x02,0x0b,0x02,0x30]
+
+v_add_f64_e64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4
+// SICI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x0a,0xc8,0xd2,0x02,0x0b,0x02,0x30]
+// VI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x82,0x80,0xd2,0x02,0x0b,0x02,0x30]
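Note: the new test vectors can be sanity-checked by hand against the published SI VOP3a field layout (vdst 7:0, abs 10:8, clamp at bit 11 on SI, src0 40:32, src1 49:41, omod 60:59, neg 63:61). Below is a self-contained check of the last SICI vector; the field positions come from the public ISA documents, not from this patch, so treat it as an illustrative decode rather than the MC implementation:

    #include <cassert>
    #include <cstdint>

    int main() {
      // "v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4" (SICI):
      const uint8_t Enc[8] = {0x00, 0x0a, 0xc8, 0xd2, 0x02, 0x0b, 0x02, 0x30};
      uint64_t V = 0;
      for (int i = 7; i >= 0; --i)
        V = (V << 8) | Enc[i];              // bytes are stored little-endian
      assert(((V >> 0) & 0xff) == 0);       // vdst = v[0:1]
      assert(((V >> 8) & 0x7) == 0x2);      // abs on src1 only -> |v[5:6]|
      assert(((V >> 11) & 0x1) == 1);       // clamp (SI puts it at bit 11)
      assert(((V >> 32) & 0x1ff) == 258);   // src0 = VGPR2 (256 + 2)
      assert(((V >> 41) & 0x1ff) == 261);   // src1 = VGPR5 (256 + 5)
      assert(((V >> 59) & 0x3) == 2);       // omod = 2 -> mul:4
      assert(((V >> 61) & 0x7) == 0x1);     // neg on src0 only -> -v[2:3]
      return 0;
    }

The VI vector differs only where the encoding differs: the opcode moves to 0x280 and clamp moves into byte 1's top bit (0x82 = abs src1 | clamp), which is exactly the SICI/VI split the checks above exercise.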