Index: lib/Target/Hexagon/HexagonInstrAlias.td =================================================================== --- lib/Target/Hexagon/HexagonInstrAlias.td +++ lib/Target/Hexagon/HexagonInstrAlias.td @@ -539,7 +539,7 @@ // maps "$Qd |= vcmp.eq($Vu.uw, $Vv.uw)" -> "$Qd |= vcmp.eq($Vu.w, $Vv.w)" def : InstAlias<"$Qd |= vcmp.eq($Vu.uw, $Vv.uw)", - (V6_veqh_or VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>, + (V6_veqw_or VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>, Requires<[HasV60T]>; // maps "$Qd ^= vcmp.eq($Vu.uw, $Vv.uw)" -> "$Qd ^= vcmp.eq($Vu.w, $Vv.w)" Index: lib/Target/Hexagon/HexagonInstrInfo.td =================================================================== --- lib/Target/Hexagon/HexagonInstrInfo.td +++ lib/Target/Hexagon/HexagonInstrInfo.td @@ -23,7 +23,7 @@ class T_CMP MajOp, bit isNot, Operand ImmOp> : ALU32Inst <(outs PredRegs:$dst), (ins IntRegs:$src1, ImmOp:$src2), - "$dst = "#!if(isNot, "!","")#mnemonic#"($src1, #$src2)", + "$dst="#!if(isNot, "!","")#mnemonic#"($src1,#$src2)", [], "",ALU32_2op_tc_2early_SLOT0123 >, ImmRegRel { bits<2> dst; bits<5> src1; @@ -57,7 +57,7 @@ class T_ALU32_3op MajOp, bits<3> MinOp, bit OpsRev, bit IsComm> : ALU32_rr<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Rd = "#mnemonic#"($Rs, $Rt)", + "$Rd="#mnemonic#"($Rs,$Rt)", [], "", ALU32_3op_tc_1_SLOT0123>, ImmRegRel, PredRel { let isCommutable = IsComm; let BaseOpcode = mnemonic#_rr; @@ -81,7 +81,7 @@ bit OpsRev, bit PredNot, bit PredNew> : ALU32_rr<(outs IntRegs:$Rd), (ins PredRegs:$Pu, IntRegs:$Rs, IntRegs:$Rt), "if ("#!if(PredNot,"!","")#"$Pu"#!if(PredNew,".new","")#") "# - "$Rd = "#mnemonic#"($Rs, $Rt)", + "$Rd="#mnemonic#"($Rs,$Rt)", [], "", ALU32_3op_tc_1_SLOT0123>, ImmRegRel, PredNewRel { let isPredicated = 1; let isPredicatedFalse = PredNot; @@ -109,7 +109,7 @@ class T_ALU32_combineh MajOp, bits<3> MinOp, bit OpsRev> : T_ALU32_3op<"", MajOp, MinOp, OpsRev, 0> { - let AsmString = "$Rd = combine($Rs"#Op1#", $Rt"#Op2#")"; + let AsmString = "$Rd=combine($Rs"#Op1#",$Rt"#Op2#")"; } def A2_combine_hh : T_ALU32_combineh<".h", ".h", 0b011, 0b100, 1>; @@ -120,7 +120,7 @@ class T_ALU32_3op_sfx MajOp, bits<3> MinOp, bit OpsRev, bit IsComm> : T_ALU32_3op<"", MajOp, MinOp, OpsRev, IsComm> { - let AsmString = "$Rd = "#mnemonic#"($Rs, $Rt)"#suffix; + let AsmString = "$Rd="#mnemonic#"($Rs,$Rt)"#suffix; } def A2_svaddh : T_ALU32_3op<"vaddh", 0b110, 0b000, 0, 1>; @@ -179,7 +179,7 @@ let hasSideEffects = 0, hasNewValue = 1, isCompare = 1, InputType = "reg" in class T_ALU32_3op_cmp MinOp, bit IsNeg, bit IsComm> : ALU32_rr<(outs PredRegs:$Pd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Pd = "#mnemonic#"($Rs, $Rt)", + "$Pd="#mnemonic#"($Rs,$Rt)", [], "", ALU32_3op_tc_1_SLOT0123>, ImmRegRel { let CextOpcode = mnemonic; let isCommutable = IsComm; @@ -206,7 +206,7 @@ let CextOpcode = "MUX", InputType = "reg", hasNewValue = 1 in def C2_mux: ALU32_rr<(outs IntRegs:$Rd), (ins PredRegs:$Pu, IntRegs:$Rs, IntRegs:$Rt), - "$Rd = mux($Pu, $Rs, $Rt)", [], "", ALU32_3op_tc_1_SLOT0123>, ImmRegRel { + "$Rd=mux($Pu,$Rs,$Rt)", [], "", ALU32_3op_tc_1_SLOT0123>, ImmRegRel { bits<5> Rd; bits<2> Pu; bits<5> Rs; @@ -232,7 +232,7 @@ isExtentSigned = 1, isExtendable = 1, opExtentBits = 8, opExtendable = 1, AddedComplexity = 75 in def A2_combineii: ALU32Inst <(outs DoubleRegs:$Rdd), (ins s8_0Ext:$s8, s8_0Imm:$S8), - "$Rdd = combine(#$s8, #$S8)", + "$Rdd=combine(#$s8,#$S8)", []> { bits<5> Rdd; bits<8> s8; @@ -253,8 +253,8 @@ class T_Addri_Pred : ALU32_ri <(outs IntRegs:$Rd), (ins PredRegs:$Pu, IntRegs:$Rs, s8_0Ext:$s8), - 
!if(PredNot, "if (!$Pu", "if ($Pu")#!if(PredNew,".new) $Rd = ", - ") $Rd = ")#"add($Rs, #$s8)"> { + !if(PredNot, "if (!$Pu", "if ($Pu")#!if(PredNew,".new) $Rd=", + ") $Rd=")#"add($Rs,#$s8)"> { bits<5> Rd; bits<2> Pu; bits<5> Rs; @@ -279,7 +279,7 @@ class T_Addri : ALU32_ri <(outs IntRegs:$Rd), (ins IntRegs:$Rs, immOp:$s16), - "$Rd = add($Rs, #$s16)", [], "", ALU32_ADDI_tc_1_SLOT0123> { + "$Rd=add($Rs,#$s16)", [], "", ALU32_ADDI_tc_1_SLOT0123> { bits<5> Rd; bits<5> Rs; bits<16> s16; @@ -322,7 +322,7 @@ def A2_iconst : ALU32_ri <(outs IntRegs:$Rd), (ins s23_2Imm:$s23_2), - "$Rd = iconst(#$s23_2)"> {} + "$Rd=iconst(#$s23_2)"> {} //===----------------------------------------------------------------------===// // Template class used for the following ALU32 instructions. @@ -334,7 +334,7 @@ class T_ALU32ri_logical MinOp> : ALU32_ri <(outs IntRegs:$Rd), (ins IntRegs:$Rs, s10_0Ext:$s10), - "$Rd = "#mnemonic#"($Rs, #$s10)" , + "$Rd="#mnemonic#"($Rs,#$s10)" , []> { bits<5> Rd; bits<5> Rs; @@ -359,7 +359,7 @@ let isExtendable = 1, CextOpcode = "sub", opExtendable = 1, isExtentSigned = 1, opExtentBits = 10, InputType = "imm", hasNewValue = 1, hasSideEffects = 0 in def A2_subri: ALU32_ri <(outs IntRegs:$Rd), (ins s10_0Ext:$s10, IntRegs:$Rs), - "$Rd = sub(#$s10, $Rs)", []>, ImmRegRel { + "$Rd=sub(#$s10,$Rs)", []>, ImmRegRel { bits<5> Rd; bits<10> s10; bits<5> Rs; @@ -383,7 +383,7 @@ let hasSideEffects = 0, hasNewValue = 1 in class T_tfr16 : ALU32Inst <(outs IntRegs:$Rx), (ins IntRegs:$src1, u16_0Imm:$u16), - "$Rx"#!if(isHi, ".h", ".l")#" = #$u16", + "$Rx"#!if(isHi, ".h", ".l")#"=#$u16", [], "$src1 = $Rx" > { bits<5> Rx; bits<16> u16; @@ -407,7 +407,7 @@ (ins PredRegs:$src1, IntRegs:$src2), "if ("#!if(isPredNot, "!", "")# "$src1"#!if(isPredNew, ".new", "")# - ") $dst = $src2"> { + ") $dst=$src2"> { bits<5> dst; bits<2> src1; bits<5> src2; @@ -427,7 +427,7 @@ let isPredicable = 1 in class T_tfr : ALU32Inst<(outs IntRegs:$dst), (ins IntRegs:$src), - "$dst = $src"> { + "$dst=$src"> { bits<5> dst; bits<5> src; @@ -461,7 +461,7 @@ : ALU32_rr <(outs DoubleRegs:$dst), (ins PredRegs:$src1, DoubleRegs:$src2), "if ("#!if(PredNot, "!", "")#"$src1" - #!if(PredNew, ".new", "")#") $dst = $src2" > { + #!if(PredNew, ".new", "")#") $dst=$src2" > { let isPredicatedFalse = PredNot; let isPredicatedNew = PredNew; } @@ -471,7 +471,7 @@ // 'combine' before object code emission. 
class T_tfrp : ALU32Inst <(outs DoubleRegs:$dst), (ins DoubleRegs:$src), - "$dst = $src">; + "$dst=$src">; let hasSideEffects = 0 in multiclass TFR64_base { @@ -492,7 +492,7 @@ hasSideEffects = 0, isPredicated = 1, hasNewValue = 1 in class T_TFRI_Pred : ALU32_ri<(outs IntRegs:$Rd), (ins PredRegs:$Pu, s12_0Ext:$s12), - "if ("#!if(PredNot,"!","")#"$Pu"#!if(PredNew,".new","")#") $Rd = #$s12", + "if ("#!if(PredNot,"!","")#"$Pu"#!if(PredNew,".new","")#") $Rd=#$s12", [], "", ALU32_2op_tc_1_SLOT0123>, ImmRegRel, PredNewRel { let isPredicatedFalse = PredNot; let isPredicatedNew = PredNew; @@ -520,7 +520,7 @@ CextOpcode = "TFR", BaseOpcode = "TFRI", hasNewValue = 1, opNewValue = 0, isAsCheapAsAMove = 1 , opExtendable = 1, opExtentBits = 16, isMoveImm = 1, isPredicated = 0, isPredicable = 1, isReMaterializable = 1 in -def A2_tfrsi : ALU32Inst<(outs IntRegs:$Rd), (ins s16_0Ext:$s16), "$Rd = #$s16", +def A2_tfrsi : ALU32Inst<(outs IntRegs:$Rd), (ins s16_0Ext:$s16), "$Rd=#$s16", [], "", ALU32_2op_tc_1_SLOT0123>, ImmRegRel, PredRel { bits<5> Rd; @@ -540,7 +540,7 @@ let isReMaterializable = 1, isMoveImm = 1, isAsCheapAsAMove = 1, isAsmParserOnly = 1 in def A2_tfrpi : ALU64_rr<(outs DoubleRegs:$dst), (ins s8_0Imm64:$src1), - "$dst = #$src1", + "$dst=#$src1", []>; // TODO: see if this instruction can be deleted.. @@ -547,10 +547,10 @@ let isExtendable = 1, opExtendable = 1, opExtentBits = 6, isAsmParserOnly = 1 in { def TFRI64_V4 : ALU64_rr<(outs DoubleRegs:$dst), (ins u64_0Imm:$src1), - "$dst = #$src1">; + "$dst=#$src1">; def TFRI64_V2_ext : ALU64_rr<(outs DoubleRegs:$dst), (ins s8_0Ext:$src1, s8_0Imm:$src2), - "$dst = combine(##$src1, #$src2)">; + "$dst=combine(##$src1,#$src2)">; } //===----------------------------------------------------------------------===// @@ -583,11 +583,11 @@ let opExtendable = 2 in def C2_muxri : T_MUX1<0b1, (ins PredRegs:$Pu, s8_0Ext:$s8, IntRegs:$Rs), - "$Rd = mux($Pu, #$s8, $Rs)">; + "$Rd=mux($Pu,#$s8,$Rs)">; let opExtendable = 3 in def C2_muxir : T_MUX1<0b0, (ins PredRegs:$Pu, IntRegs:$Rs, s8_0Ext:$s8), - "$Rd = mux($Pu, $Rs, #$s8)">; + "$Rd=mux($Pu,$Rs,#$s8)">; // C2_muxii: Scalar mux immediates. 
let isExtentSigned = 1, hasNewValue = 1, isExtendable = 1, @@ -594,7 +594,7 @@ opExtentBits = 8, opExtendable = 2 in def C2_muxii: ALU32Inst <(outs IntRegs:$Rd), (ins PredRegs:$Pu, s8_0Ext:$s8, s8_0Imm:$S8), - "$Rd = mux($Pu, #$s8, #$S8)" , + "$Rd=mux($Pu,#$s8,#$S8)" , []> { bits<5> Rd; bits<2> Pu; @@ -624,7 +624,7 @@ let hasNewValue = 1, opNewValue = 0 in class T_ALU32_2op minOp> : ALU32Inst <(outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = "#mnemonic#"($Rs)", [] > { + "$Rd="#mnemonic#"($Rs)", [] > { bits<5> Rd; bits<5> Rs; @@ -646,7 +646,7 @@ bit isPredNew > : ALU32Inst <(outs IntRegs:$Rd), (ins PredRegs:$Pu, IntRegs:$Rs), !if(isPredNot, "if (!$Pu", "if ($Pu") - #!if(isPredNew, ".new) ",") ")#"$Rd = "#mnemonic#"($Rs)"> { + #!if(isPredNew, ".new) ",") ")#"$Rd="#mnemonic#"($Rs)"> { bits<5> Rd; bits<2> Pu; bits<5> Rs; @@ -699,7 +699,7 @@ let hasNewValue = 1, opNewValue = 0 in class T_ZXTB: ALU32Inst < (outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = zxtb($Rs)", [] > { // Rd = and(Rs,255) + "$Rd=zxtb($Rs)", [] > { // Rd = and(Rs,255) bits<5> Rd; bits<5> Rs; bits<10> s10 = 255; @@ -736,7 +736,7 @@ bit isSat, bit isRnd, bit isCrnd, bit SwapOps > : ALU64_rr < (outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), - "$Rdd = "#opc#"($Rss, $Rtt)"#!if(isRnd, ":rnd", "") + "$Rdd="#opc#"($Rss,$Rtt)"#!if(isRnd, ":rnd", "") #!if(isCrnd,":crnd","") #!if(isSat, ":sat", ""), [], "", ALU64_tc_2_SLOT23 > { @@ -842,7 +842,7 @@ class T_vcmp minOp> : ALU64_rr <(outs PredRegs:$Pd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), - "$Pd = "#Str#"($Rss, $Rtt)", [], + "$Pd="#Str#"($Rss,$Rtt)", [], "", ALU64_tc_2early_SLOT23> { bits<2> Pd; bits<5> Rss; @@ -885,10 +885,10 @@ let isPseudo = 1 in { def C2_cmpgei: ALU32Inst < (outs PredRegs:$Pd), (ins IntRegs:$Rs, s8_0Ext:$s8), - "$Pd = cmp.ge($Rs, #$s8)">; + "$Pd=cmp.ge($Rs,#$s8)">; def C2_cmpgeui: ALU32Inst < (outs PredRegs:$Pd), (ins IntRegs:$Rs, u8_0Ext:$s8), - "$Pd = cmp.geu($Rs, #$s8)">; + "$Pd=cmp.geu($Rs,#$s8)">; } @@ -913,8 +913,8 @@ let hasNewValue = 1, opNewValue = 0 in class T_XTYPE_ADD_SUB LHbits, bit isSat, bit hasShift, bit isSub> : ALU64Inst <(outs IntRegs:$Rd), (ins IntRegs:$Rt, IntRegs:$Rs), - "$Rd = "#!if(isSub,"sub","add")#"($Rt." - #!if(hasShift, !if(LHbits{1},"h","l"),"l") #", $Rs." + "$Rd="#!if(isSub,"sub","add")#"($Rt." + #!if(hasShift, !if(LHbits{1},"h","l"),"l") #",$Rs." 
#!if(hasShift, !if(LHbits{0},"h)","l)"), !if(LHbits{1},"h)","l)")) #!if(isSat,":sat","") #!if(hasShift,":<<16",""), [], "", ALU64_tc_1_SLOT23> { @@ -980,7 +980,7 @@ let hasSideEffects = 0, hasNewValue = 1 in def S2_parityp: ALU64Inst<(outs IntRegs:$Rd), (ins DoubleRegs:$Rs, DoubleRegs:$Rt), - "$Rd = parity($Rs, $Rt)", [], "", ALU64_tc_2_SLOT23> { + "$Rd=parity($Rs,$Rt)", [], "", ALU64_tc_2_SLOT23> { bits<5> Rd; bits<5> Rs; bits<5> Rt; @@ -995,8 +995,8 @@ let hasNewValue = 1, opNewValue = 0, hasSideEffects = 0 in class T_XTYPE_MIN_MAX < bit isMax, bit isUnsigned > : ALU64Inst < (outs IntRegs:$Rd), (ins IntRegs:$Rt, IntRegs:$Rs), - "$Rd = "#!if(isMax,"max","min")#!if(isUnsigned,"u","") - #"($Rt, $Rs)", [], "", ALU64_tc_2_SLOT23> { + "$Rd="#!if(isMax,"max","min")#!if(isUnsigned,"u","") + #"($Rt,$Rs)", [], "", ALU64_tc_2_SLOT23> { bits<5> Rd; bits<5> Rt; bits<5> Rs; @@ -1018,7 +1018,7 @@ class T_cmp64_rr MinOp, bit IsComm> : ALU64_rr<(outs PredRegs:$Pd), (ins DoubleRegs:$Rs, DoubleRegs:$Rt), - "$Pd = "#mnemonic#"($Rs, $Rt)", [], "", ALU64_tc_2early_SLOT23> { + "$Pd="#mnemonic#"($Rs,$Rt)", [], "", ALU64_tc_2early_SLOT23> { let isCompare = 1; let isCommutable = IsComm; let hasSideEffects = 0; @@ -1041,7 +1041,7 @@ def C2_vmux : ALU64_rr<(outs DoubleRegs:$Rd), (ins PredRegs:$Pu, DoubleRegs:$Rs, DoubleRegs:$Rt), - "$Rd = vmux($Pu, $Rs, $Rt)", [], "", ALU64_tc_1_SLOT23> { + "$Rd=vmux($Pu,$Rs,$Rt)", [], "", ALU64_tc_1_SLOT23> { let hasSideEffects = 0; bits<5> Rd; @@ -1061,7 +1061,7 @@ bits<3> MajOp, bits<3> MinOp, bit OpsRev, bit IsComm, string Op2Pfx> : ALU64_rr<(outs DoubleRegs:$Rd), (ins DoubleRegs:$Rs, DoubleRegs:$Rt), - "$Rd = " #mnemonic# "($Rs, " #Op2Pfx# "$Rt)" #suffix, [], + "$Rd=" #mnemonic# "($Rs," #Op2Pfx# "$Rt)" #suffix, [], "", ALU64_tc_1_SLOT23> { let hasSideEffects = 0; let isCommutable = IsComm; @@ -1130,7 +1130,7 @@ let hasSideEffects = 0 in class T_LOGICAL_1OP OpBits> : CRInst<(outs PredRegs:$Pd), (ins PredRegs:$Ps), - "$Pd = " # MnOp # "($Ps)", [], "", CR_tc_2early_SLOT23> { + "$Pd=" # MnOp # "($Ps)", [], "", CR_tc_2early_SLOT23> { bits<2> Pd; bits<2> Ps; @@ -1150,7 +1150,7 @@ let hasSideEffects = 0 in class T_LOGICAL_2OP OpBits, bit IsNeg, bit Rev> : CRInst<(outs PredRegs:$Pd), (ins PredRegs:$Ps, PredRegs:$Pt), - "$Pd = " # MnOp # "($Ps, " # !if (IsNeg,"!","") # "$Pt)", + "$Pd=" # MnOp # "($Ps," # !if (IsNeg,"!","") # "$Pt)", [], "", CR_tc_2early_SLOT23> { bits<2> Pd; bits<2> Ps; @@ -1174,7 +1174,7 @@ let hasSideEffects = 0, hasNewValue = 1 in def C2_vitpack : SInst<(outs IntRegs:$Rd), (ins PredRegs:$Ps, PredRegs:$Pt), - "$Rd = vitpack($Ps, $Pt)", [], "", S_2op_tc_1_SLOT23> { + "$Rd=vitpack($Ps,$Pt)", [], "", S_2op_tc_1_SLOT23> { bits<5> Rd; bits<2> Ps; bits<2> Pt; @@ -1189,7 +1189,7 @@ let hasSideEffects = 0 in def C2_mask : SInst<(outs DoubleRegs:$Rd), (ins PredRegs:$Pt), - "$Rd = mask($Pt)", [], "", S_2op_tc_1_SLOT23> { + "$Rd=mask($Pt)", [], "", S_2op_tc_1_SLOT23> { bits<5> Rd; bits<2> Pt; @@ -1401,7 +1401,7 @@ class T_load_io MajOp, Operand ImmOp> : LDInst<(outs RC:$dst), (ins IntRegs:$src1, ImmOp:$offset), - "$dst = "#mnemonic#"($src1 + #$offset)", []>, AddrModeRel { + "$dst="#mnemonic#"($src1+#$offset)", []>, AddrModeRel { bits<4> name; bits<5> dst; bits<5> src1; @@ -1436,7 +1436,7 @@ (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset), "if ("#!if(isNot, "!$src1", "$src1") #!if(isPredNew, ".new", "") - #") $dst = "#mnemonic#"($src2 + #$offset)", + #") $dst="#mnemonic#"($src2+#$offset)", [],"", V2LDST_tc_ld_SLOT01> , AddrModeRel { bits<5> dst; bits<2> src1; @@ -1519,7 
+1519,7 @@ class T_loadalign_io MajOp, Operand ImmOp> : LDInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2, ImmOp:$offset), - "$dst = "#str#"($src2 + #$offset)", [], + "$dst="#str#"($src2+#$offset)", [], "$src1 = $dst">, AddrModeRel { bits<4> name; bits<5> dst; @@ -1556,7 +1556,7 @@ bits<4> MajOp > : LDInstPI <(outs RC:$dst, IntRegs:$dst2), (ins IntRegs:$src1, ImmOp:$offset), - "$dst = "#mnemonic#"($src1++#$offset)" , + "$dst="#mnemonic#"($src1++#$offset)" , [], "$src1 = $dst2" > , PredNewRel { @@ -1591,7 +1591,7 @@ : LDInst <(outs RC:$dst, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset), !if(isPredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", - ") ")#"$dst = "#mnemonic#"($src2++#$offset)", + ") ")#"$dst="#mnemonic#"($src2++#$offset)", [] , "$src2 = $dst2" > , PredNewRel { @@ -1682,7 +1682,7 @@ class T_loadalign_pi MajOp > : LDInstPI <(outs DoubleRegs:$dst, IntRegs:$dst2), (ins DoubleRegs:$src1, IntRegs:$src2, ImmOp:$offset), - "$dst = "#mnemonic#"($src2++#$offset)" , + "$dst="#mnemonic#"($src2++#$offset)" , [], "$src2 = $dst2, $src1 = $dst" > , PredNewRel { bits<5> dst; @@ -1718,7 +1718,7 @@ MemAccessSize AccessSz> : LDInstPI <(outs RC:$dst, IntRegs:$_dst_), (ins IntRegs:$src1, ModRegs:$src2), - "$dst = "#mnemonic#"($src1++$src2)" , + "$dst="#mnemonic#"($src1++$src2)" , [], "$src1 = $_dst_" > { bits<5> dst; bits<5> src1; @@ -1778,7 +1778,7 @@ class T_load_pcr MajOp> : LDInst <(outs RC:$dst, IntRegs:$_dst_), (ins IntRegs:$Rz, ModRegs:$Mu), - "$dst = "#mnemonic#"($Rz ++ I:circ($Mu))", [], + "$dst="#mnemonic#"($Rz++I:circ($Mu))", [], "$Rz = $_dst_" > { bits<5> dst; bits<5> Rz; @@ -1825,7 +1825,7 @@ class T_loadalign_pcr MajOp, MemAccessSize AccessSz > : LDInst <(outs DoubleRegs:$dst, IntRegs:$_dst_), (ins DoubleRegs:$_src_, IntRegs:$Rz, ModRegs:$Mu), - "$dst = "#mnemonic#"($Rz ++ I:circ($Mu))", [], + "$dst="#mnemonic#"($Rz++I:circ($Mu))", [], "$Rz = $_dst_, $dst = $_src_" > { bits<5> dst; bits<5> Rz; @@ -1855,7 +1855,7 @@ Operand ImmOp, bits<4> MajOp> : LDInstPI<(outs RC:$dst, IntRegs:$_dst_), (ins IntRegs:$Rz, ImmOp:$offset, ModRegs:$Mu), - "$dst = "#mnemonic#"($Rz ++ #$offset:circ($Mu))", [], + "$dst="#mnemonic#"($Rz++#$offset:circ($Mu))", [], "$Rz = $_dst_"> { bits<5> dst; bits<5> Rz; @@ -1920,7 +1920,7 @@ class T_load_locked : LD0Inst <(outs RC:$dst), (ins IntRegs:$src), - "$dst = "#mnemonic#"($src)"> { + "$dst="#mnemonic#"($src)"> { bits<5> dst; bits<5> src; let IClass = 0b1001; @@ -1939,7 +1939,7 @@ let isSoloAX = 1, isPredicateLate = 1 in class T_store_locked : ST0Inst <(outs PredRegs:$Pd), (ins IntRegs:$Rs, RC:$Rt), - mnemonic#"($Rs, $Pd) = $Rt"> { + mnemonic#"($Rs,$Pd)=$Rt"> { bits<2> Pd; bits<5> Rs; bits<5> Rt; @@ -1968,7 +1968,7 @@ : LDInst <(outs RC:$dst, IntRegs:$_dst_), (ins IntRegs:$Rz, ModRegs:$Mu), - "$dst = "#mnemonic#"($Rz ++ $Mu:brev)" , + "$dst="#mnemonic#"($Rz++$Mu:brev)" , [] , "$Rz = $_dst_" > { let accessSize = addrSize; @@ -2038,8 +2038,8 @@ class T_M2_mpy < bits<2> LHbits, bit isSat, bit isRnd, bit hasShift, bit isUnsigned> : MInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Rd = "#!if(isUnsigned,"mpyu","mpy")#"($Rs."#!if(LHbits{1},"h","l") - #", $Rt."#!if(LHbits{0},"h)","l)") + "$Rd="#!if(isUnsigned,"mpyu","mpy")#"($Rs."#!if(LHbits{1},"h","l") + #",$Rt."#!if(LHbits{0},"h)","l)") #!if(hasShift,":<<1","") #!if(isRnd,":rnd","") #!if(isSat,":sat",""), @@ -2124,9 +2124,9 @@ class T_M2_mpy_acc < bits<2> LHbits, bit isSat, bit isNac, bit hasShift, bit isUnsigned > : MInst_acc<(outs IntRegs:$Rx), (ins 
IntRegs:$dst2, IntRegs:$Rs, IntRegs:$Rt), - "$Rx "#!if(isNac,"-= ","+= ")#!if(isUnsigned,"mpyu","mpy") + "$Rx"#!if(isNac,"-=","+=")#!if(isUnsigned,"mpyu","mpy") #"($Rs."#!if(LHbits{1},"h","l") - #", $Rt."#!if(LHbits{0},"h)","l)") + #",$Rt."#!if(LHbits{0},"h)","l)") #!if(hasShift,":<<1","") #!if(isSat,":sat",""), [], "$dst2 = $Rx", M_tc_3x_SLOT23 > { @@ -2216,9 +2216,9 @@ class T_M2_mpyd_acc < bits<2> LHbits, bit isNac, bit hasShift, bit isUnsigned> : MInst_acc<(outs DoubleRegs:$Rxx), (ins DoubleRegs:$dst2, IntRegs:$Rs, IntRegs:$Rt), - "$Rxx "#!if(isNac,"-= ","+= ")#!if(isUnsigned,"mpyu","mpy") + "$Rxx"#!if(isNac,"-=","+=")#!if(isUnsigned,"mpyu","mpy") #"($Rs."#!if(LHbits{1},"h","l") - #", $Rt."#!if(LHbits{0},"h)","l)") + #",$Rt."#!if(LHbits{0},"h)","l)") #!if(hasShift,":<<1",""), [], "$dst2 = $Rxx", M_tc_3x_SLOT23 > { bits<5> Rxx; @@ -2285,7 +2285,7 @@ class T_M2_vmpy < string opc, bits<3> MajOp, bits<3> MinOp, bit hasShift, bit isRnd, bit isSat > : MInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), - "$Rdd = "#opc#"($Rss, $Rtt)"#!if(hasShift,":<<1","") + "$Rdd="#opc#"($Rss,$Rtt)"#!if(hasShift,":<<1","") #!if(isRnd,":rnd","") #!if(isSat,":sat",""), [] > { @@ -2350,8 +2350,8 @@ bits<3> MajOp, bits<3> MinOp, bit isSat = 0, bit isRnd = 0, string op2Suffix = "", bit isRaw = 0, bit isHi = 0 > : MInst <(outs IntRegs:$dst), (ins RC:$src1, RC:$src2), - "$dst = "#mnemonic - #"($src1, $src2"#op2Suffix#")" + "$dst="#mnemonic + #"($src1,$src2"#op2Suffix#")" #!if(MajOp{2}, ":<<1", "") #!if(isRnd, ":rnd", "") #!if(isSat, ":sat", "") @@ -2421,7 +2421,7 @@ let hasNewValue = 1, opNewValue = 0 in class T_MType_mpy_ri pattern> : MInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs, ImmOp:$u8), - "$Rd ="#!if(isNeg, "- ", "+ ")#"mpyi($Rs, #$u8)" , + "$Rd="#!if(isNeg, "-", "+")#"mpyi($Rs,#$u8)" , pattern, "", M_tc_3x_SLOT23> { bits<5> Rd; bits<5> Rs; @@ -2446,7 +2446,7 @@ let isAsmParserOnly = 1 in def M2_mpyui : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), - "$dst = mpyui($src1, $src2)">; + "$dst=mpyui($src1,$src2)">; // Rd=mpyi(Rs,#m9) // s9 is NOT the same as m9 - but it works.. so far. 
@@ -2456,7 +2456,7 @@ CextOpcode = "mpyi", InputType = "imm", hasNewValue = 1, isAsmParserOnly = 1 in def M2_mpysmi : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9_0Ext:$src2), - "$dst = mpyi($src1, #$src2)", []>, ImmRegRel; + "$dst=mpyi($src1,#$src2)", []>, ImmRegRel; let hasNewValue = 1, isExtendable = 1, opExtentBits = 8, opExtendable = 3, InputType = "imm" in @@ -2463,7 +2463,7 @@ class T_MType_acc_ri MajOp, Operand ImmOp, list pattern = []> : MInst < (outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, ImmOp:$src3), - "$dst "#mnemonic#"($src2, #$src3)", + "$dst"#mnemonic#"($src2,#$src3)", pattern, "$src1 = $dst", M_tc_2_SLOT23> { bits<5> dst; bits<5> src2; @@ -2485,7 +2485,7 @@ bit isSat = 0, bit isShift = 0> : MInst < (outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "$dst "#mnemonic#"($src2, "#!if(hasNot, "~$src3)","$src3)") + "$dst"#mnemonic#"($src2,"#!if(hasNot, "~$src3)","$src3)") #!if(isShift, ":<<1", "") #!if(isSat, ":sat", ""), pattern, "$src1 = $dst", M_tc_2_SLOT23 > { @@ -2505,30 +2505,30 @@ } let CextOpcode = "MPYI_acc", Itinerary = M_tc_3x_SLOT23 in { - def M2_macsip : T_MType_acc_ri <"+= mpyi", 0b010, u8_0Ext, []>, ImmRegRel; + def M2_macsip : T_MType_acc_ri <"+=mpyi", 0b010, u8_0Ext, []>, ImmRegRel; - def M2_maci : T_MType_acc_rr <"+= mpyi", 0b000, 0b000, 0, []>, ImmRegRel; + def M2_maci : T_MType_acc_rr <"+=mpyi", 0b000, 0b000, 0, []>, ImmRegRel; } let CextOpcode = "ADD_acc" in { let isExtentSigned = 1 in - def M2_accii : T_MType_acc_ri <"+= add", 0b100, s8_0Ext, []>, ImmRegRel; + def M2_accii : T_MType_acc_ri <"+=add", 0b100, s8_0Ext, []>, ImmRegRel; - def M2_acci : T_MType_acc_rr <"+= add", 0b000, 0b001, 0, []>, ImmRegRel; + def M2_acci : T_MType_acc_rr <"+=add", 0b000, 0b001, 0, []>, ImmRegRel; } let CextOpcode = "SUB_acc" in { let isExtentSigned = 1 in - def M2_naccii : T_MType_acc_ri <"-= add", 0b101, s8_0Ext>, ImmRegRel; + def M2_naccii : T_MType_acc_ri <"-=add", 0b101, s8_0Ext>, ImmRegRel; - def M2_nacci : T_MType_acc_rr <"-= add", 0b100, 0b001, 0>, ImmRegRel; + def M2_nacci : T_MType_acc_rr <"-=add", 0b100, 0b001, 0>, ImmRegRel; } let Itinerary = M_tc_3x_SLOT23 in -def M2_macsin : T_MType_acc_ri <"-= mpyi", 0b011, u8_0Ext>; +def M2_macsin : T_MType_acc_ri <"-=mpyi", 0b011, u8_0Ext>; -def M2_xor_xacc : T_MType_acc_rr < "^= xor", 0b100, 0b011, 0>; -def M2_subacc : T_MType_acc_rr <"+= sub", 0b000, 0b011, 1>; +def M2_xor_xacc : T_MType_acc_rr < "^=xor", 0b100, 0b011, 0>; +def M2_subacc : T_MType_acc_rr <"+=sub", 0b000, 0b011, 1>; //===----------------------------------------------------------------------===// // Template Class -- XType Vector Instructions @@ -2535,7 +2535,7 @@ //===----------------------------------------------------------------------===// class T_XTYPE_Vect < string opc, bits<3> MajOp, bits<3> MinOp, bit isConj > : MInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), - "$Rdd = "#opc#"($Rss, $Rtt"#!if(isConj,"*)",")"), + "$Rdd="#opc#"($Rss,$Rtt"#!if(isConj,"*)",")"), [] > { bits<5> Rdd; bits<5> Rss; @@ -2554,7 +2554,7 @@ class T_XTYPE_Vect_acc < string opc, bits<3> MajOp, bits<3> MinOp, bit isConj > : MInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt), - "$Rdd += "#opc#"($Rss, $Rtt"#!if(isConj,"*)",")"), + "$Rdd+="#opc#"($Rss,$Rtt"#!if(isConj,"*)",")"), [], "$dst2 = $Rdd",M_tc_3x_SLOT23 > { bits<5> Rdd; bits<5> Rss; @@ -2572,7 +2572,7 @@ class T_XTYPE_Vect_diff < bits<3> MajOp, string opc > : MInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rtt, DoubleRegs:$Rss), - 
"$Rdd = "#opc#"($Rtt, $Rss)", + "$Rdd="#opc#"($Rtt,$Rss)", [], "",M_tc_2_SLOT23 > { bits<5> Rdd; bits<5> Rss; @@ -2628,7 +2628,7 @@ bit hasShift, bit isRnd > : MInst <(outs DoubleRegs:$Rxx), (ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt), - "$Rxx += "#opc#"($Rss, $Rtt)"#!if(hasShift,":<<1","") + "$Rxx+="#opc#"($Rss,$Rtt)"#!if(hasShift,":<<1","") #!if(isRnd,":rnd","")#":sat", [], "$dst2 = $Rxx",M_tc_3x_SLOT23 > { bits<5> Rxx; @@ -2649,7 +2649,7 @@ bit hasShift, bit isRnd > : MInst <(outs DoubleRegs:$Rxx), (ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt), - "$Rxx += "#opc#"($Rss, $Rtt)"#!if(hasShift,":<<1","") + "$Rxx+="#opc#"($Rss,$Rtt)"#!if(hasShift,":<<1","") #!if(isRnd,":rnd",""), [], "$dst2 = $Rxx",M_tc_3x_SLOT23 > { bits<5> Rxx; @@ -2712,8 +2712,8 @@ //===----------------------------------------------------------------------===// class T_M2_mpyd < bits<2> LHbits, bit isRnd, bit hasShift, bit isUnsigned > : MInst < (outs DoubleRegs:$Rdd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Rdd = "#!if(isUnsigned,"mpyu","mpy")#"($Rs."#!if(LHbits{1},"h","l") - #", $Rt."#!if(LHbits{0},"h)","l)") + "$Rdd="#!if(isUnsigned,"mpyu","mpy")#"($Rs."#!if(LHbits{1},"h","l") + #",$Rt."#!if(LHbits{0},"h)","l)") #!if(hasShift,":<<1","") #!if(isRnd,":rnd",""), [] > { @@ -2775,7 +2775,7 @@ bit isSat, bit hasShift, bit isConj> : MInst <(outs DoubleRegs:$Rdd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Rdd = "#mnemonic#"($Rs, $Rt"#!if(isConj,"*)",")") + "$Rdd="#mnemonic#"($Rs,$Rt"#!if(isConj,"*)",")") #!if(hasShift,":<<1","") #!if(isSat,":sat",""), [] > { @@ -2803,7 +2803,7 @@ bit isSat, bit hasShift, bit isConj> : MInst <(outs DoubleRegs:$Rxx), (ins DoubleRegs:$dst2, IntRegs:$Rs, IntRegs:$Rt), - "$Rxx "#op2#"= "#op1#"($Rs, $Rt"#!if(isConj,"*)",")") + "$Rxx"#op2#"="#op1#"($Rs,$Rt"#!if(isConj,"*)",")") #!if(hasShift,":<<1","") #!if(isSat,":sat",""), @@ -2913,7 +2913,7 @@ bits<4> MajOp, bit isHalf > : STInst <(outs IntRegs:$_dst_), (ins IntRegs:$src1, ImmOp:$offset, RC:$src2), - mnemonic#"($src1++#$offset) = $src2"#!if(isHalf, ".h", ""), + mnemonic#"($src1++#$offset)=$src2"#!if(isHalf, ".h", ""), [], "$src1 = $_dst_" >, AddrModeRel { bits<5> src1; @@ -2950,7 +2950,7 @@ : STInst <(outs IntRegs:$_dst_), (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset, RC:$src3), !if(isPredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", - ") ")#mnemonic#"($src2++#$offset) = $src3"#!if(isHalf, ".h", ""), + ") ")#mnemonic#"($src2++#$offset)=$src3"#!if(isHalf, ".h", ""), [], "$src2 = $_dst_" >, AddrModeRel { bits<2> src1; @@ -3023,7 +3023,7 @@ MemAccessSize AccessSz, bit isHalf = 0> : STInst <(outs IntRegs:$_dst_), (ins IntRegs:$src1, ModRegs:$src2, RC:$src3), - mnemonic#"($src1++$src2) = $src3"#!if(isHalf, ".h", ""), + mnemonic#"($src1++$src2)=$src3"#!if(isHalf, ".h", ""), [], "$src1 = $_dst_" > { bits<5> src1; bits<1> src2; @@ -3054,7 +3054,7 @@ bits<3> MajOp, bit isH = 0> : STInst <(outs), (ins IntRegs:$src1, ImmOp:$src2, RC:$src3), - mnemonic#"($src1+#$src2) = $src3"#!if(isH,".h","")>, + mnemonic#"($src1+#$src2)=$src3"#!if(isH,".h","")>, AddrModeRel, ImmRegRel { bits<5> src1; bits<14> src2; // Actual address offset @@ -3091,7 +3091,7 @@ : STInst <(outs), (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$src3, RC:$src4), !if(PredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", - ") ")#mnemonic#"($src2+#$src3) = $src4"#!if(isH,".h",""), + ") ")#mnemonic#"($src2+#$src3)=$src4"#!if(isH,".h",""), [],"",V2LDST_tc_st_SLOT01 >, AddrModeRel, ImmRegRel { bits<2> src1; @@ -3201,7 +3201,7 @@ MemAccessSize AlignSize, string RegSrc = 
"Rt"> : STInst <(outs IntRegs:$_dst_), (ins IntRegs:$Rz, Imm:$offset, ModRegs:$Mu, RC:$Rt), - #mnemonic#"($Rz ++ #$offset:circ($Mu)) = $"#RegSrc#"", + #mnemonic#"($Rz++#$offset:circ($Mu))=$"#RegSrc#"", [] , "$Rz = $_dst_" > { bits<5> Rz; @@ -3244,7 +3244,7 @@ bits<2>MajOp, MemAccessSize AlignSize> : NVInst < (outs IntRegs:$_dst_), (ins IntRegs:$Rz, Imm:$offset, ModRegs:$Mu, IntRegs:$Nt), - #mnemonic#"($Rz ++ #$offset:circ($Mu)) = $Nt.new", + #mnemonic#"($Rz++#$offset:circ($Mu))=$Nt.new", [], "$Rz = $_dst_"> { bits<5> Rz; @@ -3280,7 +3280,7 @@ MemAccessSize AlignSize, string RegSrc = "Rt"> : STInst <(outs IntRegs:$_dst_), (ins IntRegs:$Rz, ModRegs:$Mu, RC:$Rt), - #mnemonic#"($Rz ++ I:circ($Mu)) = $"#RegSrc#"", + #mnemonic#"($Rz++I:circ($Mu))=$"#RegSrc#"", [], "$Rz = $_dst_" > { bits<5> Rz; @@ -3317,7 +3317,7 @@ MemAccessSize AlignSize> : NVInst <(outs IntRegs:$_dst_), (ins IntRegs:$Rz, ModRegs:$Mu, IntRegs:$Nt), - #mnemonic#"($Rz ++ I:circ($Mu)) = $Nt.new" , + #mnemonic#"($Rz++I:circ($Mu))=$Nt.new" , [] , "$Rz = $_dst_"> { bits<5> Rz; @@ -3350,7 +3350,7 @@ : STInst <(outs IntRegs:$_dst_), (ins IntRegs:$Rz, ModRegs:$Mu, RC:$src), - #mnemonic#"($Rz ++ $Mu:brev) = $src"#!if (!eq(isHalf, 1), ".h", ""), + #mnemonic#"($Rz++$Mu:brev)=$src"#!if (!eq(isHalf, 1), ".h", ""), [], "$Rz = $_dst_" > { let accessSize = addrSize; @@ -3392,7 +3392,7 @@ class T_storenew_pbr majOp> : NVInst <(outs IntRegs:$_dst_), (ins IntRegs:$Rz, ModRegs:$Mu, IntRegs:$Nt), - #mnemonic#"($Rz ++ $Mu:brev) = $Nt.new", [], + #mnemonic#"($Rz++$Mu:brev)=$Nt.new", [], "$Rz = $_dst_">, NewValueRel { let accessSize = addrSize; bits<5> Rz; @@ -3429,7 +3429,7 @@ class T_S2op_1 RegTyBits, RegisterClass RCOut, RegisterClass RCIn, bits<2> MajOp, bits<3> MinOp, bit isSat> : SInst <(outs RCOut:$dst), (ins RCIn:$src), - "$dst = "#mnemonic#"($src)"#!if(isSat, ":sat", ""), + "$dst="#mnemonic#"($src)"#!if(isSat, ":sat", ""), [], "", S_2op_tc_1_SLOT23 > { bits<5> dst; bits<5> src; @@ -3525,7 +3525,7 @@ bit isSat, bit isRnd, list pattern = []> : SInst <(outs RCOut:$dst), (ins RCIn:$src, u5_0Imm:$u5), - "$dst = "#mnemonic#"($src, #$u5)"#!if(isSat, ":sat", "") + "$dst="#mnemonic#"($src,#$u5)"#!if(isSat, ":sat", "") #!if(isRnd, ":rnd", ""), pattern, "", S_2op_tc_2_SLOT23> { bits<5> dst; @@ -3579,16 +3579,16 @@ let isAsmParserOnly = 1 in def S2_asr_i_r_rnd_goodsyntax : SInst <(outs IntRegs:$dst), (ins IntRegs:$src, u5_0Imm:$u5), - "$dst = asrrnd($src, #$u5)", + "$dst=asrrnd($src,#$u5)", [], "", S_2op_tc_1_SLOT23>; let isAsmParserOnly = 1 in def A2_not: ALU32_rr<(outs IntRegs:$dst),(ins IntRegs:$src), - "$dst = not($src)">; + "$dst=not($src)">; class T_S2op_3MajOp, bits<3>minOp, bits<1> sat = 0> : SInst<(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss), - "$Rdd = "#opc#"($Rss)"#!if(!eq(sat, 1),":sat","")> { + "$Rdd="#opc#"($Rss)"#!if(!eq(sat, 1),":sat","")> { bits<5> Rss; bits<5> Rdd; let IClass = 0b1000; @@ -3633,7 +3633,7 @@ let hasSideEffects = 0, hasNewValue = 1 in class T_COUNT_LEADING MajOp, bits<3> MinOp, bit Is32, dag Out, dag Inp> - : SInst { + : SInst { bits<5> Rs; bits<5> Rd; let IClass = 0b1000; @@ -3671,7 +3671,7 @@ let hasSideEffects = 0, hasNewValue = 1 in class T_SCT_BIT_IMM MinOp> : SInst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, u5_0Imm:$u5), - "$Rd = "#MnOp#"($Rs, #$u5)", [], "", S_2op_tc_1_SLOT23> { + "$Rd="#MnOp#"($Rs,#$u5)", [], "", S_2op_tc_1_SLOT23> { bits<5> Rd; bits<5> Rs; bits<5> u5; @@ -3687,7 +3687,7 @@ let hasSideEffects = 0, hasNewValue = 1 in class T_SCT_BIT_REG MinOp> : SInst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), 
- "$Rd = "#MnOp#"($Rs, $Rt)", [], "", S_3op_tc_1_SLOT23> { + "$Rd="#MnOp#"($Rs,$Rt)", [], "", S_3op_tc_1_SLOT23> { bits<5> Rd; bits<5> Rs; bits<5> Rt; @@ -3711,7 +3711,7 @@ let hasSideEffects = 0 in class T_TEST_BIT_IMM MajOp> : SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, u5_0Imm:$u5), - "$Pd = "#MnOp#"($Rs, #$u5)", + "$Pd="#MnOp#"($Rs,#$u5)", [], "", S_2op_tc_2early_SLOT23> { bits<2> Pd; bits<5> Rs; @@ -3728,7 +3728,7 @@ let hasSideEffects = 0 in class T_TEST_BIT_REG : SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Pd = "#MnOp#"($Rs, $Rt)", + "$Pd="#MnOp#"($Rs,$Rt)", [], "", S_3op_tc_2early_SLOT23> { bits<2> Pd; bits<5> Rs; @@ -3747,7 +3747,7 @@ let hasSideEffects = 0 in class T_TEST_BITS_IMM MajOp, bit IsNeg> : SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, u6_0Imm:$u6), - "$Pd = "#MnOp#"($Rs, #$u6)", + "$Pd="#MnOp#"($Rs,#$u6)", [], "", S_2op_tc_2early_SLOT23> { bits<2> Pd; bits<5> Rs; @@ -3764,7 +3764,7 @@ let hasSideEffects = 0 in class T_TEST_BITS_REG MajOp, bit IsNeg> : SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Pd = "#MnOp#"($Rs, $Rt)", + "$Pd="#MnOp#"($Rs,$Rt)", [], "", S_3op_tc_2early_SLOT23> { bits<2> Pd; bits<5> Rs; @@ -3808,7 +3808,7 @@ // Predicate transfer. let hasSideEffects = 0, hasNewValue = 1 in def C2_tfrpr : SInst<(outs IntRegs:$Rd), (ins PredRegs:$Ps), - "$Rd = $Ps", [], "", S_2op_tc_1_SLOT23> { + "$Rd=$Ps", [], "", S_2op_tc_1_SLOT23> { bits<5> Rd; bits<2> Ps; @@ -3822,7 +3822,7 @@ // Transfer general register to predicate. let hasSideEffects = 0 in def C2_tfrrp: SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs), - "$Pd = $Rs", [], "", S_2op_tc_2early_SLOT23> { + "$Pd=$Rs", [], "", S_2op_tc_2early_SLOT23> { bits<2> Pd; bits<5> Rs; @@ -3834,7 +3834,7 @@ let hasSideEffects = 0, isCodeGenOnly = 1 in def C2_pxfer_map: SInst<(outs PredRegs:$dst), (ins PredRegs:$src), - "$dst = $src">; + "$dst=$src">; //===----------------------------------------------------------------------===// // STYPE/PRED - @@ -3846,7 +3846,7 @@ class S_2OpInstImmMajOp, bits<3>MinOp, Operand Imm, list pattern = [], bit isRnd = 0> : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, Imm:$src2), - "$dst = "#Mnemonic#"($src1, #$src2)"#!if(isRnd, ":rnd", ""), + "$dst="#Mnemonic#"($src1,#$src2)"#!if(isRnd, ":rnd", ""), pattern> { bits<5> src1; bits<5> dst; @@ -3873,7 +3873,7 @@ let AddedComplexity = 100, hasNewValue = 1, hasSideEffects = 0 in def S2_addasl_rrri: SInst <(outs IntRegs:$Rd), (ins IntRegs:$Rt, IntRegs:$Rs, u3_0Imm:$u3), - "$Rd = addasl($Rt, $Rs, #$u3)" , [], + "$Rd=addasl($Rt,$Rs,#$u3)" , [], "", S_3op_tc_2_SLOT23> { bits<5> Rd; bits<5> Rt; @@ -3946,7 +3946,7 @@ opExtendable = 0, hasSideEffects = 0 in class LOOP_iBase : CRInst<(outs), (ins brOp:$offset, u10_0Imm:$src2), - #mnemonic#"($offset, #$src2)", + #mnemonic#"($offset,#$src2)", [], "" , CR_tc_3x_SLOT3> { bits<9> offset; bits<10> src2; @@ -3966,7 +3966,7 @@ opExtendable = 0, hasSideEffects = 0 in class LOOP_rBase : CRInst<(outs), (ins brOp:$offset, IntRegs:$src2), - #mnemonic#"($offset, $src2)", + #mnemonic#"($offset,$src2)", [], "" ,CR_tc_3x_SLOT3> { bits<9> offset; bits<5> src2; @@ -4018,7 +4018,7 @@ opExtendable = 0, isPredicateLate = 1 in class SPLOOP_iBase op> : CRInst <(outs), (ins brtarget:$r7_2, u10_0Imm:$U10), - "p3 = sp"#SP#"loop0($r7_2, #$U10)" > { + "p3=sp"#SP#"loop0($r7_2,#$U10)" > { bits<9> r7_2; bits<10> U10; @@ -4038,7 +4038,7 @@ opExtendable = 0, isPredicateLate = 1 in class SPLOOP_rBase op> : CRInst <(outs), (ins brtarget:$r7_2, IntRegs:$Rs), - "p3 = sp"#SP#"loop0($r7_2, $Rs)" > { + 
"p3=sp"#SP#"loop0($r7_2,$Rs)" > { bits<9> r7_2; bits<5> Rs; @@ -4094,7 +4094,7 @@ let hasSideEffects = 0 in class TFR_CR_RS_base : CRInst <(outs CTRC:$dst), (ins RC:$src), - "$dst = $src", [], "", CR_tc_3x_SLOT3> { + "$dst=$src", [], "", CR_tc_3x_SLOT3> { bits<5> dst; bits<5> src; @@ -4115,7 +4115,7 @@ let hasSideEffects = 0 in class TFR_RD_CR_base : CRInst <(outs RC:$dst), (ins CTRC:$src), - "$dst = $src", [], "", CR_tc_3x_SLOT3> { + "$dst=$src", [], "", CR_tc_3x_SLOT3> { bits<5> dst; bits<5> src; @@ -4151,7 +4151,7 @@ class REG_IMMED MajOp, bit MinOp> : ALU32_ri<(outs IntRegs:$dst), (ins u16_0Imm:$imm_value), - "$dst"#RegHalf#" = $imm_value", []> { + "$dst"#RegHalf#"=$imm_value", []> { bits<5> dst; bits<32> imm_value; let IClass = 0b0111; @@ -4171,9 +4171,9 @@ let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in { def CONST32 : CONSTLDInst<(outs IntRegs:$Rd), (ins i32imm:$v), - "$Rd = CONST32(#$v)", []>; + "$Rd=CONST32(#$v)", []>; def CONST64 : CONSTLDInst<(outs DoubleRegs:$Rd), (ins i64imm:$v), - "$Rd = CONST64(#$v)", []>; + "$Rd=CONST64(#$v)", []>; } let hasSideEffects = 0, isReMaterializable = 1, isPseudo = 1, @@ -4231,7 +4231,7 @@ SDNode OpNode2, bits<3> majOp, bits<2> minOp> : SInst_acc<(outs IntRegs:$Rx), (ins IntRegs:$src1, IntRegs:$Rs, u5_0Imm:$u5), - "$Rx "#opc2#opc1#"($Rs, #$u5)", [], + "$Rx"#opc2#opc1#"($Rs,#$u5)", [], "$src1 = $Rx", S_2op_tc_2_SLOT23> { bits<5> Rx; bits<5> Rs; @@ -4258,7 +4258,7 @@ SDNode OpNode2, bits<2> majOp, bits<2> minOp> : SInst_acc<(outs IntRegs:$Rx), (ins IntRegs:$src1, IntRegs:$Rs, IntRegs:$Rt), - "$Rx "#opc2#opc1#"($Rs, $Rt)", [], + "$Rx"#opc2#opc1#"($Rs,$Rt)", [], "$src1 = $Rx", S_3op_tc_2_SLOT23 > { bits<5> Rx; bits<5> Rs; @@ -4282,7 +4282,7 @@ SDNode OpNode2, bits<3> majOp, bits<2> minOp> : SInst_acc<(outs DoubleRegs:$Rxx), (ins DoubleRegs:$src1, DoubleRegs:$Rss, u6_0Imm:$u6), - "$Rxx "#opc2#opc1#"($Rss, #$u6)", [], + "$Rxx"#opc2#opc1#"($Rss,#$u6)", [], "$src1 = $Rxx", S_2op_tc_2_SLOT23> { bits<5> Rxx; bits<5> Rss; @@ -4309,7 +4309,7 @@ SDNode OpNode2, bits<3> majOp, bits<2> minOp> : SInst_acc<(outs DoubleRegs:$Rxx), (ins DoubleRegs:$src1, DoubleRegs:$Rss, IntRegs:$Rt), - "$Rxx "#opc2#opc1#"($Rss, $Rt)", [], + "$Rxx"#opc2#opc1#"($Rss,$Rt)", [], "$src1 = $Rxx", S_3op_tc_2_SLOT23> { bits<5> Rxx; bits<5> Rss; @@ -4339,16 +4339,16 @@ multiclass xtype_imm_accminOp> { let AddedComplexity = 100 in - defm _acc : xtype_imm_base< opc1, "+= ", OpNode, add, 0b001, minOp>; + defm _acc : xtype_imm_base< opc1, "+=", OpNode, add, 0b001, minOp>; - defm _nac : xtype_imm_base< opc1, "-= ", OpNode, sub, 0b000, minOp>; - defm _and : xtype_imm_base< opc1, "&= ", OpNode, and, 0b010, minOp>; - defm _or : xtype_imm_base< opc1, "|= ", OpNode, or, 0b011, minOp>; + defm _nac : xtype_imm_base< opc1, "-=", OpNode, sub, 0b000, minOp>; + defm _and : xtype_imm_base< opc1, "&=", OpNode, and, 0b010, minOp>; + defm _or : xtype_imm_base< opc1, "|=", OpNode, or, 0b011, minOp>; } multiclass xtype_xor_imm_accminOp> { let AddedComplexity = 100 in - defm _xacc : xtype_imm_base< opc1, "^= ", OpNode, xor, 0b100, minOp>; + defm _xacc : xtype_imm_base< opc1, "^=", OpNode, xor, 0b100, minOp>; } defm S2_asr : xtype_imm_acc<"asr", sra, 0b00>; @@ -4361,21 +4361,21 @@ multiclass xtype_reg_acc_rminOp> { let AddedComplexity = 100 in - def _acc : T_shift_reg_acc_r >1","") #!if(isSat, ":sat", ""), [], "", S_3op_tc_2_SLOT23 > { @@ -4433,7 +4433,7 @@ class T_S3op_2 MajOp, bit SwapOps> : SInst < (outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt, PredRegs:$Pu), - "$Rdd = 
"#mnemonic#"($Rss, $Rtt, $Pu)", + "$Rdd="#mnemonic#"($Rss,$Rtt,$Pu)", [], "", S_3op_tc_1_SLOT23 > { bits<5> Rdd; bits<5> Rss; @@ -4463,7 +4463,7 @@ bits<2> MinOp, bit isSat = 0, list pattern = [] > : SInst <(outs RC:$dst), (ins RC:$src1, IntRegs:$src2), - "$dst = "#mnemonic#"($src1, $src2)"#!if(isSat, ":sat", ""), + "$dst="#mnemonic#"($src1,$src2)"#!if(isSat, ":sat", ""), pattern, "", S_3op_tc_1_SLOT23> { bits<5> dst; bits<5> src1; @@ -4524,7 +4524,7 @@ class T_S3op_8 MinOp, bit isSat, bit isRnd, bit hasShift, bit hasSplat = 0> : SInst < (outs IntRegs:$Rd), (ins DoubleRegs:$Rss, IntRegs:$Rt), - "$Rd = "#opc#"($Rss, $Rt"#!if(hasSplat, "*", "")#")" + "$Rd="#opc#"($Rss,$Rt"#!if(hasSplat, "*", "")#")" #!if(hasShift, ":<<1", "") #!if(isRnd, ":rnd", "") #!if(isSat, ":sat", ""), @@ -4551,7 +4551,7 @@ class T_S3op_7 : SInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt, u3_0Imm:$u3), - "$Rdd = "#mnemonic#"($Rss, $Rtt, #$u3)" , + "$Rdd="#mnemonic#"($Rss,$Rtt,#$u3)" , [], "", S_3op_tc_1_SLOT23 > { bits<5> Rdd; bits<5> Rss; @@ -4578,7 +4578,7 @@ class T_S3op_insert : SInst <(outs RC:$dst), (ins RC:$src1, RC:$src2, DoubleRegs:$src3), - "$dst = "#mnemonic#"($src2, $src3)" , + "$dst="#mnemonic#"($src2,$src3)" , [], "$src1 = $dst", S_3op_tc_1_SLOT23 > { bits<5> dst; bits<5> src2; @@ -4597,7 +4597,7 @@ let hasSideEffects = 0 in class T_S2op_insert RegTyBits, RegisterClass RC, Operand ImmOp> : SInst <(outs RC:$dst), (ins RC:$dst2, RC:$src1, ImmOp:$src2, ImmOp:$src3), - "$dst = insert($src1, #$src2, #$src3)", + "$dst=insert($src1,#$src2,#$src3)", [], "$dst2 = $dst", S_2op_tc_2_SLOT23> { bits<5> dst; bits<5> src1; @@ -4641,7 +4641,7 @@ let hasNewValue = 1, hasSideEffects = 0 in class T_S3op_extract MinOp> : SInst <(outs IntRegs:$Rd), (ins IntRegs:$Rs, DoubleRegs:$Rtt), - "$Rd = "#mnemonic#"($Rs, $Rtt)", + "$Rd="#mnemonic#"($Rs,$Rtt)", [], "", S_3op_tc_2_SLOT23 > { bits<5> Rd; bits<5> Rs; @@ -4660,7 +4660,7 @@ class T_S2op_extract RegTyBits, RegisterClass RC, Operand ImmOp> : SInst <(outs RC:$dst), (ins RC:$src1, ImmOp:$src2, ImmOp:$src3), - "$dst = "#mnemonic#"($src1, #$src2, #$src3)", + "$dst="#mnemonic#"($src1,#$src2,#$src3)", [], "", S_2op_tc_2_SLOT23> { bits<5> dst; bits<5> src1; @@ -4709,7 +4709,7 @@ class tableidxRawMinOp> : SInst <(outs IntRegs:$Rx), (ins IntRegs:$_dst_, IntRegs:$Rs, u4_0Imm:$u4, s6_0Imm:$S6), - "$Rx = "#OpStr#"($Rs, #$u4, #$S6):raw", + "$Rx="#OpStr#"($Rs,#$u4,#$S6):raw", [], "$Rx = $_dst_" > { bits<5> Rx; bits<5> Rs; @@ -4740,7 +4740,7 @@ class tableidx_goodsyntax : SInst <(outs IntRegs:$Rx), (ins IntRegs:$_dst_, IntRegs:$Rs, u4_0Imm:$u4, u5_0Imm:$u5), - "$Rx = "#mnemonic#"($Rs, #$u4, #$u5)", + "$Rx="#mnemonic#"($Rs,#$u4,#$u5)", [], "$Rx = $_dst_" >; def S2_tableidxb_goodsyntax : tableidx_goodsyntax<"tableidxb">; Index: lib/Target/Hexagon/HexagonInstrInfoV3.td =================================================================== --- lib/Target/Hexagon/HexagonInstrInfoV3.td +++ lib/Target/Hexagon/HexagonInstrInfoV3.td @@ -101,7 +101,7 @@ let hasSideEffects = 0, isAsmParserOnly = 1 in def A2_addsp : ALU64_rr<(outs DoubleRegs:$Rd), - (ins IntRegs:$Rs, DoubleRegs:$Rt), "$Rd = add($Rs, $Rt)", [], + (ins IntRegs:$Rs, DoubleRegs:$Rt), "$Rd=add($Rs,$Rt)", [], "", ALU64_tc_1_SLOT23>; @@ -108,8 +108,8 @@ let hasSideEffects = 0 in class T_XTYPE_MIN_MAX_P : ALU64Inst<(outs DoubleRegs:$Rd), (ins DoubleRegs:$Rt, DoubleRegs:$Rs), - "$Rd = "#!if(isMax,"max","min")#!if(isUnsigned,"u","") - #"($Rt, $Rs)", [], "", ALU64_tc_2_SLOT23> { + 
"$Rd="#!if(isMax,"max","min")#!if(isUnsigned,"u","") + #"($Rt,$Rs)", [], "", ALU64_tc_2_SLOT23> { bits<5> Rd; bits<5> Rs; bits<5> Rt; @@ -143,7 +143,7 @@ class T_vrcmpRawMajOp>: MInst<(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), - "$Rdd = vrcmpys($Rss, $Rtt):<<1:sat:raw:"#HiLo, []> { + "$Rdd=vrcmpys($Rss,$Rtt):<<1:sat:raw:"#HiLo, []> { bits<5> Rdd; bits<5> Rss; bits<5> Rtt; @@ -172,7 +172,7 @@ class T_vrcmpys_accMajOp>: MInst <(outs DoubleRegs:$Rxx), (ins DoubleRegs:$_src_, DoubleRegs:$Rss, DoubleRegs:$Rtt), - "$Rxx += vrcmpys($Rss, $Rtt):<<1:sat:raw:"#HiLo, [], + "$Rxx+=vrcmpys($Rss,$Rtt):<<1:sat:raw:"#HiLo, [], "$Rxx = $_src_"> { bits<5> Rxx; bits<5> Rss; @@ -197,7 +197,7 @@ def M2_vrcmpys_acc_s1 : MInst <(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1, IntRegs:$src2), - "$dst += vrcmpys($src1, $src2):<<1:sat", [], + "$dst+=vrcmpys($src1,$src2):<<1:sat", [], "$dst2 = $dst">; def M2_vrcmpys_s1rp_h : T_MType_vrcmpy <"vrcmpys", 0b101, 0b110, 1>; Index: lib/Target/Hexagon/HexagonInstrInfoV4.td =================================================================== --- lib/Target/Hexagon/HexagonInstrInfoV4.td +++ lib/Target/Hexagon/HexagonInstrInfoV4.td @@ -117,7 +117,7 @@ class T_ALU32_3op_not MajOp, bits<3> MinOp, bit OpsRev> : T_ALU32_3op { - let AsmString = "$Rd = "#mnemonic#"($Rs, ~$Rt)"; + let AsmString = "$Rd="#mnemonic#"($Rs,~$Rt)"; } let BaseOpcode = "andn_rr", CextOpcode = "andn" in @@ -136,7 +136,7 @@ class T_CMP_rrbh MinOp, bit IsComm> : SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Pd = "#mnemonic#"($Rs, $Rt)", [], "", S_3op_tc_2early_SLOT23>, + "$Pd="#mnemonic#"($Rs,$Rt)", [], "", S_3op_tc_2early_SLOT23>, ImmRegRel { let InputType = "reg"; let CextOpcode = mnemonic; @@ -166,7 +166,7 @@ class T_CMP_ribh MajOp, bit IsHalf, bit IsComm, Operand ImmType, bit IsImmExt, bit IsImmSigned, int ImmBits> : ALU64Inst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, ImmType:$Imm), - "$Pd = "#mnemonic#"($Rs, #$Imm)", [], "", ALU64_tc_2early_SLOT23>, + "$Pd="#mnemonic#"($Rs,#$Imm)", [], "", ALU64_tc_2early_SLOT23>, ImmRegRel { let InputType = "imm"; let CextOpcode = mnemonic; @@ -201,7 +201,7 @@ class T_RCMP_EQ_ri : ALU32_ri<(outs IntRegs:$Rd), (ins IntRegs:$Rs, s8_0Ext:$s8), - "$Rd = "#mnemonic#"($Rs, #$s8)", [], "", ALU32_2op_tc_1_SLOT0123>, + "$Rd="#mnemonic#"($Rs,#$s8)", [], "", ALU32_2op_tc_1_SLOT0123>, ImmRegRel { let InputType = "imm"; let CextOpcode = !if (IsNeg, "!rcmp.eq", "rcmp.eq"); @@ -257,16 +257,16 @@ let opExtendable = 2 in def A4_combineri : T_Combine1<0b00, (ins IntRegs:$Rs, s8_0Ext:$s8), - "$Rdd = combine($Rs, #$s8)">; + "$Rdd=combine($Rs,#$s8)">; let opExtendable = 1 in def A4_combineir : T_Combine1<0b01, (ins s8_0Ext:$s8, IntRegs:$Rs), - "$Rdd = combine(#$s8, $Rs)">; + "$Rdd=combine(#$s8,$Rs)">; // A4_combineii: Set two small immediates. 
let hasSideEffects = 0, isExtendable = 1, opExtentBits = 6, opExtendable = 2 in def A4_combineii: ALU32Inst<(outs DoubleRegs:$Rdd), (ins s8_0Imm:$s8, u6_0Ext:$U6), - "$Rdd = combine(#$s8, #$U6)"> { + "$Rdd=combine(#$s8,#$U6)"> { bits<5> Rdd; bits<8> s8; bits<6> U6; @@ -295,7 +295,7 @@ class T_LD_abs_setMajOp>: LDInst<(outs RC:$dst1, IntRegs:$dst2), (ins u6_0Ext:$addr), - "$dst1 = "#mnemonic#"($dst2 = #$addr)", + "$dst1="#mnemonic#"($dst2=#$addr)", []> { bits<7> name; bits<5> dst1; @@ -347,7 +347,7 @@ class T_LoadAbsReg MajOp> : LDInst <(outs RC:$dst), (ins IntRegs:$src1, u2_0Imm:$src2, u6_0Ext:$src3), - "$dst = "#mnemonic#"($src1<<#$src2 + #$src3)", + "$dst="#mnemonic#"($src1<<#$src2+#$src3)", [] >, ImmRegShl { bits<5> dst; bits<5> src1; @@ -400,7 +400,7 @@ //===----------------------------------------------------------------------===// class T_load_rr MajOp>: LDInst<(outs RC:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2_0Imm:$u2), - "$dst = "#mnemonic#"($src1 + $src2<<#$u2)", + "$dst="#mnemonic#"($src1+$src2<<#$u2)", [], "", V4LDST_tc_ld_SLOT01>, ImmRegShl, AddrModeRel { bits<5> dst; bits<5> src1; @@ -428,7 +428,7 @@ LDInst <(outs RC:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2_0Imm:$u2), !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", - ") ")#"$dst = "#mnemonic#"($src2+$src3<<#$u2)", + ") ")#"$dst="#mnemonic#"($src2+$src3<<#$u2)", [], "", V4LDST_tc_ld_SLOT01>, AddrModeRel { bits<5> dst; bits<2> src1; @@ -508,7 +508,7 @@ bits<3> MajOp, MemAccessSize AccessSz, bit isHalf = 0> : STInst<(outs IntRegs:$dst), (ins u6_0Ext:$addr, RC:$src), - mnemonic#"($dst = #$addr) = $src"#!if(isHalf, ".h","")>, NewValueRel { + mnemonic#"($dst=#$addr)=$src"#!if(isHalf, ".h","")>, NewValueRel { bits<5> dst; bits<6> addr; bits<5> src; @@ -547,7 +547,7 @@ MemAccessSize AccessSz > : NVInst <(outs IntRegs:$dst), (ins u6_0Ext:$addr, IntRegs:$src), - mnemonic#"($dst = #$addr) = $src.new">, NewValueRel { + mnemonic#"($dst=#$addr)=$src.new">, NewValueRel { bits<5> dst; bits<6> addr; bits<3> src; @@ -577,7 +577,7 @@ bits<3> MajOp, MemAccessSize AccessSz, bit isHalf = 0> : STInst<(outs), (ins IntRegs:$src1, u2_0Imm:$src2, u6_0Ext:$src3, RC:$src4), - mnemonic#"($src1<<#$src2 + #$src3) = $src4"#!if(isHalf, ".h",""), + mnemonic#"($src1<<#$src2+#$src3)=$src4"#!if(isHalf, ".h",""), []>, ImmRegShl, NewValueRel { bits<5> src1; @@ -619,7 +619,7 @@ MemAccessSize AccessSz> : NVInst <(outs ), (ins IntRegs:$src1, u2_0Imm:$src2, u6_0Ext:$src3, IntRegs:$src4), - mnemonic#"($src1<<#$src2 + #$src3) = $src4.new">, NewValueRel { + mnemonic#"($src1<<#$src2+#$src3)=$src4.new">, NewValueRel { bits<5> src1; bits<2> src2; bits<6> src3; @@ -651,7 +651,7 @@ let isPredicable = 1 in class T_store_rr MajOp, bit isH> : STInst < (outs ), (ins IntRegs:$Rs, IntRegs:$Ru, u2_0Imm:$u2, RC:$Rt), - mnemonic#"($Rs + $Ru<<#$u2) = $Rt"#!if(isH, ".h",""), + mnemonic#"($Rs+$Ru<<#$u2)=$Rt"#!if(isH, ".h",""), [],"",V4LDST_tc_st_SLOT01>, ImmRegShl, AddrModeRel { bits<5> Rs; @@ -684,7 +684,7 @@ (ins PredRegs:$Pv, IntRegs:$Rs, IntRegs:$Ru, u2_0Imm:$u2, RC:$Rt), !if(isNot, "if (!$Pv", "if ($Pv")#!if(isPredNew, ".new) ", - ") ")#mnemonic#"($Rs+$Ru<<#$u2) = $Rt"#!if(isH, ".h",""), + ") ")#mnemonic#"($Rs+$Ru<<#$u2)=$Rt"#!if(isH, ".h",""), [], "", V4LDST_tc_st_SLOT01> , AddrModeRel{ bits<2> Pv; bits<5> Rs; @@ -718,7 +718,7 @@ let isPredicable = 1, isNewValue = 1, opNewValue = 3 in class T_store_new_rr MajOp> : NVInst < (outs ), (ins IntRegs:$Rs, IntRegs:$Ru, u2_0Imm:$u2, IntRegs:$Nt), - mnemonic#"($Rs + $Ru<<#$u2) = $Nt.new", + 
mnemonic#"($Rs+$Ru<<#$u2)=$Nt.new", [],"",V4LDST_tc_st_SLOT0>, ImmRegShl, AddrModeRel { bits<5> Rs; @@ -746,7 +746,7 @@ : NVInst<(outs), (ins PredRegs:$Pv, IntRegs:$Rs, IntRegs:$Ru, u2_0Imm:$u2, IntRegs:$Nt), !if(isNot, "if (!$Pv", "if ($Pv")#!if(isPredNew, ".new) ", - ") ")#mnemonic#"($Rs+$Ru<<#$u2) = $Nt.new", + ") ")#mnemonic#"($Rs+$Ru<<#$u2)=$Nt.new", [], "", V4LDST_tc_st_SLOT0>, AddrModeRel { bits<2> Pv; bits<5> Rs; @@ -948,7 +948,7 @@ Operand ImmOp, bits<2>MajOp> : NVInst_V4 <(outs), (ins IntRegs:$src1, ImmOp:$src2, RC:$src3), - mnemonic#"($src1+#$src2) = $src3.new", + mnemonic#"($src1+#$src2)=$src3.new", [],"",ST_tc_st_SLOT0> { bits<5> src1; bits<13> src2; // Actual address offset @@ -985,7 +985,7 @@ : NVInst_V4 <(outs), (ins PredRegs:$src1, IntRegs:$src2, predImmOp:$src3, RC:$src4), !if(PredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", - ") ")#mnemonic#"($src2+#$src3) = $src4.new", + ") ")#mnemonic#"($src2+#$src3)=$src4.new", [],"",V2LDST_tc_st_SLOT0> { bits<2> src1; bits<5> src2; @@ -1069,7 +1069,7 @@ class T_loadalign_pr MajOp, MemAccessSize AccessSz> : LDInstPI <(outs DoubleRegs:$dst, IntRegs:$_dst_), (ins DoubleRegs:$src1, IntRegs:$src2, ModRegs:$src3), - "$dst = "#mnemonic#"($src2++$src3)", [], + "$dst="#mnemonic#"($src2++$src3)", [], "$src1 = $dst, $src2 = $_dst_"> { bits<5> dst; bits<5> src2; @@ -1099,7 +1099,7 @@ class T_StorePI_nv MajOp > : NVInstPI_V4 <(outs IntRegs:$_dst_), (ins IntRegs:$src1, ImmOp:$offset, IntRegs:$src2), - mnemonic#"($src1++#$offset) = $src2.new", + mnemonic#"($src1++#$offset)=$src2.new", [], "$src1 = $_dst_">, AddrModeRel { bits<5> src1; @@ -1135,7 +1135,7 @@ (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset, IntRegs:$src3), !if(isPredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", - ") ")#mnemonic#"($src2++#$offset) = $src3.new", + ") ")#mnemonic#"($src2++#$offset)=$src3.new", [], "$src2 = $_dst_">, AddrModeRel { bits<2> src1; @@ -1199,7 +1199,7 @@ class T_StorePI_RegNV MajOp, MemAccessSize AccessSz> : NVInstPI_V4 <(outs IntRegs:$_dst_), (ins IntRegs:$src1, ModRegs:$src2, IntRegs:$src3), - #mnemonic#"($src1++$src2) = $src3.new", + #mnemonic#"($src1++$src2)=$src3.new", [], "$src1 = $_dst_"> { bits<5> src1; bits<1> src2; @@ -1253,7 +1253,7 @@ : NVInst_V4<(outs), (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset), "if ("#!if(isNegCond, "!","")#mnemonic# - "($src1"#!if(!eq(NvOpNum, 0),".new, ",", ")# + "($src1"#!if(!eq(NvOpNum, 0),".new,",",")# "$src2"#!if(!eq(NvOpNum, 1),".new))","))")#" jump:" #!if(isTak, "t","nt")#" $offset", []> { @@ -1327,7 +1327,7 @@ bit isTak> : NVInst_V4<(outs), (ins IntRegs:$src1, u5_0Imm:$src2, brtarget:$offset), - "if ("#!if(isNegCond, "!","")#mnemonic#"($src1.new, #$src2)) jump:" + "if ("#!if(isNegCond, "!","")#mnemonic#"($src1.new,#$src2)) jump:" #!if(isTak, "t","nt")#" $offset", []> { let isTaken = isTak; @@ -1388,7 +1388,7 @@ (ins IntRegs:$src1, n1Const:$n1, brtarget:$offset), (ins IntRegs:$src1, brtarget:$offset)), "if ("#!if(isNegCond, "!","")#mnemonic - #"($src1.new, #" # !if(!eq(ImmVal, "{-1}"), "$n1", ImmVal) # ")) jump:" + #"($src1.new,#" # !if(!eq(ImmVal, "{-1}"), "$n1", ImmVal) # ")) jump:" #!if(isTak, "t","nt")#" $offset", []> { let isTaken = isTak; @@ -1459,7 +1459,7 @@ let hasNewValue = 1, isExtendable = 1, opExtendable = 1, isExtentSigned = 0, opExtentBits = 6, hasSideEffects = 0, Uses = [PC] in def C4_addipc : CRInst <(outs IntRegs:$Rd), (ins u6_0Ext:$u6), - "$Rd = add(pc, #$u6)", [], "", CR_tc_2_SLOT3 > { + "$Rd=add(pc,#$u6)", [], "", CR_tc_2_SLOT3 > { bits<5> Rd; bits<6> u6; @@ -1475,7 
+1475,7 @@ class T_LOGICAL_3OP OpBits, bit IsNeg> : CRInst<(outs PredRegs:$Pd), (ins PredRegs:$Ps, PredRegs:$Pt, PredRegs:$Pu), - "$Pd = " # MnOp1 # "($Ps, " # MnOp2 # "($Pt, " # + "$Pd=" # MnOp1 # "($Ps," # MnOp2 # "($Pt," # !if (IsNeg,"!","") # "$Pu))", [], "", CR_tc_2early_SLOT23> { bits<2> Pd; @@ -1518,7 +1518,7 @@ let hasNewValue = 1, hasSideEffects = 0 in def S4_parity: ALU64Inst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Rd = parity($Rs, $Rt)", [], "", ALU64_tc_2_SLOT23> { + "$Rd=parity($Rs,$Rt)", [], "", ALU64_tc_2_SLOT23> { bits<5> Rd; bits<5> Rs; bits<5> Rt; @@ -1536,7 +1536,7 @@ opExtendable = 3 in def S4_addaddi : ALU64Inst <(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Ru, s6_0Ext:$s6), - "$Rd = add($Rs, add($Ru, #$s6))" , [], + "$Rd=add($Rs,add($Ru,#$s6))" , [], "", ALU64_tc_2_SLOT23> { bits<5> Rd; bits<5> Rs; @@ -1558,7 +1558,7 @@ opExtentBits = 6, opExtendable = 2 in def S4_subaddi: ALU64Inst <(outs IntRegs:$Rd), (ins IntRegs:$Rs, s6_0Ext:$s6, IntRegs:$Ru), - "$Rd = add($Rs, sub(#$s6, $Ru))", + "$Rd=add($Rs,sub(#$s6,$Ru))", [], "", ALU64_tc_2_SLOT23> { bits<5> Rd; bits<5> Rs; @@ -1598,8 +1598,8 @@ } let Itinerary = M_tc_3x_SLOT23, Defs = [USR_OVF] in { - def M4_mac_up_s1_sat: T_MType_acc_rr<"+= mpy", 0b011, 0b000, 0, [], 0, 1, 1>; - def M4_nac_up_s1_sat: T_MType_acc_rr<"-= mpy", 0b011, 0b001, 0, [], 0, 1, 1>; + def M4_mac_up_s1_sat: T_MType_acc_rr<"+=mpy", 0b011, 0b000, 0, [], 0, 1, 1>; + def M4_nac_up_s1_sat: T_MType_acc_rr<"-=mpy", 0b011, 0b001, 0, [], 0, 1, 1>; } // Logical xor with xor accumulation. @@ -1608,7 +1608,7 @@ def M4_xor_xacc : SInst <(outs DoubleRegs:$Rxx), (ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt), - "$Rxx ^= xor($Rss, $Rtt)", [], + "$Rxx^=xor($Rss,$Rtt)", [], "$dst2 = $Rxx", S_3op_tc_1_SLOT23> { bits<5> Rxx; bits<5> Rss; @@ -1629,7 +1629,7 @@ def S4_vrcrotate : SInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, IntRegs:$Rt, u2_0Imm:$u2), - "$Rdd = vrcrotate($Rss, $Rt, #$u2)", + "$Rdd=vrcrotate($Rss,$Rt,#$u2)", [], "", S_3op_tc_3x_SLOT23> { bits<5> Rdd; bits<5> Rss; @@ -1653,7 +1653,7 @@ def S4_vrcrotate_acc : SInst <(outs DoubleRegs:$Rxx), (ins DoubleRegs:$dst2, DoubleRegs:$Rss, IntRegs:$Rt, u2_0Imm:$u2), - "$Rxx += vrcrotate($Rss, $Rt, #$u2)", [], + "$Rxx+=vrcrotate($Rss,$Rt,#$u2)", [], "$dst2 = $Rxx", S_3op_tc_3x_SLOT23> { bits<5> Rxx; bits<5> Rss; @@ -1675,7 +1675,7 @@ def S2_vrcnegh : SInst <(outs DoubleRegs:$Rxx), (ins DoubleRegs:$dst2, DoubleRegs:$Rss, IntRegs:$Rt), - "$Rxx += vrcnegh($Rss, $Rt)", [], + "$Rxx+=vrcnegh($Rss,$Rt)", [], "$dst2 = $Rxx", S_3op_tc_3x_SLOT23> { bits<5> Rxx; bits<5> Rss; @@ -1709,7 +1709,7 @@ def S4_or_andix: ALU64Inst<(outs IntRegs:$Rx), (ins IntRegs:$Ru, IntRegs:$_src_, s10_0Ext:$s10), - "$Rx = or($Ru, and($_src_, #$s10))" , [] , + "$Rx=or($Ru,and($_src_,#$s10))" , [] , "$_src_ = $Rx", ALU64_tc_2_SLOT23> { bits<5> Rx; bits<5> Ru; @@ -1728,7 +1728,7 @@ // let hasNewValue = 1, hasSideEffects = 0 in def A4_modwrapu: ALU64Inst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Rd = modwrap($Rs, $Rt)", [], "", ALU64_tc_2_SLOT23> { + "$Rd=modwrap($Rs,$Rt)", [], "", ALU64_tc_2_SLOT23> { bits<5> Rd; bits<5> Rs; bits<5> Rt; @@ -1744,7 +1744,7 @@ let hasSideEffects = 0 in def A4_bitsplit: ALU64Inst<(outs DoubleRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Rd = bitsplit($Rs, $Rt)", [], "", ALU64_tc_1_SLOT23> { + "$Rd=bitsplit($Rs,$Rt)", [], "", ALU64_tc_1_SLOT23> { bits<5> Rd; bits<5> Rs; bits<5> Rt; @@ -1760,7 +1760,7 @@ let hasSideEffects = 0 in def dep_S2_packhl: ALU64Inst<(outs 
DoubleRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Rd = packhl($Rs, $Rt):deprecated", [], "", ALU64_tc_1_SLOT23> { + "$Rd=packhl($Rs,$Rt):deprecated", [], "", ALU64_tc_1_SLOT23> { bits<5> Rd; bits<5> Rs; bits<5> Rt; @@ -1776,7 +1776,7 @@ let hasNewValue = 1, hasSideEffects = 0 in def dep_A2_addsat: ALU64Inst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Rd = add($Rs, $Rt):sat:deprecated", [], "", ALU64_tc_2_SLOT23> { + "$Rd=add($Rs,$Rt):sat:deprecated", [], "", ALU64_tc_2_SLOT23> { bits<5> Rd; bits<5> Rs; bits<5> Rt; @@ -1792,7 +1792,7 @@ let hasNewValue = 1, hasSideEffects = 0 in def dep_A2_subsat: ALU64Inst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Rd = sub($Rs, $Rt):sat:deprecated", [], "", ALU64_tc_2_SLOT23> { + "$Rd=sub($Rs,$Rt):sat:deprecated", [], "", ALU64_tc_2_SLOT23> { bits<5> Rd; bits<5> Rs; bits<5> Rt; @@ -1806,27 +1806,27 @@ } // Rx[&|]=xor(Rs,Rt) -def M4_or_xor : T_MType_acc_rr < "|= xor", 0b110, 0b001, 0>; -def M4_and_xor : T_MType_acc_rr < "&= xor", 0b010, 0b010, 0>; +def M4_or_xor : T_MType_acc_rr < "|=xor", 0b110, 0b001, 0>; +def M4_and_xor : T_MType_acc_rr < "&=xor", 0b010, 0b010, 0>; // Rx[&|^]=or(Rs,Rt) -def M4_xor_or : T_MType_acc_rr < "^= or", 0b110, 0b011, 0>; +def M4_xor_or : T_MType_acc_rr < "^=or", 0b110, 0b011, 0>; let CextOpcode = "ORr_ORr" in -def M4_or_or : T_MType_acc_rr < "|= or", 0b110, 0b000, 0>; -def M4_and_or : T_MType_acc_rr < "&= or", 0b010, 0b001, 0>; +def M4_or_or : T_MType_acc_rr < "|=or", 0b110, 0b000, 0>; +def M4_and_or : T_MType_acc_rr < "&=or", 0b010, 0b001, 0>; // Rx[&|^]=and(Rs,Rt) -def M4_xor_and : T_MType_acc_rr < "^= and", 0b110, 0b010, 0>; +def M4_xor_and : T_MType_acc_rr < "^=and", 0b110, 0b010, 0>; let CextOpcode = "ORr_ANDr" in -def M4_or_and : T_MType_acc_rr < "|= and", 0b010, 0b011, 0>; -def M4_and_and : T_MType_acc_rr < "&= and", 0b010, 0b000, 0>; +def M4_or_and : T_MType_acc_rr < "|=and", 0b010, 0b011, 0>; +def M4_and_and : T_MType_acc_rr < "&=and", 0b010, 0b000, 0>; // Rx[&|^]=and(Rs,~Rt) -def M4_xor_andn : T_MType_acc_rr < "^= and", 0b001, 0b010, 0, [], 1>; -def M4_or_andn : T_MType_acc_rr < "|= and", 0b001, 0b000, 0, [], 1>; -def M4_and_andn : T_MType_acc_rr < "&= and", 0b001, 0b001, 0, [], 1>; +def M4_xor_andn : T_MType_acc_rr < "^=and", 0b001, 0b010, 0, [], 1>; +def M4_or_andn : T_MType_acc_rr < "|=and", 0b001, 0b000, 0, [], 1>; +def M4_and_andn : T_MType_acc_rr < "&=and", 0b001, 0b001, 0, [], 1>; // Compound or-or and or-and let isExtentSigned = 1, InputType = "imm", hasNewValue = 1, isExtendable = 1, @@ -1834,7 +1834,7 @@ class T_CompOR MajOp, SDNode OpNode> : MInst_acc <(outs IntRegs:$Rx), (ins IntRegs:$src1, IntRegs:$Rs, s10_0Ext:$s10), - "$Rx |= "#mnemonic#"($Rs, #$s10)", [], + "$Rx|="#mnemonic#"($Rs,#$s10)", [], "$src1 = $Rx", ALU64_tc_2_SLOT23>, ImmRegRel { bits<5> Rx; bits<5> Rs; @@ -1907,7 +1907,7 @@ let hasSideEffects = 0, hasNewValue = 1 in def S4_clbaddi : SInst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, s6_0Imm:$s6), - "$Rd = add(clb($Rs), #$s6)", [], "", S_2op_tc_2_SLOT23> { + "$Rd=add(clb($Rs),#$s6)", [], "", S_2op_tc_2_SLOT23> { bits<5> Rs; bits<5> Rd; bits<6> s6; @@ -1922,7 +1922,7 @@ let hasSideEffects = 0, hasNewValue = 1 in def S4_clbpaddi : SInst<(outs IntRegs:$Rd), (ins DoubleRegs:$Rs, s6_0Imm:$s6), - "$Rd = add(clb($Rs), #$s6)", [], "", S_2op_tc_2_SLOT23> { + "$Rd=add(clb($Rs),#$s6)", [], "", S_2op_tc_2_SLOT23> { bits<5> Rs; bits<5> Rd; bits<6> s6; @@ -1957,7 +1957,7 @@ let hasNewValue = 1, isExtendable = 1, opExtentBits = 6, opExtendable = 1 in def M4_mpyri_addi : MInst<(outs 
IntRegs:$Rd), (ins u6_0Ext:$u6, IntRegs:$Rs, u6_0Imm:$U6), - "$Rd = add(#$u6, mpyi($Rs, #$U6))" , [],"",ALU64_tc_3x_SLOT23> { + "$Rd=add(#$u6,mpyi($Rs,#$U6))" , [],"",ALU64_tc_3x_SLOT23> { bits<5> Rd; bits<6> u6; bits<5> Rs; @@ -1980,7 +1980,7 @@ isExtendable = 1, opExtentBits = 6, opExtendable = 1 in def M4_mpyrr_addi : MInst <(outs IntRegs:$Rd), (ins u6_0Ext:$u6, IntRegs:$Rs, IntRegs:$Rt), - "$Rd = add(#$u6, mpyi($Rs, $Rt))" , [], "", ALU64_tc_3x_SLOT23>, ImmRegRel { + "$Rd=add(#$u6,mpyi($Rs,$Rt))" , [], "", ALU64_tc_3x_SLOT23>, ImmRegRel { bits<5> Rd; bits<6> u6; bits<5> Rs; @@ -2000,8 +2000,8 @@ let hasNewValue = 1 in class T_AddMpy : ALU64Inst <(outs IntRegs:$dst), ins, - "$dst = add($src1, mpyi("#!if(MajOp,"$src3, #$src2))", - "#$src2, $src3))"), [], + "$dst=add($src1,mpyi("#!if(MajOp,"$src3,#$src2))", + "#$src2,$src3))"), [], "", ALU64_tc_3x_SLOT23> { bits<5> dst; bits<5> src1; @@ -2034,7 +2034,7 @@ let CextOpcode = "ADD_MPY", InputType = "reg", hasNewValue = 1 in def M4_mpyrr_addr: MInst_acc <(outs IntRegs:$Rx), (ins IntRegs:$Ru, IntRegs:$_src_, IntRegs:$Rs), - "$Rx = add($Ru, mpyi($_src_, $Rs))", [], + "$Rx=add($Ru,mpyi($_src_,$Rs))", [], "$_src_ = $Rx", M_tc_3x_SLOT23>, ImmRegRel { bits<5> Rx; bits<5> Ru; @@ -2104,7 +2104,7 @@ class T_vcmpImm cmpOp, bits<2> minOp, Operand ImmOprnd> : ALU64_rr <(outs PredRegs:$Pd), (ins DoubleRegs:$Rss, ImmOprnd:$Imm), - "$Pd = "#Str#"($Rss, #$Imm)", + "$Pd="#Str#"($Rss,#$Imm)", [], "", ALU64_tc_2early_SLOT23> { bits<2> Pd; bits<5> Rss; @@ -2126,7 +2126,7 @@ // Vector compare bytes def A4_vcmpbgt : T_vcmp <"vcmpb.gt", 0b1010>; -let AsmString = "$Pd = any8(vcmpb.eq($Rss, $Rtt))" in +let AsmString = "$Pd=any8(vcmpb.eq($Rss,$Rtt))" in def A4_vcmpbeq_any : T_vcmp <"any8(vcmpb.gt", 0b1000>; def A4_vcmpbeqi : T_vcmpImm <"vcmpb.eq", 0b00, 0b00, u8_0Imm>; @@ -2156,7 +2156,7 @@ class T_S4_ShiftOperate MajOp, InstrItinClass Itin> : MInst_acc<(outs IntRegs:$Rd), (ins u8_0Ext:$u8, IntRegs:$Rx, u5_0Imm:$U5), - "$Rd = "#MnOp#"(#$u8, "#MnSh#"($Rx, #$U5))", + "$Rd="#MnOp#"(#$u8,"#MnSh#"($Rx,#$U5))", [], "$Rd = $Rx", Itin> { bits<5> Rd; @@ -2212,7 +2212,7 @@ class T_S3op_carry MajOp> : SInst < (outs DoubleRegs:$Rdd, PredRegs:$Px), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt, PredRegs:$Pu), - "$Rdd = "#mnemonic#"($Rss, $Rtt, $Pu):carry", + "$Rdd="#mnemonic#"($Rss,$Rtt,$Pu):carry", [], "$Px = $Pu", S_3op_tc_1_SLOT23 > { bits<5> Rdd; bits<5> Rss; @@ -2236,7 +2236,7 @@ class T_S3op_6 MinOp, bit isUnsigned> : SInst <(outs DoubleRegs:$Rxx), (ins DoubleRegs:$dst2, DoubleRegs:$Rss, IntRegs:$Ru), - "$Rxx = "#mnemonic#"($Rss, $Ru)" , + "$Rxx="#mnemonic#"($Rss,$Ru)" , [] , "$dst2 = $Rxx"> { bits<5> Rxx; bits<5> Rss; @@ -2275,7 +2275,7 @@ // Shift an immediate left by register amount. let hasNewValue = 1, hasSideEffects = 0 in def S4_lsli: SInst <(outs IntRegs:$Rd), (ins s6_0Imm:$s6, IntRegs:$Rt), - "$Rd = lsl(#$s6, $Rt)" , [], "", S_3op_tc_1_SLOT23> { + "$Rd=lsl(#$s6,$Rt)" , [], "", S_3op_tc_1_SLOT23> { bits<5> Rd; bits<6> s6; bits<5> Rt; @@ -2364,18 +2364,18 @@ // multiclass to define MemOp instructions with register operand. 
multiclass MemOp_rr opcBits, Operand ImmOp> { - def L4_add#NAME : MemOp_rr_base ; - def L4_ior#NAME : MemOp_ri_base; + def L4_iadd#NAME : MemOp_ri_base ; + def L4_ior#NAME : MemOp_ri_base; } multiclass MemOp_base opcBits, Operand ImmOp> { @@ -2539,7 +2539,7 @@ class T_StoreAbsGP MajOp, bit isAbs, bit isHalf> : STInst<(outs), (ins ImmOp:$addr, RC:$src), - mnemonic # "(#$addr) = $src"#!if(isHalf, ".h",""), + mnemonic # "(#$addr)=$src"#!if(isHalf, ".h",""), [], "", V2LDST_tc_st_SLOT01> { bits<19> addr; bits<5> src; @@ -2575,7 +2575,7 @@ bit isHalf, bit isNot, bit isNew> : STInst<(outs), (ins PredRegs:$src1, u32_0MustExt:$absaddr, RC: $src2), !if(isNot, "if (!$src1", "if ($src1")#!if(isNew, ".new) ", - ") ")#mnemonic#"(#$absaddr) = $src2"#!if(isHalf, ".h",""), + ") ")#mnemonic#"(#$absaddr)=$src2"#!if(isHalf, ".h",""), [], "", ST_tc_st_SLOT01>, AddrModeRel { bits<2> src1; bits<6> absaddr; @@ -2647,7 +2647,7 @@ isNewValue = 1, opNewValue = 1 in class T_StoreAbsGP_NV MajOp> : NVInst_V4<(outs), (ins ImmOp:$addr, IntRegs:$src), - mnemonic #"(#$addr) = $src.new", + mnemonic #"(#$addr)=$src.new", [], "", V2LDST_tc_st_SLOT0> { bits<19> addr; bits<3> src; @@ -2679,7 +2679,7 @@ class T_StoreAbs_NV_Pred MajOp, bit isNot, bit isNew> : NVInst_V4<(outs), (ins PredRegs:$src1, u32_0MustExt:$absaddr, IntRegs:$src2), !if(isNot, "if (!$src1", "if ($src1")#!if(isNew, ".new) ", - ") ")#mnemonic#"(#$absaddr) = $src2.new", + ") ")#mnemonic#"(#$absaddr)=$src2.new", [], "", ST_tc_st_SLOT0>, AddrModeRel { bits<2> src1; bits<6> absaddr; @@ -2818,7 +2818,7 @@ class T_LoadAbsGP MajOp> : LDInst <(outs RC:$dst), (ins ImmOp:$addr), - "$dst = "#mnemonic# "(#$addr)", + "$dst="#mnemonic# "(#$addr)", [], "", V2LDST_tc_ld_SLOT01> { bits<5> dst; bits<19> addr; @@ -2867,7 +2867,7 @@ bit isPredNot, bit isPredNew> : LDInst <(outs RC:$dst), (ins PredRegs:$src1, u32_0MustExt:$absaddr), !if(isPredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", - ") ")#"$dst = "#mnemonic#"(#$absaddr)">, AddrModeRel { + ") ")#"$dst="#mnemonic#"(#$absaddr)">, AddrModeRel { bits<5> dst; bits<2> src1; bits<6> absaddr; @@ -2968,7 +2968,7 @@ def A4_boundscheck_lo: ALU64Inst < (outs PredRegs:$Pd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), - "$Pd = boundscheck($Rss, $Rtt):raw:lo"> { + "$Pd=boundscheck($Rss,$Rtt):raw:lo"> { bits<2> Pd; bits<5> Rss; bits<5> Rtt; @@ -2988,7 +2988,7 @@ def A4_boundscheck_hi: ALU64Inst < (outs PredRegs:$Pd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), - "$Pd = boundscheck($Rss, $Rtt):raw:hi"> { + "$Pd=boundscheck($Rss,$Rtt):raw:hi"> { bits<2> Pd; bits<5> Rss; bits<5> Rtt; @@ -3012,7 +3012,7 @@ let isPredicateLate = 1, hasSideEffects = 0 in def A4_tlbmatch : ALU64Inst<(outs PredRegs:$Pd), (ins DoubleRegs:$Rs, IntRegs:$Rt), - "$Pd = tlbmatch($Rs, $Rt)", + "$Pd=tlbmatch($Rs,$Rt)", [], "", ALU64_tc_2early_SLOT23> { bits<2> Pd; bits<5> Rs; @@ -3031,7 +3031,7 @@ // really do a load. 
let hasSideEffects = 1, mayLoad = 0 in def Y2_dcfetchbo : LD0Inst<(outs), (ins IntRegs:$Rs, u11_3Imm:$u11_3), - "dcfetch($Rs + #$u11_3)", + "dcfetch($Rs+#$u11_3)", [], "", LD_tc_ld_SLOT0> { bits<5> Rs; bits<14> u11_3; @@ -3054,7 +3054,7 @@ isTerminator = 1 in class CJInst_tstbit_R0 : InstHexagon<(outs), (ins IntRegs:$Rs, brtarget:$r9_2), - ""#px#" = tstbit($Rs, #0); if (" + ""#px#"=tstbit($Rs,#0); if (" #!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2", [], "", COMPOUND_CJ_ARCHDEPSLOT, TypeCOMPOUND>, OpcodeHexagon { bits<4> Rs; @@ -3100,7 +3100,7 @@ opExtendable = 2, isTerminator = 1 in class CJInst_RR : InstHexagon<(outs), (ins IntRegs:$Rs, IntRegs:$Rt, brtarget:$r9_2), - ""#px#" = cmp."#op#"($Rs, $Rt); if (" + ""#px#"=cmp."#op#"($Rs,$Rt); if (" #!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2", [], "", COMPOUND_CJ_ARCHDEPSLOT, TypeCOMPOUND>, OpcodeHexagon { bits<4> Rs; @@ -3154,7 +3154,7 @@ opExtentAlign = 2, opExtendable = 2, isTerminator = 1 in class CJInst_RU5 : InstHexagon<(outs), (ins IntRegs:$Rs, u5_0Imm:$U5, brtarget:$r9_2), - ""#px#" = cmp."#op#"($Rs, #$U5); if (" + ""#px#"=cmp."#op#"($Rs,#$U5); if (" #!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2", [], "", COMPOUND_CJ_ARCHDEPSLOT, TypeCOMPOUND>, OpcodeHexagon { bits<4> Rs; @@ -3209,7 +3209,7 @@ isTerminator = 1 in class CJInst_Rn1 : InstHexagon<(outs), (ins IntRegs:$Rs, n1Const:$n1, brtarget:$r9_2), - ""#px#" = cmp."#op#"($Rs,#$n1); if (" + ""#px#"=cmp."#op#"($Rs,#$n1); if (" #!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2", [], "", COMPOUND_CJ_ARCHDEPSLOT, TypeCOMPOUND>, OpcodeHexagon { bits<4> Rs; @@ -3263,7 +3263,7 @@ def J4_jumpseti: CJInst_JMPSET < (outs IntRegs:$Rd), (ins u6_0Imm:$U6, brtarget:$r9_2), - "$Rd = #$U6 ; jump $r9_2"> { + "$Rd=#$U6 ; jump $r9_2"> { bits<4> Rd; bits<6> U6; bits<11> r9_2; @@ -3283,7 +3283,7 @@ def J4_jumpsetr: CJInst_JMPSET < (outs IntRegs:$Rd), (ins IntRegs:$Rs, brtarget:$r9_2), - "$Rd = $Rs ; jump $r9_2"> { + "$Rd=$Rs ; jump $r9_2"> { bits<4> Rd; bits<4> Rs; bits<11> r9_2; Index: lib/Target/Hexagon/HexagonInstrInfoV5.td =================================================================== --- lib/Target/Hexagon/HexagonInstrInfoV5.td +++ lib/Target/Hexagon/HexagonInstrInfoV5.td @@ -52,7 +52,7 @@ let isAsmParserOnly = 1 in def S2_asr_i_p_rnd_goodsyntax : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6_0Imm:$src2), - "$dst = asrrnd($src1, #$src2)">; + "$dst=asrrnd($src1,#$src2)">; def C4_fastcorner9 : T_LOGICAL_2OP<"fastcorner9", 0b000, 0, 0>, Requires<[HasV5T]> { @@ -66,7 +66,7 @@ let hasNewValue = 1, validSubTargets = HasV5SubT in def S5_popcountp : ALU64_rr<(outs IntRegs:$Rd), (ins DoubleRegs:$Rss), - "$Rd = popcount($Rss)", [], "", S_2op_tc_2_SLOT23>, + "$Rd=popcount($Rss)", [], "", S_2op_tc_2_SLOT23>, Requires<[HasV5T]> { bits<5> Rd; bits<5> Rss; @@ -83,7 +83,7 @@ class T_MInstFloat MajOp, bits<3> MinOp> : MInst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Rd = "#mnemonic#"($Rs, $Rt)", [], + "$Rd="#mnemonic#"($Rs,$Rt)", [], "" , M_tc_3or4x_SLOT23 > , Requires<[HasV5T]> { bits<5> Rd; @@ -124,7 +124,7 @@ def F2_sfrecipa: MInst < (outs IntRegs:$Rd, PredRegs:$Pe), (ins IntRegs:$Rs, IntRegs:$Rt), - "$Rd, $Pe = sfrecipa($Rs, $Rt)">, + "$Rd,$Pe=sfrecipa($Rs,$Rt)">, Requires<[HasV5T]> { bits<5> Rd; bits<2> Pe; @@ -146,7 +146,7 @@ class T_fcmp MinOp, list pattern = [] > : ALU64Inst <(outs PredRegs:$dst), (ins RC:$src1, RC:$src2), - "$dst = "#mnemonic#"($src1, $src2)", pattern, + "$dst="#mnemonic#"($src1,$src2)", pattern, "" , ALU64_tc_2early_SLOT23 > , Requires<[HasV5T]> { bits<2> 
dst; @@ -189,7 +189,7 @@ class F2_RDD_RSS_CONVERT MinOp, string chop =""> : SInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss), - "$Rdd = "#mnemonic#"($Rss)"#chop, [], "", + "$Rdd="#mnemonic#"($Rss)"#chop, [], "", S_2op_tc_3or4x_SLOT23> { bits<5> Rdd; bits<5> Rss; @@ -206,7 +206,7 @@ class F2_RDD_RS_CONVERT MinOp, string chop =""> : SInst <(outs DoubleRegs:$Rdd), (ins IntRegs:$Rs), - "$Rdd = "#mnemonic#"($Rs)"#chop, [], "", + "$Rdd="#mnemonic#"($Rs)"#chop, [], "", S_2op_tc_3or4x_SLOT23> { bits<5> Rdd; bits<5> Rs; @@ -223,7 +223,7 @@ class F2_RD_RSS_CONVERT MinOp, string chop =""> : SInst <(outs IntRegs:$Rd), (ins DoubleRegs:$Rss), - "$Rd = "#mnemonic#"($Rss)"#chop, [], "", + "$Rd="#mnemonic#"($Rss)"#chop, [], "", S_2op_tc_3or4x_SLOT23> { bits<5> Rd; bits<5> Rss; @@ -241,7 +241,7 @@ class F2_RD_RS_CONVERT MajOp, bits<3> MinOp, string chop =""> : SInst <(outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = "#mnemonic#"($Rs)"#chop, [], "", + "$Rd="#mnemonic#"($Rs)"#chop, [], "", S_2op_tc_3or4x_SLOT23> { bits<5> Rd; bits<5> Rs; @@ -296,7 +296,7 @@ // Fix up radicand. let Uses = [USR], isFP = 1, hasNewValue = 1 in def F2_sffixupr: SInst<(outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = sffixupr($Rs)", + "$Rd=sffixupr($Rs)", [], "" , S_2op_tc_3or4x_SLOT23>, Requires<[HasV5T]> { bits<5> Rd; bits<5> Rs; @@ -314,7 +314,7 @@ class T_sfmpy_acc : MInst<(outs IntRegs:$Rx), (ins IntRegs:$dst2, IntRegs:$Rs, IntRegs:$Rt), - "$Rx "#!if(isSub, "-=","+=")#" sfmpy($Rs, $Rt)"#!if(isLib, ":lib",""), + "$Rx"#!if(isSub, "-=","+=")#"sfmpy($Rs,$Rt)"#!if(isLib, ":lib",""), [], "$dst2 = $Rx" , M_tc_3or4x_SLOT23 > , Requires<[HasV5T]> { bits<5> Rx; @@ -343,7 +343,7 @@ def F2_sffma_sc: MInst < (outs IntRegs:$Rx), (ins IntRegs:$dst2, IntRegs:$Rs, IntRegs:$Rt, PredRegs:$Pu), - "$Rx += sfmpy($Rs, $Rt, $Pu):scale" , + "$Rx+=sfmpy($Rs,$Rt,$Pu):scale" , [], "$dst2 = $Rx" , M_tc_3or4x_SLOT23 > , Requires<[HasV5T]> { bits<5> Rx; @@ -371,7 +371,7 @@ class T_ASRHUB : SInst <(outs IntRegs:$Rd), (ins DoubleRegs:$Rss, u4_0Imm:$u4), - "$Rd = vasrhub($Rss, #$u4):"#!if(isSat, "sat", "raw"), + "$Rd=vasrhub($Rss,#$u4):"#!if(isSat, "sat", "raw"), [], "", S_2op_tc_2_SLOT23>, Requires<[HasV5T]> { bits<5> Rd; @@ -395,13 +395,13 @@ let isAsmParserOnly = 1 in def S5_asrhub_rnd_sat_goodsyntax : SInst <(outs IntRegs:$Rd), (ins DoubleRegs:$Rss, u4_0Imm:$u4), - "$Rd = vasrhub($Rss, #$u4):rnd:sat">, Requires<[HasV5T]>; + "$Rd=vasrhub($Rss,#$u4):rnd:sat">, Requires<[HasV5T]>; // S5_vasrhrnd: Vector arithmetic shift right by immediate with round. 
let hasSideEffects = 0 in def S5_vasrhrnd : SInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, u4_0Imm:$u4), - "$Rdd = vasrh($Rss, #$u4):raw">, + "$Rdd=vasrh($Rss,#$u4):raw">, Requires<[HasV5T]> { bits<5> Rdd; bits<5> Rss; @@ -420,7 +420,7 @@ let isAsmParserOnly = 1 in def S5_vasrhrnd_goodsyntax : SInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, u4_0Imm:$u4), - "$Rdd = vasrh($Rss,#$u4):rnd">, Requires<[HasV5T]>; + "$Rdd=vasrh($Rss,#$u4):rnd">, Requires<[HasV5T]>; // Floating point reciprocal square root approximation let Uses = [USR], isPredicateLate = 1, isFP = 1, @@ -429,7 +429,7 @@ def F2_sfinvsqrta: SInst < (outs IntRegs:$Rd, PredRegs:$Pe), (ins IntRegs:$Rs), - "$Rd, $Pe = sfinvsqrta($Rs)" > , + "$Rd,$Pe=sfinvsqrta($Rs)" > , Requires<[HasV5T]> { bits<5> Rd; bits<2> Pe; @@ -456,7 +456,7 @@ let Uses = [USR], isFP = 1 in def F2_dfclass: ALU64Inst<(outs PredRegs:$Pd), (ins DoubleRegs:$Rss, u5_0Imm:$u5), - "$Pd = dfclass($Rss, #$u5)", + "$Pd=dfclass($Rss,#$u5)", [], "" , ALU64_tc_2early_SLOT23 > , Requires<[HasV5T]> { bits<2> Pd; bits<5> Rss; @@ -474,7 +474,7 @@ // Instructions to create floating point constant class T_fimm RegType, bit isNeg> : ALU64Inst<(outs RC:$dst), (ins u10_0Imm:$src), - "$dst = "#mnemonic#"(#$src)"#!if(isNeg, ":neg", ":pos"), + "$dst="#mnemonic#"(#$src)"#!if(isNeg, ":neg", ":pos"), [], "", ALU64_tc_2_SLOT23>, Requires<[HasV5T]> { bits<5> dst; bits<10> src; Index: lib/Target/Hexagon/HexagonInstrInfoV60.td =================================================================== --- lib/Target/Hexagon/HexagonInstrInfoV60.td +++ lib/Target/Hexagon/HexagonInstrInfoV60.td @@ -40,51 +40,51 @@ asmStr>; let isCVLoadable = 1, hasNewValue = 1 in { - def V6_vL32b_ai : T_vload_ai <"$dst = vmem($src1+#$src2)">, + def V6_vL32b_ai : T_vload_ai <"$dst=vmem($src1+#$src2)">, V6_vL32b_ai_enc; - def V6_vL32b_nt_ai : T_vload_ai <"$dst = vmem($src1+#$src2):nt">, + def V6_vL32b_nt_ai : T_vload_ai <"$dst=vmem($src1+#$src2):nt">, V6_vL32b_nt_ai_enc; // 128B - def V6_vL32b_ai_128B : T_vload_ai_128B <"$dst = vmem($src1+#$src2)">, + def V6_vL32b_ai_128B : T_vload_ai_128B <"$dst=vmem($src1+#$src2)">, V6_vL32b_ai_128B_enc; - def V6_vL32b_nt_ai_128B : T_vload_ai_128B <"$dst = vmem($src1+#$src2):nt">, + def V6_vL32b_nt_ai_128B : T_vload_ai_128B <"$dst=vmem($src1+#$src2):nt">, V6_vL32b_nt_ai_128B_enc; } let Itinerary = CVI_VM_VP_LDU, Type = TypeCVI_VM_VP_LDU, hasNewValue = 1 in { - def V6_vL32Ub_ai : T_vload_ai <"$dst = vmemu($src1+#$src2)">, + def V6_vL32Ub_ai : T_vload_ai <"$dst=vmemu($src1+#$src2)">, V6_vL32Ub_ai_enc; - def V6_vL32Ub_ai_128B : T_vload_ai_128B <"$dst = vmemu($src1+#$src2)">, + def V6_vL32Ub_ai_128B : T_vload_ai_128B <"$dst=vmemu($src1+#$src2)">, V6_vL32Ub_ai_128B_enc; } let Itinerary = CVI_VM_LD, Type = TypeCVI_VM_LD, isCVLoad = 1, hasNewValue = 1 in { - def V6_vL32b_cur_ai : T_vload_ai <"$dst.cur = vmem($src1+#$src2)">, + def V6_vL32b_cur_ai : T_vload_ai <"$dst.cur=vmem($src1+#$src2)">, V6_vL32b_cur_ai_enc; - def V6_vL32b_nt_cur_ai : T_vload_ai <"$dst.cur = vmem($src1+#$src2):nt">, + def V6_vL32b_nt_cur_ai : T_vload_ai <"$dst.cur=vmem($src1+#$src2):nt">, V6_vL32b_nt_cur_ai_enc; // 128B def V6_vL32b_cur_ai_128B : T_vload_ai_128B - <"$dst.cur = vmem($src1+#$src2)">, + <"$dst.cur=vmem($src1+#$src2)">, V6_vL32b_cur_ai_128B_enc; def V6_vL32b_nt_cur_ai_128B : T_vload_ai_128B - <"$dst.cur = vmem($src1+#$src2):nt">, + <"$dst.cur=vmem($src1+#$src2):nt">, V6_vL32b_nt_cur_ai_128B_enc; } let Itinerary = CVI_VM_TMP_LD, Type = TypeCVI_VM_TMP_LD, hasNewValue = 1 in { - def V6_vL32b_tmp_ai : 
T_vload_ai <"$dst.tmp = vmem($src1+#$src2)">, + def V6_vL32b_tmp_ai : T_vload_ai <"$dst.tmp=vmem($src1+#$src2)">, V6_vL32b_tmp_ai_enc; - def V6_vL32b_nt_tmp_ai : T_vload_ai <"$dst.tmp = vmem($src1+#$src2):nt">, + def V6_vL32b_nt_tmp_ai : T_vload_ai <"$dst.tmp=vmem($src1+#$src2):nt">, V6_vL32b_nt_tmp_ai_enc; // 128B def V6_vL32b_tmp_ai_128B : T_vload_ai_128B - <"$dst.tmp = vmem($src1+#$src2)">, + <"$dst.tmp=vmem($src1+#$src2)">, V6_vL32b_tmp_ai_128B_enc; def V6_vL32b_nt_tmp_ai_128B : T_vload_ai_128B - <"$dst.tmp = vmem($src1+#$src2)">, + <"$dst.tmp=vmem($src1+#$src2)">, V6_vL32b_nt_tmp_ai_128B_enc; } @@ -95,7 +95,7 @@ class T_vstore_ai : V6_STInst <(outs), (ins IntRegs:$src1, ImmOp:$src2, RC:$src3), - mnemonic#"($src1+#$src2)"#!if(isNT, ":nt", "")#" = $src3">, NewValueRel { + mnemonic#"($src1+#$src2)"#!if(isNT, ":nt", "")#"=$src3">, NewValueRel { let BaseOpcode = baseOp; } @@ -134,7 +134,7 @@ isPredicable = 1, Itinerary = CVI_VM_NEW_ST, Type = TypeCVI_VM_NEW_ST in class T_vstore_new_ai : V6_STInst <(outs ), (ins IntRegs:$src1, ImmOp:$src2, RC:$src3), - "vmem($src1+#$src2)"#!if(isNT, ":nt", "")#" = $src3.new">, NewValueRel { + "vmem($src1+#$src2)"#!if(isNT, ":nt", "")#"=$src3.new">, NewValueRel { let BaseOpcode = baseOp; } @@ -166,7 +166,7 @@ : V6_STInst <(outs), (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$src3, RC:$src4), "if ("#!if(isPredNot, "!", "")#"$src1) " - #mnemonic#"($src2+#$src3)"#!if(isNT, ":nt", "")#" = $src4">, NewValueRel { + #mnemonic#"($src2+#$src3)"#!if(isNT, ":nt", "")#"=$src4">, NewValueRel { let isPredicatedFalse = isPredNot; let BaseOpcode = baseOp; } @@ -230,7 +230,7 @@ : V6_STInst <(outs), (ins VecPredRegs:$src1, IntRegs:$src2, ImmOp:$src3, RC:$src4), "if ("#!if(isPredNot, "!", "")#"$src1) vmem($src2+#$src3)" - #!if(isNT, ":nt", "")#" = $src4"> { + #!if(isNT, ":nt", "")#"=$src4"> { let isPredicatedFalse = isPredNot; } @@ -268,8 +268,8 @@ bit isPredNot, bit isNT> : V6_STInst <(outs), (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$src3, RC:$src4), - "if("#!if(isPredNot, "!", "")#"$src1) vmem($src2+#$src3)" - #!if(isNT, ":nt", "")#" = $src4.new">, NewValueRel { + "if ("#!if(isPredNot, "!", "")#"$src1) vmem($src2+#$src3)" + #!if(isNT, ":nt", "")#"=$src4.new">, NewValueRel { let isPredicatedFalse = isPredNot; let BaseOpcode = baseOp; } @@ -325,50 +325,50 @@ : T_vload_pi ; let isCVLoadable = 1 in { - def V6_vL32b_pi : T_vload_pi_64B <"$dst = vmem($src1++#$src2)">, + def V6_vL32b_pi : T_vload_pi_64B <"$dst=vmem($src1++#$src2)">, V6_vL32b_pi_enc; - def V6_vL32b_nt_pi : T_vload_pi_64B <"$dst = vmem($src1++#$src2):nt">, + def V6_vL32b_nt_pi : T_vload_pi_64B <"$dst=vmem($src1++#$src2):nt">, V6_vL32b_nt_pi_enc; // 128B - def V6_vL32b_pi_128B : T_vload_pi_128B <"$dst = vmem($src1++#$src2)">, + def V6_vL32b_pi_128B : T_vload_pi_128B <"$dst=vmem($src1++#$src2)">, V6_vL32b_pi_128B_enc; - def V6_vL32b_nt_pi_128B : T_vload_pi_128B <"$dst = vmem($src1++#$src2):nt">, + def V6_vL32b_nt_pi_128B : T_vload_pi_128B <"$dst=vmem($src1++#$src2):nt">, V6_vL32b_nt_pi_128B_enc; } let Itinerary = CVI_VM_VP_LDU, Type = TypeCVI_VM_VP_LDU in { - def V6_vL32Ub_pi : T_vload_pi_64B <"$dst = vmemu($src1++#$src2)">, + def V6_vL32Ub_pi : T_vload_pi_64B <"$dst=vmemu($src1++#$src2)">, V6_vL32Ub_pi_enc; // 128B - def V6_vL32Ub_pi_128B : T_vload_pi_128B <"$dst = vmemu($src1++#$src2)">, + def V6_vL32Ub_pi_128B : T_vload_pi_128B <"$dst=vmemu($src1++#$src2)">, V6_vL32Ub_pi_128B_enc; } let isCVLoad = 1, Itinerary = CVI_VM_LD, Type = TypeCVI_VM_LD in { - def V6_vL32b_cur_pi : T_vload_pi_64B <"$dst.cur = 
vmem($src1++#$src2)">, + def V6_vL32b_cur_pi : T_vload_pi_64B <"$dst.cur=vmem($src1++#$src2)">, V6_vL32b_cur_pi_enc; - def V6_vL32b_nt_cur_pi : T_vload_pi_64B <"$dst.cur = vmem($src1++#$src2):nt">, + def V6_vL32b_nt_cur_pi : T_vload_pi_64B <"$dst.cur=vmem($src1++#$src2):nt">, V6_vL32b_nt_cur_pi_enc; // 128B def V6_vL32b_cur_pi_128B : T_vload_pi_128B - <"$dst.cur = vmem($src1++#$src2)">, + <"$dst.cur=vmem($src1++#$src2)">, V6_vL32b_cur_pi_128B_enc; def V6_vL32b_nt_cur_pi_128B : T_vload_pi_128B - <"$dst.cur = vmem($src1++#$src2):nt">, + <"$dst.cur=vmem($src1++#$src2):nt">, V6_vL32b_nt_cur_pi_128B_enc; } let Itinerary = CVI_VM_TMP_LD, Type = TypeCVI_VM_TMP_LD in { - def V6_vL32b_tmp_pi : T_vload_pi_64B <"$dst.tmp = vmem($src1++#$src2)">, + def V6_vL32b_tmp_pi : T_vload_pi_64B <"$dst.tmp=vmem($src1++#$src2)">, V6_vL32b_tmp_pi_enc; - def V6_vL32b_nt_tmp_pi : T_vload_pi_64B <"$dst.tmp = vmem($src1++#$src2):nt">, + def V6_vL32b_nt_tmp_pi : T_vload_pi_64B <"$dst.tmp=vmem($src1++#$src2):nt">, V6_vL32b_nt_tmp_pi_enc; //128B def V6_vL32b_tmp_pi_128B : T_vload_pi_128B - <"$dst.tmp = vmem($src1++#$src2)">, + <"$dst.tmp=vmem($src1++#$src2)">, V6_vL32b_tmp_pi_128B_enc; def V6_vL32b_nt_tmp_pi_128B : T_vload_pi_128B - <"$dst.tmp = vmem($src1++#$src2):nt">, + <"$dst.tmp=vmem($src1++#$src2):nt">, V6_vL32b_nt_tmp_pi_128B_enc; } @@ -380,7 +380,7 @@ RegisterClass RC, bit isNT> : V6_STInst <(outs IntRegs:$_dst_), (ins IntRegs:$src1, ImmOp:$src2, RC:$src3), - mnemonic#"($src1++#$src2)"#!if(isNT, ":nt", "")#" = $src3", [], + mnemonic#"($src1++#$src2)"#!if(isNT, ":nt", "")#"=$src3", [], "$src1 = $_dst_">, NewValueRel { let BaseOpcode = baseOp; } @@ -423,7 +423,7 @@ class T_vstore_new_pi : V6_STInst <(outs IntRegs:$_dst_), (ins IntRegs:$src1, ImmOp:$src2, RC:$src3), - "vmem($src1++#$src2)"#!if(isNT, ":nt", "")#" = $src3.new", [], + "vmem($src1++#$src2)"#!if(isNT, ":nt", "")#"=$src3.new", [], "$src1 = $_dst_">, NewValueRel { let BaseOpcode = baseOp; } @@ -458,7 +458,7 @@ : V6_STInst<(outs IntRegs:$_dst_), (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$src3, RC:$src4), "if ("#!if(isPredNot, "!", "")#"$src1) "#mnemonic#"($src2++#$src3)" - #!if(isNT, ":nt", "")#" = $src4", [], + #!if(isNT, ":nt", "")#"=$src4", [], "$src2 = $_dst_">, NewValueRel { let isPredicatedFalse = isPredNot; let BaseOpcode = baseOp; @@ -521,7 +521,7 @@ : V6_STInst <(outs IntRegs:$_dst_), (ins VecPredRegs:$src1, IntRegs:$src2, ImmOp:$src3, RC:$src4), "if ("#!if(isPredNot, "!", "")#"$src1) vmem($src2++#$src3)" - #!if(isNT, ":nt", "")#" = $src4", [], + #!if(isNT, ":nt", "")#"=$src4", [], "$src2 = $_dst_">; let accessSize = Vector64Access in @@ -561,8 +561,8 @@ bit isPredNot, bit isNT> : V6_STInst <(outs IntRegs:$_dst_), (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$src3, RC:$src4), - "if("#!if(isPredNot, "!", "")#"$src1) vmem($src2++#$src3)" - #!if(isNT, ":nt", "")#" = $src4.new", [], + "if ("#!if(isPredNot, "!", "")#"$src1) vmem($src2++#$src3)" + #!if(isNT, ":nt", "")#"=$src4.new", [], "$src2 = $_dst_"> , NewValueRel { let isPredicatedFalse = isPredNot; let BaseOpcode = baseOp; @@ -610,27 +610,27 @@ "$src1 = $_dst_">, NewValueRel; let isCVLoadable = 1 in { - def V6_vL32b_ppu : T_vload_ppu <"$dst = vmem($src1++$src2)">, + def V6_vL32b_ppu : T_vload_ppu <"$dst=vmem($src1++$src2)">, V6_vL32b_ppu_enc; - def V6_vL32b_nt_ppu : T_vload_ppu <"$dst = vmem($src1++$src2):nt">, + def V6_vL32b_nt_ppu : T_vload_ppu <"$dst=vmem($src1++$src2):nt">, V6_vL32b_nt_ppu_enc; } let Itinerary = CVI_VM_VP_LDU, Type = TypeCVI_VM_VP_LDU in -def V6_vL32Ub_ppu : T_vload_ppu <"$dst = 
vmemu($src1++$src2)">, +def V6_vL32Ub_ppu : T_vload_ppu <"$dst=vmemu($src1++$src2)">, V6_vL32Ub_ppu_enc; let isCVLoad = 1, Itinerary = CVI_VM_CUR_LD, Type = TypeCVI_VM_CUR_LD in { - def V6_vL32b_cur_ppu : T_vload_ppu <"$dst.cur = vmem($src1++$src2)">, + def V6_vL32b_cur_ppu : T_vload_ppu <"$dst.cur=vmem($src1++$src2)">, V6_vL32b_cur_ppu_enc; - def V6_vL32b_nt_cur_ppu : T_vload_ppu <"$dst.cur = vmem($src1++$src2):nt">, + def V6_vL32b_nt_cur_ppu : T_vload_ppu <"$dst.cur=vmem($src1++$src2):nt">, V6_vL32b_nt_cur_ppu_enc; } let Itinerary = CVI_VM_TMP_LD, Type = TypeCVI_VM_TMP_LD in { - def V6_vL32b_tmp_ppu : T_vload_ppu <"$dst.tmp = vmem($src1++$src2)">, + def V6_vL32b_tmp_ppu : T_vload_ppu <"$dst.tmp=vmem($src1++$src2)">, V6_vL32b_tmp_ppu_enc; - def V6_vL32b_nt_tmp_ppu : T_vload_ppu <"$dst.tmp = vmem($src1++$src2):nt">, + def V6_vL32b_nt_tmp_ppu : T_vload_ppu <"$dst.tmp=vmem($src1++$src2):nt">, V6_vL32b_nt_tmp_ppu_enc; } @@ -641,7 +641,7 @@ class T_vstore_ppu : V6_STInst <(outs IntRegs:$_dst_), (ins IntRegs:$src1, ModRegs:$src2, VectorRegs:$src3), - mnemonic#"($src1++$src2)"#!if(isNT, ":nt", "")#" = $src3", [], + mnemonic#"($src1++$src2)"#!if(isNT, ":nt", "")#"=$src3", [], "$src1 = $_dst_">, NewValueRel; let isNVStorable = 1, BaseOpcode = "vS32b_ppu" in { @@ -663,7 +663,7 @@ class T_vstore_new_ppu : V6_STInst <(outs IntRegs:$_dst_), (ins IntRegs:$src1, ModRegs:$src2, VectorRegs:$src3), - "vmem($src1++$src2)"#!if(isNT, ":nt", "")#" = $src3.new", [], + "vmem($src1++$src2)"#!if(isNT, ":nt", "")#"=$src3.new", [], "$src1 = $_dst_">, NewValueRel; let BaseOpcode = "vS32b_ppu" in @@ -680,7 +680,7 @@ : V6_STInst<(outs IntRegs:$_dst_), (ins PredRegs:$src1, IntRegs:$src2, ModRegs:$src3, VectorRegs:$src4), "if ("#!if(isPredNot, "!", "")#"$src1) "#mnemonic#"($src2++$src3)" - #!if(isNT, ":nt", "")#" = $src4", [], + #!if(isNT, ":nt", "")#"=$src4", [], "$src2 = $_dst_">, NewValueRel { let isPredicatedFalse = isPredNot; } @@ -712,7 +712,7 @@ : V6_STInst <(outs IntRegs:$_dst_), (ins VecPredRegs:$src1, IntRegs:$src2, ModRegs:$src3, VectorRegs:$src4), "if ("#!if(isPredNot, "!", "")#"$src1) vmem($src2++$src3)" - #!if(isNT, ":nt", "")#" = $src4", [], + #!if(isNT, ":nt", "")#"=$src4", [], "$src2 = $_dst_">, NewValueRel; def V6_vS32b_qpred_ppu : T_vstore_qpred_ppu, V6_vS32b_qpred_ppu_enc; @@ -730,8 +730,8 @@ class T_vstore_new_pred_ppu : V6_STInst <(outs IntRegs:$_dst_), (ins PredRegs:$src1, IntRegs:$src2, ModRegs:$src3, VectorRegs:$src4), - "if("#!if(isPredNot, "!", "")#"$src1) vmem($src2++$src3)" - #!if(isNT, ":nt", "")#" = $src4.new", [], + "if ("#!if(isPredNot, "!", "")#"$src1) vmem($src2++$src3)" + #!if(isNT, ":nt", "")#"=$src4.new", [], "$src2 = $_dst_">, NewValueRel { let isPredicatedFalse = isPredNot; } @@ -851,56 +851,56 @@ multiclass T_vmpy_WV : T_vmpy ; -defm V6_vtmpyb :T_vmpy_WW<"$dst.h = vtmpy($src1.b,$src2.b)">, V6_vtmpyb_enc; -defm V6_vtmpybus :T_vmpy_WW<"$dst.h = vtmpy($src1.ub,$src2.b)">, V6_vtmpybus_enc; -defm V6_vdsaduh :T_vmpy_WW<"$dst.uw = vdsad($src1.uh,$src2.uh)">, V6_vdsaduh_enc; -defm V6_vmpybus :T_vmpy_WV<"$dst.h = vmpy($src1.ub,$src2.b)">, V6_vmpybus_enc; -defm V6_vmpabus :T_vmpy_WW<"$dst.h = vmpa($src1.ub,$src2.b)">, V6_vmpabus_enc; -defm V6_vmpahb :T_vmpy_WW<"$dst.w = vmpa($src1.h,$src2.b)">, V6_vmpahb_enc; -defm V6_vmpyh :T_vmpy_WV<"$dst.w = vmpy($src1.h,$src2.h)">, V6_vmpyh_enc; -defm V6_vmpyuh :T_vmpy_WV<"$dst.uw = vmpy($src1.uh,$src2.uh)">, V6_vmpyuh_enc; -defm V6_vmpyiwh :T_vmpy_VV<"$dst.w = vmpyi($src1.w,$src2.h)">, V6_vmpyiwh_enc; -defm V6_vtmpyhb :T_vmpy_WW<"$dst.w = 
vtmpy($src1.h,$src2.b)">, V6_vtmpyhb_enc; -defm V6_vmpyub :T_vmpy_WV<"$dst.uh = vmpy($src1.ub,$src2.ub)">, V6_vmpyub_enc; +defm V6_vtmpyb :T_vmpy_WW<"$dst.h=vtmpy($src1.b,$src2.b)">, V6_vtmpyb_enc; +defm V6_vtmpybus :T_vmpy_WW<"$dst.h=vtmpy($src1.ub,$src2.b)">, V6_vtmpybus_enc; +defm V6_vdsaduh :T_vmpy_WW<"$dst.uw=vdsad($src1.uh,$src2.uh)">, V6_vdsaduh_enc; +defm V6_vmpybus :T_vmpy_WV<"$dst.h=vmpy($src1.ub,$src2.b)">, V6_vmpybus_enc; +defm V6_vmpabus :T_vmpy_WW<"$dst.h=vmpa($src1.ub,$src2.b)">, V6_vmpabus_enc; +defm V6_vmpahb :T_vmpy_WW<"$dst.w=vmpa($src1.h,$src2.b)">, V6_vmpahb_enc; +defm V6_vmpyh :T_vmpy_WV<"$dst.w=vmpy($src1.h,$src2.h)">, V6_vmpyh_enc; +defm V6_vmpyuh :T_vmpy_WV<"$dst.uw=vmpy($src1.uh,$src2.uh)">, V6_vmpyuh_enc; +defm V6_vmpyiwh :T_vmpy_VV<"$dst.w=vmpyi($src1.w,$src2.h)">, V6_vmpyiwh_enc; +defm V6_vtmpyhb :T_vmpy_WW<"$dst.w=vtmpy($src1.h,$src2.b)">, V6_vtmpyhb_enc; +defm V6_vmpyub :T_vmpy_WV<"$dst.uh=vmpy($src1.ub,$src2.ub)">, V6_vmpyub_enc; let Itinerary = CVI_VX_LONG, Type = TypeCVI_VX in -defm V6_vmpyihb :T_vmpy_VV<"$dst.h = vmpyi($src1.h,$src2.b)">, V6_vmpyihb_enc; +defm V6_vmpyihb :T_vmpy_VV<"$dst.h=vmpyi($src1.h,$src2.b)">, V6_vmpyihb_enc; defm V6_vdmpybus_dv : - T_vmpy_WW <"$dst.h = vdmpy($src1.ub,$src2.b)">, V6_vdmpybus_dv_enc; + T_vmpy_WW <"$dst.h=vdmpy($src1.ub,$src2.b)">, V6_vdmpybus_dv_enc; defm V6_vdmpyhsusat : - T_vmpy_VV <"$dst.w = vdmpy($src1.h,$src2.uh):sat">, V6_vdmpyhsusat_enc; + T_vmpy_VV <"$dst.w=vdmpy($src1.h,$src2.uh):sat">, V6_vdmpyhsusat_enc; defm V6_vdmpyhsuisat : - T_vmpy_VW <"$dst.w = vdmpy($src1.h,$src2.uh,#1):sat">, V6_vdmpyhsuisat_enc; + T_vmpy_VW <"$dst.w=vdmpy($src1.h,$src2.uh,#1):sat">, V6_vdmpyhsuisat_enc; defm V6_vdmpyhsat : - T_vmpy_VV <"$dst.w = vdmpy($src1.h,$src2.h):sat">, V6_vdmpyhsat_enc; + T_vmpy_VV <"$dst.w=vdmpy($src1.h,$src2.h):sat">, V6_vdmpyhsat_enc; defm V6_vdmpyhisat : - T_vmpy_VW <"$dst.w = vdmpy($src1.h,$src2.h):sat">, V6_vdmpyhisat_enc; + T_vmpy_VW <"$dst.w=vdmpy($src1.h,$src2.h):sat">, V6_vdmpyhisat_enc; defm V6_vdmpyhb_dv : - T_vmpy_WW <"$dst.w = vdmpy($src1.h,$src2.b)">, V6_vdmpyhb_dv_enc; + T_vmpy_WW <"$dst.w=vdmpy($src1.h,$src2.b)">, V6_vdmpyhb_dv_enc; defm V6_vmpyhss : - T_vmpy_VV <"$dst.h = vmpy($src1.h,$src2.h):<<1:sat">, V6_vmpyhss_enc; + T_vmpy_VV <"$dst.h=vmpy($src1.h,$src2.h):<<1:sat">, V6_vmpyhss_enc; defm V6_vmpyhsrs : - T_vmpy_VV <"$dst.h = vmpy($src1.h,$src2.h):<<1:rnd:sat">, V6_vmpyhsrs_enc; + T_vmpy_VV <"$dst.h=vmpy($src1.h,$src2.h):<<1:rnd:sat">, V6_vmpyhsrs_enc; let Itinerary = CVI_VP, Type = TypeCVI_VP in -defm V6_vror : T_vmpy_VV <"$dst = vror($src1,$src2)">, V6_vror_enc; +defm V6_vror : T_vmpy_VV <"$dst=vror($src1,$src2)">, V6_vror_enc; let Itinerary = CVI_VX, Type = TypeCVI_VX in { -defm V6_vdmpyhb : T_vmpy_VV<"$dst.w = vdmpy($src1.h,$src2.b)">, V6_vdmpyhb_enc; -defm V6_vrmpybus : T_vmpy_VV<"$dst.w = vrmpy($src1.ub,$src2.b)">, V6_vrmpybus_enc; -defm V6_vdmpybus : T_vmpy_VV<"$dst.h = vdmpy($src1.ub,$src2.b)">, V6_vdmpybus_enc; -defm V6_vmpyiwb : T_vmpy_VV<"$dst.w = vmpyi($src1.w,$src2.b)">, V6_vmpyiwb_enc; -defm V6_vrmpyub : T_vmpy_VV<"$dst.uw = vrmpy($src1.ub,$src2.ub)">, V6_vrmpyub_enc; +defm V6_vdmpyhb : T_vmpy_VV<"$dst.w=vdmpy($src1.h,$src2.b)">, V6_vdmpyhb_enc; +defm V6_vrmpybus : T_vmpy_VV<"$dst.w=vrmpy($src1.ub,$src2.b)">, V6_vrmpybus_enc; +defm V6_vdmpybus : T_vmpy_VV<"$dst.h=vdmpy($src1.ub,$src2.b)">, V6_vdmpybus_enc; +defm V6_vmpyiwb : T_vmpy_VV<"$dst.w=vmpyi($src1.w,$src2.b)">, V6_vmpyiwb_enc; +defm V6_vrmpyub : T_vmpy_VV<"$dst.uw=vrmpy($src1.ub,$src2.ub)">, V6_vrmpyub_enc; } let 
Itinerary = CVI_VS, Type = TypeCVI_VS in { -defm V6_vasrw : T_vmpy_VV <"$dst.w = vasr($src1.w,$src2)">, V6_vasrw_enc; -defm V6_vasrh : T_vmpy_VV <"$dst.h = vasr($src1.h,$src2)">, V6_vasrh_enc; -defm V6_vaslw : T_vmpy_VV <"$dst.w = vasl($src1.w,$src2)">, V6_vaslw_enc; -defm V6_vaslh : T_vmpy_VV <"$dst.h = vasl($src1.h,$src2)">, V6_vaslh_enc; -defm V6_vlsrw : T_vmpy_VV <"$dst.uw = vlsr($src1.uw,$src2)">, V6_vlsrw_enc; -defm V6_vlsrh : T_vmpy_VV <"$dst.uh = vlsr($src1.uh,$src2)">, V6_vlsrh_enc; +defm V6_vasrw : T_vmpy_VV <"$dst.w=vasr($src1.w,$src2)">, V6_vasrw_enc; +defm V6_vasrh : T_vmpy_VV <"$dst.h=vasr($src1.h,$src2)">, V6_vasrh_enc; +defm V6_vaslw : T_vmpy_VV <"$dst.w=vasl($src1.w,$src2)">, V6_vaslw_enc; +defm V6_vaslh : T_vmpy_VV <"$dst.h=vasl($src1.h,$src2)">, V6_vaslh_enc; +defm V6_vlsrw : T_vmpy_VV <"$dst.uw=vlsr($src1.uw,$src2)">, V6_vlsrw_enc; +defm V6_vlsrh : T_vmpy_VV <"$dst.uh=vlsr($src1.uh,$src2)">, V6_vlsrh_enc; } let hasNewValue = 1 in @@ -933,142 +933,142 @@ let Itinerary = CVI_VX, Type = TypeCVI_VX in { defm V6_vrmpyubv : - T_HVX_alu_VV <"$dst.uw = vrmpy($src1.ub,$src2.ub)">, V6_vrmpyubv_enc; + T_HVX_alu_VV <"$dst.uw=vrmpy($src1.ub,$src2.ub)">, V6_vrmpyubv_enc; defm V6_vrmpybv : - T_HVX_alu_VV <"$dst.w = vrmpy($src1.b,$src2.b)">, V6_vrmpybv_enc; + T_HVX_alu_VV <"$dst.w=vrmpy($src1.b,$src2.b)">, V6_vrmpybv_enc; defm V6_vrmpybusv : - T_HVX_alu_VV <"$dst.w = vrmpy($src1.ub,$src2.b)">, V6_vrmpybusv_enc; + T_HVX_alu_VV <"$dst.w=vrmpy($src1.ub,$src2.b)">, V6_vrmpybusv_enc; defm V6_vabsdiffub : - T_HVX_alu_VV <"$dst.ub = vabsdiff($src1.ub,$src2.ub)">, V6_vabsdiffub_enc; + T_HVX_alu_VV <"$dst.ub=vabsdiff($src1.ub,$src2.ub)">, V6_vabsdiffub_enc; defm V6_vabsdiffh : - T_HVX_alu_VV <"$dst.uh = vabsdiff($src1.h,$src2.h)">, V6_vabsdiffh_enc; + T_HVX_alu_VV <"$dst.uh=vabsdiff($src1.h,$src2.h)">, V6_vabsdiffh_enc; defm V6_vabsdiffuh : - T_HVX_alu_VV <"$dst.uh = vabsdiff($src1.uh,$src2.uh)">, V6_vabsdiffuh_enc; + T_HVX_alu_VV <"$dst.uh=vabsdiff($src1.uh,$src2.uh)">, V6_vabsdiffuh_enc; defm V6_vabsdiffw : - T_HVX_alu_VV <"$dst.uw = vabsdiff($src1.w,$src2.w)">, V6_vabsdiffw_enc; + T_HVX_alu_VV <"$dst.uw=vabsdiff($src1.w,$src2.w)">, V6_vabsdiffw_enc; } let Itinerary = CVI_VX_DV, Type = TypeCVI_VX_DV in { defm V6_vdmpyhvsat : - T_HVX_alu_VV <"$dst.w = vdmpy($src1.h,$src2.h):sat">, V6_vdmpyhvsat_enc; + T_HVX_alu_VV <"$dst.w=vdmpy($src1.h,$src2.h):sat">, V6_vdmpyhvsat_enc; defm V6_vmpyhvsrs : - T_HVX_alu_VV<"$dst.h = vmpy($src1.h,$src2.h):<<1:rnd:sat">, V6_vmpyhvsrs_enc; + T_HVX_alu_VV<"$dst.h=vmpy($src1.h,$src2.h):<<1:rnd:sat">, V6_vmpyhvsrs_enc; defm V6_vmpyih : - T_HVX_alu_VV <"$dst.h = vmpyi($src1.h,$src2.h)">, V6_vmpyih_enc; + T_HVX_alu_VV <"$dst.h=vmpyi($src1.h,$src2.h)">, V6_vmpyih_enc; } defm V6_vand : - T_HVX_alu_VV <"$dst = vand($src1,$src2)">, V6_vand_enc; + T_HVX_alu_VV <"$dst=vand($src1,$src2)">, V6_vand_enc; defm V6_vor : - T_HVX_alu_VV <"$dst = vor($src1,$src2)">, V6_vor_enc; + T_HVX_alu_VV <"$dst=vor($src1,$src2)">, V6_vor_enc; defm V6_vxor : - T_HVX_alu_VV <"$dst = vxor($src1,$src2)">, V6_vxor_enc; + T_HVX_alu_VV <"$dst=vxor($src1,$src2)">, V6_vxor_enc; defm V6_vaddw : - T_HVX_alu_VV <"$dst.w = vadd($src1.w,$src2.w)">, V6_vaddw_enc; + T_HVX_alu_VV <"$dst.w=vadd($src1.w,$src2.w)">, V6_vaddw_enc; defm V6_vaddubsat : - T_HVX_alu_VV <"$dst.ub = vadd($src1.ub,$src2.ub):sat">, V6_vaddubsat_enc; + T_HVX_alu_VV <"$dst.ub=vadd($src1.ub,$src2.ub):sat">, V6_vaddubsat_enc; defm V6_vadduhsat : - T_HVX_alu_VV <"$dst.uh = vadd($src1.uh,$src2.uh):sat">, V6_vadduhsat_enc; + T_HVX_alu_VV 
<"$dst.uh=vadd($src1.uh,$src2.uh):sat">, V6_vadduhsat_enc; defm V6_vaddhsat : - T_HVX_alu_VV <"$dst.h = vadd($src1.h,$src2.h):sat">, V6_vaddhsat_enc; + T_HVX_alu_VV <"$dst.h=vadd($src1.h,$src2.h):sat">, V6_vaddhsat_enc; defm V6_vaddwsat : - T_HVX_alu_VV <"$dst.w = vadd($src1.w,$src2.w):sat">, V6_vaddwsat_enc; + T_HVX_alu_VV <"$dst.w=vadd($src1.w,$src2.w):sat">, V6_vaddwsat_enc; defm V6_vsubb : - T_HVX_alu_VV <"$dst.b = vsub($src1.b,$src2.b)">, V6_vsubb_enc; + T_HVX_alu_VV <"$dst.b=vsub($src1.b,$src2.b)">, V6_vsubb_enc; defm V6_vsubh : - T_HVX_alu_VV <"$dst.h = vsub($src1.h,$src2.h)">, V6_vsubh_enc; + T_HVX_alu_VV <"$dst.h=vsub($src1.h,$src2.h)">, V6_vsubh_enc; defm V6_vsubw : - T_HVX_alu_VV <"$dst.w = vsub($src1.w,$src2.w)">, V6_vsubw_enc; + T_HVX_alu_VV <"$dst.w=vsub($src1.w,$src2.w)">, V6_vsubw_enc; defm V6_vsububsat : - T_HVX_alu_VV <"$dst.ub = vsub($src1.ub,$src2.ub):sat">, V6_vsububsat_enc; + T_HVX_alu_VV <"$dst.ub=vsub($src1.ub,$src2.ub):sat">, V6_vsububsat_enc; defm V6_vsubuhsat : - T_HVX_alu_VV <"$dst.uh = vsub($src1.uh,$src2.uh):sat">, V6_vsubuhsat_enc; + T_HVX_alu_VV <"$dst.uh=vsub($src1.uh,$src2.uh):sat">, V6_vsubuhsat_enc; defm V6_vsubhsat : - T_HVX_alu_VV <"$dst.h = vsub($src1.h,$src2.h):sat">, V6_vsubhsat_enc; + T_HVX_alu_VV <"$dst.h=vsub($src1.h,$src2.h):sat">, V6_vsubhsat_enc; defm V6_vsubwsat : - T_HVX_alu_VV <"$dst.w = vsub($src1.w,$src2.w):sat">, V6_vsubwsat_enc; + T_HVX_alu_VV <"$dst.w=vsub($src1.w,$src2.w):sat">, V6_vsubwsat_enc; defm V6_vavgub : - T_HVX_alu_VV <"$dst.ub = vavg($src1.ub,$src2.ub)">, V6_vavgub_enc; + T_HVX_alu_VV <"$dst.ub=vavg($src1.ub,$src2.ub)">, V6_vavgub_enc; defm V6_vavguh : - T_HVX_alu_VV <"$dst.uh = vavg($src1.uh,$src2.uh)">, V6_vavguh_enc; + T_HVX_alu_VV <"$dst.uh=vavg($src1.uh,$src2.uh)">, V6_vavguh_enc; defm V6_vavgh : - T_HVX_alu_VV <"$dst.h = vavg($src1.h,$src2.h)">, V6_vavgh_enc; + T_HVX_alu_VV <"$dst.h=vavg($src1.h,$src2.h)">, V6_vavgh_enc; defm V6_vavgw : - T_HVX_alu_VV <"$dst.w = vavg($src1.w,$src2.w)">, V6_vavgw_enc; + T_HVX_alu_VV <"$dst.w=vavg($src1.w,$src2.w)">, V6_vavgw_enc; defm V6_vnavgub : - T_HVX_alu_VV <"$dst.b = vnavg($src1.ub,$src2.ub)">, V6_vnavgub_enc; + T_HVX_alu_VV <"$dst.b=vnavg($src1.ub,$src2.ub)">, V6_vnavgub_enc; defm V6_vnavgh : - T_HVX_alu_VV <"$dst.h = vnavg($src1.h,$src2.h)">, V6_vnavgh_enc; + T_HVX_alu_VV <"$dst.h=vnavg($src1.h,$src2.h)">, V6_vnavgh_enc; defm V6_vnavgw : - T_HVX_alu_VV <"$dst.w = vnavg($src1.w,$src2.w)">, V6_vnavgw_enc; + T_HVX_alu_VV <"$dst.w=vnavg($src1.w,$src2.w)">, V6_vnavgw_enc; defm V6_vavgubrnd : - T_HVX_alu_VV <"$dst.ub = vavg($src1.ub,$src2.ub):rnd">, V6_vavgubrnd_enc; + T_HVX_alu_VV <"$dst.ub=vavg($src1.ub,$src2.ub):rnd">, V6_vavgubrnd_enc; defm V6_vavguhrnd : - T_HVX_alu_VV <"$dst.uh = vavg($src1.uh,$src2.uh):rnd">, V6_vavguhrnd_enc; + T_HVX_alu_VV <"$dst.uh=vavg($src1.uh,$src2.uh):rnd">, V6_vavguhrnd_enc; defm V6_vavghrnd : - T_HVX_alu_VV <"$dst.h = vavg($src1.h,$src2.h):rnd">, V6_vavghrnd_enc; + T_HVX_alu_VV <"$dst.h=vavg($src1.h,$src2.h):rnd">, V6_vavghrnd_enc; defm V6_vavgwrnd : - T_HVX_alu_VV <"$dst.w = vavg($src1.w,$src2.w):rnd">, V6_vavgwrnd_enc; + T_HVX_alu_VV <"$dst.w=vavg($src1.w,$src2.w):rnd">, V6_vavgwrnd_enc; defm V6_vmpybv : - T_HVX_alu_WV <"$dst.h = vmpy($src1.b,$src2.b)">, V6_vmpybv_enc; + T_HVX_alu_WV <"$dst.h=vmpy($src1.b,$src2.b)">, V6_vmpybv_enc; defm V6_vmpyubv : - T_HVX_alu_WV <"$dst.uh = vmpy($src1.ub,$src2.ub)">, V6_vmpyubv_enc; + T_HVX_alu_WV <"$dst.uh=vmpy($src1.ub,$src2.ub)">, V6_vmpyubv_enc; defm V6_vmpybusv : - T_HVX_alu_WV <"$dst.h = 
vmpy($src1.ub,$src2.b)">, V6_vmpybusv_enc; + T_HVX_alu_WV <"$dst.h=vmpy($src1.ub,$src2.b)">, V6_vmpybusv_enc; defm V6_vmpyhv : - T_HVX_alu_WV <"$dst.w = vmpy($src1.h,$src2.h)">, V6_vmpyhv_enc; + T_HVX_alu_WV <"$dst.w=vmpy($src1.h,$src2.h)">, V6_vmpyhv_enc; defm V6_vmpyuhv : - T_HVX_alu_WV <"$dst.uw = vmpy($src1.uh,$src2.uh)">, V6_vmpyuhv_enc; + T_HVX_alu_WV <"$dst.uw=vmpy($src1.uh,$src2.uh)">, V6_vmpyuhv_enc; defm V6_vmpyhus : - T_HVX_alu_WV <"$dst.w = vmpy($src1.h,$src2.uh)">, V6_vmpyhus_enc; + T_HVX_alu_WV <"$dst.w=vmpy($src1.h,$src2.uh)">, V6_vmpyhus_enc; defm V6_vaddubh : - T_HVX_alu_WV <"$dst.h = vadd($src1.ub,$src2.ub)">, V6_vaddubh_enc; + T_HVX_alu_WV <"$dst.h=vadd($src1.ub,$src2.ub)">, V6_vaddubh_enc; defm V6_vadduhw : - T_HVX_alu_WV <"$dst.w = vadd($src1.uh,$src2.uh)">, V6_vadduhw_enc; + T_HVX_alu_WV <"$dst.w=vadd($src1.uh,$src2.uh)">, V6_vadduhw_enc; defm V6_vaddhw : - T_HVX_alu_WV <"$dst.w = vadd($src1.h,$src2.h)">, V6_vaddhw_enc; + T_HVX_alu_WV <"$dst.w=vadd($src1.h,$src2.h)">, V6_vaddhw_enc; defm V6_vsububh : - T_HVX_alu_WV <"$dst.h = vsub($src1.ub,$src2.ub)">, V6_vsububh_enc; + T_HVX_alu_WV <"$dst.h=vsub($src1.ub,$src2.ub)">, V6_vsububh_enc; defm V6_vsubuhw : - T_HVX_alu_WV <"$dst.w = vsub($src1.uh,$src2.uh)">, V6_vsubuhw_enc; + T_HVX_alu_WV <"$dst.w=vsub($src1.uh,$src2.uh)">, V6_vsubuhw_enc; defm V6_vsubhw : - T_HVX_alu_WV <"$dst.w = vsub($src1.h,$src2.h)">, V6_vsubhw_enc; + T_HVX_alu_WV <"$dst.w=vsub($src1.h,$src2.h)">, V6_vsubhw_enc; defm V6_vaddb_dv : - T_HVX_alu_WW <"$dst.b = vadd($src1.b,$src2.b)">, V6_vaddb_dv_enc; + T_HVX_alu_WW <"$dst.b=vadd($src1.b,$src2.b)">, V6_vaddb_dv_enc; defm V6_vaddh_dv : - T_HVX_alu_WW <"$dst.h = vadd($src1.h,$src2.h)">, V6_vaddh_dv_enc; + T_HVX_alu_WW <"$dst.h=vadd($src1.h,$src2.h)">, V6_vaddh_dv_enc; defm V6_vaddw_dv : - T_HVX_alu_WW <"$dst.w = vadd($src1.w,$src2.w)">, V6_vaddw_dv_enc; + T_HVX_alu_WW <"$dst.w=vadd($src1.w,$src2.w)">, V6_vaddw_dv_enc; defm V6_vaddubsat_dv : - T_HVX_alu_WW <"$dst.ub = vadd($src1.ub,$src2.ub):sat">, V6_vaddubsat_dv_enc; + T_HVX_alu_WW <"$dst.ub=vadd($src1.ub,$src2.ub):sat">, V6_vaddubsat_dv_enc; defm V6_vadduhsat_dv : - T_HVX_alu_WW <"$dst.uh = vadd($src1.uh,$src2.uh):sat">, V6_vadduhsat_dv_enc; + T_HVX_alu_WW <"$dst.uh=vadd($src1.uh,$src2.uh):sat">, V6_vadduhsat_dv_enc; defm V6_vaddhsat_dv : - T_HVX_alu_WW <"$dst.h = vadd($src1.h,$src2.h):sat">, V6_vaddhsat_dv_enc; + T_HVX_alu_WW <"$dst.h=vadd($src1.h,$src2.h):sat">, V6_vaddhsat_dv_enc; defm V6_vaddwsat_dv : - T_HVX_alu_WW <"$dst.w = vadd($src1.w,$src2.w):sat">, V6_vaddwsat_dv_enc; + T_HVX_alu_WW <"$dst.w=vadd($src1.w,$src2.w):sat">, V6_vaddwsat_dv_enc; defm V6_vsubb_dv : - T_HVX_alu_WW <"$dst.b = vsub($src1.b,$src2.b)">, V6_vsubb_dv_enc; + T_HVX_alu_WW <"$dst.b=vsub($src1.b,$src2.b)">, V6_vsubb_dv_enc; defm V6_vsubh_dv : - T_HVX_alu_WW <"$dst.h = vsub($src1.h,$src2.h)">, V6_vsubh_dv_enc; + T_HVX_alu_WW <"$dst.h=vsub($src1.h,$src2.h)">, V6_vsubh_dv_enc; defm V6_vsubw_dv : - T_HVX_alu_WW <"$dst.w = vsub($src1.w,$src2.w)">, V6_vsubw_dv_enc; + T_HVX_alu_WW <"$dst.w=vsub($src1.w,$src2.w)">, V6_vsubw_dv_enc; defm V6_vsububsat_dv : - T_HVX_alu_WW <"$dst.ub = vsub($src1.ub,$src2.ub):sat">, V6_vsububsat_dv_enc; + T_HVX_alu_WW <"$dst.ub=vsub($src1.ub,$src2.ub):sat">, V6_vsububsat_dv_enc; defm V6_vsubuhsat_dv : - T_HVX_alu_WW <"$dst.uh = vsub($src1.uh,$src2.uh):sat">, V6_vsubuhsat_dv_enc; + T_HVX_alu_WW <"$dst.uh=vsub($src1.uh,$src2.uh):sat">, V6_vsubuhsat_dv_enc; defm V6_vsubhsat_dv : - T_HVX_alu_WW <"$dst.h = vsub($src1.h,$src2.h):sat">, V6_vsubhsat_dv_enc; + 
T_HVX_alu_WW <"$dst.h=vsub($src1.h,$src2.h):sat">, V6_vsubhsat_dv_enc; defm V6_vsubwsat_dv : - T_HVX_alu_WW <"$dst.w = vsub($src1.w,$src2.w):sat">, V6_vsubwsat_dv_enc; + T_HVX_alu_WW <"$dst.w=vsub($src1.w,$src2.w):sat">, V6_vsubwsat_dv_enc; let Itinerary = CVI_VX_DV_LONG, Type = TypeCVI_VX_DV in { defm V6_vmpabusv : - T_HVX_alu_WW <"$dst.h = vmpa($src1.ub,$src2.b)">, V6_vmpabusv_enc; + T_HVX_alu_WW <"$dst.h=vmpa($src1.ub,$src2.b)">, V6_vmpabusv_enc; defm V6_vmpabuuv : - T_HVX_alu_WW <"$dst.h = vmpa($src1.ub,$src2.ub)">, V6_vmpabuuv_enc; + T_HVX_alu_WW <"$dst.h=vmpa($src1.ub,$src2.ub)">, V6_vmpabuuv_enc; } let isAccumulator = 1, hasNewValue = 1 in @@ -1112,123 +1112,123 @@ defm V6_vtmpyb_acc : - T_HVX_vmpyacc_WWR <"$dst.h += vtmpy($src1.b,$src2.b)">, + T_HVX_vmpyacc_WWR <"$dst.h+=vtmpy($src1.b,$src2.b)">, V6_vtmpyb_acc_enc; defm V6_vtmpybus_acc : - T_HVX_vmpyacc_WWR <"$dst.h += vtmpy($src1.ub,$src2.b)">, + T_HVX_vmpyacc_WWR <"$dst.h+=vtmpy($src1.ub,$src2.b)">, V6_vtmpybus_acc_enc; defm V6_vtmpyhb_acc : - T_HVX_vmpyacc_WWR <"$dst.w += vtmpy($src1.h,$src2.b)">, + T_HVX_vmpyacc_WWR <"$dst.w+=vtmpy($src1.h,$src2.b)">, V6_vtmpyhb_acc_enc; defm V6_vdmpyhb_acc : - T_HVX_vmpyacc_VVR <"$dst.w += vdmpy($src1.h,$src2.b)">, + T_HVX_vmpyacc_VVR <"$dst.w+=vdmpy($src1.h,$src2.b)">, V6_vdmpyhb_acc_enc; defm V6_vrmpyub_acc : - T_HVX_vmpyacc_VVR <"$dst.uw += vrmpy($src1.ub,$src2.ub)">, + T_HVX_vmpyacc_VVR <"$dst.uw+=vrmpy($src1.ub,$src2.ub)">, V6_vrmpyub_acc_enc; defm V6_vrmpybus_acc : - T_HVX_vmpyacc_VVR <"$dst.w += vrmpy($src1.ub,$src2.b)">, + T_HVX_vmpyacc_VVR <"$dst.w+=vrmpy($src1.ub,$src2.b)">, V6_vrmpybus_acc_enc; defm V6_vdmpybus_acc : - T_HVX_vmpyacc_VVR <"$dst.h += vdmpy($src1.ub,$src2.b)">, + T_HVX_vmpyacc_VVR <"$dst.h+=vdmpy($src1.ub,$src2.b)">, V6_vdmpybus_acc_enc; defm V6_vdmpybus_dv_acc : - T_HVX_vmpyacc_WWR <"$dst.h += vdmpy($src1.ub,$src2.b)">, + T_HVX_vmpyacc_WWR <"$dst.h+=vdmpy($src1.ub,$src2.b)">, V6_vdmpybus_dv_acc_enc; defm V6_vdmpyhsuisat_acc : - T_HVX_vmpyacc_VWR <"$dst.w += vdmpy($src1.h,$src2.uh,#1):sat">, + T_HVX_vmpyacc_VWR <"$dst.w+=vdmpy($src1.h,$src2.uh,#1):sat">, V6_vdmpyhsuisat_acc_enc; defm V6_vdmpyhisat_acc : - T_HVX_vmpyacc_VWR <"$dst.w += vdmpy($src1.h,$src2.h):sat">, + T_HVX_vmpyacc_VWR <"$dst.w+=vdmpy($src1.h,$src2.h):sat">, V6_vdmpyhisat_acc_enc; defm V6_vdmpyhb_dv_acc : - T_HVX_vmpyacc_WWR <"$dst.w += vdmpy($src1.h,$src2.b)">, + T_HVX_vmpyacc_WWR <"$dst.w+=vdmpy($src1.h,$src2.b)">, V6_vdmpyhb_dv_acc_enc; defm V6_vmpybus_acc : - T_HVX_vmpyacc_WVR <"$dst.h += vmpy($src1.ub,$src2.b)">, + T_HVX_vmpyacc_WVR <"$dst.h+=vmpy($src1.ub,$src2.b)">, V6_vmpybus_acc_enc; defm V6_vmpabus_acc : - T_HVX_vmpyacc_WWR <"$dst.h += vmpa($src1.ub,$src2.b)">, + T_HVX_vmpyacc_WWR <"$dst.h+=vmpa($src1.ub,$src2.b)">, V6_vmpabus_acc_enc; defm V6_vmpahb_acc : - T_HVX_vmpyacc_WWR <"$dst.w += vmpa($src1.h,$src2.b)">, + T_HVX_vmpyacc_WWR <"$dst.w+=vmpa($src1.h,$src2.b)">, V6_vmpahb_acc_enc; defm V6_vmpyhsat_acc : - T_HVX_vmpyacc_WVR <"$dst.w += vmpy($src1.h,$src2.h):sat">, + T_HVX_vmpyacc_WVR <"$dst.w+=vmpy($src1.h,$src2.h):sat">, V6_vmpyhsat_acc_enc; defm V6_vmpyuh_acc : - T_HVX_vmpyacc_WVR <"$dst.uw += vmpy($src1.uh,$src2.uh)">, + T_HVX_vmpyacc_WVR <"$dst.uw+=vmpy($src1.uh,$src2.uh)">, V6_vmpyuh_acc_enc; defm V6_vmpyiwb_acc : - T_HVX_vmpyacc_VVR <"$dst.w += vmpyi($src1.w,$src2.b)">, + T_HVX_vmpyacc_VVR <"$dst.w+=vmpyi($src1.w,$src2.b)">, V6_vmpyiwb_acc_enc; defm V6_vdsaduh_acc : - T_HVX_vmpyacc_WWR <"$dst.uw += vdsad($src1.uh,$src2.uh)">, + T_HVX_vmpyacc_WWR <"$dst.uw+=vdsad($src1.uh,$src2.uh)">, 
V6_vdsaduh_acc_enc; defm V6_vmpyihb_acc : - T_HVX_vmpyacc_VVR <"$dst.h += vmpyi($src1.h,$src2.b)">, + T_HVX_vmpyacc_VVR <"$dst.h+=vmpyi($src1.h,$src2.b)">, V6_vmpyihb_acc_enc; defm V6_vmpyub_acc : - T_HVX_vmpyacc_WVR <"$dst.uh += vmpy($src1.ub,$src2.ub)">, + T_HVX_vmpyacc_WVR <"$dst.uh+=vmpy($src1.ub,$src2.ub)">, V6_vmpyub_acc_enc; let Itinerary = CVI_VX_DV, Type = TypeCVI_VX_DV in { defm V6_vdmpyhsusat_acc : - T_HVX_vmpyacc_VVR <"$dst.w += vdmpy($src1.h,$src2.uh):sat">, + T_HVX_vmpyacc_VVR <"$dst.w+=vdmpy($src1.h,$src2.uh):sat">, V6_vdmpyhsusat_acc_enc; defm V6_vdmpyhsat_acc : - T_HVX_vmpyacc_VVR <"$dst.w += vdmpy($src1.h,$src2.h):sat">, + T_HVX_vmpyacc_VVR <"$dst.w+=vdmpy($src1.h,$src2.h):sat">, V6_vdmpyhsat_acc_enc; defm V6_vmpyiwh_acc : T_HVX_vmpyacc_VVR - <"$dst.w += vmpyi($src1.w,$src2.h)">, V6_vmpyiwh_acc_enc; + <"$dst.w+=vmpyi($src1.w,$src2.h)">, V6_vmpyiwh_acc_enc; } let Itinerary = CVI_VS, Type = TypeCVI_VS in { defm V6_vaslw_acc : - T_HVX_vmpyacc_VVR <"$dst.w += vasl($src1.w,$src2)">, V6_vaslw_acc_enc; + T_HVX_vmpyacc_VVR <"$dst.w+=vasl($src1.w,$src2)">, V6_vaslw_acc_enc; defm V6_vasrw_acc : - T_HVX_vmpyacc_VVR <"$dst.w += vasr($src1.w,$src2)">, V6_vasrw_acc_enc; + T_HVX_vmpyacc_VVR <"$dst.w+=vasr($src1.w,$src2)">, V6_vasrw_acc_enc; } defm V6_vdmpyhvsat_acc : - T_HVX_vmpyacc_VVV <"$dst.w += vdmpy($src1.h,$src2.h):sat">, + T_HVX_vmpyacc_VVV <"$dst.w+=vdmpy($src1.h,$src2.h):sat">, V6_vdmpyhvsat_acc_enc; defm V6_vmpybusv_acc : - T_HVX_vmpyacc_WVV <"$dst.h += vmpy($src1.ub,$src2.b)">, + T_HVX_vmpyacc_WVV <"$dst.h+=vmpy($src1.ub,$src2.b)">, V6_vmpybusv_acc_enc; defm V6_vmpybv_acc : - T_HVX_vmpyacc_WVV <"$dst.h += vmpy($src1.b,$src2.b)">, V6_vmpybv_acc_enc; + T_HVX_vmpyacc_WVV <"$dst.h+=vmpy($src1.b,$src2.b)">, V6_vmpybv_acc_enc; defm V6_vmpyhus_acc : - T_HVX_vmpyacc_WVV <"$dst.w += vmpy($src1.h,$src2.uh)">, V6_vmpyhus_acc_enc; + T_HVX_vmpyacc_WVV <"$dst.w+=vmpy($src1.h,$src2.uh)">, V6_vmpyhus_acc_enc; defm V6_vmpyhv_acc : - T_HVX_vmpyacc_WVV <"$dst.w += vmpy($src1.h,$src2.h)">, V6_vmpyhv_acc_enc; + T_HVX_vmpyacc_WVV <"$dst.w+=vmpy($src1.h,$src2.h)">, V6_vmpyhv_acc_enc; defm V6_vmpyiewh_acc : - T_HVX_vmpyacc_VVV <"$dst.w += vmpyie($src1.w,$src2.h)">, + T_HVX_vmpyacc_VVV <"$dst.w+=vmpyie($src1.w,$src2.h)">, V6_vmpyiewh_acc_enc; defm V6_vmpyiewuh_acc : - T_HVX_vmpyacc_VVV <"$dst.w += vmpyie($src1.w,$src2.uh)">, + T_HVX_vmpyacc_VVV <"$dst.w+=vmpyie($src1.w,$src2.uh)">, V6_vmpyiewuh_acc_enc; defm V6_vmpyih_acc : - T_HVX_vmpyacc_VVV <"$dst.h += vmpyi($src1.h,$src2.h)">, V6_vmpyih_acc_enc; + T_HVX_vmpyacc_VVV <"$dst.h+=vmpyi($src1.h,$src2.h)">, V6_vmpyih_acc_enc; defm V6_vmpyowh_rnd_sacc : - T_HVX_vmpyacc_VVV <"$dst.w += vmpyo($src1.w,$src2.h):<<1:rnd:sat:shift">, + T_HVX_vmpyacc_VVV <"$dst.w+=vmpyo($src1.w,$src2.h):<<1:rnd:sat:shift">, V6_vmpyowh_rnd_sacc_enc; defm V6_vmpyowh_sacc : - T_HVX_vmpyacc_VVV <"$dst.w += vmpyo($src1.w,$src2.h):<<1:sat:shift">, + T_HVX_vmpyacc_VVV <"$dst.w+=vmpyo($src1.w,$src2.h):<<1:sat:shift">, V6_vmpyowh_sacc_enc; defm V6_vmpyubv_acc : - T_HVX_vmpyacc_WVV <"$dst.uh += vmpy($src1.ub,$src2.ub)">, + T_HVX_vmpyacc_WVV <"$dst.uh+=vmpy($src1.ub,$src2.ub)">, V6_vmpyubv_acc_enc; defm V6_vmpyuhv_acc : - T_HVX_vmpyacc_WVV <"$dst.uw += vmpy($src1.uh,$src2.uh)">, + T_HVX_vmpyacc_WVV <"$dst.uw+=vmpy($src1.uh,$src2.uh)">, V6_vmpyuhv_acc_enc; defm V6_vrmpybusv_acc : - T_HVX_vmpyacc_VVV <"$dst.w += vrmpy($src1.ub,$src2.b)">, + T_HVX_vmpyacc_VVV <"$dst.w+=vrmpy($src1.ub,$src2.b)">, V6_vrmpybusv_acc_enc; defm V6_vrmpybv_acc : - T_HVX_vmpyacc_VVV <"$dst.w += 
vrmpy($src1.b,$src2.b)">, V6_vrmpybv_acc_enc; + T_HVX_vmpyacc_VVV <"$dst.w+=vrmpy($src1.b,$src2.b)">, V6_vrmpybv_acc_enc; defm V6_vrmpyubv_acc : - T_HVX_vmpyacc_VVV <"$dst.uw += vrmpy($src1.ub,$src2.ub)">, + T_HVX_vmpyacc_VVV <"$dst.uw+=vrmpy($src1.ub,$src2.ub)">, V6_vrmpyubv_acc_enc; @@ -1247,171 +1247,171 @@ } defm V6_veqb_and : - T_HVX_vcmp <"$dst &= vcmp.eq($src1.b,$src2.b)">, V6_veqb_and_enc; + T_HVX_vcmp <"$dst&=vcmp.eq($src1.b,$src2.b)">, V6_veqb_and_enc; defm V6_veqh_and : - T_HVX_vcmp <"$dst &= vcmp.eq($src1.h,$src2.h)">, V6_veqh_and_enc; + T_HVX_vcmp <"$dst&=vcmp.eq($src1.h,$src2.h)">, V6_veqh_and_enc; defm V6_veqw_and : - T_HVX_vcmp <"$dst &= vcmp.eq($src1.w,$src2.w)">, V6_veqw_and_enc; + T_HVX_vcmp <"$dst&=vcmp.eq($src1.w,$src2.w)">, V6_veqw_and_enc; defm V6_vgtb_and : - T_HVX_vcmp <"$dst &= vcmp.gt($src1.b,$src2.b)">, V6_vgtb_and_enc; + T_HVX_vcmp <"$dst&=vcmp.gt($src1.b,$src2.b)">, V6_vgtb_and_enc; defm V6_vgth_and : - T_HVX_vcmp <"$dst &= vcmp.gt($src1.h,$src2.h)">, V6_vgth_and_enc; + T_HVX_vcmp <"$dst&=vcmp.gt($src1.h,$src2.h)">, V6_vgth_and_enc; defm V6_vgtw_and : - T_HVX_vcmp <"$dst &= vcmp.gt($src1.w,$src2.w)">, V6_vgtw_and_enc; + T_HVX_vcmp <"$dst&=vcmp.gt($src1.w,$src2.w)">, V6_vgtw_and_enc; defm V6_vgtub_and : - T_HVX_vcmp <"$dst &= vcmp.gt($src1.ub,$src2.ub)">, V6_vgtub_and_enc; + T_HVX_vcmp <"$dst&=vcmp.gt($src1.ub,$src2.ub)">, V6_vgtub_and_enc; defm V6_vgtuh_and : - T_HVX_vcmp <"$dst &= vcmp.gt($src1.uh,$src2.uh)">, V6_vgtuh_and_enc; + T_HVX_vcmp <"$dst&=vcmp.gt($src1.uh,$src2.uh)">, V6_vgtuh_and_enc; defm V6_vgtuw_and : - T_HVX_vcmp <"$dst &= vcmp.gt($src1.uw,$src2.uw)">, V6_vgtuw_and_enc; + T_HVX_vcmp <"$dst&=vcmp.gt($src1.uw,$src2.uw)">, V6_vgtuw_and_enc; defm V6_veqb_or : - T_HVX_vcmp <"$dst |= vcmp.eq($src1.b,$src2.b)">, V6_veqb_or_enc; + T_HVX_vcmp <"$dst|=vcmp.eq($src1.b,$src2.b)">, V6_veqb_or_enc; defm V6_veqh_or : - T_HVX_vcmp <"$dst |= vcmp.eq($src1.h,$src2.h)">, V6_veqh_or_enc; + T_HVX_vcmp <"$dst|=vcmp.eq($src1.h,$src2.h)">, V6_veqh_or_enc; defm V6_veqw_or : - T_HVX_vcmp <"$dst |= vcmp.eq($src1.w,$src2.w)">, V6_veqw_or_enc; + T_HVX_vcmp <"$dst|=vcmp.eq($src1.w,$src2.w)">, V6_veqw_or_enc; defm V6_vgtb_or : - T_HVX_vcmp <"$dst |= vcmp.gt($src1.b,$src2.b)">, V6_vgtb_or_enc; + T_HVX_vcmp <"$dst|=vcmp.gt($src1.b,$src2.b)">, V6_vgtb_or_enc; defm V6_vgth_or : - T_HVX_vcmp <"$dst |= vcmp.gt($src1.h,$src2.h)">, V6_vgth_or_enc; + T_HVX_vcmp <"$dst|=vcmp.gt($src1.h,$src2.h)">, V6_vgth_or_enc; defm V6_vgtw_or : - T_HVX_vcmp <"$dst |= vcmp.gt($src1.w,$src2.w)">, V6_vgtw_or_enc; + T_HVX_vcmp <"$dst|=vcmp.gt($src1.w,$src2.w)">, V6_vgtw_or_enc; defm V6_vgtub_or : - T_HVX_vcmp <"$dst |= vcmp.gt($src1.ub,$src2.ub)">, V6_vgtub_or_enc; + T_HVX_vcmp <"$dst|=vcmp.gt($src1.ub,$src2.ub)">, V6_vgtub_or_enc; defm V6_vgtuh_or : - T_HVX_vcmp <"$dst |= vcmp.gt($src1.uh,$src2.uh)">, V6_vgtuh_or_enc; + T_HVX_vcmp <"$dst|=vcmp.gt($src1.uh,$src2.uh)">, V6_vgtuh_or_enc; defm V6_vgtuw_or : - T_HVX_vcmp <"$dst |= vcmp.gt($src1.uw,$src2.uw)">, V6_vgtuw_or_enc; + T_HVX_vcmp <"$dst|=vcmp.gt($src1.uw,$src2.uw)">, V6_vgtuw_or_enc; defm V6_veqb_xor : - T_HVX_vcmp <"$dst ^= vcmp.eq($src1.b,$src2.b)">, V6_veqb_xor_enc; + T_HVX_vcmp <"$dst^=vcmp.eq($src1.b,$src2.b)">, V6_veqb_xor_enc; defm V6_veqh_xor : - T_HVX_vcmp <"$dst ^= vcmp.eq($src1.h,$src2.h)">, V6_veqh_xor_enc; + T_HVX_vcmp <"$dst^=vcmp.eq($src1.h,$src2.h)">, V6_veqh_xor_enc; defm V6_veqw_xor : - T_HVX_vcmp <"$dst ^= vcmp.eq($src1.w,$src2.w)">, V6_veqw_xor_enc; + T_HVX_vcmp <"$dst^=vcmp.eq($src1.w,$src2.w)">, V6_veqw_xor_enc; defm 
V6_vgtb_xor : - T_HVX_vcmp <"$dst ^= vcmp.gt($src1.b,$src2.b)">, V6_vgtb_xor_enc; + T_HVX_vcmp <"$dst^=vcmp.gt($src1.b,$src2.b)">, V6_vgtb_xor_enc; defm V6_vgth_xor : - T_HVX_vcmp <"$dst ^= vcmp.gt($src1.h,$src2.h)">, V6_vgth_xor_enc; + T_HVX_vcmp <"$dst^=vcmp.gt($src1.h,$src2.h)">, V6_vgth_xor_enc; defm V6_vgtw_xor : - T_HVX_vcmp <"$dst ^= vcmp.gt($src1.w,$src2.w)">, V6_vgtw_xor_enc; + T_HVX_vcmp <"$dst^=vcmp.gt($src1.w,$src2.w)">, V6_vgtw_xor_enc; defm V6_vgtub_xor : - T_HVX_vcmp <"$dst ^= vcmp.gt($src1.ub,$src2.ub)">, V6_vgtub_xor_enc; + T_HVX_vcmp <"$dst^=vcmp.gt($src1.ub,$src2.ub)">, V6_vgtub_xor_enc; defm V6_vgtuh_xor : - T_HVX_vcmp <"$dst ^= vcmp.gt($src1.uh,$src2.uh)">, V6_vgtuh_xor_enc; + T_HVX_vcmp <"$dst^=vcmp.gt($src1.uh,$src2.uh)">, V6_vgtuh_xor_enc; defm V6_vgtuw_xor : - T_HVX_vcmp <"$dst ^= vcmp.gt($src1.uw,$src2.uw)">, V6_vgtuw_xor_enc; + T_HVX_vcmp <"$dst^=vcmp.gt($src1.uw,$src2.uw)">, V6_vgtuw_xor_enc; defm V6_vminub : - T_HVX_alu_VV <"$dst.ub = vmin($src1.ub,$src2.ub)">, V6_vminub_enc; + T_HVX_alu_VV <"$dst.ub=vmin($src1.ub,$src2.ub)">, V6_vminub_enc; defm V6_vminuh : - T_HVX_alu_VV <"$dst.uh = vmin($src1.uh,$src2.uh)">, V6_vminuh_enc; + T_HVX_alu_VV <"$dst.uh=vmin($src1.uh,$src2.uh)">, V6_vminuh_enc; defm V6_vminh : - T_HVX_alu_VV <"$dst.h = vmin($src1.h,$src2.h)">, V6_vminh_enc; + T_HVX_alu_VV <"$dst.h=vmin($src1.h,$src2.h)">, V6_vminh_enc; defm V6_vminw : - T_HVX_alu_VV <"$dst.w = vmin($src1.w,$src2.w)">, V6_vminw_enc; + T_HVX_alu_VV <"$dst.w=vmin($src1.w,$src2.w)">, V6_vminw_enc; defm V6_vmaxub : - T_HVX_alu_VV <"$dst.ub = vmax($src1.ub,$src2.ub)">, V6_vmaxub_enc; + T_HVX_alu_VV <"$dst.ub=vmax($src1.ub,$src2.ub)">, V6_vmaxub_enc; defm V6_vmaxuh : - T_HVX_alu_VV <"$dst.uh = vmax($src1.uh,$src2.uh)">, V6_vmaxuh_enc; + T_HVX_alu_VV <"$dst.uh=vmax($src1.uh,$src2.uh)">, V6_vmaxuh_enc; defm V6_vmaxh : - T_HVX_alu_VV <"$dst.h = vmax($src1.h,$src2.h)">, V6_vmaxh_enc; + T_HVX_alu_VV <"$dst.h=vmax($src1.h,$src2.h)">, V6_vmaxh_enc; defm V6_vmaxw : - T_HVX_alu_VV <"$dst.w = vmax($src1.w,$src2.w)">, V6_vmaxw_enc; + T_HVX_alu_VV <"$dst.w=vmax($src1.w,$src2.w)">, V6_vmaxw_enc; defm V6_vshuffeb : - T_HVX_alu_VV <"$dst.b = vshuffe($src1.b,$src2.b)">, V6_vshuffeb_enc; + T_HVX_alu_VV <"$dst.b=vshuffe($src1.b,$src2.b)">, V6_vshuffeb_enc; defm V6_vshuffob : - T_HVX_alu_VV <"$dst.b = vshuffo($src1.b,$src2.b)">, V6_vshuffob_enc; + T_HVX_alu_VV <"$dst.b=vshuffo($src1.b,$src2.b)">, V6_vshuffob_enc; defm V6_vshufeh : - T_HVX_alu_VV <"$dst.h = vshuffe($src1.h,$src2.h)">, V6_vshufeh_enc; + T_HVX_alu_VV <"$dst.h=vshuffe($src1.h,$src2.h)">, V6_vshufeh_enc; defm V6_vshufoh : - T_HVX_alu_VV <"$dst.h = vshuffo($src1.h,$src2.h)">, V6_vshufoh_enc; + T_HVX_alu_VV <"$dst.h=vshuffo($src1.h,$src2.h)">, V6_vshufoh_enc; let Itinerary = CVI_VX_DV, Type = TypeCVI_VX_DV in { defm V6_vmpyowh_rnd : - T_HVX_alu_VV <"$dst.w = vmpyo($src1.w,$src2.h):<<1:rnd:sat">, + T_HVX_alu_VV <"$dst.w=vmpyo($src1.w,$src2.h):<<1:rnd:sat">, V6_vmpyowh_rnd_enc; defm V6_vmpyiewuh : - T_HVX_alu_VV <"$dst.w = vmpyie($src1.w,$src2.uh)">, V6_vmpyiewuh_enc; + T_HVX_alu_VV <"$dst.w=vmpyie($src1.w,$src2.uh)">, V6_vmpyiewuh_enc; defm V6_vmpyewuh : - T_HVX_alu_VV <"$dst.w = vmpye($src1.w,$src2.uh)">, V6_vmpyewuh_enc; + T_HVX_alu_VV <"$dst.w=vmpye($src1.w,$src2.uh)">, V6_vmpyewuh_enc; defm V6_vmpyowh : - T_HVX_alu_VV <"$dst.w = vmpyo($src1.w,$src2.h):<<1:sat">, V6_vmpyowh_enc; + T_HVX_alu_VV <"$dst.w=vmpyo($src1.w,$src2.h):<<1:sat">, V6_vmpyowh_enc; defm V6_vmpyiowh : - T_HVX_alu_VV <"$dst.w = vmpyio($src1.w,$src2.h)">, V6_vmpyiowh_enc; + 
T_HVX_alu_VV <"$dst.w=vmpyio($src1.w,$src2.h)">, V6_vmpyiowh_enc; } let Itinerary = CVI_VX, Type = TypeCVI_VX in defm V6_vmpyieoh : - T_HVX_alu_VV <"$dst.w = vmpyieo($src1.h,$src2.h)">, V6_vmpyieoh_enc; + T_HVX_alu_VV <"$dst.w=vmpyieo($src1.h,$src2.h)">, V6_vmpyieoh_enc; let Itinerary = CVI_VA_DV, Type = TypeCVI_VA_DV in { defm V6_vshufoeh : - T_HVX_alu_WV <"$dst.h = vshuffoe($src1.h,$src2.h)">, V6_vshufoeh_enc; + T_HVX_alu_WV <"$dst.h=vshuffoe($src1.h,$src2.h)">, V6_vshufoeh_enc; defm V6_vshufoeb : - T_HVX_alu_WV <"$dst.b = vshuffoe($src1.b,$src2.b)">, V6_vshufoeb_enc; + T_HVX_alu_WV <"$dst.b=vshuffoe($src1.b,$src2.b)">, V6_vshufoeb_enc; } let isRegSequence = 1, Itinerary = CVI_VA_DV, Type = TypeCVI_VA_DV in defm V6_vcombine : - T_HVX_alu_WV <"$dst = vcombine($src1,$src2)">, V6_vcombine_enc; + T_HVX_alu_WV <"$dst=vcombine($src1,$src2)">, V6_vcombine_enc; let Itinerary = CVI_VINLANESAT, Type = TypeCVI_VINLANESAT in { defm V6_vsathub : - T_HVX_alu_VV <"$dst.ub = vsat($src1.h,$src2.h)">, V6_vsathub_enc; + T_HVX_alu_VV <"$dst.ub=vsat($src1.h,$src2.h)">, V6_vsathub_enc; defm V6_vsatwh : - T_HVX_alu_VV <"$dst.h = vsat($src1.w,$src2.w)">, V6_vsatwh_enc; + T_HVX_alu_VV <"$dst.h=vsat($src1.w,$src2.w)">, V6_vsatwh_enc; } let Itinerary = CVI_VS, Type = TypeCVI_VS in { defm V6_vroundwh : - T_HVX_alu_VV <"$dst.h = vround($src1.w,$src2.w):sat">, V6_vroundwh_enc; + T_HVX_alu_VV <"$dst.h=vround($src1.w,$src2.w):sat">, V6_vroundwh_enc; defm V6_vroundwuh : - T_HVX_alu_VV <"$dst.uh = vround($src1.w,$src2.w):sat">, V6_vroundwuh_enc; + T_HVX_alu_VV <"$dst.uh=vround($src1.w,$src2.w):sat">, V6_vroundwuh_enc; defm V6_vroundhb : - T_HVX_alu_VV <"$dst.b = vround($src1.h,$src2.h):sat">, V6_vroundhb_enc; + T_HVX_alu_VV <"$dst.b=vround($src1.h,$src2.h):sat">, V6_vroundhb_enc; defm V6_vroundhub : - T_HVX_alu_VV <"$dst.ub = vround($src1.h,$src2.h):sat">, V6_vroundhub_enc; + T_HVX_alu_VV <"$dst.ub=vround($src1.h,$src2.h):sat">, V6_vroundhub_enc; defm V6_vasrwv : - T_HVX_alu_VV <"$dst.w = vasr($src1.w,$src2.w)">, V6_vasrwv_enc; + T_HVX_alu_VV <"$dst.w=vasr($src1.w,$src2.w)">, V6_vasrwv_enc; defm V6_vlsrwv : - T_HVX_alu_VV <"$dst.w = vlsr($src1.w,$src2.w)">, V6_vlsrwv_enc; + T_HVX_alu_VV <"$dst.w=vlsr($src1.w,$src2.w)">, V6_vlsrwv_enc; defm V6_vlsrhv : - T_HVX_alu_VV <"$dst.h = vlsr($src1.h,$src2.h)">, V6_vlsrhv_enc; + T_HVX_alu_VV <"$dst.h=vlsr($src1.h,$src2.h)">, V6_vlsrhv_enc; defm V6_vasrhv : - T_HVX_alu_VV <"$dst.h = vasr($src1.h,$src2.h)">, V6_vasrhv_enc; + T_HVX_alu_VV <"$dst.h=vasr($src1.h,$src2.h)">, V6_vasrhv_enc; defm V6_vaslwv : - T_HVX_alu_VV <"$dst.w = vasl($src1.w,$src2.w)">, V6_vaslwv_enc; + T_HVX_alu_VV <"$dst.w=vasl($src1.w,$src2.w)">, V6_vaslwv_enc; defm V6_vaslhv : - T_HVX_alu_VV <"$dst.h = vasl($src1.h,$src2.h)">, V6_vaslhv_enc; + T_HVX_alu_VV <"$dst.h=vasl($src1.h,$src2.h)">, V6_vaslhv_enc; } defm V6_vaddb : - T_HVX_alu_VV <"$dst.b = vadd($src1.b,$src2.b)">, V6_vaddb_enc; + T_HVX_alu_VV <"$dst.b=vadd($src1.b,$src2.b)">, V6_vaddb_enc; defm V6_vaddh : - T_HVX_alu_VV <"$dst.h = vadd($src1.h,$src2.h)">, V6_vaddh_enc; + T_HVX_alu_VV <"$dst.h=vadd($src1.h,$src2.h)">, V6_vaddh_enc; let Itinerary = CVI_VP, Type = TypeCVI_VP in { defm V6_vdelta : - T_HVX_alu_VV <"$dst = vdelta($src1,$src2)">, V6_vdelta_enc; + T_HVX_alu_VV <"$dst=vdelta($src1,$src2)">, V6_vdelta_enc; defm V6_vrdelta : - T_HVX_alu_VV <"$dst = vrdelta($src1,$src2)">, V6_vrdelta_enc; + T_HVX_alu_VV <"$dst=vrdelta($src1,$src2)">, V6_vrdelta_enc; defm V6_vdealb4w : - T_HVX_alu_VV <"$dst.b = vdeale($src1.b,$src2.b)">, V6_vdealb4w_enc; + T_HVX_alu_VV 
<"$dst.b=vdeale($src1.b,$src2.b)">, V6_vdealb4w_enc; defm V6_vpackeb : - T_HVX_alu_VV <"$dst.b = vpacke($src1.h,$src2.h)">, V6_vpackeb_enc; + T_HVX_alu_VV <"$dst.b=vpacke($src1.h,$src2.h)">, V6_vpackeb_enc; defm V6_vpackeh : - T_HVX_alu_VV <"$dst.h = vpacke($src1.w,$src2.w)">, V6_vpackeh_enc; + T_HVX_alu_VV <"$dst.h=vpacke($src1.w,$src2.w)">, V6_vpackeh_enc; defm V6_vpackhub_sat : - T_HVX_alu_VV <"$dst.ub = vpack($src1.h,$src2.h):sat">, V6_vpackhub_sat_enc; + T_HVX_alu_VV <"$dst.ub=vpack($src1.h,$src2.h):sat">, V6_vpackhub_sat_enc; defm V6_vpackhb_sat : - T_HVX_alu_VV <"$dst.b = vpack($src1.h,$src2.h):sat">, V6_vpackhb_sat_enc; + T_HVX_alu_VV <"$dst.b=vpack($src1.h,$src2.h):sat">, V6_vpackhb_sat_enc; defm V6_vpackwuh_sat : - T_HVX_alu_VV <"$dst.uh = vpack($src1.w,$src2.w):sat">, V6_vpackwuh_sat_enc; + T_HVX_alu_VV <"$dst.uh=vpack($src1.w,$src2.w):sat">, V6_vpackwuh_sat_enc; defm V6_vpackwh_sat : - T_HVX_alu_VV <"$dst.h = vpack($src1.w,$src2.w):sat">, V6_vpackwh_sat_enc; + T_HVX_alu_VV <"$dst.h=vpack($src1.w,$src2.w):sat">, V6_vpackwh_sat_enc; defm V6_vpackob : - T_HVX_alu_VV <"$dst.b = vpacko($src1.h,$src2.h)">, V6_vpackob_enc; + T_HVX_alu_VV <"$dst.b=vpacko($src1.h,$src2.h)">, V6_vpackob_enc; defm V6_vpackoh : - T_HVX_alu_VV <"$dst.h = vpacko($src1.w,$src2.w)">, V6_vpackoh_enc; + T_HVX_alu_VV <"$dst.h=vpacko($src1.w,$src2.w)">, V6_vpackoh_enc; } let hasNewValue = 1, hasSideEffects = 0 in @@ -1429,29 +1429,29 @@ def NAME#_128B : T_HVX_condALU ; } -defm V6_vaddbq : T_HVX_condALU <"if ($src1) $dst.b += $src2.b">, +defm V6_vaddbq : T_HVX_condALU <"if ($src1) $dst.b+=$src2.b">, V6_vaddbq_enc; -defm V6_vaddhq : T_HVX_condALU <"if ($src1) $dst.h += $src2.h">, +defm V6_vaddhq : T_HVX_condALU <"if ($src1) $dst.h+=$src2.h">, V6_vaddhq_enc; -defm V6_vaddwq : T_HVX_condALU <"if ($src1) $dst.w += $src2.w">, +defm V6_vaddwq : T_HVX_condALU <"if ($src1) $dst.w+=$src2.w">, V6_vaddwq_enc; -defm V6_vsubbq : T_HVX_condALU <"if ($src1) $dst.b -= $src2.b">, +defm V6_vsubbq : T_HVX_condALU <"if ($src1) $dst.b-=$src2.b">, V6_vsubbq_enc; -defm V6_vsubhq : T_HVX_condALU <"if ($src1) $dst.h -= $src2.h">, +defm V6_vsubhq : T_HVX_condALU <"if ($src1) $dst.h-=$src2.h">, V6_vsubhq_enc; -defm V6_vsubwq : T_HVX_condALU <"if ($src1) $dst.w -= $src2.w">, +defm V6_vsubwq : T_HVX_condALU <"if ($src1) $dst.w-=$src2.w">, V6_vsubwq_enc; -defm V6_vaddbnq : T_HVX_condALU <"if (!$src1) $dst.b += $src2.b">, +defm V6_vaddbnq : T_HVX_condALU <"if (!$src1) $dst.b+=$src2.b">, V6_vaddbnq_enc; -defm V6_vaddhnq : T_HVX_condALU <"if (!$src1) $dst.h += $src2.h">, +defm V6_vaddhnq : T_HVX_condALU <"if (!$src1) $dst.h+=$src2.h">, V6_vaddhnq_enc; -defm V6_vaddwnq : T_HVX_condALU <"if (!$src1) $dst.w += $src2.w">, +defm V6_vaddwnq : T_HVX_condALU <"if (!$src1) $dst.w+=$src2.w">, V6_vaddwnq_enc; -defm V6_vsubbnq : T_HVX_condALU <"if (!$src1) $dst.b -= $src2.b">, +defm V6_vsubbnq : T_HVX_condALU <"if (!$src1) $dst.b-=$src2.b">, V6_vsubbnq_enc; -defm V6_vsubhnq : T_HVX_condALU <"if (!$src1) $dst.h -= $src2.h">, +defm V6_vsubhnq : T_HVX_condALU <"if (!$src1) $dst.h-=$src2.h">, V6_vsubhnq_enc; -defm V6_vsubwnq : T_HVX_condALU <"if (!$src1) $dst.w -= $src2.w">, +defm V6_vsubwnq : T_HVX_condALU <"if (!$src1) $dst.w-=$src2.w">, V6_vsubwnq_enc; let hasNewValue = 1 in @@ -1480,60 +1480,60 @@ T_HVX_alu_2op ; -defm V6_vabsh : T_HVX_alu_2op_VV <"$dst.h = vabs($src1.h)">, +defm V6_vabsh : T_HVX_alu_2op_VV <"$dst.h=vabs($src1.h)">, V6_vabsh_enc; -defm V6_vabsw : T_HVX_alu_2op_VV <"$dst.w = vabs($src1.w)">, +defm V6_vabsw : T_HVX_alu_2op_VV 
<"$dst.w=vabs($src1.w)">, V6_vabsw_enc; -defm V6_vabsh_sat : T_HVX_alu_2op_VV <"$dst.h = vabs($src1.h):sat">, +defm V6_vabsh_sat : T_HVX_alu_2op_VV <"$dst.h=vabs($src1.h):sat">, V6_vabsh_sat_enc; -defm V6_vabsw_sat : T_HVX_alu_2op_VV <"$dst.w = vabs($src1.w):sat">, +defm V6_vabsw_sat : T_HVX_alu_2op_VV <"$dst.w=vabs($src1.w):sat">, V6_vabsw_sat_enc; -defm V6_vnot : T_HVX_alu_2op_VV <"$dst = vnot($src1)">, +defm V6_vnot : T_HVX_alu_2op_VV <"$dst=vnot($src1)">, V6_vnot_enc; -defm V6_vassign : T_HVX_alu_2op_VV <"$dst = $src1">, +defm V6_vassign : T_HVX_alu_2op_VV <"$dst=$src1">, V6_vassign_enc; -defm V6_vzb : T_HVX_alu_2op_WV <"$dst.uh = vzxt($src1.ub)">, +defm V6_vzb : T_HVX_alu_2op_WV <"$dst.uh=vzxt($src1.ub)">, V6_vzb_enc; -defm V6_vzh : T_HVX_alu_2op_WV <"$dst.uw = vzxt($src1.uh)">, +defm V6_vzh : T_HVX_alu_2op_WV <"$dst.uw=vzxt($src1.uh)">, V6_vzh_enc; -defm V6_vsb : T_HVX_alu_2op_WV <"$dst.h = vsxt($src1.b)">, +defm V6_vsb : T_HVX_alu_2op_WV <"$dst.h=vsxt($src1.b)">, V6_vsb_enc; -defm V6_vsh : T_HVX_alu_2op_WV <"$dst.w = vsxt($src1.h)">, +defm V6_vsh : T_HVX_alu_2op_WV <"$dst.w=vsxt($src1.h)">, V6_vsh_enc; let Itinerary = CVI_VP, Type = TypeCVI_VP in { -defm V6_vdealh : T_HVX_alu_2op_VV <"$dst.h = vdeal($src1.h)">, +defm V6_vdealh : T_HVX_alu_2op_VV <"$dst.h=vdeal($src1.h)">, V6_vdealh_enc; -defm V6_vdealb : T_HVX_alu_2op_VV <"$dst.b = vdeal($src1.b)">, +defm V6_vdealb : T_HVX_alu_2op_VV <"$dst.b=vdeal($src1.b)">, V6_vdealb_enc; -defm V6_vshuffh : T_HVX_alu_2op_VV <"$dst.h = vshuff($src1.h)">, +defm V6_vshuffh : T_HVX_alu_2op_VV <"$dst.h=vshuff($src1.h)">, V6_vshuffh_enc; -defm V6_vshuffb : T_HVX_alu_2op_VV <"$dst.b = vshuff($src1.b)">, +defm V6_vshuffb : T_HVX_alu_2op_VV <"$dst.b=vshuff($src1.b)">, V6_vshuffb_enc; } let Itinerary = CVI_VP_VS, Type = TypeCVI_VP_VS in { -defm V6_vunpackub : T_HVX_alu_2op_WV <"$dst.uh = vunpack($src1.ub)">, +defm V6_vunpackub : T_HVX_alu_2op_WV <"$dst.uh=vunpack($src1.ub)">, V6_vunpackub_enc; -defm V6_vunpackuh : T_HVX_alu_2op_WV <"$dst.uw = vunpack($src1.uh)">, +defm V6_vunpackuh : T_HVX_alu_2op_WV <"$dst.uw=vunpack($src1.uh)">, V6_vunpackuh_enc; -defm V6_vunpackb : T_HVX_alu_2op_WV <"$dst.h = vunpack($src1.b)">, +defm V6_vunpackb : T_HVX_alu_2op_WV <"$dst.h=vunpack($src1.b)">, V6_vunpackb_enc; -defm V6_vunpackh : T_HVX_alu_2op_WV <"$dst.w = vunpack($src1.h)">, +defm V6_vunpackh : T_HVX_alu_2op_WV <"$dst.w=vunpack($src1.h)">, V6_vunpackh_enc; } let Itinerary = CVI_VS, Type = TypeCVI_VS in { -defm V6_vcl0w : T_HVX_alu_2op_VV <"$dst.uw = vcl0($src1.uw)">, +defm V6_vcl0w : T_HVX_alu_2op_VV <"$dst.uw=vcl0($src1.uw)">, V6_vcl0w_enc; -defm V6_vcl0h : T_HVX_alu_2op_VV <"$dst.uh = vcl0($src1.uh)">, +defm V6_vcl0h : T_HVX_alu_2op_VV <"$dst.uh=vcl0($src1.uh)">, V6_vcl0h_enc; -defm V6_vnormamtw : T_HVX_alu_2op_VV <"$dst.w = vnormamt($src1.w)">, +defm V6_vnormamtw : T_HVX_alu_2op_VV <"$dst.w=vnormamt($src1.w)">, V6_vnormamtw_enc; -defm V6_vnormamth : T_HVX_alu_2op_VV <"$dst.h = vnormamt($src1.h)">, +defm V6_vnormamth : T_HVX_alu_2op_VV <"$dst.h=vnormamt($src1.h)">, V6_vnormamth_enc; -defm V6_vpopcounth : T_HVX_alu_2op_VV <"$dst.h = vpopcount($src1.h)">, +defm V6_vpopcounth : T_HVX_alu_2op_VV <"$dst.h=vpopcount($src1.h)">, V6_vpopcounth_enc; } @@ -1553,13 +1553,13 @@ } defm V6_vrmpybusi_acc : - T_HVX_vmpyacc2<"$dst.w += vrmpy($src1.ub,$src2.b,#$src3)">, + T_HVX_vmpyacc2<"$dst.w+=vrmpy($src1.ub,$src2.b,#$src3)">, V6_vrmpybusi_acc_enc; defm V6_vrsadubi_acc : - T_HVX_vmpyacc2<"$dst.uw += vrsad($src1.ub,$src2.ub,#$src3)">, + 
T_HVX_vmpyacc2<"$dst.uw+=vrsad($src1.ub,$src2.ub,#$src3)">, V6_vrsadubi_acc_enc; defm V6_vrmpyubi_acc : - T_HVX_vmpyacc2<"$dst.uw += vrmpy($src1.ub,$src2.ub,#$src3)">, + T_HVX_vmpyacc2<"$dst.uw+=vrmpy($src1.ub,$src2.ub,#$src3)">, V6_vrmpyubi_acc_enc; @@ -1577,11 +1577,11 @@ } defm V6_vrmpybusi : - T_HVX_vmpy2 <"$dst.w = vrmpy($src1.ub,$src2.b,#$src3)">, V6_vrmpybusi_enc; + T_HVX_vmpy2 <"$dst.w=vrmpy($src1.ub,$src2.b,#$src3)">, V6_vrmpybusi_enc; defm V6_vrsadubi : - T_HVX_vmpy2 <"$dst.uw = vrsad($src1.ub,$src2.ub,#$src3)">, V6_vrsadubi_enc; + T_HVX_vmpy2 <"$dst.uw=vrsad($src1.ub,$src2.ub,#$src3)">, V6_vrsadubi_enc; defm V6_vrmpyubi : - T_HVX_vmpy2 <"$dst.uw = vrmpy($src1.ub,$src2.ub,#$src3)">, V6_vrmpyubi_enc; + T_HVX_vmpy2 <"$dst.uw=vrmpy($src1.ub,$src2.ub,#$src3)">, V6_vrmpyubi_enc; let Itinerary = CVI_VP_VS_LONG_EARLY, Type = TypeCVI_VP_VS, @@ -1607,7 +1607,7 @@ let isPredicated = 1, hasSideEffects = 0, hasNewValue = 1, opNewValue = 0 in class T_HVX_cmov : CVI_VA_Resource1 <(outs RC:$dst), (ins PredRegs:$src1, RC:$src2), - "if ("#!if(isPredNot, "!", "")#"$src1) $dst = $src2"> { + "if ("#!if(isPredNot, "!", "")#"$src1) $dst=$src2"> { let isPredicatedFalse = isPredNot; } @@ -1627,7 +1627,7 @@ class T_HVX_ccombine : CVI_VA_Resource1 < (outs RCout:$dst), (ins PredRegs:$src1, RCin:$src2, RCin:$src3), - "if ("#!if(isPredNot, "!", "")#"$src1) $dst = vcombine($src2,$src3)"> { + "if ("#!if(isPredNot, "!", "")#"$src1) $dst=vcombine($src2,$src3)"> { let isPredicatedFalse = isPredNot; } @@ -1663,31 +1663,31 @@ let Itinerary = CVI_VP_LONG, Type = TypeCVI_VP in { defm V6_valignb : - T_HVX_shift_VV <"$dst = valign($src1,$src2,$src3)">, V6_valignb_enc; + T_HVX_shift_VV <"$dst=valign($src1,$src2,$src3)">, V6_valignb_enc; defm V6_vlalignb : - T_HVX_shift_VV <"$dst = vlalign($src1,$src2,$src3)">, V6_vlalignb_enc; + T_HVX_shift_VV <"$dst=vlalign($src1,$src2,$src3)">, V6_vlalignb_enc; } let Itinerary = CVI_VS, Type = TypeCVI_VS in { defm V6_vasrwh : - T_HVX_shift_VV <"$dst.h = vasr($src1.w,$src2.w,$src3)">, V6_vasrwh_enc; + T_HVX_shift_VV <"$dst.h=vasr($src1.w,$src2.w,$src3)">, V6_vasrwh_enc; defm V6_vasrwhsat : - T_HVX_shift_VV <"$dst.h = vasr($src1.w,$src2.w,$src3):sat">, + T_HVX_shift_VV <"$dst.h=vasr($src1.w,$src2.w,$src3):sat">, V6_vasrwhsat_enc; defm V6_vasrwhrndsat : - T_HVX_shift_VV <"$dst.h = vasr($src1.w,$src2.w,$src3):rnd:sat">, + T_HVX_shift_VV <"$dst.h=vasr($src1.w,$src2.w,$src3):rnd:sat">, V6_vasrwhrndsat_enc; defm V6_vasrwuhsat : - T_HVX_shift_VV <"$dst.uh = vasr($src1.w,$src2.w,$src3):sat">, + T_HVX_shift_VV <"$dst.uh=vasr($src1.w,$src2.w,$src3):sat">, V6_vasrwuhsat_enc; defm V6_vasrhubsat : - T_HVX_shift_VV <"$dst.ub = vasr($src1.h,$src2.h,$src3):sat">, + T_HVX_shift_VV <"$dst.ub=vasr($src1.h,$src2.h,$src3):sat">, V6_vasrhubsat_enc; defm V6_vasrhubrndsat : - T_HVX_shift_VV <"$dst.ub = vasr($src1.h,$src2.h,$src3):rnd:sat">, + T_HVX_shift_VV <"$dst.ub=vasr($src1.h,$src2.h,$src3):rnd:sat">, V6_vasrhubrndsat_enc; defm V6_vasrhbrndsat : - T_HVX_shift_VV <"$dst.b = vasr($src1.h,$src2.h,$src3):rnd:sat">, + T_HVX_shift_VV <"$dst.b=vasr($src1.h,$src2.h,$src3):rnd:sat">, V6_vasrhbrndsat_enc; } @@ -1695,9 +1695,9 @@ //defm V6_vtran2x2vdd : T_HVX_shift_VV <"">, V6_vtran2x2vdd_enc; let Itinerary = CVI_VP_VS_LONG, Type = TypeCVI_VP_VS in { defm V6_vshuffvdd : - T_HVX_shift_WV <"$dst = vshuff($src1,$src2,$src3)">, V6_vshuffvdd_enc; + T_HVX_shift_WV <"$dst=vshuff($src1,$src2,$src3)">, V6_vshuffvdd_enc; defm V6_vdealvdd : - T_HVX_shift_WV <"$dst = vdeal($src1,$src2,$src3)">, V6_vdealvdd_enc; + T_HVX_shift_WV 
<"$dst=vdeal($src1,$src2,$src3)">, V6_vdealvdd_enc; } let hasNewValue = 1, Itinerary = CVI_VP_VS_LONG, Type = TypeCVI_VP_VS in @@ -1711,8 +1711,8 @@ def NAME#_128B : T_HVX_unpack ; } -defm V6_vunpackob : T_HVX_unpack <"$dst.h |= vunpacko($src1.b)">, V6_vunpackob_enc; -defm V6_vunpackoh : T_HVX_unpack <"$dst.w |= vunpacko($src1.h)">, V6_vunpackoh_enc; +defm V6_vunpackob : T_HVX_unpack <"$dst.h|=vunpacko($src1.b)">, V6_vunpackob_enc; +defm V6_vunpackoh : T_HVX_unpack <"$dst.w|=vunpacko($src1.h)">, V6_vunpackoh_enc; let Itinerary = CVI_VP_LONG, Type = TypeCVI_VP, hasNewValue = 1, hasSideEffects = 0 in @@ -1728,9 +1728,9 @@ } defm V6_valignbi : - T_HVX_valign <"$dst = valign($src1,$src2,#$src3)">, V6_valignbi_enc; + T_HVX_valign <"$dst=valign($src1,$src2,#$src3)">, V6_valignbi_enc; defm V6_vlalignbi : - T_HVX_valign <"$dst = vlalign($src1,$src2,#$src3)">, V6_vlalignbi_enc; + T_HVX_valign <"$dst=vlalign($src1,$src2,#$src3)">, V6_vlalignbi_enc; let Itinerary = CVI_VA_DV, Type = TypeCVI_VA_DV in class T_HVX_predAlu @@ -1744,17 +1744,17 @@ def NAME#_128B : T_HVX_predAlu ; } -defm V6_pred_and : T_HVX_predAlu <"$dst = and($src1,$src2)">, V6_pred_and_enc; -defm V6_pred_or : T_HVX_predAlu <"$dst = or($src1,$src2)">, V6_pred_or_enc; -defm V6_pred_xor : T_HVX_predAlu <"$dst = xor($src1,$src2)">, V6_pred_xor_enc; -defm V6_pred_or_n : T_HVX_predAlu <"$dst = or($src1,!$src2)">, V6_pred_or_n_enc; +defm V6_pred_and : T_HVX_predAlu <"$dst=and($src1,$src2)">, V6_pred_and_enc; +defm V6_pred_or : T_HVX_predAlu <"$dst=or($src1,$src2)">, V6_pred_or_enc; +defm V6_pred_xor : T_HVX_predAlu <"$dst=xor($src1,$src2)">, V6_pred_xor_enc; +defm V6_pred_or_n : T_HVX_predAlu <"$dst=or($src1,!$src2)">, V6_pred_or_n_enc; defm V6_pred_and_n : - T_HVX_predAlu <"$dst = and($src1,!$src2)">, V6_pred_and_n_enc; + T_HVX_predAlu <"$dst=and($src1,!$src2)">, V6_pred_and_n_enc; let Itinerary = CVI_VA, Type = TypeCVI_VA in class T_HVX_prednot : CVI_VA_Resource1<(outs RC:$dst), (ins RC:$src1), - "$dst = not($src1)">, V6_pred_not_enc; + "$dst=not($src1)">, V6_pred_not_enc; def V6_pred_not : T_HVX_prednot ; let isCodeGenOnly = 1 in @@ -1771,21 +1771,21 @@ def NAME#_128B : T_HVX_vcmp2 ; } -defm V6_veqb : T_HVX_vcmp2 <"$dst = vcmp.eq($src1.b,$src2.b)">, V6_veqb_enc; -defm V6_veqh : T_HVX_vcmp2 <"$dst = vcmp.eq($src1.h,$src2.h)">, V6_veqh_enc; -defm V6_veqw : T_HVX_vcmp2 <"$dst = vcmp.eq($src1.w,$src2.w)">, V6_veqw_enc; -defm V6_vgtb : T_HVX_vcmp2 <"$dst = vcmp.gt($src1.b,$src2.b)">, V6_vgtb_enc; -defm V6_vgth : T_HVX_vcmp2 <"$dst = vcmp.gt($src1.h,$src2.h)">, V6_vgth_enc; -defm V6_vgtw : T_HVX_vcmp2 <"$dst = vcmp.gt($src1.w,$src2.w)">, V6_vgtw_enc; -defm V6_vgtub : T_HVX_vcmp2 <"$dst = vcmp.gt($src1.ub,$src2.ub)">, V6_vgtub_enc; -defm V6_vgtuh : T_HVX_vcmp2 <"$dst = vcmp.gt($src1.uh,$src2.uh)">, V6_vgtuh_enc; -defm V6_vgtuw : T_HVX_vcmp2 <"$dst = vcmp.gt($src1.uw,$src2.uw)">, V6_vgtuw_enc; +defm V6_veqb : T_HVX_vcmp2 <"$dst=vcmp.eq($src1.b,$src2.b)">, V6_veqb_enc; +defm V6_veqh : T_HVX_vcmp2 <"$dst=vcmp.eq($src1.h,$src2.h)">, V6_veqh_enc; +defm V6_veqw : T_HVX_vcmp2 <"$dst=vcmp.eq($src1.w,$src2.w)">, V6_veqw_enc; +defm V6_vgtb : T_HVX_vcmp2 <"$dst=vcmp.gt($src1.b,$src2.b)">, V6_vgtb_enc; +defm V6_vgth : T_HVX_vcmp2 <"$dst=vcmp.gt($src1.h,$src2.h)">, V6_vgth_enc; +defm V6_vgtw : T_HVX_vcmp2 <"$dst=vcmp.gt($src1.w,$src2.w)">, V6_vgtw_enc; +defm V6_vgtub : T_HVX_vcmp2 <"$dst=vcmp.gt($src1.ub,$src2.ub)">, V6_vgtub_enc; +defm V6_vgtuh : T_HVX_vcmp2 <"$dst=vcmp.gt($src1.uh,$src2.uh)">, V6_vgtuh_enc; +defm V6_vgtuw : T_HVX_vcmp2 
<"$dst=vcmp.gt($src1.uw,$src2.uw)">, V6_vgtuw_enc; let isAccumulator = 1, hasNewValue = 1, hasSideEffects = 0 in class T_V6_vandqrt_acc : CVI_VX_Resource_late<(outs RCout:$dst), (ins RCout:$_src_, RCin:$src1, IntRegs:$src2), - "$dst |= vand($src1,$src2)", [], "$dst = $_src_">, V6_vandqrt_acc_enc; + "$dst|=vand($src1,$src2)", [], "$dst = $_src_">, V6_vandqrt_acc_enc; def V6_vandqrt_acc : T_V6_vandqrt_acc ; let isCodeGenOnly = 1 in @@ -1795,7 +1795,7 @@ class T_V6_vandvrt_acc : CVI_VX_Resource_late<(outs RCout:$dst), (ins RCout:$_src_, RCin:$src1, IntRegs:$src2), - "$dst |= vand($src1,$src2)", [], "$dst = $_src_">, V6_vandvrt_acc_enc; + "$dst|=vand($src1,$src2)", [], "$dst = $_src_">, V6_vandvrt_acc_enc; def V6_vandvrt_acc : T_V6_vandvrt_acc ; let isCodeGenOnly = 1 in @@ -1805,7 +1805,7 @@ class T_V6_vandqrt : CVI_VX_Resource_late<(outs RCout:$dst), (ins RCin:$src1, IntRegs:$src2), - "$dst = vand($src1,$src2)" >, V6_vandqrt_enc; + "$dst=vand($src1,$src2)" >, V6_vandqrt_enc; def V6_vandqrt : T_V6_vandqrt ; let isCodeGenOnly = 1 in @@ -1814,7 +1814,7 @@ let hasNewValue = 1, hasSideEffects = 0 in class T_V6_lvsplatw : CVI_VX_Resource_late<(outs RC:$dst), (ins IntRegs:$src1), - "$dst = vsplat($src1)" >, V6_lvsplatw_enc; + "$dst=vsplat($src1)" >, V6_lvsplatw_enc; def V6_lvsplatw : T_V6_lvsplatw ; let isCodeGenOnly = 1 in @@ -1824,7 +1824,7 @@ let hasNewValue = 1 in class T_V6_vinsertwr : CVI_VX_Resource_late<(outs RC:$dst), (ins RC:$_src_, IntRegs:$src1), - "$dst.w = vinsert($src1)", [], "$dst = $_src_">, + "$dst.w=vinsert($src1)", [], "$dst = $_src_">, V6_vinsertwr_enc; def V6_vinsertwr : T_V6_vinsertwr ; @@ -1835,7 +1835,7 @@ let Itinerary = CVI_VP_LONG, Type = TypeCVI_VP in class T_V6_pred_scalar2 : CVI_VA_Resource1<(outs RC:$dst), (ins IntRegs:$src1), - "$dst = vsetq($src1)">, V6_pred_scalar2_enc; + "$dst=vsetq($src1)">, V6_pred_scalar2_enc; def V6_pred_scalar2 : T_V6_pred_scalar2 ; let isCodeGenOnly = 1 in @@ -1843,7 +1843,7 @@ class T_V6_vandvrt : CVI_VX_Resource_late<(outs RCout:$dst), (ins RCin:$src1, IntRegs:$src2), - "$dst = vand($src1,$src2)">, V6_vandvrt_enc; + "$dst=vand($src1,$src2)">, V6_vandvrt_enc; def V6_vandvrt : T_V6_vandvrt ; let isCodeGenOnly = 1 in @@ -1858,9 +1858,9 @@ class T_HVX_rol_P : T_HVX_rol ; -def S6_rol_i_p : T_HVX_rol_P <"$dst = rol($src1,#$src2)">, S6_rol_i_p_enc; +def S6_rol_i_p : T_HVX_rol_P <"$dst=rol($src1,#$src2)">, S6_rol_i_p_enc; let hasNewValue = 1, opNewValue = 0 in -def S6_rol_i_r : T_HVX_rol_R <"$dst = rol($src1,#$src2)">, S6_rol_i_r_enc; +def S6_rol_i_r : T_HVX_rol_R <"$dst=rol($src1,#$src2)">, S6_rol_i_r_enc; let validSubTargets = HasV60SubT in class T_HVX_rol_acc @@ -1874,33 +1874,33 @@ : T_HVX_rol_acc ; def S6_rol_i_p_nac : - T_HVX_rol_acc_P <"$dst -= rol($src1,#$src2)">, S6_rol_i_p_nac_enc; + T_HVX_rol_acc_P <"$dst-=rol($src1,#$src2)">, S6_rol_i_p_nac_enc; def S6_rol_i_p_acc : - T_HVX_rol_acc_P <"$dst += rol($src1,#$src2)">, S6_rol_i_p_acc_enc; + T_HVX_rol_acc_P <"$dst+=rol($src1,#$src2)">, S6_rol_i_p_acc_enc; def S6_rol_i_p_and : - T_HVX_rol_acc_P <"$dst &= rol($src1,#$src2)">, S6_rol_i_p_and_enc; + T_HVX_rol_acc_P <"$dst&=rol($src1,#$src2)">, S6_rol_i_p_and_enc; def S6_rol_i_p_or : - T_HVX_rol_acc_P <"$dst |= rol($src1,#$src2)">, S6_rol_i_p_or_enc; + T_HVX_rol_acc_P <"$dst|=rol($src1,#$src2)">, S6_rol_i_p_or_enc; def S6_rol_i_p_xacc : - T_HVX_rol_acc_P<"$dst ^= rol($src1,#$src2)">, S6_rol_i_p_xacc_enc; + T_HVX_rol_acc_P<"$dst^=rol($src1,#$src2)">, S6_rol_i_p_xacc_enc; let hasNewValue = 1, opNewValue = 0 in { def S6_rol_i_r_nac : - 
T_HVX_rol_acc_R <"$dst -= rol($src1,#$src2)">, S6_rol_i_r_nac_enc; + T_HVX_rol_acc_R <"$dst-=rol($src1,#$src2)">, S6_rol_i_r_nac_enc; def S6_rol_i_r_acc : - T_HVX_rol_acc_R <"$dst += rol($src1,#$src2)">, S6_rol_i_r_acc_enc; + T_HVX_rol_acc_R <"$dst+=rol($src1,#$src2)">, S6_rol_i_r_acc_enc; def S6_rol_i_r_and : - T_HVX_rol_acc_R <"$dst &= rol($src1,#$src2)">, S6_rol_i_r_and_enc; + T_HVX_rol_acc_R <"$dst&=rol($src1,#$src2)">, S6_rol_i_r_and_enc; def S6_rol_i_r_or : - T_HVX_rol_acc_R <"$dst |= rol($src1,#$src2)">, S6_rol_i_r_or_enc; + T_HVX_rol_acc_R <"$dst|=rol($src1,#$src2)">, S6_rol_i_r_or_enc; def S6_rol_i_r_xacc : - T_HVX_rol_acc_R <"$dst ^= rol($src1,#$src2)">, S6_rol_i_r_xacc_enc; + T_HVX_rol_acc_R <"$dst^=rol($src1,#$src2)">, S6_rol_i_r_xacc_enc; } let isSolo = 1, Itinerary = LD_tc_ld_SLOT0, Type = TypeLD in class T_V6_extractw : LD1Inst <(outs IntRegs:$dst), (ins RC:$src1, IntRegs:$src2), - "$dst = vextract($src1,$src2)">, V6_extractw_enc; + "$dst=vextract($src1,$src2)">, V6_extractw_enc; def V6_extractw : T_V6_extractw ; let isCodeGenOnly = 1 in @@ -1933,7 +1933,7 @@ let Itinerary = ST_tc_3stall_SLOT0, isPredicateLate = 1, isSoloAX = 1, validSubTargets = HasV55SubT in def Y5_l2locka : ST1Inst <(outs PredRegs:$dst), (ins IntRegs:$src1), - "$dst = l2locka($src1)">, Y5_l2locka_enc; + "$dst=l2locka($src1)">, Y5_l2locka_enc; // not defined on etc side. why? // defm S2_cabacencbin : _VV <"Rdd=encbin(Rss,$src2,Pu)">, S2_cabacencbin_enc; @@ -1943,7 +1943,7 @@ validSubTargets = HasV55SubT in def A5_ACS : MInst2 <(outs DoubleRegs:$dst1, PredRegs:$dst2), (ins DoubleRegs:$_src_, DoubleRegs:$src1, DoubleRegs:$src2), - "$dst1,$dst2 = vacsh($src1,$src2)", [], + "$dst1,$dst2=vacsh($src1,$src2)", [], "$dst1 = $_src_" >, Requires<[HasV55T]>, A5_ACS_enc; let Itinerary = CVI_VA_DV, Type = TypeCVI_VA_DV, hasNewValue = 1, @@ -1966,11 +1966,11 @@ multiclass T_HVX_alu2_W : T_HVX_alu2 ; -defm V6_vswap : T_HVX_alu2_W <"$dst = vswap($src1,$src2,$src3)">, V6_vswap_enc; +defm V6_vswap : T_HVX_alu2_W <"$dst=vswap($src1,$src2,$src3)">, V6_vswap_enc; let Itinerary = CVI_VA, Type = TypeCVI_VA, hasNewValue = 1, hasSideEffects = 0 in -defm V6_vmux : T_HVX_alu2_V <"$dst = vmux($src1,$src2,$src3)">, V6_vmux_enc; +defm V6_vmux : T_HVX_alu2_V <"$dst=vmux($src1,$src2,$src3)">, V6_vmux_enc; class T_HVX_vlutb : CVI_VA_Resource1<(outs RCout:$dst), @@ -2015,18 +2015,18 @@ let Itinerary = CVI_VP_LONG, Type = TypeCVI_VP, hasNewValue = 1 in defm V6_vlutvvb: - T_HVX_vlutb_V <"$dst.b = vlut32($src1.b,$src2.b,$src3)">, V6_vlutvvb_enc; + T_HVX_vlutb_V <"$dst.b=vlut32($src1.b,$src2.b,$src3)">, V6_vlutvvb_enc; let Itinerary = CVI_VP_VS_LONG, Type = TypeCVI_VP_VS, hasNewValue = 1 in defm V6_vlutvwh: - T_HVX_vlutb_W <"$dst.h = vlut16($src1.b,$src2.h,$src3)">, V6_vlutvwh_enc; + T_HVX_vlutb_W <"$dst.h=vlut16($src1.b,$src2.h,$src3)">, V6_vlutvwh_enc; let hasNewValue = 1 in { defm V6_vlutvvb_oracc: - T_HVX_vlutb_acc_V <"$dst.b |= vlut32($src1.b,$src2.b,$src3)">, + T_HVX_vlutb_acc_V <"$dst.b|=vlut32($src1.b,$src2.b,$src3)">, V6_vlutvvb_oracc_enc; defm V6_vlutvwh_oracc: - T_HVX_vlutb_acc_W <"$dst.h |= vlut16($src1.b,$src2.h,$src3)">, + T_HVX_vlutb_acc_W <"$dst.h|=vlut16($src1.b,$src2.h,$src3)">, V6_vlutvwh_oracc_enc; } @@ -2034,7 +2034,7 @@ def S2_cabacencbin : SInst2<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, PredRegs:$src3), - "$dst = encbin($src1,$src2,$src3)">, S2_cabacencbin_enc; + "$dst=encbin($src1,$src2,$src3)">, S2_cabacencbin_enc; // Vhist instructions def V6_vhistq @@ -2047,9 +2047,9 @@ let isPseudo = 1, 
isCodeGenOnly = 1, hasSideEffects = 0 in { - def V6_vd0: CVI_VA_Resource<(outs VectorRegs:$dst), (ins), "$dst = #0", []>; + def V6_vd0: CVI_VA_Resource<(outs VectorRegs:$dst), (ins), "$dst=#0", []>; def V6_vd0_128B: CVI_VA_Resource<(outs VectorRegs128B:$dst), (ins), - "$dst = #0", []>; + "$dst=#0", []>; def V6_vassignp: CVI_VA_Resource<(outs VecDblRegs:$dst), (ins VecDblRegs:$src), "", []>; Index: lib/Target/Hexagon/HexagonIsetDx.td =================================================================== --- lib/Target/Hexagon/HexagonIsetDx.td +++ lib/Target/Hexagon/HexagonIsetDx.td @@ -16,7 +16,7 @@ def SA1_combine1i: SUBInst < (outs DoubleRegs:$Rdd), (ins u2_0Imm:$u2), - "$Rdd = combine(#1, #$u2)"> { + "$Rdd=combine(#1,#$u2)"> { bits<3> Rdd; bits<2> u2; @@ -64,7 +64,7 @@ def SA1_combine3i: SUBInst < (outs DoubleRegs:$Rdd), (ins u2_0Imm:$u2), - "$Rdd = combine(#3, #$u2)"> { + "$Rdd=combine(#3,#$u2)"> { bits<3> Rdd; bits<2> u2; @@ -80,7 +80,7 @@ def SS2_storebi0: SUBInst < (outs ), (ins IntRegs:$Rs, u4_0Imm:$u4_0), - "memb($Rs + #$u4_0)=#0"> { + "memb($Rs+#$u4_0)=#0"> { bits<4> Rs; bits<4> u4_0; @@ -94,7 +94,7 @@ def SA1_clrtnew: SUBInst < (outs IntRegs:$Rd), (ins PredRegs:$Pu), - "if ($Pu.new) $Rd = #0"> { + "if ($Pu.new) $Rd=#0"> { bits<4> Rd; let Inst{12-9} = 0b1101; @@ -107,7 +107,7 @@ def SL2_loadruh_io: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs, u3_1Imm:$u3_1), - "$Rd = memuh($Rs + #$u3_1)"> { + "$Rd=memuh($Rs+#$u3_1)"> { bits<4> Rd; bits<4> Rs; bits<4> u3_1; @@ -133,7 +133,7 @@ def SA1_addi: SUBInst < (outs IntRegs:$Rx), (ins IntRegs:$_src_, s7_0Ext:$s7), - "$Rx = add($_src_, #$s7)" , + "$Rx=add($_src_,#$s7)" , [] , "$_src_ = $Rx"> { bits<4> Rx; @@ -149,7 +149,7 @@ def SL1_loadrub_io: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs, u4_0Imm:$u4_0), - "$Rd = memub($Rs + #$u4_0)"> { + "$Rd=memub($Rs+#$u4_0)"> { bits<4> Rd; bits<4> Rs; bits<4> u4_0; @@ -165,7 +165,7 @@ def SL1_loadri_io: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs, u4_2Imm:$u4_2), - "$Rd = memw($Rs + #$u4_2)"> { + "$Rd=memw($Rs+#$u4_2)"> { bits<4> Rd; bits<4> Rs; bits<6> u4_2; @@ -181,7 +181,7 @@ def SA1_cmpeqi: SUBInst < (outs ), (ins IntRegs:$Rs, u2_0Imm:$u2), - "p0 = cmp.eq($Rs, #$u2)"> { + "p0=cmp.eq($Rs,#$u2)"> { bits<4> Rs; bits<2> u2; @@ -195,7 +195,7 @@ def SA1_combinerz: SUBInst < (outs DoubleRegs:$Rdd), (ins IntRegs:$Rs), - "$Rdd = combine($Rs, #0)"> { + "$Rdd=combine($Rs,#0)"> { bits<3> Rdd; bits<4> Rs; @@ -234,7 +234,7 @@ def SS2_storeh_io: SUBInst < (outs ), (ins IntRegs:$Rs, u3_1Imm:$u3_1, IntRegs:$Rt), - "memh($Rs + #$u3_1) = $Rt"> { + "memh($Rs+#$u3_1)=$Rt"> { bits<4> Rs; bits<4> u3_1; bits<4> Rt; @@ -250,7 +250,7 @@ def SS2_storewi0: SUBInst < (outs ), (ins IntRegs:$Rs, u4_2Imm:$u4_2), - "memw($Rs + #$u4_2)=#0"> { + "memw($Rs+#$u4_2)=#0"> { bits<4> Rs; bits<6> u4_2; @@ -264,7 +264,7 @@ def SS2_storewi1: SUBInst < (outs ), (ins IntRegs:$Rs, u4_2Imm:$u4_2), - "memw($Rs + #$u4_2)=#1"> { + "memw($Rs+#$u4_2)=#1"> { bits<4> Rs; bits<6> u4_2; @@ -288,7 +288,7 @@ def SA1_combinezr: SUBInst < (outs DoubleRegs:$Rdd), (ins IntRegs:$Rs), - "$Rdd = combine(#0, $Rs)"> { + "$Rdd=combine(#0,$Rs)"> { bits<3> Rdd; bits<4> Rs; @@ -304,7 +304,7 @@ def SL2_loadrh_io: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs, u3_1Imm:$u3_1), - "$Rd = memh($Rs + #$u3_1)"> { + "$Rd=memh($Rs+#$u3_1)"> { bits<4> Rd; bits<4> Rs; bits<4> u3_1; @@ -320,7 +320,7 @@ def SA1_addrx: SUBInst < (outs IntRegs:$Rx), (ins IntRegs:$_src_, IntRegs:$Rs), - "$Rx = add($_src_, $Rs)" , + "$Rx=add($_src_,$Rs)" , [] , "$_src_ = $Rx"> { bits<4> Rx; @@ 
-336,7 +336,7 @@ def SA1_setin1: SUBInst < (outs IntRegs:$Rd), (ins ), - "$Rd = #{-1}"> { + "$Rd=#{-1}"> { bits<4> Rd; let Inst{12-9} = 0b1101; @@ -349,7 +349,7 @@ def SA1_sxth: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = sxth($Rs)"> { + "$Rd=sxth($Rs)"> { bits<4> Rd; bits<4> Rs; @@ -363,7 +363,7 @@ def SA1_combine0i: SUBInst < (outs DoubleRegs:$Rdd), (ins u2_0Imm:$u2), - "$Rdd = combine(#0, #$u2)"> { + "$Rdd=combine(#0,#$u2)"> { bits<3> Rdd; bits<2> u2; @@ -379,7 +379,7 @@ def SA1_combine2i: SUBInst < (outs DoubleRegs:$Rdd), (ins u2_0Imm:$u2), - "$Rdd = combine(#2, #$u2)"> { + "$Rdd=combine(#2,#$u2)"> { bits<3> Rdd; bits<2> u2; @@ -395,7 +395,7 @@ def SA1_sxtb: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = sxtb($Rs)"> { + "$Rd=sxtb($Rs)"> { bits<4> Rd; bits<4> Rs; @@ -410,7 +410,7 @@ def SA1_clrf: SUBInst < (outs IntRegs:$Rd), (ins PredRegs:$Pu), - "if (!$Pu) $Rd = #0"> { + "if (!$Pu) $Rd=#0"> { bits<4> Rd; let Inst{12-9} = 0b1101; @@ -423,7 +423,7 @@ def SL2_loadrb_io: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs, u3_0Imm:$u3_0), - "$Rd = memb($Rs + #$u3_0)"> { + "$Rd=memb($Rs+#$u3_0)"> { bits<4> Rd; bits<4> Rs; bits<3> u3_0; @@ -439,7 +439,7 @@ def SA1_tfr: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = $Rs"> { + "$Rd=$Rs"> { bits<4> Rd; bits<4> Rs; @@ -453,7 +453,7 @@ def SL2_loadrd_sp: SUBInst < (outs DoubleRegs:$Rdd), (ins u5_3Imm:$u5_3), - "$Rdd = memd(r29 + #$u5_3)"> { + "$Rdd=memd(r29+#$u5_3)"> { bits<3> Rdd; bits<8> u5_3; @@ -467,7 +467,7 @@ def SA1_and1: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = and($Rs, #1)"> { + "$Rd=and($Rs,#1)"> { bits<4> Rd; bits<4> Rs; @@ -481,7 +481,7 @@ def SS2_storebi1: SUBInst < (outs ), (ins IntRegs:$Rs, u4_0Imm:$u4_0), - "memb($Rs + #$u4_0)=#1"> { + "memb($Rs+#$u4_0)=#1"> { bits<4> Rs; bits<4> u4_0; @@ -495,7 +495,7 @@ def SA1_inc: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = add($Rs, #1)"> { + "$Rd=add($Rs,#1)"> { bits<4> Rd; bits<4> Rs; @@ -509,7 +509,7 @@ def SS2_stored_sp: SUBInst < (outs ), (ins s6_3Imm:$s6_3, DoubleRegs:$Rtt), - "memd(r29 + #$s6_3) = $Rtt"> { + "memd(r29+#$s6_3)=$Rtt"> { bits<9> s6_3; bits<3> Rtt; @@ -523,7 +523,7 @@ def SS2_storew_sp: SUBInst < (outs ), (ins u5_2Imm:$u5_2, IntRegs:$Rt), - "memw(r29 + #$u5_2) = $Rt"> { + "memw(r29+#$u5_2)=$Rt"> { bits<7> u5_2; bits<4> Rt; @@ -548,7 +548,7 @@ def SA1_clrt: SUBInst < (outs IntRegs:$Rd), (ins PredRegs:$Pu), - "if ($Pu) $Rd = #0"> { + "if ($Pu) $Rd=#0"> { bits<4> Rd; let Inst{12-9} = 0b1101; @@ -571,7 +571,7 @@ def SA1_dec: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = add($Rs,#{-1})"> { + "$Rd=add($Rs,#{-1})"> { bits<4> Rd; bits<4> Rs; @@ -585,7 +585,7 @@ def SA1_seti: SUBInst < (outs IntRegs:$Rd), (ins u6_0Ext:$u6), - "$Rd = #$u6"> { + "$Rd=#$u6"> { bits<4> Rd; bits<6> u6; @@ -610,7 +610,7 @@ def SA1_clrfnew: SUBInst < (outs IntRegs:$Rd), (ins PredRegs:$Pu), - "if (!$Pu.new) $Rd = #0"> { + "if (!$Pu.new) $Rd=#0"> { bits<4> Rd; let Inst{12-9} = 0b1101; @@ -623,7 +623,7 @@ def SS1_storew_io: SUBInst < (outs ), (ins IntRegs:$Rs, u4_2Imm:$u4_2, IntRegs:$Rt), - "memw($Rs + #$u4_2) = $Rt"> { + "memw($Rs+#$u4_2)=$Rt"> { bits<4> Rs; bits<6> u4_2; bits<4> Rt; @@ -639,7 +639,7 @@ def SA1_zxtb: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = and($Rs, #255)"> { + "$Rd=and($Rs,#255)"> { bits<4> Rd; bits<4> Rs; @@ -653,7 +653,7 @@ def SA1_addsp: SUBInst < (outs IntRegs:$Rd), (ins u6_2Imm:$u6_2), - "$Rd = add(r29, #$u6_2)"> { + "$Rd=add(r29,#$u6_2)"> { bits<4> Rd; bits<8> u6_2; @@ -667,7 +667,7 @@ def 
SL2_loadri_sp: SUBInst < (outs IntRegs:$Rd), (ins u5_2Imm:$u5_2), - "$Rd = memw(r29 + #$u5_2)"> { + "$Rd=memw(r29+#$u5_2)"> { bits<4> Rd; bits<7> u5_2; @@ -681,7 +681,7 @@ def SS1_storeb_io: SUBInst < (outs ), (ins IntRegs:$Rs, u4_0Imm:$u4_0, IntRegs:$Rt), - "memb($Rs + #$u4_0) = $Rt"> { + "memb($Rs+#$u4_0)=$Rt"> { bits<4> Rs; bits<4> u4_0; bits<4> Rt; @@ -717,7 +717,7 @@ def SA1_zxth: SUBInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = zxth($Rs)"> { + "$Rd=zxth($Rs)"> { bits<4> Rd; bits<4> Rs; Index: lib/Target/Hexagon/HexagonSystemInst.td =================================================================== --- lib/Target/Hexagon/HexagonSystemInst.td +++ lib/Target/Hexagon/HexagonSystemInst.td @@ -69,9 +69,9 @@ let isSoloAX = 1, hasSideEffects = 1, Rd = 0 in { def Y4_l2fetch: ST_MISC_CACHEOP_SYS<(outs), (ins IntRegs:$Rs, IntRegs:$Rt), - "l2fetch($Rs, $Rt)", [], 0b011, 0b000, 0b0>; + "l2fetch($Rs,$Rt)", [], 0b011, 0b000, 0b0>; def Y5_l2fetch: ST_MISC_CACHEOP_SYS<(outs), (ins IntRegs:$Rs, DoubleRegs:$Rt), - "l2fetch($Rs, $Rt)", [], 0b011, 0b010, 0b0>; + "l2fetch($Rs,$Rt)", [], 0b011, 0b010, 0b0>; } let hasSideEffects = 0, isSolo = 1 in Index: test/CodeGen/Hexagon/BranchPredict.ll =================================================================== --- test/CodeGen/Hexagon/BranchPredict.ll +++ test/CodeGen/Hexagon/BranchPredict.ll @@ -9,7 +9,7 @@ @j = external global i32 define i32 @foo(i32 %a) nounwind { -; CHECK: if{{ *}}(!p{{[0-3]}}.new) jump:nt +; CHECK: if (!p{{[0-3]}}.new) jump:nt entry: %tobool = icmp eq i32 %a, 0 br i1 %tobool, label %if.else, label %if.then, !prof !0 @@ -31,7 +31,7 @@ declare i32 @foobar(...) define i32 @bar(i32 %a) nounwind { -; CHECK: if{{ *}}(p{{[0-3]}}.new) jump:nt +; CHECK: if (p{{[0-3]}}.new) jump:nt entry: %tobool = icmp eq i32 %a, 0 br i1 %tobool, label %if.else, label %if.then, !prof !1 @@ -51,7 +51,7 @@ } define i32 @foo_bar(i32 %a, i16 signext %b) nounwind { -; CHECK: if{{ *}}(!cmp.eq(r{{[0-9]*}}.new, #0)) jump:nt +; CHECK: if (!cmp.eq(r{{[0-9]*}}.new,#0)) jump:nt entry: %0 = load i32, i32* @j, align 4 %tobool = icmp eq i32 %0, 0 Index: test/CodeGen/Hexagon/absaddr-store.ll =================================================================== --- test/CodeGen/Hexagon/absaddr-store.ll +++ test/CodeGen/Hexagon/absaddr-store.ll @@ -10,7 +10,7 @@ @d = external global i64 define zeroext i8 @absStoreByte() nounwind { -; CHECK: memb(##b1){{ *}}={{ *}}r{{[0-9]+}} +; CHECK: memb(##b1)=r{{[0-9]+}} entry: %0 = load i8, i8* @b0, align 1 %conv = zext i8 %0 to i32 @@ -21,7 +21,7 @@ } define signext i16 @absStoreHalf() nounwind { -; CHECK: memh(##c1){{ *}}={{ *}}r{{[0-9]+}} +; CHECK: memh(##c1)=r{{[0-9]+}} entry: %0 = load i16, i16* @c0, align 2 %conv = sext i16 %0 to i32 @@ -32,7 +32,7 @@ } define i32 @absStoreWord() nounwind { -; CHECK: memw(##a1){{ *}}={{ *}}r{{[0-9]+}} +; CHECK: memw(##a1)=r{{[0-9]+}} entry: %0 = load i32, i32* @a0, align 4 %mul = mul nsw i32 100, %0 @@ -41,7 +41,7 @@ } define void @absStoreDouble() nounwind { -; CHECK: memd(##d){{ *}}={{ *}}r{{[0-9]+}}:{{[0-9]+}} +; CHECK: memd(##d)=r{{[0-9]+}}:{{[0-9]+}} entry: store i64 100, i64* @d, align 8 ret void Index: test/CodeGen/Hexagon/absimm.ll =================================================================== --- test/CodeGen/Hexagon/absimm.ll +++ test/CodeGen/Hexagon/absimm.ll @@ -3,7 +3,7 @@ ; with immediate value. 
define i32 @f1(i32 %i) nounwind { -; CHECK: memw(##786432){{ *}}={{ *}}r{{[0-9]+}} +; CHECK: memw(##786432)=r{{[0-9]+}} entry: store volatile i32 %i, i32* inttoptr (i32 786432 to i32*), align 262144 ret i32 %i @@ -11,7 +11,7 @@ define i32* @f2(i32* nocapture %i) nounwind { entry: -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(##786432) +; CHECK: r{{[0-9]+}}=memw(##786432) %0 = load volatile i32, i32* inttoptr (i32 786432 to i32*), align 262144 %1 = inttoptr i32 %0 to i32* ret i32* %1 Index: test/CodeGen/Hexagon/adde.ll =================================================================== --- test/CodeGen/Hexagon/adde.ll +++ test/CodeGen/Hexagon/adde.ll @@ -1,17 +1,17 @@ ; RUN: llc -march=hexagon -disable-hsdr -hexagon-expand-condsets=0 -hexagon-bit=0 -disable-post-ra < %s | FileCheck %s -; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #1) -; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #0) -; CHECK: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}}) -; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}}) -; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}}) -; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}) -; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}) -; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}}) -; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}) -; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}) -; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}}) -; CHECK: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}}) +; CHECK: r{{[0-9]+:[0-9]+}}=combine(#0,#1) +; CHECK: r{{[0-9]+:[0-9]+}}=combine(#0,#0) +; CHECK: r{{[0-9]+:[0-9]+}}=add(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}}) +; CHECK: p{{[0-9]+}}=cmp.gtu(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}}) +; CHECK: p{{[0-9]+}}=cmp.gtu(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}}) +; CHECK: r{{[0-9]+}}=mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}}) +; CHECK: r{{[0-9]+}}=mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}}) +; CHECK: r{{[0-9]+:[0-9]+}}=combine(r{{[0-9]+}},r{{[0-9]+}}) +; CHECK: r{{[0-9]+}}=mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}}) +; CHECK: r{{[0-9]+}}=mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}}) +; CHECK: r{{[0-9]+:[0-9]+}}=combine(r{{[0-9]+}},r{{[0-9]+}}) +; CHECK: r{{[0-9]+:[0-9]+}}=add(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}}) define void @check_adde_addc (i64 %AL, i64 %AH, i64 %BL, i64 %BH, i64* %RL, i64* %RH) { Index: test/CodeGen/Hexagon/addh-sext-trunc.ll =================================================================== --- test/CodeGen/Hexagon/addh-sext-trunc.ll +++ test/CodeGen/Hexagon/addh-sext-trunc.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s -; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{H|h}}) +; CHECK: r{{[0-9]+}}=add(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{H|h}}) target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" target triple = "hexagon-unknown-none" Index: test/CodeGen/Hexagon/addh-shifted.ll =================================================================== --- test/CodeGen/Hexagon/addh-shifted.ll +++ test/CodeGen/Hexagon/addh-shifted.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s -; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{L|l}}):<<16 +; CHECK: r{{[0-9]+}}=add(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{L|l}}):<<16 define i64 @test_cast(i64 %arg0, i16 zeroext %arg1, i16 zeroext %arg2) nounwind readnone { entry: Index: test/CodeGen/Hexagon/addh.ll 
=================================================================== --- test/CodeGen/Hexagon/addh.ll +++ test/CodeGen/Hexagon/addh.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s -; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{L|l}}) +; CHECK: r{{[0-9]+}}=add(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{L|l}}) define i64 @test_cast(i64 %arg0, i16 zeroext %arg1, i16 zeroext %arg2) nounwind readnone { entry: Index: test/CodeGen/Hexagon/addr-calc-opt.ll =================================================================== --- test/CodeGen/Hexagon/addr-calc-opt.ll +++ test/CodeGen/Hexagon/addr-calc-opt.ll @@ -4,7 +4,7 @@ ; calculation. ; -; CHECK: r0 = memub(r{{[0-9]+}}<<#3{{ *}}+{{ *}}##the_global+516) +; CHECK: r0=memub(r{{[0-9]+}}<<#3+##the_global+516) %0 = type { [3 x %1] } %1 = type { %2, i8, i8, i8, i8, i8, [4 x i8], i8, [10 x i8], [10 x i8], [10 x i8], i8, [3 x %4], i16, i16, i16, i16, i32, i8, [4 x i8], i8, i8, i8, i8, %5, i8, i8, i8, i8, i8, i16, i8, i8, i8, i16, i16, i8, i8, [2 x i8], [2 x i8], i8, i8, i8, i8, i8, i16, i16, i8, i8, i8, i8, i8, i8, %9, i8, [6 x [2 x i8]], i16, i32, %10, [28 x i8], [4 x %17] } Index: test/CodeGen/Hexagon/addrmode-indoff.ll =================================================================== --- test/CodeGen/Hexagon/addrmode-indoff.ll +++ test/CodeGen/Hexagon/addrmode-indoff.ll @@ -5,7 +5,7 @@ @ga = common global [1024 x i8] zeroinitializer, align 8 @gb = common global [1024 x i8] zeroinitializer, align 8 -; CHECK: memub(r{{[0-9]+}}{{ *}}<<{{ *}}#0{{ *}}+{{ *}}##ga) +; CHECK: memub(r{{[0-9]+}}<<#0+##ga) define zeroext i8 @lf2(i32 %i) nounwind readonly { entry: %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %i @@ -13,7 +13,7 @@ ret i8 %0 } -; CHECK: memb(r{{[0-9]+}}{{ *}}<<{{ *}}#0{{ *}}+{{ *}}##gb) +; CHECK: memb(r{{[0-9]+}}<<#0+##gb) define signext i8 @lf2s(i32 %i) nounwind readonly { entry: %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @gb, i32 0, i32 %i @@ -21,7 +21,7 @@ ret i8 %0 } -; CHECK: memub(r{{[0-9]+}}{{ *}}<<{{ *}}#2{{ *}}+{{ *}}##ga) +; CHECK: memub(r{{[0-9]+}}<<#2+##ga) define zeroext i8 @lf3(i32 %i) nounwind readonly { entry: %mul = shl nsw i32 %i, 2 @@ -30,7 +30,7 @@ ret i8 %0 } -; CHECK: memb(r{{[0-9]+}}{{ *}}<<{{ *}}#2{{ *}}+{{ *}}##gb) +; CHECK: memb(r{{[0-9]+}}<<#2+##gb) define signext i8 @lf3s(i32 %i) nounwind readonly { entry: %mul = shl nsw i32 %i, 2 @@ -39,7 +39,7 @@ ret i8 %0 } -; CHECK: memb(r{{[0-9]+}}{{ *}}<<{{ *}}#0{{ *}}+{{ *}}##ga) +; CHECK: memb(r{{[0-9]+}}<<#0+##ga) define void @sf4(i32 %i, i8 zeroext %j) nounwind { entry: %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %i @@ -47,7 +47,7 @@ ret void } -; CHECK: memb(r{{[0-9]+}}{{ *}}<<{{ *}}#0{{ *}}+{{ *}}##gb) +; CHECK: memb(r{{[0-9]+}}<<#0+##gb) define void @sf4s(i32 %i, i8 signext %j) nounwind { entry: %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @gb, i32 0, i32 %i @@ -55,7 +55,7 @@ ret void } -; CHECK: memb(r{{[0-9]+}}{{ *}}<<{{ *}}#2{{ *}}+{{ *}}##ga) +; CHECK: memb(r{{[0-9]+}}<<#2+##ga) define void @sf5(i32 %i, i8 zeroext %j) nounwind { entry: %mul = shl nsw i32 %i, 2 @@ -64,7 +64,7 @@ ret void } -; CHECK: memb(r{{[0-9]+}}{{ *}}<<{{ *}}#2{{ *}}+{{ *}}##gb) +; CHECK: memb(r{{[0-9]+}}<<#2+##gb) define void @sf5s(i32 %i, i8 signext %j) nounwind { entry: %mul = shl nsw i32 %i, 2 Index: test/CodeGen/Hexagon/alu64.ll =================================================================== --- test/CodeGen/Hexagon/alu64.ll +++ test/CodeGen/Hexagon/alu64.ll @@ -1,7 +1,7 
@@ ; RUN: llc -march=hexagon -O0 < %s | FileCheck %s ; CHECK-LABEL: @test00 -; CHECK: = cmp.eq(r1:0, r3:2) +; CHECK:=cmp.eq(r1:0,r3:2) define i32 @test00(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.C2.cmpeqp(i64 %Rs, i64 %Rt) @@ -9,7 +9,7 @@ } ; CHECK-LABEL: @test01 -; CHECK: = cmp.gt(r1:0, r3:2) +; CHECK:=cmp.gt(r1:0,r3:2) define i32 @test01(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.C2.cmpgtp(i64 %Rs, i64 %Rt) @@ -17,7 +17,7 @@ } ; CHECK-LABEL: @test02 -; CHECK: = cmp.gtu(r1:0, r3:2) +; CHECK:=cmp.gtu(r1:0,r3:2) define i32 @test02(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.C2.cmpgtup(i64 %Rs, i64 %Rt) @@ -25,7 +25,7 @@ } ; CHECK-LABEL: @test10 -; CHECK: = cmp.eq(r0, r1) +; CHECK:=cmp.eq(r0,r1) define i32 @test10(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.rcmpeq(i32 %Rs, i32 %Rt) @@ -33,7 +33,7 @@ } ; CHECK-LABEL: @test11 -; CHECK: = !cmp.eq(r0, r1) +; CHECK:=!cmp.eq(r0,r1) define i32 @test11(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.rcmpneq(i32 %Rs, i32 %Rt) @@ -41,7 +41,7 @@ } ; CHECK-LABEL: @test12 -; CHECK: = cmp.eq(r0, #23) +; CHECK:=cmp.eq(r0,#23) define i32 @test12(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.rcmpeqi(i32 %Rs, i32 23) @@ -49,7 +49,7 @@ } ; CHECK-LABEL: @test13 -; CHECK: = !cmp.eq(r0, #47) +; CHECK:=!cmp.eq(r0,#47) define i32 @test13(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.rcmpneqi(i32 %Rs, i32 47) @@ -57,7 +57,7 @@ } ; CHECK-LABEL: @test20 -; CHECK: = cmpb.eq(r0, r1) +; CHECK:=cmpb.eq(r0,r1) define i32 @test20(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.cmpbeq(i32 %Rs, i32 %Rt) @@ -65,7 +65,7 @@ } ; CHECK-LABEL: @test21 -; CHECK: = cmpb.gt(r0, r1) +; CHECK:=cmpb.gt(r0,r1) define i32 @test21(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.cmpbgt(i32 %Rs, i32 %Rt) @@ -73,7 +73,7 @@ } ; CHECK-LABEL: @test22 -; CHECK: = cmpb.gtu(r0, r1) +; CHECK:=cmpb.gtu(r0,r1) define i32 @test22(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.cmpbgtu(i32 %Rs, i32 %Rt) @@ -81,7 +81,7 @@ } ; CHECK-LABEL: @test23 -; CHECK: = cmpb.eq(r0, #56) +; CHECK:=cmpb.eq(r0,#56) define i32 @test23(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.cmpbeqi(i32 %Rs, i32 56) @@ -89,7 +89,7 @@ } ; CHECK-LABEL: @test24 -; CHECK: = cmpb.gt(r0, #29) +; CHECK:=cmpb.gt(r0,#29) define i32 @test24(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.cmpbgti(i32 %Rs, i32 29) @@ -97,7 +97,7 @@ } ; CHECK-LABEL: @test25 -; CHECK: = cmpb.gtu(r0, #111) +; CHECK:=cmpb.gtu(r0,#111) define i32 @test25(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.cmpbgtui(i32 %Rs, i32 111) @@ -105,7 +105,7 @@ } ; CHECK-LABEL: @test30 -; CHECK: = cmph.eq(r0, r1) +; CHECK:=cmph.eq(r0,r1) define i32 @test30(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.cmpheq(i32 %Rs, i32 %Rt) @@ -113,7 +113,7 @@ } ; CHECK-LABEL: @test31 -; CHECK: = cmph.gt(r0, r1) +; CHECK:=cmph.gt(r0,r1) define i32 @test31(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.cmphgt(i32 %Rs, i32 %Rt) @@ -121,7 +121,7 @@ } ; CHECK-LABEL: @test32 -; CHECK: = cmph.gtu(r0, r1) +; CHECK:=cmph.gtu(r0,r1) define i32 @test32(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.cmphgtu(i32 %Rs, i32 %Rt) @@ -129,7 +129,7 @@ } ; CHECK-LABEL: @test33 -; CHECK: = cmph.eq(r0, #-123) +; CHECK:=cmph.eq(r0,#-123) define i32 @test33(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.cmpheqi(i32 %Rs, i32 -123) @@ -137,7 +137,7 
@@ } ; CHECK-LABEL: @test34 -; CHECK: = cmph.gt(r0, #-3) +; CHECK:=cmph.gt(r0,#-3) define i32 @test34(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.cmphgti(i32 %Rs, i32 -3) @@ -145,7 +145,7 @@ } ; CHECK-LABEL: @test35 -; CHECK: = cmph.gtu(r0, #13) +; CHECK:=cmph.gtu(r0,#13) define i32 @test35(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.cmphgtui(i32 %Rs, i32 13) @@ -153,7 +153,7 @@ } ; CHECK-LABEL: @test40 -; CHECK: = vmux(p0, r3:2, r5:4) +; CHECK:=vmux(p0,r3:2,r5:4) define i64 @test40(i32 %Pu, i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i64 @llvm.hexagon.C2.vmux(i32 %Pu, i64 %Rs, i64 %Rt) @@ -161,7 +161,7 @@ } ; CHECK-LABEL: @test41 -; CHECK: = any8(vcmpb.eq(r1:0, r3:2)) +; CHECK:=any8(vcmpb.eq(r1:0,r3:2)) define i32 @test41(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %Rs, i64 %Rt) @@ -169,7 +169,7 @@ } ; CHECK-LABEL: @test50 -; CHECK: = add(r1:0, r3:2) +; CHECK:=add(r1:0,r3:2) define i64 @test50(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i64 @llvm.hexagon.A2.addp(i64 %Rs, i64 %Rt) @@ -177,7 +177,7 @@ } ; CHECK-LABEL: @test51 -; CHECK: = add(r1:0, r3:2):sat +; CHECK:=add(r1:0,r3:2):sat define i64 @test51(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i64 @llvm.hexagon.A2.addpsat(i64 %Rs, i64 %Rt) @@ -185,7 +185,7 @@ } ; CHECK-LABEL: @test52 -; CHECK: = sub(r1:0, r3:2) +; CHECK:=sub(r1:0,r3:2) define i64 @test52(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i64 @llvm.hexagon.A2.subp(i64 %Rs, i64 %Rt) @@ -193,7 +193,7 @@ } ; CHECK-LABEL: @test53 -; CHECK: = add(r1:0, r3:2):raw: +; CHECK: =add(r1:0,r3:2):raw: define i64 @test53(i32 %Rs, i64 %Rt) #0 { entry: %0 = tail call i64 @llvm.hexagon.A2.addsp(i32 %Rs, i64 %Rt) @@ -201,7 +201,7 @@ } ; CHECK-LABEL: @test54 -; CHECK: = and(r1:0, r3:2) +; CHECK:=and(r1:0,r3:2) define i64 @test54(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i64 @llvm.hexagon.A2.andp(i64 %Rs, i64 %Rt) @@ -209,7 +209,7 @@ } ; CHECK-LABEL: @test55 -; CHECK: = or(r1:0, r3:2) +; CHECK:=or(r1:0,r3:2) define i64 @test55(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i64 @llvm.hexagon.A2.orp(i64 %Rs, i64 %Rt) @@ -217,7 +217,7 @@ } ; CHECK-LABEL: @test56 -; CHECK: = xor(r1:0, r3:2) +; CHECK:=xor(r1:0,r3:2) define i64 @test56(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i64 @llvm.hexagon.A2.xorp(i64 %Rs, i64 %Rt) @@ -225,7 +225,7 @@ } ; CHECK-LABEL: @test57 -; CHECK: = and(r1:0, ~r3:2) +; CHECK:=and(r1:0,~r3:2) define i64 @test57(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i64 @llvm.hexagon.A4.andnp(i64 %Rs, i64 %Rt) @@ -233,7 +233,7 @@ } ; CHECK-LABEL: @test58 -; CHECK: = or(r1:0, ~r3:2) +; CHECK:=or(r1:0,~r3:2) define i64 @test58(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i64 @llvm.hexagon.A4.ornp(i64 %Rs, i64 %Rt) @@ -241,7 +241,7 @@ } ; CHECK-LABEL: @test60 -; CHECK: = add(r0.l, r1.l) +; CHECK:=add(r0.l,r1.l) define i32 @test60(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %Rs, i32 %Rt) @@ -249,7 +249,7 @@ } ; CHECK-LABEL: @test61 -; CHECK: = add(r0.l, r1.h) +; CHECK:=add(r0.l,r1.h) define i32 @test61(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %Rs, i32 %Rt) @@ -257,7 +257,7 @@ } ; CHECK-LABEL: @test62 -; CHECK: = add(r0.l, r1.l):sat +; CHECK:=add(r0.l,r1.l):sat define i32 @test62(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %Rs, i32 %Rt) @@ -265,7 +265,7 @@ } ; CHECK-LABEL: @test63 -; CHECK: = add(r0.l, r1.h):sat +; CHECK:=add(r0.l,r1.h):sat define i32 @test63(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call 
i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %Rs, i32 %Rt) @@ -273,7 +273,7 @@ } ; CHECK-LABEL: @test64 -; CHECK: = add(r0.l, r1.l):<<16 +; CHECK:=add(r0.l,r1.l):<<16 define i32 @test64(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %Rs, i32 %Rt) @@ -281,7 +281,7 @@ } ; CHECK-LABEL: @test65 -; CHECK: = add(r0.l, r1.h):<<16 +; CHECK:=add(r0.l,r1.h):<<16 define i32 @test65(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %Rs, i32 %Rt) @@ -289,7 +289,7 @@ } ; CHECK-LABEL: @test66 -; CHECK: = add(r0.h, r1.l):<<16 +; CHECK:=add(r0.h,r1.l):<<16 define i32 @test66(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %Rs, i32 %Rt) @@ -297,7 +297,7 @@ } ; CHECK-LABEL: @test67 -; CHECK: = add(r0.h, r1.h):<<16 +; CHECK:=add(r0.h,r1.h):<<16 define i32 @test67(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %Rs, i32 %Rt) @@ -305,7 +305,7 @@ } ; CHECK-LABEL: @test68 -; CHECK: = add(r0.l, r1.l):sat:<<16 +; CHECK:=add(r0.l,r1.l):sat:<<16 define i32 @test68(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %Rs, i32 %Rt) @@ -313,7 +313,7 @@ } ; CHECK-LABEL: @test69 -; CHECK: = add(r0.l, r1.h):sat:<<16 +; CHECK:=add(r0.l,r1.h):sat:<<16 define i32 @test69(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %Rs, i32 %Rt) @@ -321,7 +321,7 @@ } ; CHECK-LABEL: @test6A -; CHECK: = add(r0.h, r1.l):sat:<<16 +; CHECK:=add(r0.h,r1.l):sat:<<16 define i32 @test6A(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %Rs, i32 %Rt) @@ -329,7 +329,7 @@ } ; CHECK-LABEL: @test6B -; CHECK: = add(r0.h, r1.h):sat:<<16 +; CHECK:=add(r0.h,r1.h):sat:<<16 define i32 @test6B(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %Rs, i32 %Rt) @@ -337,7 +337,7 @@ } ; CHECK-LABEL: @test70 -; CHECK: = sub(r0.l, r1.l) +; CHECK:=sub(r0.l,r1.l) define i32 @test70(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %Rs, i32 %Rt) @@ -345,7 +345,7 @@ } ; CHECK-LABEL: @test71 -; CHECK: = sub(r0.l, r1.h) +; CHECK:=sub(r0.l,r1.h) define i32 @test71(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %Rs, i32 %Rt) @@ -353,7 +353,7 @@ } ; CHECK-LABEL: @test72 -; CHECK: = sub(r0.l, r1.l):sat +; CHECK:=sub(r0.l,r1.l):sat define i32 @test72(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %Rs, i32 %Rt) @@ -361,7 +361,7 @@ } ; CHECK-LABEL: @test73 -; CHECK: = sub(r0.l, r1.h):sat +; CHECK:=sub(r0.l,r1.h):sat define i32 @test73(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %Rs, i32 %Rt) @@ -369,7 +369,7 @@ } ; CHECK-LABEL: @test74 -; CHECK: = sub(r0.l, r1.l):<<16 +; CHECK:=sub(r0.l,r1.l):<<16 define i32 @test74(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %Rs, i32 %Rt) @@ -377,7 +377,7 @@ } ; CHECK-LABEL: @test75 -; CHECK: = sub(r0.l, r1.h):<<16 +; CHECK:=sub(r0.l,r1.h):<<16 define i32 @test75(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %Rs, i32 %Rt) @@ -385,7 +385,7 @@ } ; CHECK-LABEL: @test76 -; CHECK: = sub(r0.h, r1.l):<<16 +; CHECK:=sub(r0.h,r1.l):<<16 define i32 @test76(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %Rs, i32 %Rt) @@ -393,7 +393,7 @@ } ; CHECK-LABEL: @test77 -; CHECK: = sub(r0.h, r1.h):<<16 +; CHECK:=sub(r0.h,r1.h):<<16 define i32 
@test77(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %Rs, i32 %Rt) @@ -401,7 +401,7 @@ } ; CHECK-LABEL: @test78 -; CHECK: = sub(r0.l, r1.l):sat:<<16 +; CHECK:=sub(r0.l,r1.l):sat:<<16 define i32 @test78(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %Rs, i32 %Rt) @@ -409,7 +409,7 @@ } ; CHECK-LABEL: @test79 -; CHECK: = sub(r0.l, r1.h):sat:<<16 +; CHECK:=sub(r0.l,r1.h):sat:<<16 define i32 @test79(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %Rs, i32 %Rt) @@ -417,7 +417,7 @@ } ; CHECK-LABEL: @test7A -; CHECK: = sub(r0.h, r1.l):sat:<<16 +; CHECK:=sub(r0.h,r1.l):sat:<<16 define i32 @test7A(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %Rs, i32 %Rt) @@ -425,7 +425,7 @@ } ; CHECK-LABEL: @test7B -; CHECK: = sub(r0.h, r1.h):sat:<<16 +; CHECK:=sub(r0.h,r1.h):sat:<<16 define i32 @test7B(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %Rs, i32 %Rt) @@ -433,7 +433,7 @@ } ; CHECK-LABEL: @test90 -; CHECK: = and(#1, asl(r0, #2)) +; CHECK:=and(#1,asl(r0,#2)) define i32 @test90(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.S4.andi.asl.ri(i32 1, i32 %Rs, i32 2) @@ -441,7 +441,7 @@ } ; CHECK-LABEL: @test91 -; CHECK: = or(#1, asl(r0, #2)) +; CHECK:=or(#1,asl(r0,#2)) define i32 @test91(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.S4.ori.asl.ri(i32 1, i32 %Rs, i32 2) @@ -449,7 +449,7 @@ } ; CHECK-LABEL: @test92 -; CHECK: = add(#1, asl(r0, #2)) +; CHECK:=add(#1,asl(r0,#2)) define i32 @test92(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.S4.addi.asl.ri(i32 1, i32 %Rs, i32 2) @@ -457,7 +457,7 @@ } ; CHECK-LABEL: @test93 -; CHECK: = sub(#1, asl(r0, #2)) +; CHECK:=sub(#1,asl(r0,#2)) define i32 @test93(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.S4.subi.asl.ri(i32 1, i32 %Rs, i32 2) @@ -465,7 +465,7 @@ } ; CHECK-LABEL: @test94 -; CHECK: = and(#1, lsr(r0, #2)) +; CHECK:=and(#1,lsr(r0,#2)) define i32 @test94(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 1, i32 %Rs, i32 2) @@ -473,7 +473,7 @@ } ; CHECK-LABEL: @test95 -; CHECK: = or(#1, lsr(r0, #2)) +; CHECK:=or(#1,lsr(r0,#2)) define i32 @test95(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 1, i32 %Rs, i32 2) @@ -481,7 +481,7 @@ } ; CHECK-LABEL: @test96 -; CHECK: = add(#1, lsr(r0, #2)) +; CHECK:=add(#1,lsr(r0,#2)) define i32 @test96(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 1, i32 %Rs, i32 2) @@ -489,7 +489,7 @@ } ; CHECK-LABEL: @test97 -; CHECK: = sub(#1, lsr(r0, #2)) +; CHECK:=sub(#1,lsr(r0,#2)) define i32 @test97(i32 %Rs) #0 { entry: %0 = tail call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 1, i32 %Rs, i32 2) @@ -497,7 +497,7 @@ } ; CHECK-LABEL: @test100 -; CHECK: = bitsplit(r0, r1) +; CHECK:=bitsplit(r0,r1) define i64 @test100(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i64 @llvm.hexagon.A4.bitsplit(i32 %Rs, i32 %Rt) @@ -505,7 +505,7 @@ } ; CHECK-LABEL: @test101 -; CHECK: = modwrap(r0, r1) +; CHECK:=modwrap(r0,r1) define i32 @test101(i32 %Rs, i32 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.A4.modwrapu(i32 %Rs, i32 %Rt) @@ -513,7 +513,7 @@ } ; CHECK-LABEL: @test102 -; CHECK: = parity(r1:0, r3:2) +; CHECK:=parity(r1:0,r3:2) define i32 @test102(i64 %Rs, i64 %Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.S2.parityp(i64 %Rs, i64 %Rt) @@ -521,7 +521,7 @@ } ; CHECK-LABEL: @test103 -; CHECK: = parity(r0, r1) +; CHECK:=parity(r0,r1) define i32 @test103(i32 %Rs, i32 
%Rt) #0 { entry: %0 = tail call i32 @llvm.hexagon.S4.parity(i32 %Rs, i32 %Rt) Index: test/CodeGen/Hexagon/always-ext.ll =================================================================== --- test/CodeGen/Hexagon/always-ext.ll +++ test/CodeGen/Hexagon/always-ext.ll @@ -7,7 +7,7 @@ ; CHECK: { ; CHECK-NOT: call abort ; CHECK: memw(##0) -; CHECK: memw(r{{[0-9+]}}{{ *}}<<{{ *}}#2{{ *}}+{{ *}}##4) +; CHECK: memw(r{{[0-9+]}}<<#2+##4) ; CHECK: } %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111 = type { i8*, void (%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*)*, i32, i32, i8*, [23 x i32]* } Index: test/CodeGen/Hexagon/args.ll =================================================================== --- test/CodeGen/Hexagon/args.ll +++ test/CodeGen/Hexagon/args.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s -; CHECK: r5:4 = combine(#6, #5) -; CHECK: r3:2 = combine(#4, #3) -; CHECK: r1:0 = combine(#2, #1) +; CHECK: r5:4=combine(#6,#5) +; CHECK: r3:2=combine(#4,#3) +; CHECK: r1:0=combine(#2,#1) ; CHECK: memw(r29+#0)=#7 Index: test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll =================================================================== --- test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll +++ test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll @@ -7,10 +7,10 @@ ; without adding an extra spill of that register. ; ; CHECK: PredSpill: -; CHECK: memd(r29{{.*}}) = r17:16 -; CHECK-DAG: r{{[0-9]+}} = p0 -; CHECK-DAG: p0 = r{{[0-9]+}} -; CHECK-NOT: = memw(r29 +; CHECK: memd(r29{{.*}})=r17:16 +; CHECK-DAG: r{{[0-9]+}}=p0 +; CHECK-DAG: p0=r{{[0-9]+}} +; CHECK-NOT: =memw(r29 ; define void @PredSpill() { Index: test/CodeGen/Hexagon/avoid-predspill.ll =================================================================== --- test/CodeGen/Hexagon/avoid-predspill.ll +++ test/CodeGen/Hexagon/avoid-predspill.ll @@ -3,7 +3,7 @@ ; This checks that predicate registers are moved to GPRs instead of spilling ; where possible. 
-; CHECK: p0 = +; CHECK: p0= ; CHECK-NOT: memw(r29 define i32 @f(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) { Index: test/CodeGen/Hexagon/bit-eval.ll =================================================================== --- test/CodeGen/Hexagon/bit-eval.ll +++ test/CodeGen/Hexagon/bit-eval.ll @@ -4,7 +4,7 @@ target triple = "hexagon" ; CHECK-LABEL: test1: -; CHECK: r0 = ##1073741824 +; CHECK: r0=##1073741824 define i32 @test1() #0 { entry: %0 = tail call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 2147483647, i32 0) @@ -12,7 +12,7 @@ } ; CHECK-LABEL: test2: -; CHECK: r0 = ##1073741824 +; CHECK: r0=##1073741824 define i32 @test2() #0 { entry: %0 = tail call i32 @llvm.hexagon.S2.asr.i.r.rnd.goodsyntax(i32 2147483647, i32 1) @@ -20,7 +20,7 @@ } ; CHECK-LABEL: test3: -; CHECK: r1:0 = combine(#0, #1) +; CHECK: r1:0=combine(#0,#1) define i64 @test3() #0 { entry: %0 = tail call i64 @llvm.hexagon.S4.extractp(i64 -1, i32 63, i32 63) @@ -28,7 +28,7 @@ } ; CHECK-LABEL: test4: -; CHECK: r0 = #1 +; CHECK: r0=#1 define i32 @test4() #0 { entry: %0 = tail call i32 @llvm.hexagon.S4.extract(i32 -1, i32 31, i32 31) @@ -36,7 +36,7 @@ } ; CHECK-LABEL: test5: -; CHECK: r0 = ##-1073741569 +; CHECK: r0=##-1073741569 define i32 @test5() #0 { entry: %0 = tail call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 255, i32 -2147483648, i32 1) Index: test/CodeGen/Hexagon/bit-loop.ll =================================================================== --- test/CodeGen/Hexagon/bit-loop.ll +++ test/CodeGen/Hexagon/bit-loop.ll @@ -1,8 +1,8 @@ ; RUN: llc < %s | FileCheck %s -; CHECK-DAG: memh(r{{[0-9]+}}+#0) = r{{[0-9]+}} -; CHECK-DAG: memh(r{{[0-9]+}}+#2) = r{{[0-9]+}}.h -; CHECK-DAG: memh(r{{[0-9]+}}+#4) = r{{[0-9]+}} -; CHECK-DAG: memh(r{{[0-9]+}}+#6) = r{{[0-9]+}}.h +; CHECK-DAG: memh(r{{[0-9]+}}+#0)=r{{[0-9]+}} +; CHECK-DAG: memh(r{{[0-9]+}}+#2)=r{{[0-9]+}}.h +; CHECK-DAG: memh(r{{[0-9]+}}+#4)=r{{[0-9]+}} +; CHECK-DAG: memh(r{{[0-9]+}}+#6)=r{{[0-9]+}}.h target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32" target triple = "hexagon" Index: test/CodeGen/Hexagon/bit-rie.ll =================================================================== --- test/CodeGen/Hexagon/bit-rie.ll +++ test/CodeGen/Hexagon/bit-rie.ll @@ -1,6 +1,6 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s ; CHECK-LABEL: LBB0{{.*}}if.end -; CHECK: r[[REG:[0-9]+]] = zxth +; CHECK: r[[REG:[0-9]+]]=zxth ; CHECK: lsr(r[[REG]], target triple = "hexagon" Index: test/CodeGen/Hexagon/bit-skip-byval.ll =================================================================== --- test/CodeGen/Hexagon/bit-skip-byval.ll +++ test/CodeGen/Hexagon/bit-skip-byval.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s ; ; Either and or zxtb. -; CHECK: r0 = and(r1, #255) +; CHECK: r0=and(r1,#255) %struct.t0 = type { i32 } Index: test/CodeGen/Hexagon/bit-validate-reg.ll =================================================================== --- test/CodeGen/Hexagon/bit-validate-reg.ll +++ test/CodeGen/Hexagon/bit-validate-reg.ll @@ -3,7 +3,7 @@ ; Make sure we don't generate zxtb to transfer a predicate register into ; a general purpose register. 
-; CHECK: r0 = p0 +; CHECK: r0=p0 ; CHECK-NOT: zxtb(p target triple = "hexagon" Index: test/CodeGen/Hexagon/block-addr.ll =================================================================== --- test/CodeGen/Hexagon/block-addr.ll +++ test/CodeGen/Hexagon/block-addr.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s ; CHECK: .LJTI -; CHECK-DAG: r[[REG:[0-9]+]] = memw(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+<<#[0-9]+}}) +; CHECK-DAG: r[[REG:[0-9]+]]=memw(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+<<#[0-9]+}}) ; CHECK-DAG: jumpr:nt r[[REG]] define void @main() #0 { Index: test/CodeGen/Hexagon/branchfolder-keep-impdef.ll =================================================================== --- test/CodeGen/Hexagon/branchfolder-keep-impdef.ll +++ test/CodeGen/Hexagon/branchfolder-keep-impdef.ll @@ -3,7 +3,7 @@ ; Check that the testcase compiles successfully. Expect that if-conversion ; took place. ; CHECK-LABEL: fred: -; CHECK: if (!p0) r1 = memw(r0 + #0) +; CHECK: if (!p0) r1=memw(r0+#0) target triple = "hexagon" Index: test/CodeGen/Hexagon/brev_ld.ll =================================================================== --- test/CodeGen/Hexagon/brev_ld.ll +++ test/CodeGen/Hexagon/brev_ld.ll @@ -29,7 +29,7 @@ %1 = bitcast i64* %inputLR to i8* %sub = sub i32 13, %shr1 %shl = shl i32 1, %sub -; CHECK: = memd(r{{[0-9]*}} ++ m{{[0-1]}}:brev) +; CHECK: =memd(r{{[0-9]*}}++m{{[0-1]}}:brev) %2 = call i8* @llvm.hexagon.brev.ldd(i8* %0, i8* %1, i32 %shl) %3 = bitcast i8* %1 to i64* %4 = load i64, i64* %3, align 8, !tbaa !0 @@ -49,7 +49,7 @@ %1 = bitcast i32* %inputLR to i8* %sub = sub i32 14, %shr1 %shl = shl i32 1, %sub -; CHECK: = memw(r{{[0-9]*}} ++ m{{[0-1]}}:brev) +; CHECK: =memw(r{{[0-9]*}}++m{{[0-1]}}:brev) %2 = call i8* @llvm.hexagon.brev.ldw(i8* %0, i8* %1, i32 %shl) %3 = bitcast i8* %1 to i32* %4 = load i32, i32* %3, align 4, !tbaa !2 @@ -69,7 +69,7 @@ %1 = bitcast i16* %inputLR to i8* %sub = sub i32 15, %shr1 %shl = shl i32 1, %sub -; CHECK: = memh(r{{[0-9]*}} ++ m0:brev) +; CHECK: =memh(r{{[0-9]*}}++m0:brev) %2 = call i8* @llvm.hexagon.brev.ldh(i8* %0, i8* %1, i32 %shl) %3 = bitcast i8* %1 to i16* %4 = load i16, i16* %3, align 2, !tbaa !3 @@ -89,7 +89,7 @@ %1 = bitcast i16* %inputLR to i8* %sub = sub i32 15, %shr1 %shl = shl i32 1, %sub -; CHECK: = memuh(r{{[0-9]*}} ++ m0:brev) +; CHECK: =memuh(r{{[0-9]*}}++m0:brev) %2 = call i8* @llvm.hexagon.brev.lduh(i8* %0, i8* %1, i32 %shl) %3 = bitcast i8* %1 to i16* %4 = load i16, i16* %3, align 2, !tbaa !3 @@ -108,7 +108,7 @@ %0 = bitcast i16* %arrayidx to i8* %sub = sub nsw i32 16, %shr1 %shl = shl i32 1, %sub -; CHECK: = memub(r{{[0-9]*}} ++ m{{[0-1]}}:brev) +; CHECK: =memub(r{{[0-9]*}}++m{{[0-1]}}:brev) %1 = call i8* @llvm.hexagon.brev.ldub(i8* %0, i8* %inputLR, i32 %shl) %2 = load i8, i8* %inputLR, align 1, !tbaa !0 ret i8 %2 @@ -126,7 +126,7 @@ %0 = bitcast i16* %arrayidx to i8* %sub = sub nsw i32 16, %shr1 %shl = shl i32 1, %sub -; CHECK: = memb(r{{[0-9]*}} ++ m{{[0-1]}}:brev) +; CHECK: =memb(r{{[0-9]*}}++m{{[0-1]}}:brev) %1 = call i8* @llvm.hexagon.brev.ldb(i8* %0, i8* %inputLR, i32 %shl) %2 = load i8, i8* %inputLR, align 1, !tbaa !0 ret i8 %2 Index: test/CodeGen/Hexagon/brev_st.ll =================================================================== --- test/CodeGen/Hexagon/brev_st.ll +++ test/CodeGen/Hexagon/brev_st.ll @@ -26,7 +26,7 @@ %0 = bitcast i16* %arrayidx to i8* %sub = sub i32 13, %shr2 %shl = shl i32 1, %sub -; CHECK: memd(r{{[0-9]*}} ++ m{{[0-1]}}:brev) +; CHECK: memd(r{{[0-9]*}}++m{{[0-1]}}:brev) %1 = tail call i8* 
@llvm.hexagon.brev.std(i8* %0, i64 undef, i32 %shl) ret i64 0 } @@ -42,7 +42,7 @@ %0 = bitcast i16* %arrayidx to i8* %sub = sub i32 14, %shr1 %shl = shl i32 1, %sub -; CHECK: memw(r{{[0-9]*}} ++ m{{[0-1]}}:brev) +; CHECK: memw(r{{[0-9]*}}++m{{[0-1]}}:brev) %1 = tail call i8* @llvm.hexagon.brev.stw(i8* %0, i32 undef, i32 %shl) ret i32 0 } @@ -58,7 +58,7 @@ %0 = bitcast i16* %arrayidx to i8* %sub = sub i32 15, %shr2 %shl = shl i32 1, %sub -; CHECK: memh(r{{[0-9]*}} ++ m{{[0-1]}}:brev) +; CHECK: memh(r{{[0-9]*}}++m{{[0-1]}}:brev) %1 = tail call i8* @llvm.hexagon.brev.sth(i8* %0, i32 0, i32 %shl) ret i16 0 } @@ -74,7 +74,7 @@ %0 = bitcast i16* %arrayidx to i8* %sub = sub i32 15, %shr2 %shl = shl i32 1, %sub -; CHECK: memh(r{{[0-9]*}} ++ m{{[0-1]}}:brev){{ *}}={{ *}}r{{[0-9]*}}.h +; CHECK: memh(r{{[0-9]*}}++m{{[0-1]}}:brev)=r{{[0-9]*}}.h %1 = tail call i8* @llvm.hexagon.brev.sthhi(i8* %0, i32 0, i32 %shl) ret i16 0 } @@ -89,7 +89,7 @@ %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom %0 = bitcast i16* %arrayidx to i8* %sub = sub nsw i32 16, %shr2 - ; CHECK: memb(r{{[0-9]*}} ++ m{{[0-1]}}:brev) + ; CHECK: memb(r{{[0-9]*}}++m{{[0-1]}}:brev) %shl = shl i32 1, %sub %1 = tail call i8* @llvm.hexagon.brev.stb(i8* %0, i32 0, i32 %shl) ret i8 0 Index: test/CodeGen/Hexagon/builtin-prefetch-offset.ll =================================================================== --- test/CodeGen/Hexagon/builtin-prefetch-offset.ll +++ test/CodeGen/Hexagon/builtin-prefetch-offset.ll @@ -1,6 +1,6 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s ; Check for the immediate offset. It must be a multiple of 8. -; CHECK: dcfetch({{.*}}+{{ *}}#8) +; CHECK: dcfetch({{.*}}+#8) ; In 6.2 (which supports v4+ only), we generate indexed dcfetch in all cases ; (unlike in 6.1, which supported v2, where dcfetch did not allow an immediate ; offset). @@ -9,7 +9,7 @@ ; possible one). Check for #0 anyways, if the test fails with a false ; positive, the second check can be eliminated, or rewritten, and in the ; meantime it can help catch real problems. -; CHECK: dcfetch({{.*}}+{{ *}}#0) +; CHECK: dcfetch({{.*}}+#0) target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" target triple = "hexagon" Index: test/CodeGen/Hexagon/calling-conv-2.ll =================================================================== --- test/CodeGen/Hexagon/calling-conv-2.ll +++ test/CodeGen/Hexagon/calling-conv-2.ll @@ -3,7 +3,7 @@ %struct.test_struct = type { i32, i8, i64 } -; CHECK-ONE: r1 = #45 +; CHECK-ONE: r1=#45 define void @foo(%struct.test_struct* noalias nocapture sret %agg.result, i32 %a) #0 { entry: call void @bar(%struct.test_struct* sret %agg.result, i32 45) #2 Index: test/CodeGen/Hexagon/callr-dep-edge.ll =================================================================== --- test/CodeGen/Hexagon/callr-dep-edge.ll +++ test/CodeGen/Hexagon/callr-dep-edge.ll @@ -5,7 +5,7 @@ @fp = common global i32 (...)* null, align 4 -; CHECK: r0 = memw +; CHECK: r0=memw ; CHECK: { ; CHECK: callr r0 Index: test/CodeGen/Hexagon/cext-check.ll =================================================================== --- test/CodeGen/Hexagon/cext-check.ll +++ test/CodeGen/Hexagon/cext-check.ll @@ -2,10 +2,10 @@ ; Check that we constant extended instructions only when necessary. 
define i32 @cext_test1(i32* %a) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}##8000) -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##300000) -; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}##4092) -; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##300) +; CHECK: r{{[0-9]+}}=memw(r{{[0-9]+}}+##8000) +; CHECK: r{{[0-9]+}}=add(r{{[0-9]+}},##300000) +; CHECK-NOT: r{{[0-9]+}}=memw(r{{[0-9]+}}+##4092) +; CHECK-NOT: r{{[0-9]+}}=add(r{{[0-9]+}},##300) entry: %0 = load i32, i32* %a, align 4 %tobool = icmp ne i32 %0, 0 Index: test/CodeGen/Hexagon/cext-valid-packet1.ll =================================================================== --- test/CodeGen/Hexagon/cext-valid-packet1.ll +++ test/CodeGen/Hexagon/cext-valid-packet1.ll @@ -3,8 +3,8 @@ ; Check that the packetizer generates valid packets with constant ; extended instructions. ; CHECK: { -; CHECK-NEXT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}, ##{{[0-9]+}}) -; CHECK-NEXT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}, ##{{[0-9]+}}) +; CHECK-NEXT: r{{[0-9]+}}=add(r{{[0-9]+}},##{{[0-9]+}}) +; CHECK-NEXT: r{{[0-9]+}}=add(r{{[0-9]+}},##{{[0-9]+}}) ; CHECK-NEXT: } define i32 @check-packet1(i32 %a, i32 %b, i32 %c) nounwind readnone { Index: test/CodeGen/Hexagon/cext.ll =================================================================== --- test/CodeGen/Hexagon/cext.ll +++ test/CodeGen/Hexagon/cext.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s -; CHECK: memub(r{{[0-9]+}}{{ *}}<<{{ *}}#1{{ *}}+{{ *}}##a) +; CHECK: memub(r{{[0-9]+}}<<#1+##a) @a = external global [5 x [2 x i8]] Index: test/CodeGen/Hexagon/cexti16.ll =================================================================== --- test/CodeGen/Hexagon/cexti16.ll +++ test/CodeGen/Hexagon/cexti16.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s -; CHECK: memuh(r{{[0-9]+}}{{ *}}<<{{ *}}#2{{ *}}+{{ *}}##a) +; CHECK: memuh(r{{[0-9]+}}<<#2+##a) @a = external global [5 x [2 x i16]] Index: test/CodeGen/Hexagon/circ-load-isel.ll =================================================================== --- test/CodeGen/Hexagon/circ-load-isel.ll +++ test/CodeGen/Hexagon/circ-load-isel.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s -; CHECK: = memw{{.*}}circ +; CHECK: =memw{{.*}}circ target triple = "hexagon" Index: test/CodeGen/Hexagon/circ_ld.ll =================================================================== --- test/CodeGen/Hexagon/circ_ld.ll +++ test/CodeGen/Hexagon/circ_ld.ll @@ -26,7 +26,7 @@ %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom %0 = bitcast i16* %arrayidx to i8* %or = or i32 %shr1, 33554432 -; CHECK: = memb(r{{[0-9]*.}}++{{.}}#-1:circ(m{{[0-1]}})) +; CHECK: =memb(r{{[0-9]*.}}++#-1:circ(m{{[0-1]}})) %1 = call i8* @llvm.hexagon.circ.ldb(i8* %0, i8* %inputLR, i32 %or, i32 -1) %2 = load i8, i8* %inputLR, align 1, !tbaa !0 ret i8 %2 @@ -45,7 +45,7 @@ %1 = bitcast i64* %inputLR to i8* %shl = shl nuw nsw i32 %shr1, 3 %or = or i32 %shl, 83886080 -; CHECK: = memd(r{{[0-9]*.}}++{{.}}#-8:circ(m{{[0-1]}})) +; CHECK: =memd(r{{[0-9]*.}}++#-8:circ(m{{[0-1]}})) %2 = call i8* @llvm.hexagon.circ.ldd(i8* %0, i8* %1, i32 %or, i32 -8) %3 = bitcast i8* %1 to i64* %4 = load i64, i64* %3, align 8, !tbaa !0 @@ -64,7 +64,7 @@ %0 = bitcast i16* %arrayidx to i8* %1 = bitcast i16* %inputLR to i8* %or = or i32 %shr1, 50331648 -; CHECK: = memh(r{{[0-9]*.}}++{{.}}#-2:circ(m{{[0-1]}})) +; CHECK: =memh(r{{[0-9]*.}}++#-2:circ(m{{[0-1]}})) %2 = call i8* @llvm.hexagon.circ.ldh(i8* %0, 
i8* %1, i32 %or, i32 -2) %3 = bitcast i8* %1 to i16* %4 = load i16, i16* %3, align 2, !tbaa !2 @@ -82,7 +82,7 @@ %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom %0 = bitcast i16* %arrayidx to i8* %or = or i32 %shr1, 33554432 -; CHECK: = memub(r{{[0-9]*.}}++{{.}}#-1:circ(m{{[0-1]}})) +; CHECK: =memub(r{{[0-9]*.}}++#-1:circ(m{{[0-1]}})) %1 = call i8* @llvm.hexagon.circ.ldub(i8* %0, i8* %inputLR, i32 %or, i32 -1) %2 = load i8, i8* %inputLR, align 1, !tbaa !0 ret i8 %2 @@ -100,7 +100,7 @@ %0 = bitcast i16* %arrayidx to i8* %1 = bitcast i16* %inputLR to i8* %or = or i32 %shr1, 50331648 -; CHECK: = memuh(r{{[0-9]*.}}++{{.}}#-2:circ(m{{[0-1]}})) +; CHECK: =memuh(r{{[0-9]*.}}++#-2:circ(m{{[0-1]}})) %2 = call i8* @llvm.hexagon.circ.lduh(i8* %0, i8* %1, i32 %or, i32 -2) %3 = bitcast i8* %1 to i16* %4 = load i16, i16* %3, align 2, !tbaa !2 @@ -120,7 +120,7 @@ %1 = bitcast i32* %inputLR to i8* %shl = shl nuw nsw i32 %shr1, 2 %or = or i32 %shl, 67108864 -; CHECK: = memw(r{{[0-9]*.}}++{{.}}#-4:circ(m{{[0-1]}})) +; CHECK: =memw(r{{[0-9]*.}}++#-4:circ(m{{[0-1]}})) %2 = call i8* @llvm.hexagon.circ.ldw(i8* %0, i8* %1, i32 %or, i32 -4) %3 = bitcast i8* %1 to i32* %4 = load i32, i32* %3, align 4, !tbaa !3 Index: test/CodeGen/Hexagon/circ_ldw.ll =================================================================== --- test/CodeGen/Hexagon/circ_ldw.ll +++ test/CodeGen/Hexagon/circ_ldw.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s -; CHECK: r{{[0-9]*}} = memw(r{{[0-9]*.}}++{{.}}#-4:circ(m0)) +; CHECK: r{{[0-9]*}}=memw(r{{[0-9]*.}}++#-4:circ(m0)) %union.vect64 = type { i64 } Index: test/CodeGen/Hexagon/circ_st.ll =================================================================== --- test/CodeGen/Hexagon/circ_st.ll +++ test/CodeGen/Hexagon/circ_st.ll @@ -23,7 +23,7 @@ %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom %0 = bitcast i16* %arrayidx to i8* %or = or i32 %shr2, 33554432 -; CHECK: memb(r{{[0-9]*}}{{.}}++{{.}}#-1:circ(m{{[0-1]}})) +; CHECK: memb(r{{[0-9]*}}{{.}}++#-1:circ(m{{[0-1]}})) %1 = tail call i8* @llvm.hexagon.circ.stb(i8* %0, i32 0, i32 %or, i32 -1) ret i8 0 } @@ -39,7 +39,7 @@ %0 = bitcast i16* %arrayidx to i8* %shl = shl nuw nsw i32 %shr1, 3 %or = or i32 %shl, 83886080 -; CHECK: memd(r{{[0-9]*}}{{.}}++{{.}}#-8:circ(m{{[0-1]}})) +; CHECK: memd(r{{[0-9]*}}{{.}}++#-8:circ(m{{[0-1]}})) %1 = tail call i8* @llvm.hexagon.circ.std(i8* %0, i64 undef, i32 %or, i32 -8) ret i64 0 } @@ -54,7 +54,7 @@ %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom %0 = bitcast i16* %arrayidx to i8* %or = or i32 %shr2, 50331648 -; CHECK: memh(r{{[0-9]*}}{{.}}++{{.}}#-2:circ(m{{[0-1]}})) +; CHECK: memh(r{{[0-9]*}}{{.}}++#-2:circ(m{{[0-1]}})) %1 = tail call i8* @llvm.hexagon.circ.sth(i8* %0, i32 0, i32 %or, i32 -2) ret i16 0 } @@ -69,7 +69,7 @@ %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom %0 = bitcast i16* %arrayidx to i8* %or = or i32 %shr2, 50331648 -; CHECK: memh(r{{[0-9]*}}{{.}}++{{.}}#-2:circ(m{{[0-1]}})){{ *}}={{ *}}r{{[0-9]*}}.h +; CHECK: memh(r{{[0-9]*}}{{.}}++#-2:circ(m{{[0-1]}}))=r{{[0-9]*}}.h %1 = tail call i8* @llvm.hexagon.circ.sthhi(i8* %0, i32 0, i32 %or, i32 -2) ret i16 0 } @@ -85,7 +85,7 @@ %0 = bitcast i16* %arrayidx to i8* %shl = shl nuw nsw i32 %shr1, 2 %or = or i32 %shl, 67108864 -; CHECK: memw(r{{[0-9]*}}{{.}}++{{.}}#-4:circ(m{{[0-1]}})) +; CHECK: memw(r{{[0-9]*}}{{.}}++#-4:circ(m{{[0-1]}})) %1 = tail call i8* @llvm.hexagon.circ.stw(i8* %0, i32 undef, i32 %or, i32 -4) ret i32 0 } Index: 
test/CodeGen/Hexagon/clr_set_toggle.ll =================================================================== --- test/CodeGen/Hexagon/clr_set_toggle.ll +++ test/CodeGen/Hexagon/clr_set_toggle.ll @@ -4,7 +4,7 @@ define i32 @my_clrbit(i32 %x) nounwind { entry: ; CHECK-LABEL: my_clrbit -; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31) +; CHECK: r{{[0-9]+}}=clrbit(r{{[0-9]+}},#31) %x.addr = alloca i32, align 4 store i32 %x, i32* %x.addr, align 4 %0 = load i32, i32* %x.addr, align 4 @@ -15,7 +15,7 @@ define i64 @my_clrbit2(i64 %x) nounwind { entry: ; CHECK-LABEL: my_clrbit2 -; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31) +; CHECK: r{{[0-9]+}}=clrbit(r{{[0-9]+}},#31) %x.addr = alloca i64, align 8 store i64 %x, i64* %x.addr, align 8 %0 = load i64, i64* %x.addr, align 8 @@ -26,7 +26,7 @@ define i64 @my_clrbit3(i64 %x) nounwind { entry: ; CHECK-LABEL: my_clrbit3 -; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31) +; CHECK: r{{[0-9]+}}=clrbit(r{{[0-9]+}},#31) %x.addr = alloca i64, align 8 store i64 %x, i64* %x.addr, align 8 %0 = load i64, i64* %x.addr, align 8 @@ -37,7 +37,7 @@ define i32 @my_clrbit4(i32 %x) nounwind { entry: ; CHECK-LABEL: my_clrbit4 -; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #13) +; CHECK: r{{[0-9]+}}=clrbit(r{{[0-9]+}},#13) %x.addr = alloca i32, align 4 store i32 %x, i32* %x.addr, align 4 %0 = load i32, i32* %x.addr, align 4 @@ -48,7 +48,7 @@ define i64 @my_clrbit5(i64 %x) nounwind { entry: ; CHECK-LABEL: my_clrbit5 -; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #13) +; CHECK: r{{[0-9]+}}=clrbit(r{{[0-9]+}},#13) %x.addr = alloca i64, align 8 store i64 %x, i64* %x.addr, align 8 %0 = load i64, i64* %x.addr, align 8 @@ -59,7 +59,7 @@ define i64 @my_clrbit6(i64 %x) nounwind { entry: ; CHECK-LABEL: my_clrbit6 -; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #27) +; CHECK: r{{[0-9]+}}=clrbit(r{{[0-9]+}},#27) %x.addr = alloca i64, align 8 store i64 %x, i64* %x.addr, align 8 %0 = load i64, i64* %x.addr, align 8 @@ -70,7 +70,7 @@ define zeroext i16 @my_setbit(i16 zeroext %crc) nounwind { entry: ; CHECK-LABEL: my_setbit -; CHECK: memh(r{{[0-9]+}}+#{{[0-9]+}}){{ *}}={{ *}}setbit(#15) +; CHECK: memh(r{{[0-9]+}}+#{{[0-9]+}})=setbit(#15) %crc.addr = alloca i16, align 2 store i16 %crc, i16* %crc.addr, align 2 %0 = load i16, i16* %crc.addr, align 2 @@ -85,7 +85,7 @@ define i32 @my_setbit2(i32 %x) nounwind { entry: ; CHECK-LABEL: my_setbit2 -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #15) +; CHECK: r{{[0-9]+}}=setbit(r{{[0-9]+}},#15) %x.addr = alloca i32, align 4 store i32 %x, i32* %x.addr, align 4 %0 = load i32, i32* %x.addr, align 4 @@ -96,7 +96,7 @@ define i64 @my_setbit3(i64 %x) nounwind { entry: ; CHECK-LABEL: my_setbit3 -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #15) +; CHECK: r{{[0-9]+}}=setbit(r{{[0-9]+}},#15) %x.addr = alloca i64, align 8 store i64 %x, i64* %x.addr, align 8 %0 = load i64, i64* %x.addr, align 8 @@ -107,7 +107,7 @@ define i32 @my_setbit4(i32 %x) nounwind { entry: ; CHECK-LABEL: my_setbit4 -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #31) +; CHECK: r{{[0-9]+}}=setbit(r{{[0-9]+}},#31) %x.addr = alloca i32, align 4 store i32 %x, i32* %x.addr, align 4 %0 = load i32, i32* %x.addr, align 4 @@ -118,7 +118,7 @@ define i64 @my_setbit5(i64 %x) nounwind { entry: ; CHECK-LABEL: my_setbit5 -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #13) +; CHECK: r{{[0-9]+}}=setbit(r{{[0-9]+}},#13) %x.addr = alloca i64, align 8 store i64 %x, i64* %x.addr, align 8 %0 = load i64, i64* %x.addr, align 8 @@ -129,7 +129,7 @@ define zeroext i16 @my_togglebit(i16 zeroext %crc) 
nounwind { entry: ; CHECK-LABEL: my_togglebit -; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15) +; CHECK: r{{[0-9]+}}=togglebit(r{{[0-9]+}},#15) %crc.addr = alloca i16, align 2 store i16 %crc, i16* %crc.addr, align 2 %0 = load i16, i16* %crc.addr, align 2 @@ -144,7 +144,7 @@ define i32 @my_togglebit2(i32 %x) nounwind { entry: ; CHECK-LABEL: my_togglebit2 -; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15) +; CHECK: r{{[0-9]+}}=togglebit(r{{[0-9]+}},#15) %x.addr = alloca i32, align 4 store i32 %x, i32* %x.addr, align 4 %0 = load i32, i32* %x.addr, align 4 @@ -155,7 +155,7 @@ define i64 @my_togglebit3(i64 %x) nounwind { entry: ; CHECK-LABEL: my_togglebit3 -; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15) +; CHECK: r{{[0-9]+}}=togglebit(r{{[0-9]+}},#15) %x.addr = alloca i64, align 8 store i64 %x, i64* %x.addr, align 8 %0 = load i64, i64* %x.addr, align 8 @@ -166,7 +166,7 @@ define i64 @my_togglebit4(i64 %x) nounwind { entry: ; CHECK-LABEL: my_togglebit4 -; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #20) +; CHECK: r{{[0-9]+}}=togglebit(r{{[0-9]+}},#20) %x.addr = alloca i64, align 8 store i64 %x, i64* %x.addr, align 8 %0 = load i64, i64* %x.addr, align 8 Index: test/CodeGen/Hexagon/cmp-to-genreg.ll =================================================================== --- test/CodeGen/Hexagon/cmp-to-genreg.ll +++ test/CodeGen/Hexagon/cmp-to-genreg.ll @@ -2,7 +2,7 @@ ; Check that we generate compare to general register. define i32 @compare1(i32 %a) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}cmp.eq(r{{[0-9]+}},{{ *}}#120) +; CHECK: r{{[0-9]+}}=cmp.eq(r{{[0-9]+}},#120) entry: %cmp = icmp eq i32 %a, 120 %conv = zext i1 %cmp to i32 @@ -10,7 +10,7 @@ } define i32 @compare2(i32 %a) nounwind readnone { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}!cmp.eq(r{{[0-9]+}},{{ *}}#120) +; CHECK: r{{[0-9]+}}=!cmp.eq(r{{[0-9]+}},#120) entry: %cmp = icmp ne i32 %a, 120 %conv = zext i1 %cmp to i32 @@ -18,7 +18,7 @@ } define i32 @compare3(i32 %a, i32 %b) nounwind readnone { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}cmp.eq(r{{[0-9]+}},{{ *}}r{{[0-9]+}}) +; CHECK: r{{[0-9]+}}=cmp.eq(r{{[0-9]+}},r{{[0-9]+}}) entry: %cmp = icmp eq i32 %a, %b %conv = zext i1 %cmp to i32 @@ -26,7 +26,7 @@ } define i32 @compare4(i32 %a, i32 %b) nounwind readnone { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}!cmp.eq(r{{[0-9]+}},{{ *}}r{{[0-9]+}}) +; CHECK: r{{[0-9]+}}=!cmp.eq(r{{[0-9]+}},r{{[0-9]+}}) entry: %cmp = icmp ne i32 %a, %b %conv = zext i1 %cmp to i32 Index: test/CodeGen/Hexagon/cmp-to-predreg.ll =================================================================== --- test/CodeGen/Hexagon/cmp-to-predreg.ll +++ test/CodeGen/Hexagon/cmp-to-predreg.ll @@ -2,7 +2,7 @@ ; Check that we generate compare to predicate register. 
define i32 @compare1(i32 %a, i32 %b) nounwind { -; CHECK: p{{[0-3]}}{{ *}}={{ *[!]?}}cmp.eq(r{{[0-9]+}},{{ *}}r{{[0-9]+}}) +; CHECK: p{{[0-3]}}={{ *[!]?}}cmp.eq(r{{[0-9]+}},r{{[0-9]+}}) entry: %cmp = icmp ne i32 %a, %b %add = add nsw i32 %a, %b @@ -12,7 +12,7 @@ } define i32 @compare2(i32 %a) nounwind { -; CHECK: p{{[0-3]}}{{ *}}={{ *[!]?}}cmp.eq(r{{[0-9]+}},{{ *}}#10) +; CHECK: p{{[0-3]}}={{ *[!]?}}cmp.eq(r{{[0-9]+}},#10) entry: %cmp = icmp ne i32 %a, 10 %add = add nsw i32 %a, 10 @@ -22,7 +22,7 @@ } define i32 @compare3(i32 %a, i32 %b) nounwind { -; CHECK: p{{[0-3]}}{{ *}}={{ *}}cmp.gt(r{{[0-9]+}},{{ *}}r{{[0-9]+}}) +; CHECK: p{{[0-3]}}=cmp.gt(r{{[0-9]+}},r{{[0-9]+}}) entry: %cmp = icmp sgt i32 %a, %b %sub = sub nsw i32 %a, %b @@ -32,7 +32,7 @@ } define i32 @compare4(i32 %a) nounwind { -; CHECK: p{{[0-3]}}{{ *}}={{ *}}cmp.gt(r{{[0-9]+}},{{ *}}#10) +; CHECK: p{{[0-3]}}=cmp.gt(r{{[0-9]+}},#10) entry: %cmp = icmp sgt i32 %a, 10 %sub = sub nsw i32 %a, 10 Index: test/CodeGen/Hexagon/cmp.ll =================================================================== --- test/CodeGen/Hexagon/cmp.ll +++ test/CodeGen/Hexagon/cmp.ll @@ -9,7 +9,7 @@ %1 = call i32 @llvm.hexagon.C2.cmpeq(i32 %0, i32 1) ret i32 %1 } -; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, r{{[0-9]}}) +; CHECK: { p{{[0-3]}}=cmp.eq(r{{[0-9]}},r{{[0-9]}}) ; Function Attrs: nounwind readnone declare i32 @llvm.hexagon.C2.cmpeq(i32, i32) #1 @@ -23,7 +23,7 @@ %1 = call i32 @llvm.hexagon.C2.cmpgt(i32 %0, i32 2) ret i32 %1 } -; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, r{{[0-9]}}) +; CHECK: { p{{[0-3]}}=cmp.gt(r{{[0-9]}},r{{[0-9]}}) ; Function Attrs: nounwind readnone declare i32 @llvm.hexagon.C2.cmpgt(i32, i32) #1 @@ -37,7 +37,7 @@ %1 = call i32 @llvm.hexagon.C2.cmpgtu(i32 %0, i32 3) ret i32 %1 } -; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, r{{[0-9]}}) +; CHECK: { p{{[0-3]}}=cmp.gtu(r{{[0-9]}},r{{[0-9]}}) ; Function Attrs: nounwind readnone declare i32 @llvm.hexagon.C2.cmpgtu(i32, i32) #1 @@ -51,7 +51,7 @@ %1 = call i32 @llvm.hexagon.C2.cmplt(i32 %0, i32 4) ret i32 %1 } -; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, r{{[0-9]}}) +; CHECK: { p{{[0-3]}}=cmp.gt(r{{[0-9]}},r{{[0-9]}}) ; Function Attrs: nounwind readnone declare i32 @llvm.hexagon.C2.cmplt(i32, i32) #1 @@ -65,7 +65,7 @@ %1 = call i32 @llvm.hexagon.C2.cmpltu(i32 %0, i32 5) ret i32 %1 } -; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, r{{[0-9]}}) +; CHECK: { p{{[0-3]}}=cmp.gtu(r{{[0-9]}},r{{[0-9]}}) ; Function Attrs: nounwind readnone declare i32 @llvm.hexagon.C2.cmpltu(i32, i32) #1 @@ -79,7 +79,7 @@ %1 = call i32 @llvm.hexagon.C2.cmpeqi(i32 %0, i32 10) ret i32 %1 } -; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, {{.*}}#10) +; CHECK: { p{{[0-3]}}=cmp.eq(r{{[0-9]}},#10) ; Function Attrs: nounwind readnone declare i32 @llvm.hexagon.C2.cmpeqi(i32, i32) #1 @@ -93,7 +93,7 @@ %1 = call i32 @llvm.hexagon.C2.cmpgti(i32 %0, i32 20) ret i32 %1 } -; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, {{.*}}#20) +; CHECK: { p{{[0-3]}}=cmp.gt(r{{[0-9]}},#20) ; Function Attrs: nounwind readnone declare i32 @llvm.hexagon.C2.cmpgti(i32, i32) #1 @@ -107,7 +107,7 @@ %1 = call i32 @llvm.hexagon.C2.cmpgtui(i32 %0, i32 40) ret i32 %1 } -; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, {{.*}}#40) +; CHECK: { p{{[0-3]}}=cmp.gtu(r{{[0-9]}},#40) ; Function Attrs: nounwind readnone declare i32 @llvm.hexagon.C2.cmpgtui(i32, i32) #1 @@ -121,7 +121,7 @@ %1 = call i32 @llvm.hexagon.C2.cmpgei(i32 %0, i32 3) ret i32 %1 } -; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, {{.*}}#2) +; CHECK: { p{{[0-3]}}=cmp.gt(r{{[0-9]}},#2) ; Function Attrs: nounwind 
readnone declare i32 @llvm.hexagon.C2.cmpgei(i32, i32) #1 @@ -135,7 +135,7 @@ %1 = call i32 @llvm.hexagon.C2.cmpgeui(i32 %0, i32 3) ret i32 %1 } -; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, {{.*}}#2) +; CHECK: { p{{[0-3]}}=cmp.gtu(r{{[0-9]}},#2) ; Function Attrs: nounwind readnone declare i32 @llvm.hexagon.C2.cmpgeui(i32, i32) #1 @@ -149,7 +149,7 @@ %1 = call i32 @llvm.hexagon.C2.cmpgeui(i32 %0, i32 0) ret i32 %1 } -; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, r{{[0-9]}}) +; CHECK: { p{{[0-3]}}=cmp.eq(r{{[0-9]}},r{{[0-9]}}) attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } Index: test/CodeGen/Hexagon/cmpb-eq.ll =================================================================== --- test/CodeGen/Hexagon/cmpb-eq.ll +++ test/CodeGen/Hexagon/cmpb-eq.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s -; CHECK-NOT: cmpb.eq(r{{[0-9]+}}, #-1) +; CHECK-NOT: cmpb.eq(r{{[0-9]+}},#-1) target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" target triple = "hexagon" Index: test/CodeGen/Hexagon/combine.ll =================================================================== --- test/CodeGen/Hexagon/combine.ll +++ test/CodeGen/Hexagon/combine.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr -hexagon-bit=0 < %s | FileCheck %s -; CHECK: combine(r{{[0-9]+}}, r{{[0-9]+}}) +; CHECK: combine(r{{[0-9]+}},r{{[0-9]+}}) @j = external global i32 @k = external global i64 Index: test/CodeGen/Hexagon/compound.ll =================================================================== --- test/CodeGen/Hexagon/compound.ll +++ test/CodeGen/Hexagon/compound.ll @@ -1,6 +1,6 @@ ; RUN: llc -march=hexagon -filetype=obj -o - %s | llvm-objdump -d - | FileCheck %s -; CHECK: p0 = cmp.gt(r0,#-1); if (!p0.new) jump:nt +; CHECK: p0=cmp.gt(r0,#-1); if (!p0.new) jump:nt declare void @a() declare void @b() Index: test/CodeGen/Hexagon/constp-combine-neg.ll =================================================================== --- test/CodeGen/Hexagon/constp-combine-neg.ll +++ test/CodeGen/Hexagon/constp-combine-neg.ll @@ -19,9 +19,9 @@ ; The instructions seem to be in a different order in the .s file than ; the corresponding values in the .ll file, so just run the test three ; times and each time test for a different instruction. -; CHECK-TEST1: combine(#-2, #3) -; CHECK-TEST2: combine(#6, #-4) -; CHECK-TEST3: combine(#-10, #-8) +; CHECK-TEST1: combine(#-2,#3) +; CHECK-TEST2: combine(#6,#-4) +; CHECK-TEST3: combine(#-10,#-8) attributes #0 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" } Index: test/CodeGen/Hexagon/convertdptoint.ll =================================================================== --- test/CodeGen/Hexagon/convertdptoint.ll +++ test/CodeGen/Hexagon/convertdptoint.ll @@ -2,7 +2,7 @@ ; Check that we generate conversion from double precision floating point ; to 32-bit int value in IEEE complaint mode in V5. 
-; CHECK: r{{[0-9]+}} = convert_df2w(r{{[0-9]+}}:{{[0-9]+}}):chop +; CHECK: r{{[0-9]+}}=convert_df2w(r{{[0-9]+}}:{{[0-9]+}}):chop define i32 @main() nounwind { entry: Index: test/CodeGen/Hexagon/convertdptoll.ll =================================================================== --- test/CodeGen/Hexagon/convertdptoll.ll +++ test/CodeGen/Hexagon/convertdptoll.ll @@ -2,7 +2,7 @@ ; Check that we generate conversion from double precision floating point ; to 64-bit integer value in IEEE complaint mode in V5. -; CHECK: r{{[0-9]+}}:{{[0-9]+}} = convert_df2d(r{{[0-9]+}}:{{[0-9]+}}):chop +; CHECK: r{{[0-9]+}}:{{[0-9]+}}=convert_df2d(r{{[0-9]+}}:{{[0-9]+}}):chop define i32 @main() nounwind { entry: Index: test/CodeGen/Hexagon/convertsptoint.ll =================================================================== --- test/CodeGen/Hexagon/convertsptoint.ll +++ test/CodeGen/Hexagon/convertsptoint.ll @@ -2,7 +2,7 @@ ; Check that we generate conversion from single precision floating point ; to 32-bit int value in IEEE complaint mode in V5. -; CHECK: r{{[0-9]+}} = convert_sf2w(r{{[0-9]+}}):chop +; CHECK: r{{[0-9]+}}=convert_sf2w(r{{[0-9]+}}):chop define i32 @main() nounwind { entry: Index: test/CodeGen/Hexagon/convertsptoll.ll =================================================================== --- test/CodeGen/Hexagon/convertsptoll.ll +++ test/CodeGen/Hexagon/convertsptoll.ll @@ -2,7 +2,7 @@ ; Check that we generate conversion from single precision floating point ; to 64-bit int value in IEEE complaint mode in V5. -; CHECK: r{{[0-9]+}}:{{[0-9]+}} = convert_sf2d(r{{[0-9]+}}) +; CHECK: r{{[0-9]+}}:{{[0-9]+}}=convert_sf2d(r{{[0-9]+}}) define i32 @main() nounwind { entry: Index: test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll =================================================================== --- test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll +++ test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll @@ -4,7 +4,7 @@ ; CHECK-DAG: cl0({{r[0-9]*:[0-9]*}}) ; CHECK-DAG: ct0({{r[0-9]*}}) ; CHECK-DAG: cl0({{r[0-9]*}}) -; CHECK-DAG: r{{[0-9]+}} += lsr(r{{[0-9]+}}, #4) +; CHECK-DAG: r{{[0-9]+}}+=lsr(r{{[0-9]+}},#4) define i32 @foo(i64 %a, i32 %b) nounwind { entry: Index: test/CodeGen/Hexagon/dead-store-stack.ll =================================================================== --- test/CodeGen/Hexagon/dead-store-stack.ll +++ test/CodeGen/Hexagon/dead-store-stack.ll @@ -1,7 +1,7 @@ ; RUN: llc -O2 -march=hexagon < %s | FileCheck %s ; CHECK: ParseFunc: -; CHECK: r[[ARG0:[0-9]+]] = memuh(r[[ARG1:[0-9]+]] + #[[OFFSET:[0-9]+]]) -; CHECK: memw(r[[ARG1]]+#[[OFFSET]]) = r[[ARG0]] +; CHECK: r[[ARG0:[0-9]+]]=memuh(r[[ARG1:[0-9]+]]+#[[OFFSET:[0-9]+]]) +; CHECK: memw(r[[ARG1]]+#[[OFFSET]])=r[[ARG0]] @.str.3 = external unnamed_addr constant [8 x i8], align 1 ; Function Attrs: nounwind Index: test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll =================================================================== --- test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll +++ test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll @@ -2,7 +2,7 @@ ; Check that we generate conversion from double precision floating point ; to 32-bit int value in IEEE rounding to the nearest mode in V5. 
-; CHECK: r{{[0-9]+}} = convert_df2w(r{{[0-9]+}}:{{[0-9]+}}) +; CHECK: r{{[0-9]+}}=convert_df2w(r{{[0-9]+}}:{{[0-9]+}}) define i32 @main() nounwind { entry: Index: test/CodeGen/Hexagon/early-if-vecpi.ll =================================================================== --- test/CodeGen/Hexagon/early-if-vecpi.ll +++ test/CodeGen/Hexagon/early-if-vecpi.ll @@ -4,7 +4,7 @@ ; Check that we can predicate base+offset vector stores. ; CHECK-LABEL: sammy -; CHECK: if{{.*}}vmem(r{{[0-9]+}}+#0) = +; CHECK: if{{.*}}vmem(r{{[0-9]+}}+#0)= define void @sammy(<16 x i32>* nocapture %p, <16 x i32>* nocapture readonly %q, i32 %n) #0 { entry: %0 = load <16 x i32>, <16 x i32>* %q, align 64 @@ -35,7 +35,7 @@ ; Check that we can predicate post-increment vector stores. ; CHECK-LABEL: danny -; CHECK: if{{.*}}vmem(r{{[0-9]+}}++#1) = +; CHECK: if{{.*}}vmem(r{{[0-9]+}}++#1)= define void @danny(<16 x i32>* nocapture %p, <16 x i32>* nocapture readonly %q, i32 %n) #0 { entry: %0 = load <16 x i32>, <16 x i32>* %q, align 64 Index: test/CodeGen/Hexagon/eh_return.ll =================================================================== --- test/CodeGen/Hexagon/eh_return.ll +++ test/CodeGen/Hexagon/eh_return.ll @@ -4,7 +4,7 @@ ; CHECK: deallocframe ; CHECK-NEXT: } ; CHECK-NEXT: { -; CHECK-NEXT: r29 = add(r29, r28) +; CHECK-NEXT: r29=add(r29,r28) ; CHECK-NEXT: } ; CHECK-NEXT: { ; CHECK-NEXT: jumpr r31 Index: test/CodeGen/Hexagon/expand-vstorerw-undef.ll =================================================================== --- test/CodeGen/Hexagon/expand-vstorerw-undef.ll +++ test/CodeGen/Hexagon/expand-vstorerw-undef.ll @@ -11,8 +11,8 @@ ; do generate a store for the defined part of W0: ; CHECK-LABEL: fred: -; CHECK: v[[REG:[0-9]+]] = vsplat -; CHECK: vmem(r29+#6) = v[[REG]] +; CHECK: v[[REG:[0-9]+]]=vsplat +; CHECK: vmem(r29+#6)=v[[REG]] target triple = "hexagon" Index: test/CodeGen/Hexagon/extload-combine.ll =================================================================== --- test/CodeGen/Hexagon/extload-combine.ll +++ test/CodeGen/Hexagon/extload-combine.ll @@ -15,8 +15,8 @@ ; Function Attrs: nounwind define i64 @short_test1() #0 { -; CHECK: [[VAR:r[0-9]+]]{{ *}}={{ *}}memuh(## -; CHECK: combine(#0, [[VAR]]) +; CHECK: [[VAR:r[0-9]+]]=memuh(## +; CHECK: combine(#0,[[VAR]]) entry: store i16 0, i16* @a, align 2 %0 = load i16, i16* @b, align 2 @@ -26,7 +26,7 @@ ; Function Attrs: nounwind define i64 @short_test2() #0 { -; CHECK: [[VAR1:r[0-9]+]]{{ *}}={{ *}}memh(## +; CHECK: [[VAR1:r[0-9]+]]=memh(## ; CHECK: sxtw([[VAR1]]) entry: store i16 0, i16* @a, align 2 @@ -37,8 +37,8 @@ ; Function Attrs: nounwind define i64 @char_test1() #0 { -; CHECK: [[VAR2:r[0-9]+]]{{ *}}={{ *}}memub(## -; CHECK: combine(#0, [[VAR2]]) +; CHECK: [[VAR2:r[0-9]+]]=memub(## +; CHECK: combine(#0,[[VAR2]]) entry: store i8 0, i8* @char_a, align 1 %0 = load i8, i8* @char_b, align 1 @@ -48,7 +48,7 @@ ; Function Attrs: nounwind define i64 @char_test2() #0 { -; CHECK: [[VAR3:r[0-9]+]]{{ *}}={{ *}}memb(## +; CHECK: [[VAR3:r[0-9]+]]=memb(## ; CHECK: sxtw([[VAR3]]) entry: store i8 0, i8* @char_a, align 1 @@ -59,8 +59,8 @@ ; Function Attrs: nounwind define i64 @int_test1() #0 { -; CHECK: [[VAR4:r[0-9]+]]{{ *}}={{ *}}memw(## -; CHECK: combine(#0, [[VAR4]]) +; CHECK: [[VAR4:r[0-9]+]]=memw(## +; CHECK: combine(#0,[[VAR4]]) entry: store i32 0, i32* @int_a, align 4 %0 = load i32, i32* @int_b, align 4 @@ -70,7 +70,7 @@ ; Function Attrs: nounwind define i64 @int_test2() #0 { -; CHECK: [[VAR5:r[0-9]+]]{{ *}}={{ *}}memw(## +; CHECK: [[VAR5:r[0-9]+]]=memw(## ; CHECK: 
sxtw([[VAR5]]) entry: store i32 0, i32* @int_a, align 4 Index: test/CodeGen/Hexagon/extract-basic.ll =================================================================== --- test/CodeGen/Hexagon/extract-basic.ll +++ test/CodeGen/Hexagon/extract-basic.ll @@ -1,8 +1,8 @@ ; RUN: llc -O2 -march=hexagon < %s | FileCheck %s -; CHECK-DAG: extractu(r{{[0-9]*}}, #3, #4) -; CHECK-DAG: extractu(r{{[0-9]*}}, #8, #7) -; CHECK-DAG: extractu(r{{[0-9]*}}, #8, #16) +; CHECK-DAG: extractu(r{{[0-9]*}},#3,#4) +; CHECK-DAG: extractu(r{{[0-9]*}},#8,#7) +; CHECK-DAG: extractu(r{{[0-9]*}},#8,#16) ; C source: ; typedef struct { Index: test/CodeGen/Hexagon/fadd.ll =================================================================== --- test/CodeGen/Hexagon/fadd.ll +++ test/CodeGen/Hexagon/fadd.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Check that we generate sp floating point add in V5. -; CHECK: r{{[0-9]+}} = sfadd(r{{[0-9]+}}, r{{[0-9]+}}) +; CHECK: r{{[0-9]+}}=sfadd(r{{[0-9]+}},r{{[0-9]+}}) define i32 @main() nounwind { entry: Index: test/CodeGen/Hexagon/fcmp.ll =================================================================== --- test/CodeGen/Hexagon/fcmp.ll +++ test/CodeGen/Hexagon/fcmp.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Check that we generate floating point compare in V5 -; CHECK: p{{[0-2]+}} = sfcmp.{{.}} +; CHECK: p{{[0-2]+}}=sfcmp.{{.}} define i32 @foo(float %y) nounwind { entry: Index: test/CodeGen/Hexagon/fixed-spill-mutable.ll =================================================================== --- test/CodeGen/Hexagon/fixed-spill-mutable.ll +++ test/CodeGen/Hexagon/fixed-spill-mutable.ll @@ -13,9 +13,9 @@ ; pair r17:16. ; ; Check that the store and the load are not in the same packet. 
-; CHECK: memd{{.*}} = r17:16 +; CHECK: memd{{.*}}=r17:16 ; CHECK: } -; CHECK: r17:16 = memd +; CHECK: r17:16=memd ; CHECK-LABEL: LBB0_1: target triple = "hexagon" Index: test/CodeGen/Hexagon/float-amode.ll =================================================================== --- test/CodeGen/Hexagon/float-amode.ll +++ test/CodeGen/Hexagon/float-amode.ll @@ -12,9 +12,9 @@ @a = common global float 0.000000e+00, align 4 ; CHECK-LABEL: test1 -; CHECK: [[REG11:(r[0-9]+)]]{{ *}}={{ *}}memw(r{{[0-9]+}} + r{{[0-9]+}}<<#2) -; CHECK: [[REG12:(r[0-9]+)]] += sfmpy({{.*}}[[REG11]] -; CHECK: memw(r{{[0-9]+}} + r{{[0-9]+}}<<#2) = [[REG12]].new +; CHECK: [[REG11:(r[0-9]+)]]=memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2) +; CHECK: [[REG12:(r[0-9]+)]]+=sfmpy({{.*}}[[REG11]] +; CHECK: memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2)=[[REG12]].new ; Function Attrs: norecurse nounwind define void @test1(%struct.matrix_params* nocapture readonly %params, i32 %col1) { @@ -35,9 +35,9 @@ } ; CHECK-LABEL: test2 -; CHECK: [[REG21:(r[0-9]+)]]{{ *}}={{ *}}memw(##globB+92) -; CHECK: [[REG22:(r[0-9]+)]] = sfadd({{.*}}[[REG21]] -; CHECK: memw(##globA+84) = [[REG22]] +; CHECK: [[REG21:(r[0-9]+)]]=memw(##globB+92) +; CHECK: [[REG22:(r[0-9]+)]]=sfadd({{.*}}[[REG21]] +; CHECK: memw(##globA+84)=[[REG22]] ; Function Attrs: norecurse nounwind define void @test2(%struct.matrix_params* nocapture readonly %params, i32 %col1) { @@ -54,9 +54,9 @@ } ; CHECK-LABEL: test3 -; CHECK: [[REG31:(r[0-9]+)]]{{ *}}={{ *}}memw(#b) -; CHECK: [[REG32:(r[0-9]+)]] = sfadd({{.*}}[[REG31]] -; CHECK: memw(#a) = [[REG32]] +; CHECK: [[REG31:(r[0-9]+)]]=memw(#b) +; CHECK: [[REG32:(r[0-9]+)]]=sfadd({{.*}}[[REG31]] +; CHECK: memw(#a)=[[REG32]] ; Function Attrs: norecurse nounwind define void @test3(%struct.matrix_params* nocapture readonly %params, i32 %col1) { @@ -73,9 +73,9 @@ } ; CHECK-LABEL: test4 -; CHECK: [[REG41:(r[0-9]+)]]{{ *}}={{ *}}memw(r0<<#2 + ##globB+52) -; CHECK: [[REG42:(r[0-9]+)]] = sfadd({{.*}}[[REG41]] -; CHECK: memw(r0<<#2 + ##globA+60) = [[REG42]] +; CHECK: [[REG41:(r[0-9]+)]]=memw(r0<<#2+##globB+52) +; CHECK: [[REG42:(r[0-9]+)]]=sfadd({{.*}}[[REG41]] +; CHECK: memw(r0<<#2+##globA+60)=[[REG42]] ; Function Attrs: noinline norecurse nounwind define void @test4(i32 %col1) { entry: Index: test/CodeGen/Hexagon/fmul.ll =================================================================== --- test/CodeGen/Hexagon/fmul.ll +++ test/CodeGen/Hexagon/fmul.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Check that we generate single precision floating point multiply in V5. 
-; CHECK: r{{[0-9]+}} = sfmpy(r{{[0-9]+}}, r{{[0-9]+}}) +; CHECK: r{{[0-9]+}}=sfmpy(r{{[0-9]+}},r{{[0-9]+}}) define i32 @main() nounwind { Index: test/CodeGen/Hexagon/frame.ll =================================================================== --- test/CodeGen/Hexagon/frame.ll +++ test/CodeGen/Hexagon/frame.ll @@ -4,7 +4,7 @@ @acc = external global i32 @num2 = external global i32 -; CHECK: allocframe +; CHECK: allocframe( ; CHECK: dealloc_return define i32 @foo() nounwind { Index: test/CodeGen/Hexagon/fsel.ll =================================================================== --- test/CodeGen/Hexagon/fsel.ll +++ test/CodeGen/Hexagon/fsel.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -O0 < %s | FileCheck %s ; CHECK-LABEL: danny: -; CHECK: mux(p0, r1, ##1065353216) +; CHECK: mux(p0,r1,##1065353216) define float @danny(i32 %x, float %f) #0 { %t = icmp sgt i32 %x, 0 @@ -10,7 +10,7 @@ } ; CHECK-LABEL: sammy: -; CHECK: mux(p0, ##1069547520, r1) +; CHECK: mux(p0,##1069547520,r1) define float @sammy(i32 %x, float %f) #0 { %t = icmp sgt i32 %x, 0 Index: test/CodeGen/Hexagon/fsub.ll =================================================================== --- test/CodeGen/Hexagon/fsub.ll +++ test/CodeGen/Hexagon/fsub.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Check that we generate sp floating point subtract in V5. -; CHECK: r{{[0-9]+}} = sfsub(r{{[0-9]+}}, r{{[0-9]+}}) +; CHECK: r{{[0-9]+}}=sfsub(r{{[0-9]+}},r{{[0-9]+}}) define i32 @main() nounwind { entry: Index: test/CodeGen/Hexagon/fusedandshift.ll =================================================================== --- test/CodeGen/Hexagon/fusedandshift.ll +++ test/CodeGen/Hexagon/fusedandshift.ll @@ -2,7 +2,7 @@ ; Check that we generate fused logical and with shift instruction. ; Disable "extract" generation, since it may eliminate the and/lsr. -; CHECK: r{{[0-9]+}} = and(#15, lsr(r{{[0-9]+}}, #{{[0-9]+}}) +; CHECK: r{{[0-9]+}}=and(#15,lsr(r{{[0-9]+}},#{{[0-9]+}}) define i32 @main(i16* %a, i16* %b) nounwind { entry: Index: test/CodeGen/Hexagon/gp-plus-offset-load.ll =================================================================== --- test/CodeGen/Hexagon/gp-plus-offset-load.ll +++ test/CodeGen/Hexagon/gp-plus-offset-load.ll @@ -36,7 +36,7 @@ } define void @loadHWord(i32 %val1, i32 %val2, i16* %ival) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memuh(##foo{{ *}}+{{ *}}2) +; CHECK: r{{[0-9]+}}=memuh(##foo+2) entry: %cmp = icmp sgt i32 %val1, %val2 br i1 %cmp, label %if.then, label %if.end Index: test/CodeGen/Hexagon/gp-rel.ll =================================================================== --- test/CodeGen/Hexagon/gp-rel.ll +++ test/CodeGen/Hexagon/gp-rel.ll @@ -7,9 +7,9 @@ define i32 @foo(i32 %p) #0 { entry: -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#a) -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#b) -; CHECK: if{{ *}}(p{{[0-3]}}) memw(##c){{ *}}={{ *}}r{{[0-9]+}} +; CHECK: r{{[0-9]+}}=memw(#a) +; CHECK: r{{[0-9]+}}=memw(#b) +; CHECK: if (p{{[0-3]}}) memw(##c)=r{{[0-9]+}} %0 = load i32, i32* @a, align 4 %1 = load i32, i32* @b, align 4 %add = add nsw i32 %1, %0 Index: test/CodeGen/Hexagon/hwloop-cleanup.ll =================================================================== --- test/CodeGen/Hexagon/hwloop-cleanup.ll +++ test/CodeGen/Hexagon/hwloop-cleanup.ll @@ -5,7 +5,7 @@ ; Bug 6685. 
; CHECK: loop0 -; CHECK-NOT: r{{[0-9]+}}{{.}}={{.}}add(r{{[0-9]+}},{{.}}#-1) +; CHECK-NOT: r{{[0-9]+}}=add(r{{[0-9]+}},#-1) ; CHECK-NOT: cmp.eq ; CHECK: endloop0 @@ -64,7 +64,7 @@ ; This test checks that we don't remove the induction variable since it's used. ; CHECK: loop0 -; CHECK: r{{[0-9]+}}{{.}}={{.}}add(r{{[0-9]+}},{{.}}#1) +; CHECK: r{{[0-9]+}}=add(r{{[0-9]+}},#1) ; CHECK-NOT: cmp.eq ; CHECK: endloop0 define i32 @test3(i32* nocapture %b) nounwind { Index: test/CodeGen/Hexagon/hwloop-crit-edge.ll =================================================================== --- test/CodeGen/Hexagon/hwloop-crit-edge.ll +++ test/CodeGen/Hexagon/hwloop-crit-edge.ll @@ -4,7 +4,7 @@ ; Generate hardware loop when loop 'latch' block is different ; from the loop 'exiting' block. -; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}}) ; CHECK: endloop0 define void @test(i32* nocapture %pFL, i16 signext %nBS, i16* nocapture readonly %pHT) #0 { Index: test/CodeGen/Hexagon/hwloop-loop1.ll =================================================================== --- test/CodeGen/Hexagon/hwloop-loop1.ll +++ test/CodeGen/Hexagon/hwloop-loop1.ll @@ -2,8 +2,8 @@ ; ; Generate loop1 instruction for double loop sequence. -; CHECK: loop1(.LBB{{.}}_{{.}}, #100) -; CHECK: loop0(.LBB{{.}}_{{.}}, #100) +; CHECK: loop1(.LBB{{.}}_{{.}},#100) +; CHECK: loop0(.LBB{{.}}_{{.}},#100) ; CHECK: endloop0 ; CHECK: endloop1 Index: test/CodeGen/Hexagon/hwloop1.ll =================================================================== --- test/CodeGen/Hexagon/hwloop1.ll +++ test/CodeGen/Hexagon/hwloop1.ll @@ -3,7 +3,7 @@ ; Case 1 : Loop with a constant number of iterations. ; CHECK-LABEL: @hwloop1 -; CHECK: loop0(.LBB{{.}}_{{.}}, #10) +; CHECK: loop0(.LBB{{.}}_{{.}},#10) ; CHECK: endloop0 @a = common global [10 x i32] zeroinitializer, align 4 @@ -23,7 +23,7 @@ ; Case 2 : Loop with a run-time number of iterations. ; CHECK-LABEL: @hwloop2 -; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}}) ; CHECK: endloop0 define i32 @hwloop2(i32 %n, i32* nocapture %b) nounwind { @@ -54,8 +54,8 @@ ; Case 3 : Induction variable increment more than 1. ; CHECK-LABEL: @hwloop3 -; CHECK: lsr(r{{[0-9]+}}, #2) -; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: lsr(r{{[0-9]+}},#2) +; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}}) ; CHECK: endloop0 define i32 @hwloop3(i32 %n, i32* nocapture %b) nounwind { @@ -86,7 +86,7 @@ ; Case 4 : Loop exit compare uses register instead of immediate value. ; CHECK-LABEL: @hwloop4 -; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}}) ; CHECK: endloop0 define i32 @hwloop4(i32 %n, i32* nocapture %b) nounwind { @@ -114,7 +114,7 @@ ; Case 5: After LSR, the initial value is 100 and the iv decrements to 0. 
; CHECK-LABEL: @hwloop5 -; CHECK: loop0(.LBB{{.}}_{{.}}, #100) +; CHECK: loop0(.LBB{{.}}_{{.}},#100) ; CHECK: endloop0 define void @hwloop5(i32* nocapture %a, i32* nocapture %res) nounwind { @@ -138,8 +138,8 @@ ; Case 6: Large immediate offset ; CHECK-LABEL: @hwloop6 -; CHECK-NOT: loop0(.LBB{{.}}_{{.}}, #1024) -; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK-NOT: loop0(.LBB{{.}}_{{.}},#1024) +; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}}) ; CHECK: endloop0 define void @hwloop6(i32* nocapture %a, i32* nocapture %res) nounwind { Index: test/CodeGen/Hexagon/hwloop2.ll =================================================================== --- test/CodeGen/Hexagon/hwloop2.ll +++ test/CodeGen/Hexagon/hwloop2.ll @@ -2,7 +2,7 @@ ; Test for multiple phis with induction variables. -; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}}) ; CHECK: endloop0 define i32 @hwloop4(i32* nocapture %s, i32* nocapture %a, i32 %n) { Index: test/CodeGen/Hexagon/hwloop4.ll =================================================================== --- test/CodeGen/Hexagon/hwloop4.ll +++ test/CodeGen/Hexagon/hwloop4.ll @@ -2,9 +2,9 @@ ; ; Remove the unnecessary 'add' instruction used for the hardware loop setup. -; CHECK: [[OP0:r[0-9]+]] = add([[OP1:r[0-9]+]], #-[[OP2:[0-9]+]] -; CHECK-NOT: add([[OP0]], #[[OP2]]) -; CHECK: lsr([[OP1]], #{{[0-9]+}}) +; CHECK: [[OP0:r[0-9]+]]=add([[OP1:r[0-9]+]],#-[[OP2:[0-9]+]] +; CHECK-NOT: add([[OP0]],#[[OP2]]) +; CHECK: lsr([[OP1]],#{{[0-9]+}}) ; CHECK: loop0 define void @matrix_mul_matrix(i32 %N, i32* nocapture %C, i16* nocapture readnone %A, i16* nocapture readnone %B) #0 { Index: test/CodeGen/Hexagon/hwloop5.ll =================================================================== --- test/CodeGen/Hexagon/hwloop5.ll +++ test/CodeGen/Hexagon/hwloop5.ll @@ -2,9 +2,9 @@ ; ; Generate hardware loop when unknown trip count loop is vectorized. 
-; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}}, r{{[0-9]+}}) +; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}},r{{[0-9]+}}) ; CHECK: endloop0 -; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}}, r{{[0-9]+}}) +; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}},r{{[0-9]+}}) ; CHECK: endloop0 @A = common global [1000 x i32] zeroinitializer, align 8 Index: test/CodeGen/Hexagon/idxload-with-zero-offset.ll =================================================================== --- test/CodeGen/Hexagon/idxload-with-zero-offset.ll +++ test/CodeGen/Hexagon/idxload-with-zero-offset.ll @@ -4,7 +4,7 @@ ; load word define i32 @load_w(i32* nocapture %a, i32 %n, i32 %m) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#2) +; CHECK: r{{[0-9]+}}=memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2) entry: %tmp = add i32 %n, %m %scevgep9 = getelementptr i32, i32* %a, i32 %tmp @@ -15,7 +15,7 @@ ; load unsigned half word define i16 @load_uh(i16* nocapture %a, i32 %n, i32 %m) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memuh(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#1) +; CHECK: r{{[0-9]+}}=memuh(r{{[0-9]+}}+r{{[0-9]+}}<<#1) entry: %tmp = add i32 %n, %m %scevgep9 = getelementptr i16, i16* %a, i32 %tmp @@ -26,7 +26,7 @@ ; load signed half word define i32 @load_h(i16* nocapture %a, i32 %n, i32 %m) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memh(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#1) +; CHECK: r{{[0-9]+}}=memh(r{{[0-9]+}}+r{{[0-9]+}}<<#1) entry: %tmp = add i32 %n, %m %scevgep9 = getelementptr i16, i16* %a, i32 %tmp @@ -38,7 +38,7 @@ ; load unsigned byte define i8 @load_ub(i8* nocapture %a, i32 %n, i32 %m) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#0) +; CHECK: r{{[0-9]+}}=memub(r{{[0-9]+}}+r{{[0-9]+}}<<#0) entry: %tmp = add i32 %n, %m %scevgep9 = getelementptr i8, i8* %a, i32 %tmp @@ -49,7 +49,7 @@ ; load signed byte define i32 @foo_2(i8* nocapture %a, i32 %n, i32 %m) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memb(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#0) +; CHECK: r{{[0-9]+}}=memb(r{{[0-9]+}}+r{{[0-9]+}}<<#0) entry: %tmp = add i32 %n, %m %scevgep9 = getelementptr i8, i8* %a, i32 %tmp @@ -61,7 +61,7 @@ ; load doubleword define i64 @load_d(i64* nocapture %a, i32 %n, i32 %m) nounwind { -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}memd(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#3) +; CHECK: r{{[0-9]+}}:{{[0-9]+}}=memd(r{{[0-9]+}}+r{{[0-9]+}}<<#3) entry: %tmp = add i32 %n, %m %scevgep9 = getelementptr i64, i64* %a, i32 %tmp Index: test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll =================================================================== --- test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll +++ test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll @@ -14,11 +14,11 @@ %cmp199 = icmp eq i16 %call197, 0 br i1 %cmp199, label %if.then200, label %if.else201 -; CHECK-DAG: [[R4:r[0-9]+]] = #4 -; CHECK: p0 = cmp.eq(r0, #0) -; CHECK: if (!p0.new) [[R3:r[0-9]+]] = #3 -; CHECK-DAG: if (!p0) memh(##t) = [[R3]] -; CHECK-DAG: if (p0) memh(##t) = [[R4]] +; CHECK-DAG: [[R4:r[0-9]+]]=#4 +; CHECK: p0=cmp.eq(r0,#0) +; CHECK: if (!p0.new) [[R3:r[0-9]+]]=#3 +; CHECK-DAG: if (!p0) memh(##t)=[[R3]] +; CHECK-DAG: if (p0) memh(##t)=[[R4]] if.then200: ; preds = %entry store i16 4, i16* getelementptr inbounds (%struct.0, %struct.0* @t, i32 0, i32 0), align 2 store i16 0, i16* getelementptr inbounds (%struct.0, %struct.0* @t, i32 0, i32 1), align 2 Index: test/CodeGen/Hexagon/insert-basic.ll 
=================================================================== --- test/CodeGen/Hexagon/insert-basic.ll +++ test/CodeGen/Hexagon/insert-basic.ll @@ -1,8 +1,8 @@ ; RUN: llc -O2 -march=hexagon < %s | FileCheck %s -; CHECK-DAG: insert(r{{[0-9]*}}, #17, #0) -; CHECK-DAG: insert(r{{[0-9]*}}, #18, #0) -; CHECK-DAG: insert(r{{[0-9]*}}, #22, #0) -; CHECK-DAG: insert(r{{[0-9]*}}, #12, #0) +; CHECK-DAG: insert(r{{[0-9]*}},#17,#0) +; CHECK-DAG: insert(r{{[0-9]*}},#18,#0) +; CHECK-DAG: insert(r{{[0-9]*}},#22,#0) +; CHECK-DAG: insert(r{{[0-9]*}},#12,#0) ; C source: ; typedef struct { Index: test/CodeGen/Hexagon/insert4.ll =================================================================== --- test/CodeGen/Hexagon/insert4.ll +++ test/CodeGen/Hexagon/insert4.ll @@ -1,8 +1,8 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s ; ; Check that we no longer generate 4 inserts. -; CHECK: combine(r{{[0-9]+}}.l, r{{[0-9]+}}.l) -; CHECK: combine(r{{[0-9]+}}.l, r{{[0-9]+}}.l) +; CHECK: combine(r{{[0-9]+}}.l,r{{[0-9]+}}.l) +; CHECK: combine(r{{[0-9]+}}.l,r{{[0-9]+}}.l) ; CHECK-NOT: insert target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" Index: test/CodeGen/Hexagon/intrinsics/alu32_alu.ll =================================================================== --- test/CodeGen/Hexagon/intrinsics/alu32_alu.ll +++ test/CodeGen/Hexagon/intrinsics/alu32_alu.ll @@ -10,7 +10,7 @@ %z = call i32 @llvm.hexagon.A2.addi(i32 %a, i32 0) ret i32 %z } -; CHECK: = add({{.*}}, #0) +; CHECK: =add({{.*}},#0) declare i32 @llvm.hexagon.A2.add(i32, i32) define i32 @A2_add(i32 %a, i32 %b) { @@ -17,7 +17,7 @@ %z = call i32 @llvm.hexagon.A2.add(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}, {{.*}}) +; CHECK: =add({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.addsat(i32, i32) define i32 @A2_addsat(i32 %a, i32 %b) { @@ -24,7 +24,7 @@ %z = call i32 @llvm.hexagon.A2.addsat(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}, {{.*}}):sat +; CHECK: =add({{.*}},{{.*}}):sat ; Logical operations declare i32 @llvm.hexagon.A2.and(i32, i32) @@ -32,7 +32,7 @@ %z = call i32 @llvm.hexagon.A2.and(i32 %a, i32 %b) ret i32 %z } -; CHECK: = and({{.*}}, {{.*}}) +; CHECK: =and({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.or(i32, i32) define i32 @A2_or(i32 %a, i32 %b) { @@ -39,7 +39,7 @@ %z = call i32 @llvm.hexagon.A2.or(i32 %a, i32 %b) ret i32 %z } -; CHECK: = or({{.*}}, {{.*}}) +; CHECK: =or({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.xor(i32, i32) define i32 @A2_xor(i32 %a, i32 %b) { @@ -46,7 +46,7 @@ %z = call i32 @llvm.hexagon.A2.xor(i32 %a, i32 %b) ret i32 %z } -; CHECK: = xor({{.*}}, {{.*}}) +; CHECK: =xor({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.andn(i32, i32) define i32 @A4_andn(i32 %a, i32 %b) { @@ -53,7 +53,7 @@ %z = call i32 @llvm.hexagon.A4.andn(i32 %a, i32 %b) ret i32 %z } -; CHECK: = and({{.*}}, ~{{.*}}) +; CHECK: =and({{.*}},~{{.*}}) declare i32 @llvm.hexagon.A4.orn(i32, i32) define i32 @A4_orn(i32 %a, i32 %b) { @@ -60,7 +60,7 @@ %z = call i32 @llvm.hexagon.A4.orn(i32 %a, i32 %b) ret i32 %z } -; CHECK: = or({{.*}}, ~{{.*}}) +; CHECK: =or({{.*}},~{{.*}}) ; Subtract declare i32 @llvm.hexagon.A2.sub(i32, i32) @@ -68,7 +68,7 @@ %z = call i32 @llvm.hexagon.A2.sub(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}, {{.*}}) +; CHECK: =sub({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.subsat(i32, i32) define i32 @A2_subsat(i32 %a, i32 %b) { @@ -75,7 +75,7 @@ %z = call i32 @llvm.hexagon.A2.subsat(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}, {{.*}}):sat +; CHECK: 
=sub({{.*}},{{.*}}):sat ; Sign extend declare i32 @llvm.hexagon.A2.sxtb(i32) @@ -83,7 +83,7 @@ %z = call i32 @llvm.hexagon.A2.sxtb(i32 %a) ret i32 %z } -; CHECK: = sxtb({{.*}}) +; CHECK: =sxtb({{.*}}) declare i32 @llvm.hexagon.A2.sxth(i32) define i32 @A2_sxth(i32 %a) { @@ -90,7 +90,7 @@ %z = call i32 @llvm.hexagon.A2.sxth(i32 %a) ret i32 %z } -; CHECK: = sxth({{.*}}) +; CHECK: =sxth({{.*}}) ; Transfer immediate declare i32 @llvm.hexagon.A2.tfril(i32, i32) @@ -98,7 +98,7 @@ %z = call i32 @llvm.hexagon.A2.tfril(i32 %a, i32 0) ret i32 %z } -; CHECK: = #0 +; CHECK: =#0 declare i32 @llvm.hexagon.A2.tfrih(i32, i32) define i32 @A2_tfrih(i32 %a) { @@ -105,7 +105,7 @@ %z = call i32 @llvm.hexagon.A2.tfrih(i32 %a, i32 0) ret i32 %z } -; CHECK: = #0 +; CHECK: =#0 declare i32 @llvm.hexagon.A2.tfrsi(i32) define i32 @A2_tfrsi() { @@ -112,7 +112,7 @@ %z = call i32 @llvm.hexagon.A2.tfrsi(i32 0) ret i32 %z } -; CHECK: = #0 +; CHECK: =#0 ; Transfer register declare i32 @llvm.hexagon.A2.tfr(i32) @@ -128,7 +128,7 @@ %z = call i32 @llvm.hexagon.A2.svaddh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = vaddh({{.*}}, {{.*}}) +; CHECK: =vaddh({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.svaddhs(i32, i32) define i32 @A2_svaddhs(i32 %a, i32 %b) { @@ -135,7 +135,7 @@ %z = call i32 @llvm.hexagon.A2.svaddhs(i32 %a, i32 %b) ret i32 %z } -; CHECK: = vaddh({{.*}}, {{.*}}):sat +; CHECK: =vaddh({{.*}},{{.*}}):sat declare i32 @llvm.hexagon.A2.svadduhs(i32, i32) define i32 @A2_svadduhs(i32 %a, i32 %b) { @@ -142,7 +142,7 @@ %z = call i32 @llvm.hexagon.A2.svadduhs(i32 %a, i32 %b) ret i32 %z } -; CHECK: = vadduh({{.*}}, {{.*}}):sat +; CHECK: =vadduh({{.*}},{{.*}}):sat ; Vector average halfwords declare i32 @llvm.hexagon.A2.svavgh(i32, i32) @@ -150,7 +150,7 @@ %z = call i32 @llvm.hexagon.A2.svavgh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = vavgh({{.*}}, {{.*}}) +; CHECK: =vavgh({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.svavghs(i32, i32) define i32 @A2_svavghs(i32 %a, i32 %b) { @@ -157,7 +157,7 @@ %z = call i32 @llvm.hexagon.A2.svavghs(i32 %a, i32 %b) ret i32 %z } -; CHECK: = vavgh({{.*}}, {{.*}}):rnd +; CHECK: =vavgh({{.*}},{{.*}}):rnd declare i32 @llvm.hexagon.A2.svnavgh(i32, i32) define i32 @A2_svnavgh(i32 %a, i32 %b) { @@ -164,7 +164,7 @@ %z = call i32 @llvm.hexagon.A2.svnavgh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = vnavgh({{.*}}, {{.*}}) +; CHECK: =vnavgh({{.*}},{{.*}}) ; Vector subtract halfwords declare i32 @llvm.hexagon.A2.svsubh(i32, i32) @@ -172,7 +172,7 @@ %z = call i32 @llvm.hexagon.A2.svsubh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = vsubh({{.*}}, {{.*}}) +; CHECK: =vsubh({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.svsubhs(i32, i32) define i32 @A2_svsubhs(i32 %a, i32 %b) { @@ -179,7 +179,7 @@ %z = call i32 @llvm.hexagon.A2.svsubhs(i32 %a, i32 %b) ret i32 %z } -; CHECK: = vsubh({{.*}}, {{.*}}):sat +; CHECK: =vsubh({{.*}},{{.*}}):sat declare i32 @llvm.hexagon.A2.svsubuhs(i32, i32) define i32 @A2_svsubuhs(i32 %a, i32 %b) { @@ -186,7 +186,7 @@ %z = call i32 @llvm.hexagon.A2.svsubuhs(i32 %a, i32 %b) ret i32 %z } -; CHECK: = vsubuh({{.*}}, {{.*}}):sat +; CHECK: =vsubuh({{.*}},{{.*}}):sat ; Zero extend declare i32 @llvm.hexagon.A2.zxth(i32) @@ -194,4 +194,4 @@ %z = call i32 @llvm.hexagon.A2.zxth(i32 %a) ret i32 %z } -; CHECK: = zxth({{.*}}) +; CHECK: =zxth({{.*}}) Index: test/CodeGen/Hexagon/intrinsics/alu32_perm.ll =================================================================== --- test/CodeGen/Hexagon/intrinsics/alu32_perm.ll +++ test/CodeGen/Hexagon/intrinsics/alu32_perm.ll @@ -10,7 +10,7 @@ %z = call i64 
@llvm.hexagon.A4.combineri(i32 %a, i32 0) ret i64 %z } -; CHECK: = combine({{.*}}, #0) +; CHECK: =combine({{.*}},#0) declare i64 @llvm.hexagon.A4.combineir(i32, i32) define i64 @A4_combineir(i32 %a) { @@ -17,7 +17,7 @@ %z = call i64 @llvm.hexagon.A4.combineir(i32 0, i32 %a) ret i64 %z } -; CHECK: = combine(#0, {{.*}}) +; CHECK: =combine(#0,{{.*}}) declare i64 @llvm.hexagon.A2.combineii(i32, i32) define i64 @A2_combineii() { @@ -24,7 +24,7 @@ %z = call i64 @llvm.hexagon.A2.combineii(i32 0, i32 0) ret i64 %z } -; CHECK: = combine(#0, #0) +; CHECK: =combine(#0,#0) declare i32 @llvm.hexagon.A2.combine.hh(i32, i32) define i32 @A2_combine_hh(i32 %a, i32 %b) { @@ -31,7 +31,7 @@ %z = call i32 @llvm.hexagon.A2.combine.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = combine({{.*}}, {{.*}}) +; CHECK: =combine({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.combine.hl(i32, i32) define i32 @A2_combine_hl(i32 %a, i32 %b) { @@ -38,7 +38,7 @@ %z = call i32 @llvm.hexagon.A2.combine.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = combine({{.*}}, {{.*}}) +; CHECK: =combine({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.combine.lh(i32, i32) define i32 @A2_combine_lh(i32 %a, i32 %b) { @@ -45,7 +45,7 @@ %z = call i32 @llvm.hexagon.A2.combine.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = combine({{.*}}, {{.*}}) +; CHECK: =combine({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.combine.ll(i32, i32) define i32 @A2_combine_ll(i32 %a, i32 %b) { @@ -52,7 +52,7 @@ %z = call i32 @llvm.hexagon.A2.combine.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = combine({{.*}}, {{.*}}) +; CHECK: =combine({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.combinew(i32, i32) define i64 @A2_combinew(i32 %a, i32 %b) { @@ -59,7 +59,7 @@ %z = call i64 @llvm.hexagon.A2.combinew(i32 %a, i32 %b) ret i64 %z } -; CHECK: = combine({{.*}}, {{.*}}) +; CHECK: =combine({{.*}},{{.*}}) ; Mux declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32) @@ -67,7 +67,7 @@ %z = call i32 @llvm.hexagon.C2.muxri(i32 %a, i32 0, i32 %b) ret i32 %z } -; CHECK: = mux({{.*}}, #0, {{.*}}) +; CHECK: =mux({{.*}},#0,{{.*}}) declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32) define i32 @C2_muxir(i32 %a, i32 %b) { @@ -74,7 +74,7 @@ %z = call i32 @llvm.hexagon.C2.muxir(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: = mux({{.*}}, {{.*}}, #0) +; CHECK: =mux({{.*}},{{.*}},#0) declare i32 @llvm.hexagon.C2.mux(i32, i32, i32) define i32 @C2_mux(i32 %a, i32 %b, i32 %c) { @@ -81,7 +81,7 @@ %z = call i32 @llvm.hexagon.C2.mux(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = mux({{.*}}, {{.*}}, {{.*}}) +; CHECK: =mux({{.*}},{{.*}},{{.*}}) ; Shift word by 16 declare i32 @llvm.hexagon.A2.aslh(i32) @@ -89,7 +89,7 @@ %z = call i32 @llvm.hexagon.A2.aslh(i32 %a) ret i32 %z } -; CHECK: = aslh({{.*}}) +; CHECK: =aslh({{.*}}) declare i32 @llvm.hexagon.A2.asrh(i32) define i32 @A2_asrh(i32 %a) { @@ -96,7 +96,7 @@ %z = call i32 @llvm.hexagon.A2.asrh(i32 %a) ret i32 %z } -; CHECK: = asrh({{.*}}) +; CHECK: =asrh({{.*}}) ; Pack high and low halfwords declare i64 @llvm.hexagon.S2.packhl(i32, i32) @@ -104,4 +104,4 @@ %z = call i64 @llvm.hexagon.S2.packhl(i32 %a, i32 %b) ret i64 %z } -; CHECK: = packhl({{.*}}, {{.*}}) +; CHECK: =packhl({{.*}},{{.*}}) Index: test/CodeGen/Hexagon/intrinsics/cr.ll =================================================================== --- test/CodeGen/Hexagon/intrinsics/cr.ll +++ test/CodeGen/Hexagon/intrinsics/cr.ll @@ -10,7 +10,7 @@ %z = call i32@llvm.hexagon.C4.fastcorner9(i32 %a, i32 %b) ret i32 %z } -; CHECK: = fastcorner9({{.*}}, {{.*}}) +; CHECK: =fastcorner9({{.*}},{{.*}}) declare i32 
@llvm.hexagon.C4.fastcorner9.not(i32, i32) define i32 @C4_fastcorner9_not(i32 %a, i32 %b) { @@ -17,7 +17,7 @@ %z = call i32@llvm.hexagon.C4.fastcorner9.not(i32 %a, i32 %b) ret i32 %z } -; CHECK: = !fastcorner9({{.*}}, {{.*}}) +; CHECK: =!fastcorner9({{.*}},{{.*}}) ; Logical reductions on predicates declare i32 @llvm.hexagon.C2.any8(i32) @@ -25,7 +25,7 @@ %z = call i32@llvm.hexagon.C2.any8(i32 %a) ret i32 %z } -; CHECK: = any8({{.*}}) +; CHECK: =any8({{.*}}) declare i32 @llvm.hexagon.C2.all8(i32) define i32 @C2_all8(i32 %a) { @@ -33,7 +33,7 @@ ret i32 %z } -; CHECK: = all8({{.*}}) +; CHECK: =all8({{.*}}) ; Logical operations on predicates declare i32 @llvm.hexagon.C2.and(i32, i32) @@ -41,7 +41,7 @@ %z = call i32@llvm.hexagon.C2.and(i32 %a, i32 %b) ret i32 %z } -; CHECK: = and({{.*}}, {{.*}}) +; CHECK: =and({{.*}},{{.*}}) declare i32 @llvm.hexagon.C4.and.and(i32, i32, i32) define i32 @C4_and_and(i32 %a, i32 %b, i32 %c) { @@ -48,7 +48,7 @@ %z = call i32@llvm.hexagon.C4.and.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = and({{.*}}, and({{.*}}, {{.*}})) +; CHECK: =and({{.*}},and({{.*}},{{.*}})) declare i32 @llvm.hexagon.C2.or(i32, i32) define i32 @C2_or(i32 %a, i32 %b) { @@ -55,7 +55,7 @@ %z = call i32@llvm.hexagon.C2.or(i32 %a, i32 %b) ret i32 %z } -; CHECK: = or({{.*}}, {{.*}}) +; CHECK: =or({{.*}},{{.*}}) declare i32 @llvm.hexagon.C4.and.or(i32, i32, i32) define i32 @C4_and_or(i32 %a, i32 %b, i32 %c) { @@ -62,7 +62,7 @@ %z = call i32@llvm.hexagon.C4.and.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = and({{.*}}, or({{.*}}, {{.*}})) +; CHECK: =and({{.*}},or({{.*}},{{.*}})) declare i32 @llvm.hexagon.C2.xor(i32, i32) define i32 @C2_xor(i32 %a, i32 %b) { @@ -69,7 +69,7 @@ %z = call i32@llvm.hexagon.C2.xor(i32 %a, i32 %b) ret i32 %z } -; CHECK: = xor({{.*}}, {{.*}}) +; CHECK: =xor({{.*}},{{.*}}) declare i32 @llvm.hexagon.C4.or.and(i32, i32, i32) define i32 @C4_or_and(i32 %a, i32 %b, i32 %c) { @@ -76,7 +76,7 @@ %z = call i32@llvm.hexagon.C4.or.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = or({{.*}}, and({{.*}}, {{.*}})) +; CHECK: =or({{.*}},and({{.*}},{{.*}})) declare i32 @llvm.hexagon.C2.andn(i32, i32) define i32 @C2_andn(i32 %a, i32 %b) { @@ -83,7 +83,7 @@ %z = call i32@llvm.hexagon.C2.andn(i32 %a, i32 %b) ret i32 %z } -; CHECK: = and({{.*}}, !{{.*}}) +; CHECK: =and({{.*}},!{{.*}}) declare i32 @llvm.hexagon.C4.or.or(i32, i32, i32) define i32 @C4_or_or(i32 %a, i32 %b, i32 %c) { @@ -90,7 +90,7 @@ %z = call i32@llvm.hexagon.C4.or.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = or({{.*}}, or({{.*}}, {{.*}})) +; CHECK: =or({{.*}},or({{.*}},{{.*}})) declare i32 @llvm.hexagon.C4.and.andn(i32, i32, i32) define i32 @C4_and_andn(i32 %a, i32 %b, i32 %c) { @@ -97,7 +97,7 @@ %z = call i32@llvm.hexagon.C4.and.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = and({{.*}}, and({{.*}}, !{{.*}})) +; CHECK: =and({{.*}},and({{.*}},!{{.*}})) declare i32 @llvm.hexagon.C4.and.orn(i32, i32, i32) define i32 @C4_and_orn(i32 %a, i32 %b, i32 %c) { @@ -104,7 +104,7 @@ %z = call i32@llvm.hexagon.C4.and.orn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = and({{.*}}, or({{.*}}, !{{.*}})) +; CHECK: =and({{.*}},or({{.*}},!{{.*}})) declare i32 @llvm.hexagon.C2.not(i32) define i32 @C2_not(i32 %a) { @@ -111,7 +111,7 @@ %z = call i32@llvm.hexagon.C2.not(i32 %a) ret i32 %z } -; CHECK: = not({{.*}}) +; CHECK: =not({{.*}}) declare i32 @llvm.hexagon.C4.or.andn(i32, i32, i32) define i32 @C4_or_andn(i32 %a, i32 %b, i32 %c) { @@ -118,7 +118,7 @@ %z = call i32@llvm.hexagon.C4.or.andn(i32 %a, i32 %b, i32 %c) ret i32 
%z } -; CHECK: = or({{.*}}, and({{.*}}, !{{.*}})) +; CHECK: =or({{.*}},and({{.*}},!{{.*}})) declare i32 @llvm.hexagon.C2.orn(i32, i32) define i32 @C2_orn(i32 %a, i32 %b) { @@ -125,7 +125,7 @@ %z = call i32@llvm.hexagon.C2.orn(i32 %a, i32 %b) ret i32 %z } -; CHECK: = or({{.*}}, !{{.*}}) +; CHECK: =or({{.*}},!{{.*}}) declare i32 @llvm.hexagon.C4.or.orn(i32, i32, i32) define i32 @C4_or_orn(i32 %a, i32 %b, i32 %c) { @@ -132,4 +132,4 @@ %z = call i32@llvm.hexagon.C4.or.orn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = or({{.*}}, or({{.*}}, !{{.*}})) +; CHECK: =or({{.*}},or({{.*}},!{{.*}})) Index: test/CodeGen/Hexagon/intrinsics/system_user.ll =================================================================== --- test/CodeGen/Hexagon/intrinsics/system_user.ll +++ test/CodeGen/Hexagon/intrinsics/system_user.ll @@ -10,4 +10,4 @@ call void @llvm.hexagon.prefetch(i8* %a) ret void } -; CHECK: dcfetch({{.*}} + #0) +; CHECK: dcfetch({{.*}}+#0) Index: test/CodeGen/Hexagon/intrinsics/xtype_alu.ll =================================================================== --- test/CodeGen/Hexagon/intrinsics/xtype_alu.ll +++ test/CodeGen/Hexagon/intrinsics/xtype_alu.ll @@ -11,7 +11,7 @@ %z = call i64 @llvm.hexagon.A2.absp(i64 %a) ret i64 %z } -; CHECK: = abs({{.*}}) +; CHECK: =abs({{.*}}) ; Absolute value word declare i32 @llvm.hexagon.A2.abs(i32) @@ -19,7 +19,7 @@ %z = call i32 @llvm.hexagon.A2.abs(i32 %a) ret i32 %z } -; CHECK: = abs({{.*}}) +; CHECK: =abs({{.*}}) declare i32 @llvm.hexagon.A2.abssat(i32) define i32 @A2_abssat(i32 %a) { @@ -26,7 +26,7 @@ %z = call i32 @llvm.hexagon.A2.abssat(i32 %a) ret i32 %z } -; CHECK: = abs({{.*}}):sat +; CHECK: =abs({{.*}}):sat ; Add and accumulate declare i32 @llvm.hexagon.S4.addaddi(i32, i32, i32) @@ -34,7 +34,7 @@ %z = call i32 @llvm.hexagon.S4.addaddi(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: = add({{.*}}, add({{.*}}, #0)) +; CHECK: =add({{.*}},add({{.*}},#0)) declare i32 @llvm.hexagon.S4.subaddi(i32, i32, i32) define i32 @S4_subaddi(i32 %a, i32 %b) { @@ -41,7 +41,7 @@ %z = call i32 @llvm.hexagon.S4.subaddi(i32 %a, i32 0, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}, sub(#0, {{.*}})) +; CHECK: =add({{.*}},sub(#0,{{.*}})) declare i32 @llvm.hexagon.M2.accii(i32, i32, i32) define i32 @M2_accii(i32 %a, i32 %b) { @@ -48,7 +48,7 @@ %z = call i32 @llvm.hexagon.M2.accii(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: += add({{.*}}, #0) +; CHECK: +=add({{.*}},#0) declare i32 @llvm.hexagon.M2.naccii(i32, i32, i32) define i32 @M2_naccii(i32 %a, i32 %b) { @@ -55,7 +55,7 @@ %z = call i32 @llvm.hexagon.M2.naccii(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: -= add({{.*}}, #0) +; CHECK: -=add({{.*}},#0) declare i32 @llvm.hexagon.M2.acci(i32, i32, i32) define i32 @M2_acci(i32 %a, i32 %b, i32 %c) { @@ -62,7 +62,7 @@ %z = call i32 @llvm.hexagon.M2.acci(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += add({{.*}}, {{.*}}) +; CHECK: +=add({{.*}},{{.*}}) declare i32 @llvm.hexagon.M2.nacci(i32, i32, i32) define i32 @M2_nacci(i32 %a, i32 %b, i32 %c) { @@ -69,7 +69,7 @@ %z = call i32 @llvm.hexagon.M2.nacci(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= add({{.*}}, {{.*}}) +; CHECK: -=add({{.*}},{{.*}}) ; Add doublewords declare i64 @llvm.hexagon.A2.addp(i64, i64) @@ -77,7 +77,7 @@ %z = call i64 @llvm.hexagon.A2.addp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = add({{.*}}, {{.*}}) +; CHECK: =add({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.addpsat(i64, i64) define i64 @A2_addpsat(i64 %a, i64 %b) { @@ -84,7 +84,7 @@ %z = call i64 @llvm.hexagon.A2.addpsat(i64 %a, i64 %b) ret i64 %z } -; CHECK: 
= add({{.*}}, {{.*}}):sat +; CHECK: =add({{.*}},{{.*}}):sat ; Add halfword declare i32 @llvm.hexagon.A2.addh.l16.ll(i32, i32) @@ -92,7 +92,7 @@ %z = call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.l) +; CHECK: =add({{.*}}.l,{{.*}}.l) declare i32 @llvm.hexagon.A2.addh.l16.hl(i32, i32) define i32 @A2_addh_l16_hl(i32 %a, i32 %b) { @@ -99,7 +99,7 @@ %z = call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.h) +; CHECK: =add({{.*}}.l,{{.*}}.h) declare i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32, i32) define i32 @A2_addh_l16_sat.ll(i32 %a, i32 %b) { @@ -106,7 +106,7 @@ %z = call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.l):sat +; CHECK: =add({{.*}}.l,{{.*}}.l):sat declare i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32, i32) define i32 @A2_addh_l16_sat.hl(i32 %a, i32 %b) { @@ -113,7 +113,7 @@ %z = call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.h):sat +; CHECK: =add({{.*}}.l,{{.*}}.h):sat declare i32 @llvm.hexagon.A2.addh.h16.ll(i32, i32) define i32 @A2_addh_h16_ll(i32 %a, i32 %b) { @@ -120,7 +120,7 @@ %z = call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.l):<<16 +; CHECK: =add({{.*}}.l,{{.*}}.l):<<16 declare i32 @llvm.hexagon.A2.addh.h16.lh(i32, i32) define i32 @A2_addh_h16_lh(i32 %a, i32 %b) { @@ -127,7 +127,7 @@ %z = call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.h):<<16 +; CHECK: =add({{.*}}.l,{{.*}}.h):<<16 declare i32 @llvm.hexagon.A2.addh.h16.hl(i32, i32) define i32 @A2_addh_h16_hl(i32 %a, i32 %b) { @@ -134,7 +134,7 @@ %z = call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.h, {{.*}}.l):<<16 +; CHECK: =add({{.*}}.h,{{.*}}.l):<<16 declare i32 @llvm.hexagon.A2.addh.h16.hh(i32, i32) define i32 @A2_addh_h16_hh(i32 %a, i32 %b) { @@ -141,7 +141,7 @@ %z = call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.h, {{.*}}.h):<<16 +; CHECK: =add({{.*}}.h,{{.*}}.h):<<16 declare i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32, i32) define i32 @A2_addh_h16_sat_ll(i32 %a, i32 %b) { @@ -148,7 +148,7 @@ %z = call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.l):sat:<<16 +; CHECK: =add({{.*}}.l,{{.*}}.l):sat:<<16 declare i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32, i32) define i32 @A2_addh_h16_sat_lh(i32 %a, i32 %b) { @@ -155,7 +155,7 @@ %z = call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.h):sat:<<16 +; CHECK: =add({{.*}}.l,{{.*}}.h):sat:<<16 declare i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32, i32) define i32 @A2_addh_h16_sat_hl(i32 %a, i32 %b) { @@ -162,7 +162,7 @@ %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.h, {{.*}}.l):sat:<<16 +; CHECK: =add({{.*}}.h,{{.*}}.l):sat:<<16 declare i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32, i32) define i32 @A2_addh_h16_sat_hh(i32 %a, i32 %b) { @@ -169,7 +169,7 @@ %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.h, {{.*}}.h):sat:<<16 +; CHECK: =add({{.*}}.h,{{.*}}.h):sat:<<16 ; Logical doublewords declare i64 @llvm.hexagon.A2.notp(i64) @@ -177,7 +177,7 @@ %z = call i64 @llvm.hexagon.A2.notp(i64 %a) ret i64 %z } -; CHECK: = not({{.*}}) +; CHECK: =not({{.*}}) declare i64 @llvm.hexagon.A2.andp(i64, i64) 
define i64 @A2_andp(i64 %a, i64 %b) { @@ -184,7 +184,7 @@ %z = call i64 @llvm.hexagon.A2.andp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = and({{.*}}, {{.*}}) +; CHECK: =and({{.*}},{{.*}}) declare i64 @llvm.hexagon.A4.andnp(i64, i64) define i64 @A2_andnp(i64 %a, i64 %b) { @@ -191,7 +191,7 @@ %z = call i64 @llvm.hexagon.A4.andnp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = and({{.*}}, ~{{.*}}) +; CHECK: =and({{.*}},~{{.*}}) declare i64 @llvm.hexagon.A2.orp(i64, i64) define i64 @A2_orp(i64 %a, i64 %b) { @@ -198,7 +198,7 @@ %z = call i64 @llvm.hexagon.A2.orp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = or({{.*}}, {{.*}}) +; CHECK: =or({{.*}},{{.*}}) declare i64 @llvm.hexagon.A4.ornp(i64, i64) define i64 @A2_ornp(i64 %a, i64 %b) { @@ -205,7 +205,7 @@ %z = call i64 @llvm.hexagon.A4.ornp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = or({{.*}}, ~{{.*}}) +; CHECK: =or({{.*}},~{{.*}}) declare i64 @llvm.hexagon.A2.xorp(i64, i64) define i64 @A2_xorp(i64 %a, i64 %b) { @@ -212,7 +212,7 @@ %z = call i64 @llvm.hexagon.A2.xorp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = xor({{.*}}, {{.*}}) +; CHECK: =xor({{.*}},{{.*}}) ; Logical-logical doublewords declare i64 @llvm.hexagon.M4.xor.xacc(i64, i64, i64) @@ -220,7 +220,7 @@ %z = call i64 @llvm.hexagon.M4.xor.xacc(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: ^= xor({{.*}}, {{.*}}) +; CHECK: ^=xor({{.*}},{{.*}}) ; Logical-logical words declare i32 @llvm.hexagon.S4.or.andi(i32, i32, i32) @@ -228,7 +228,7 @@ %z = call i32 @llvm.hexagon.S4.or.andi(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: |= and({{.*}}, #0) +; CHECK: |=and({{.*}},#0) declare i32 @llvm.hexagon.S4.or.andix(i32, i32, i32) define i32 @S4_or_andix(i32 %a, i32 %b) { @@ -235,7 +235,7 @@ %z = call i32 @llvm.hexagon.S4.or.andix(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: = or({{.*}}, and({{.*}}, #0)) +; CHECK: =or({{.*}},and({{.*}},#0)) declare i32 @llvm.hexagon.M4.or.andn(i32, i32, i32) define i32 @M4_or_andn(i32 %a, i32 %b, i32 %c) { @@ -242,7 +242,7 @@ %z = call i32 @llvm.hexagon.M4.or.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= and({{.*}}, ~{{.*}}) +; CHECK: |=and({{.*}},~{{.*}}) declare i32 @llvm.hexagon.M4.and.andn(i32, i32, i32) define i32 @M4_and_andn(i32 %a, i32 %b, i32 %c) { @@ -249,7 +249,7 @@ %z = call i32 @llvm.hexagon.M4.and.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= and({{.*}}, ~{{.*}}) +; CHECK: &=and({{.*}},~{{.*}}) declare i32 @llvm.hexagon.M4.xor.andn(i32, i32, i32) define i32 @M4_xor_andn(i32 %a, i32 %b, i32 %c) { @@ -256,7 +256,7 @@ %z = call i32 @llvm.hexagon.M4.xor.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: ^= and({{.*}}, ~{{.*}}) +; CHECK: ^=and({{.*}},~{{.*}}) declare i32 @llvm.hexagon.M4.and.and(i32, i32, i32) define i32 @M4_and_and(i32 %a, i32 %b, i32 %c) { @@ -263,7 +263,7 @@ %z = call i32 @llvm.hexagon.M4.and.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= and({{.*}}, {{.*}}) +; CHECK: &=and({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.and.or(i32, i32, i32) define i32 @M4_and_or(i32 %a, i32 %b, i32 %c) { @@ -270,7 +270,7 @@ %z = call i32 @llvm.hexagon.M4.and.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= or({{.*}}, {{.*}}) +; CHECK: &=or({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.and.xor(i32, i32, i32) define i32 @M4_and_xor(i32 %a, i32 %b, i32 %c) { @@ -277,7 +277,7 @@ %z = call i32 @llvm.hexagon.M4.and.xor(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= xor({{.*}}, {{.*}}) +; CHECK: &=xor({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.or.and(i32, i32, i32) define i32 @M4_or_and(i32 %a, i32 %b, i32 %c) { @@ -284,7 +284,7 @@ %z = call i32 
@llvm.hexagon.M4.or.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= and({{.*}}, {{.*}}) +; CHECK: |=and({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.or.or(i32, i32, i32) define i32 @M4_or_or(i32 %a, i32 %b, i32 %c) { @@ -291,7 +291,7 @@ %z = call i32 @llvm.hexagon.M4.or.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= or({{.*}}, {{.*}}) +; CHECK: |=or({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.or.xor(i32, i32, i32) define i32 @M4_or_xor(i32 %a, i32 %b, i32 %c) { @@ -298,7 +298,7 @@ %z = call i32 @llvm.hexagon.M4.or.xor(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= xor({{.*}}, {{.*}}) +; CHECK: |=xor({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.xor.and(i32, i32, i32) define i32 @M4_xor_and(i32 %a, i32 %b, i32 %c) { @@ -305,7 +305,7 @@ %z = call i32 @llvm.hexagon.M4.xor.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: ^= and({{.*}}, {{.*}}) +; CHECK: ^=and({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.xor.or(i32, i32, i32) define i32 @M4_xor_or(i32 %a, i32 %b, i32 %c) { @@ -312,7 +312,7 @@ %z = call i32 @llvm.hexagon.M4.xor.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: ^= or({{.*}}, {{.*}}) +; CHECK: ^=or({{.*}},{{.*}}) ; Maximum words declare i32 @llvm.hexagon.A2.max(i32, i32) @@ -320,7 +320,7 @@ %z = call i32 @llvm.hexagon.A2.max(i32 %a, i32 %b) ret i32 %z } -; CHECK: = max({{.*}}, {{.*}}) +; CHECK: =max({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.maxu(i32, i32) define i32 @A2_maxu(i32 %a, i32 %b) { @@ -327,7 +327,7 @@ %z = call i32 @llvm.hexagon.A2.maxu(i32 %a, i32 %b) ret i32 %z } -; CHECK: = maxu({{.*}}, {{.*}}) +; CHECK: =maxu({{.*}},{{.*}}) ; Maximum doublewords declare i64 @llvm.hexagon.A2.maxp(i64, i64) @@ -335,7 +335,7 @@ %z = call i64 @llvm.hexagon.A2.maxp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = max({{.*}}, {{.*}}) +; CHECK: =max({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.maxup(i64, i64) define i64 @A2_maxup(i64 %a, i64 %b) { @@ -342,7 +342,7 @@ %z = call i64 @llvm.hexagon.A2.maxup(i64 %a, i64 %b) ret i64 %z } -; CHECK: = maxu({{.*}}, {{.*}}) +; CHECK: =maxu({{.*}},{{.*}}) ; Minimum words declare i32 @llvm.hexagon.A2.min(i32, i32) @@ -350,7 +350,7 @@ %z = call i32 @llvm.hexagon.A2.min(i32 %a, i32 %b) ret i32 %z } -; CHECK: = min({{.*}}, {{.*}}) +; CHECK: =min({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.minu(i32, i32) define i32 @A2_minu(i32 %a, i32 %b) { @@ -357,7 +357,7 @@ %z = call i32 @llvm.hexagon.A2.minu(i32 %a, i32 %b) ret i32 %z } -; CHECK: = minu({{.*}}, {{.*}}) +; CHECK: =minu({{.*}},{{.*}}) ; Minimum doublewords declare i64 @llvm.hexagon.A2.minp(i64, i64) @@ -365,7 +365,7 @@ %z = call i64 @llvm.hexagon.A2.minp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = min({{.*}}, {{.*}}) +; CHECK: =min({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.minup(i64, i64) define i64 @A2_minup(i64 %a, i64 %b) { @@ -372,7 +372,7 @@ %z = call i64 @llvm.hexagon.A2.minup(i64 %a, i64 %b) ret i64 %z } -; CHECK: = minu({{.*}}, {{.*}}) +; CHECK: =minu({{.*}},{{.*}}) ; Module wrap declare i32 @llvm.hexagon.A4.modwrapu(i32, i32) @@ -380,7 +380,7 @@ %z = call i32 @llvm.hexagon.A4.modwrapu(i32 %a, i32 %b) ret i32 %z } -; CHECK: = modwrap({{.*}}, {{.*}}) +; CHECK: =modwrap({{.*}},{{.*}}) ; Negate declare i64 @llvm.hexagon.A2.negp(i64) @@ -388,7 +388,7 @@ %z = call i64 @llvm.hexagon.A2.negp(i64 %a) ret i64 %z } -; CHECK: = neg({{.*}}) +; CHECK: =neg({{.*}}) declare i32 @llvm.hexagon.A2.negsat(i32) define i32 @A2_negsat(i32 %a) { @@ -395,7 +395,7 @@ %z = call i32 @llvm.hexagon.A2.negsat(i32 %a) ret i32 %z } -; CHECK: = neg({{.*}}):sat +; CHECK: =neg({{.*}}):sat ; Round declare i32 
@llvm.hexagon.A2.roundsat(i64) @@ -403,7 +403,7 @@ %z = call i32 @llvm.hexagon.A2.roundsat(i64 %a) ret i32 %z } -; CHECK: = round({{.*}}):sat +; CHECK: =round({{.*}}):sat declare i32 @llvm.hexagon.A4.cround.ri(i32, i32) define i32 @A4_cround_ri(i32 %a) { @@ -410,7 +410,7 @@ %z = call i32 @llvm.hexagon.A4.cround.ri(i32 %a, i32 0) ret i32 %z } -; CHECK: = cround({{.*}}, #0) +; CHECK: =cround({{.*}},#0) declare i32 @llvm.hexagon.A4.round.ri(i32, i32) define i32 @A4_round_ri(i32 %a) { @@ -417,7 +417,7 @@ %z = call i32 @llvm.hexagon.A4.round.ri(i32 %a, i32 0) ret i32 %z } -; CHECK: = round({{.*}}, #0) +; CHECK: =round({{.*}},#0) declare i32 @llvm.hexagon.A4.round.ri.sat(i32, i32) define i32 @A4_round_ri_sat(i32 %a) { @@ -424,7 +424,7 @@ %z = call i32 @llvm.hexagon.A4.round.ri.sat(i32 %a, i32 0) ret i32 %z } -; CHECK: = round({{.*}}, #0):sat +; CHECK: =round({{.*}},#0):sat declare i32 @llvm.hexagon.A4.cround.rr(i32, i32) define i32 @A4_cround_rr(i32 %a, i32 %b) { @@ -431,7 +431,7 @@ %z = call i32 @llvm.hexagon.A4.cround.rr(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cround({{.*}}, {{.*}}) +; CHECK: =cround({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.round.rr(i32, i32) define i32 @A4_round_rr(i32 %a, i32 %b) { @@ -438,7 +438,7 @@ %z = call i32 @llvm.hexagon.A4.round.rr(i32 %a, i32 %b) ret i32 %z } -; CHECK: = round({{.*}}, {{.*}}) +; CHECK: =round({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.round.rr.sat(i32, i32) define i32 @A4_round_rr_sat(i32 %a, i32 %b) { @@ -445,7 +445,7 @@ %z = call i32 @llvm.hexagon.A4.round.rr.sat(i32 %a, i32 %b) ret i32 %z } -; CHECK: = round({{.*}}, {{.*}}):sat +; CHECK: =round({{.*}},{{.*}}):sat ; Subtract doublewords declare i64 @llvm.hexagon.A2.subp(i64, i64) @@ -453,7 +453,7 @@ %z = call i64 @llvm.hexagon.A2.subp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = sub({{.*}}, {{.*}}) +; CHECK: =sub({{.*}},{{.*}}) ; Subtract and accumulate declare i32 @llvm.hexagon.M2.subacc(i32, i32, i32) @@ -461,7 +461,7 @@ %z = call i32 @llvm.hexagon.M2.subacc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += sub({{.*}}, {{.*}}) +; CHECK: +=sub({{.*}},{{.*}}) ; Subtract halfwords declare i32 @llvm.hexagon.A2.subh.l16.ll(i32, i32) @@ -469,7 +469,7 @@ %z = call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.l) +; CHECK: =sub({{.*}}.l,{{.*}}.l) declare i32 @llvm.hexagon.A2.subh.l16.hl(i32, i32) define i32 @A2_subh_l16_hl(i32 %a, i32 %b) { @@ -476,7 +476,7 @@ %z = call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.h) +; CHECK: =sub({{.*}}.l,{{.*}}.h) declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32) define i32 @A2_subh_l16_sat.ll(i32 %a, i32 %b) { @@ -483,7 +483,7 @@ %z = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.l):sat +; CHECK: =sub({{.*}}.l,{{.*}}.l):sat declare i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32, i32) define i32 @A2_subh_l16_sat.hl(i32 %a, i32 %b) { @@ -490,7 +490,7 @@ %z = call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.h):sat +; CHECK: =sub({{.*}}.l,{{.*}}.h):sat declare i32 @llvm.hexagon.A2.subh.h16.ll(i32, i32) define i32 @A2_subh_h16_ll(i32 %a, i32 %b) { @@ -497,7 +497,7 @@ %z = call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.l):<<16 +; CHECK: =sub({{.*}}.l,{{.*}}.l):<<16 declare i32 @llvm.hexagon.A2.subh.h16.lh(i32, i32) define i32 @A2_subh_h16_lh(i32 %a, i32 %b) { @@ -504,7 +504,7 @@ %z = call i32 
@llvm.hexagon.A2.subh.h16.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.h):<<16 +; CHECK: =sub({{.*}}.l,{{.*}}.h):<<16 declare i32 @llvm.hexagon.A2.subh.h16.hl(i32, i32) define i32 @A2_subh_h16_hl(i32 %a, i32 %b) { @@ -511,7 +511,7 @@ %z = call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.h, {{.*}}.l):<<16 +; CHECK: =sub({{.*}}.h,{{.*}}.l):<<16 declare i32 @llvm.hexagon.A2.subh.h16.hh(i32, i32) define i32 @A2_subh_h16_hh(i32 %a, i32 %b) { @@ -518,7 +518,7 @@ %z = call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.h, {{.*}}.h):<<16 +; CHECK: =sub({{.*}}.h,{{.*}}.h):<<16 declare i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32, i32) define i32 @A2_subh_h16_sat_ll(i32 %a, i32 %b) { @@ -525,7 +525,7 @@ %z = call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.l):sat:<<16 +; CHECK: =sub({{.*}}.l,{{.*}}.l):sat:<<16 declare i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32, i32) define i32 @A2_subh_h16_sat_lh(i32 %a, i32 %b) { @@ -532,7 +532,7 @@ %z = call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.h):sat:<<16 +; CHECK: =sub({{.*}}.l,{{.*}}.h):sat:<<16 declare i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32, i32) define i32 @A2_subh_h16_sat_hl(i32 %a, i32 %b) { @@ -539,7 +539,7 @@ %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.h, {{.*}}.l):sat:<<16 +; CHECK: =sub({{.*}}.h,{{.*}}.l):sat:<<16 declare i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32, i32) define i32 @A2_subh_h16_sat_hh(i32 %a, i32 %b) { @@ -546,7 +546,7 @@ %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.h, {{.*}}.h):sat:<<16 +; CHECK: =sub({{.*}}.h,{{.*}}.h):sat:<<16 ; Sign extend word to doubleword declare i64 @llvm.hexagon.A2.sxtw(i32) @@ -554,7 +554,7 @@ %z = call i64 @llvm.hexagon.A2.sxtw(i32 %a) ret i64 %z } -; CHECK: = sxtw({{.*}}) +; CHECK: =sxtw({{.*}}) ; Vector absolute value halfwords declare i64 @llvm.hexagon.A2.vabsh(i64) @@ -562,7 +562,7 @@ %z = call i64 @llvm.hexagon.A2.vabsh(i64 %a) ret i64 %z } -; CHECK: = vabsh({{.*}}) +; CHECK: =vabsh({{.*}}) declare i64 @llvm.hexagon.A2.vabshsat(i64) define i64 @A2_vabshsat(i64 %a) { @@ -569,7 +569,7 @@ %z = call i64 @llvm.hexagon.A2.vabshsat(i64 %a) ret i64 %z } -; CHECK: = vabsh({{.*}}):sat +; CHECK: =vabsh({{.*}}):sat ; Vector absolute value words declare i64 @llvm.hexagon.A2.vabsw(i64) @@ -577,7 +577,7 @@ %z = call i64 @llvm.hexagon.A2.vabsw(i64 %a) ret i64 %z } -; CHECK: = vabsw({{.*}}) +; CHECK: =vabsw({{.*}}) declare i64 @llvm.hexagon.A2.vabswsat(i64) define i64 @A2_vabswsat(i64 %a) { @@ -584,7 +584,7 @@ %z = call i64 @llvm.hexagon.A2.vabswsat(i64 %a) ret i64 %z } -; CHECK: = vabsw({{.*}}):sat +; CHECK: =vabsw({{.*}}):sat ; Vector absolute difference halfwords declare i64 @llvm.hexagon.M2.vabsdiffh(i64, i64) @@ -592,7 +592,7 @@ %z = call i64 @llvm.hexagon.M2.vabsdiffh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vabsdiffh({{.*}}, {{.*}}) +; CHECK: =vabsdiffh({{.*}},{{.*}}) ; Vector absolute difference words declare i64 @llvm.hexagon.M2.vabsdiffw(i64, i64) @@ -600,7 +600,7 @@ %z = call i64 @llvm.hexagon.M2.vabsdiffw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vabsdiffw({{.*}}, {{.*}}) +; CHECK: =vabsdiffw({{.*}},{{.*}}) ; Vector add halfwords declare i64 @llvm.hexagon.A2.vaddh(i64, i64) @@ -608,7 +608,7 @@ %z = call i64 @llvm.hexagon.A2.vaddh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vaddh({{.*}}, {{.*}}) +; 
CHECK: =vaddh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vaddhs(i64, i64) define i64 @A2_vaddhs(i64 %a, i64 %b) { @@ -615,7 +615,7 @@ %z = call i64 @llvm.hexagon.A2.vaddhs(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vaddh({{.*}}, {{.*}}):sat +; CHECK: =vaddh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.A2.vadduhs(i64, i64) define i64 @A2_vadduhs(i64 %a, i64 %b) { @@ -622,7 +622,7 @@ %z = call i64 @llvm.hexagon.A2.vadduhs(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vadduh({{.*}}, {{.*}}):sat +; CHECK: =vadduh({{.*}},{{.*}}):sat ; Vector add halfwords with saturate and pack to unsigned bytes declare i32 @llvm.hexagon.A5.vaddhubs(i64, i64) @@ -630,7 +630,7 @@ %z = call i32 @llvm.hexagon.A5.vaddhubs(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vaddhub({{.*}}, {{.*}}):sat +; CHECK: =vaddhub({{.*}},{{.*}}):sat ; Vector reduce add unsigned bytes declare i64 @llvm.hexagon.A2.vraddub(i64, i64) @@ -638,7 +638,7 @@ %z = call i64 @llvm.hexagon.A2.vraddub(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vraddub({{.*}}, {{.*}}) +; CHECK: =vraddub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vraddub.acc(i64, i64, i64) define i64 @A2_vraddub_acc(i64 %a, i64 %b, i64 %c) { @@ -645,7 +645,7 @@ %z = call i64 @llvm.hexagon.A2.vraddub.acc(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vraddub({{.*}}, {{.*}}) +; CHECK: +=vraddub({{.*}},{{.*}}) ; Vector reduce add halfwords declare i32 @llvm.hexagon.M2.vradduh(i64, i64) @@ -653,7 +653,7 @@ %z = call i32 @llvm.hexagon.M2.vradduh(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vradduh({{.*}}, {{.*}}) +; CHECK: =vradduh({{.*}},{{.*}}) declare i32 @llvm.hexagon.M2.vraddh(i64, i64) define i32 @M2_vraddh(i64 %a, i64 %b) { @@ -660,7 +660,7 @@ %z = call i32 @llvm.hexagon.M2.vraddh(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vraddh({{.*}}, {{.*}}) +; CHECK: =vraddh({{.*}},{{.*}}) ; Vector add bytes declare i64 @llvm.hexagon.A2.vaddub(i64, i64) @@ -668,7 +668,7 @@ %z = call i64 @llvm.hexagon.A2.vaddub(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vaddub({{.*}}, {{.*}}) +; CHECK: =vaddub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vaddubs(i64, i64) define i64 @A2_vaddubs(i64 %a, i64 %b) { @@ -675,7 +675,7 @@ %z = call i64 @llvm.hexagon.A2.vaddubs(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vaddub({{.*}}, {{.*}}):sat +; CHECK: =vaddub({{.*}},{{.*}}):sat ; Vector add words declare i64 @llvm.hexagon.A2.vaddw(i64, i64) @@ -683,7 +683,7 @@ %z = call i64 @llvm.hexagon.A2.vaddw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vaddw({{.*}}, {{.*}}) +; CHECK: =vaddw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vaddws(i64, i64) define i64 @A2_vaddws(i64 %a, i64 %b) { @@ -690,7 +690,7 @@ %z = call i64 @llvm.hexagon.A2.vaddws(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vaddw({{.*}}, {{.*}}):sat +; CHECK: =vaddw({{.*}},{{.*}}):sat ; Vector average halfwords declare i64 @llvm.hexagon.A2.vavgh(i64, i64) @@ -698,7 +698,7 @@ %z = call i64 @llvm.hexagon.A2.vavgh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgh({{.*}}, {{.*}}) +; CHECK: =vavgh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vavghr(i64, i64) define i64 @A2_vavghr(i64 %a, i64 %b) { @@ -705,7 +705,7 @@ %z = call i64 @llvm.hexagon.A2.vavghr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgh({{.*}}, {{.*}}):rnd +; CHECK: =vavgh({{.*}},{{.*}}):rnd declare i64 @llvm.hexagon.A2.vavghcr(i64, i64) define i64 @A2_vavghcr(i64 %a, i64 %b) { @@ -712,7 +712,7 @@ %z = call i64 @llvm.hexagon.A2.vavghcr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgh({{.*}}, {{.*}}):crnd +; CHECK: =vavgh({{.*}},{{.*}}):crnd declare i64 @llvm.hexagon.A2.vavguh(i64, i64) define i64 @A2_vavguh(i64 %a, i64 %b) { @@ 
-719,7 +719,7 @@ %z = call i64 @llvm.hexagon.A2.vavguh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavguh({{.*}}, {{.*}}) +; CHECK: =vavguh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vavguhr(i64, i64) define i64 @A2_vavguhr(i64 %a, i64 %b) { @@ -726,7 +726,7 @@ %z = call i64 @llvm.hexagon.A2.vavguhr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavguh({{.*}}, {{.*}}):rnd +; CHECK: =vavguh({{.*}},{{.*}}):rnd declare i64 @llvm.hexagon.A2.vnavgh(i64, i64) define i64 @A2_vnavgh(i64 %a, i64 %b) { @@ -733,7 +733,7 @@ %z = call i64 @llvm.hexagon.A2.vnavgh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vnavgh({{.*}}, {{.*}}) +; CHECK: =vnavgh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vnavghr(i64, i64) define i64 @A2_vnavghr(i64 %a, i64 %b) { @@ -740,7 +740,7 @@ %z = call i64 @llvm.hexagon.A2.vnavghr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vnavgh({{.*}}, {{.*}}):rnd +; CHECK: =vnavgh({{.*}},{{.*}}):rnd declare i64 @llvm.hexagon.A2.vnavghcr(i64, i64) define i64 @A2_vnavghcr(i64 %a, i64 %b) { @@ -747,7 +747,7 @@ %z = call i64 @llvm.hexagon.A2.vnavghcr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vnavgh({{.*}}, {{.*}}):crnd +; CHECK: =vnavgh({{.*}},{{.*}}):crnd ; Vector average unsigned bytes declare i64 @llvm.hexagon.A2.vavgub(i64, i64) @@ -755,7 +755,7 @@ %z = call i64 @llvm.hexagon.A2.vavgub(i64 %a, i64 %b) ret i64 %z } -; CHECK: vavgub({{.*}}, {{.*}}) +; CHECK: vavgub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vavgubr(i64, i64) define i64 @A2_vavgubr(i64 %a, i64 %b) { @@ -762,7 +762,7 @@ %z = call i64 @llvm.hexagon.A2.vavgubr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgub({{.*}}, {{.*}}):rnd +; CHECK: =vavgub({{.*}},{{.*}}):rnd ; Vector average words declare i64 @llvm.hexagon.A2.vavgw(i64, i64) @@ -770,7 +770,7 @@ %z = call i64 @llvm.hexagon.A2.vavgw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgw({{.*}}, {{.*}}) +; CHECK: =vavgw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vavgwr(i64, i64) define i64 @A2_vavgwr(i64 %a, i64 %b) { @@ -777,7 +777,7 @@ %z = call i64 @llvm.hexagon.A2.vavgwr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgw({{.*}}, {{.*}}):rnd +; CHECK: =vavgw({{.*}},{{.*}}):rnd declare i64 @llvm.hexagon.A2.vavgwcr(i64, i64) define i64 @A2_vavgwcr(i64 %a, i64 %b) { @@ -784,7 +784,7 @@ %z = call i64 @llvm.hexagon.A2.vavgwcr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgw({{.*}}, {{.*}}):crnd +; CHECK: =vavgw({{.*}},{{.*}}):crnd declare i64 @llvm.hexagon.A2.vavguw(i64, i64) define i64 @A2_vavguw(i64 %a, i64 %b) { @@ -791,7 +791,7 @@ %z = call i64 @llvm.hexagon.A2.vavguw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavguw({{.*}}, {{.*}}) +; CHECK: =vavguw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vavguwr(i64, i64) define i64 @A2_vavguwr(i64 %a, i64 %b) { @@ -798,7 +798,7 @@ %z = call i64 @llvm.hexagon.A2.vavguwr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavguw({{.*}}, {{.*}}):rnd +; CHECK: =vavguw({{.*}},{{.*}}):rnd declare i64 @llvm.hexagon.A2.vnavgw(i64, i64) define i64 @A2_vnavgw(i64 %a, i64 %b) { @@ -805,7 +805,7 @@ %z = call i64 @llvm.hexagon.A2.vnavgw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vnavgw({{.*}}, {{.*}}) +; CHECK: =vnavgw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vnavgwr(i64, i64) define i64 @A2_vnavgwr(i64 %a, i64 %b) { @@ -812,7 +812,7 @@ %z = call i64 @llvm.hexagon.A2.vnavgwr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vnavgw({{.*}}, {{.*}}):rnd +; CHECK: =vnavgw({{.*}},{{.*}}):rnd declare i64 @llvm.hexagon.A2.vnavgwcr(i64, i64) define i64 @A2_vnavgwcr(i64 %a, i64 %b) { @@ -819,7 +819,7 @@ %z = call i64 @llvm.hexagon.A2.vnavgwcr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vnavgw({{.*}}, 
{{.*}}):crnd +; CHECK: =vnavgw({{.*}},{{.*}}):crnd ; Vector conditional negate declare i64 @llvm.hexagon.S2.vcnegh(i64, i32) @@ -827,7 +827,7 @@ %z = call i64 @llvm.hexagon.S2.vcnegh(i64 %a, i32 %b) ret i64 %z } -; CHECK: = vcnegh({{.*}}, {{.*}}) +; CHECK: =vcnegh({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.vrcnegh(i64, i64, i32) define i64 @S2_vrcnegh(i64 %a, i64 %b, i32 %c) { @@ -834,7 +834,7 @@ %z = call i64 @llvm.hexagon.S2.vrcnegh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: += vrcnegh({{.*}}, {{.*}}) +; CHECK: +=vrcnegh({{.*}},{{.*}}) ; Vector maximum bytes declare i64 @llvm.hexagon.A2.vmaxub(i64, i64) @@ -842,7 +842,7 @@ %z = call i64 @llvm.hexagon.A2.vmaxub(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmaxub({{.*}}, {{.*}}) +; CHECK: =vmaxub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vmaxb(i64, i64) define i64 @A2_vmaxb(i64 %a, i64 %b) { @@ -849,7 +849,7 @@ %z = call i64 @llvm.hexagon.A2.vmaxb(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmaxb({{.*}}, {{.*}}) +; CHECK: =vmaxb({{.*}},{{.*}}) ; Vector maximum halfwords declare i64 @llvm.hexagon.A2.vmaxh(i64, i64) @@ -857,7 +857,7 @@ %z = call i64 @llvm.hexagon.A2.vmaxh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmaxh({{.*}}, {{.*}}) +; CHECK: =vmaxh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vmaxuh(i64, i64) define i64 @A2_vmaxuh(i64 %a, i64 %b) { @@ -864,7 +864,7 @@ %z = call i64 @llvm.hexagon.A2.vmaxuh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmaxuh({{.*}}, {{.*}}) +; CHECK: =vmaxuh({{.*}},{{.*}}) ; Vector reduce maximum halfwords declare i64 @llvm.hexagon.A4.vrmaxh(i64, i64, i32) @@ -872,7 +872,7 @@ %z = call i64 @llvm.hexagon.A4.vrmaxh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrmaxh({{.*}}, {{.*}}) +; CHECK: =vrmaxh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A4.vrmaxuh(i64, i64, i32) define i64 @A4_vrmaxuh(i64 %a, i64 %b, i32 %c) { @@ -879,7 +879,7 @@ %z = call i64 @llvm.hexagon.A4.vrmaxuh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrmaxuh({{.*}}, {{.*}}) +; CHECK: =vrmaxuh({{.*}},{{.*}}) ; Vector reduce maximum words declare i64 @llvm.hexagon.A4.vrmaxw(i64, i64, i32) @@ -887,7 +887,7 @@ %z = call i64 @llvm.hexagon.A4.vrmaxw(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrmaxw({{.*}}, {{.*}}) +; CHECK: =vrmaxw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A4.vrmaxuw(i64, i64, i32) define i64 @A4_vrmaxuw(i64 %a, i64 %b, i32 %c) { @@ -894,7 +894,7 @@ %z = call i64 @llvm.hexagon.A4.vrmaxuw(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: vrmaxuw({{.*}}, {{.*}}) +; CHECK: vrmaxuw({{.*}},{{.*}}) ; Vector minimum bytes declare i64 @llvm.hexagon.A2.vminub(i64, i64) @@ -902,7 +902,7 @@ %z = call i64 @llvm.hexagon.A2.vminub(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vminub({{.*}}, {{.*}}) +; CHECK: =vminub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vminb(i64, i64) define i64 @A2_vminb(i64 %a, i64 %b) { @@ -909,7 +909,7 @@ %z = call i64 @llvm.hexagon.A2.vminb(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vminb({{.*}}, {{.*}}) +; CHECK: =vminb({{.*}},{{.*}}) ; Vector minimum halfwords declare i64 @llvm.hexagon.A2.vminh(i64, i64) @@ -917,7 +917,7 @@ %z = call i64 @llvm.hexagon.A2.vminh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vminh({{.*}}, {{.*}}) +; CHECK: =vminh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vminuh(i64, i64) define i64 @A2_vminuh(i64 %a, i64 %b) { @@ -924,7 +924,7 @@ %z = call i64 @llvm.hexagon.A2.vminuh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vminuh({{.*}}, {{.*}}) +; CHECK: =vminuh({{.*}},{{.*}}) ; Vector reduce minimum halfwords declare i64 @llvm.hexagon.A4.vrminh(i64, i64, i32) @@ -932,7 +932,7 @@ %z = call i64 
@llvm.hexagon.A4.vrminh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrminh({{.*}}, {{.*}}) +; CHECK: =vrminh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A4.vrminuh(i64, i64, i32) define i64 @A4_vrminuh(i64 %a, i64 %b, i32 %c) { @@ -939,7 +939,7 @@ %z = call i64 @llvm.hexagon.A4.vrminuh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrminuh({{.*}}, {{.*}}) +; CHECK: =vrminuh({{.*}},{{.*}}) ; Vector reduce minimum words declare i64 @llvm.hexagon.A4.vrminw(i64, i64, i32) @@ -947,7 +947,7 @@ %z = call i64 @llvm.hexagon.A4.vrminw(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrminw({{.*}}, {{.*}}) +; CHECK: =vrminw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A4.vrminuw(i64, i64, i32) define i64 @A4_vrminuw(i64 %a, i64 %b, i32 %c) { @@ -954,7 +954,7 @@ %z = call i64 @llvm.hexagon.A4.vrminuw(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrminuw({{.*}}, {{.*}}) +; CHECK: =vrminuw({{.*}},{{.*}}) ; Vector sum of absolute differences unsigned bytes declare i64 @llvm.hexagon.A2.vrsadub(i64, i64) @@ -962,7 +962,7 @@ %z = call i64 @llvm.hexagon.A2.vrsadub(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrsadub({{.*}}, {{.*}}) +; CHECK: =vrsadub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vrsadub.acc(i64, i64, i64) define i64 @A2_vrsadub_acc(i64 %a, i64 %b, i64 %c) { @@ -969,7 +969,7 @@ %z = call i64 @llvm.hexagon.A2.vrsadub.acc(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrsadub({{.*}}, {{.*}}) +; CHECK: +=vrsadub({{.*}},{{.*}}) ; Vector subtract halfwords declare i64 @llvm.hexagon.A2.vsubh(i64, i64) @@ -977,7 +977,7 @@ %z = call i64 @llvm.hexagon.A2.vsubh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubh({{.*}}, {{.*}}) +; CHECK: =vsubh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vsubhs(i64, i64) define i64 @A2_vsubhs(i64 %a, i64 %b) { @@ -984,7 +984,7 @@ %z = call i64 @llvm.hexagon.A2.vsubhs(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubh({{.*}}, {{.*}}):sat +; CHECK: =vsubh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.A2.vsubuhs(i64, i64) define i64 @A2_vsubuhs(i64 %a, i64 %b) { @@ -991,7 +991,7 @@ %z = call i64 @llvm.hexagon.A2.vsubuhs(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubuh({{.*}}, {{.*}}):sat +; CHECK: =vsubuh({{.*}},{{.*}}):sat ; Vector subtract bytes declare i64 @llvm.hexagon.A2.vsubub(i64, i64) @@ -999,7 +999,7 @@ %z = call i64 @llvm.hexagon.A2.vsubub(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubub({{.*}}, {{.*}}) +; CHECK: =vsubub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vsububs(i64, i64) define i64 @A2_vsububs(i64 %a, i64 %b) { @@ -1006,7 +1006,7 @@ %z = call i64 @llvm.hexagon.A2.vsububs(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubub({{.*}}, {{.*}}):sat +; CHECK: =vsubub({{.*}},{{.*}}):sat ; Vector subtract words declare i64 @llvm.hexagon.A2.vsubw(i64, i64) @@ -1014,7 +1014,7 @@ %z = call i64 @llvm.hexagon.A2.vsubw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubw({{.*}}, {{.*}}) +; CHECK: =vsubw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vsubws(i64, i64) define i64 @A2_vsubws(i64 %a, i64 %b) { @@ -1021,4 +1021,4 @@ %z = call i64 @llvm.hexagon.A2.vsubws(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubw({{.*}}, {{.*}}):sat +; CHECK: =vsubw({{.*}},{{.*}}):sat Index: test/CodeGen/Hexagon/intrinsics/xtype_bit.ll =================================================================== --- test/CodeGen/Hexagon/intrinsics/xtype_bit.ll +++ test/CodeGen/Hexagon/intrinsics/xtype_bit.ll @@ -10,7 +10,7 @@ %z = call i32 @llvm.hexagon.S2.clbp(i64 %a) ret i32 %z } -; CHECK: = clb({{.*}}) +; CHECK: =clb({{.*}}) declare i32 @llvm.hexagon.S2.cl0p(i64) define i32 @S2_cl0p(i64 %a) { @@ -17,7 +17,7 @@ %z = 
call i32 @llvm.hexagon.S2.cl0p(i64 %a) ret i32 %z } -; CHECK: = cl0({{.*}}) +; CHECK: =cl0({{.*}}) declare i32 @llvm.hexagon.S2.cl1p(i64) define i32 @S2_cl1p(i64 %a) { @@ -24,7 +24,7 @@ %z = call i32 @llvm.hexagon.S2.cl1p(i64 %a) ret i32 %z } -; CHECK: = cl1({{.*}}) +; CHECK: =cl1({{.*}}) declare i32 @llvm.hexagon.S4.clbpnorm(i64) define i32 @S4_clbpnorm(i64 %a) { @@ -31,7 +31,7 @@ %z = call i32 @llvm.hexagon.S4.clbpnorm(i64 %a) ret i32 %z } -; CHECK: = normamt({{.*}}) +; CHECK: =normamt({{.*}}) declare i32 @llvm.hexagon.S4.clbpaddi(i64, i32) define i32 @S4_clbpaddi(i64 %a) { @@ -38,7 +38,7 @@ %z = call i32 @llvm.hexagon.S4.clbpaddi(i64 %a, i32 0) ret i32 %z } -; CHECK: = add(clb({{.*}}), #0) +; CHECK: =add(clb({{.*}}),#0) declare i32 @llvm.hexagon.S4.clbaddi(i32, i32) define i32 @S4_clbaddi(i32 %a) { @@ -45,7 +45,7 @@ %z = call i32 @llvm.hexagon.S4.clbaddi(i32 %a, i32 0) ret i32 %z } -; CHECK: = add(clb({{.*}}), #0) +; CHECK: =add(clb({{.*}}),#0) declare i32 @llvm.hexagon.S2.cl0(i32) define i32 @S2_cl0(i32 %a) { @@ -52,7 +52,7 @@ %z = call i32 @llvm.hexagon.S2.cl0(i32 %a) ret i32 %z } -; CHECK: = cl0({{.*}}) +; CHECK: =cl0({{.*}}) declare i32 @llvm.hexagon.S2.cl1(i32) define i32 @S2_cl1(i32 %a) { @@ -59,7 +59,7 @@ %z = call i32 @llvm.hexagon.S2.cl1(i32 %a) ret i32 %z } -; CHECK: = cl1({{.*}}) +; CHECK: =cl1({{.*}}) declare i32 @llvm.hexagon.S2.clbnorm(i32) define i32 @S4_clbnorm(i32 %a) { @@ -66,7 +66,7 @@ %z = call i32 @llvm.hexagon.S2.clbnorm(i32 %a) ret i32 %z } -; CHECK: = normamt({{.*}}) +; CHECK: =normamt({{.*}}) ; Count population declare i32 @llvm.hexagon.S5.popcountp(i64) @@ -74,7 +74,7 @@ %z = call i32 @llvm.hexagon.S5.popcountp(i64 %a) ret i32 %z } -; CHECK: = popcount({{.*}}) +; CHECK: =popcount({{.*}}) ; Count trailing declare i32 @llvm.hexagon.S2.ct0p(i64) @@ -82,7 +82,7 @@ %z = call i32 @llvm.hexagon.S2.ct0p(i64 %a) ret i32 %z } -; CHECK: = ct0({{.*}}) +; CHECK: =ct0({{.*}}) declare i32 @llvm.hexagon.S2.ct1p(i64) define i32 @S2_ct1p(i64 %a) { @@ -89,7 +89,7 @@ %z = call i32 @llvm.hexagon.S2.ct1p(i64 %a) ret i32 %z } -; CHECK: = ct1({{.*}}) +; CHECK: =ct1({{.*}}) declare i32 @llvm.hexagon.S2.ct0(i32) define i32 @S2_ct0(i32 %a) { @@ -96,7 +96,7 @@ %z = call i32 @llvm.hexagon.S2.ct0(i32 %a) ret i32 %z } -; CHECK: = ct0({{.*}}) +; CHECK: =ct0({{.*}}) declare i32 @llvm.hexagon.S2.ct1(i32) define i32 @S2_ct1(i32 %a) { @@ -103,7 +103,7 @@ %z = call i32 @llvm.hexagon.S2.ct1(i32 %a) ret i32 %z } -; CHECK: = ct1({{.*}}) +; CHECK: =ct1({{.*}}) ; Extract bitfield declare i64 @llvm.hexagon.S2.extractup(i64, i32, i32) @@ -111,7 +111,7 @@ %z = call i64 @llvm.hexagon.S2.extractup(i64 %a, i32 0, i32 0) ret i64 %z } -; CHECK: = extractu({{.*}}, #0, #0) +; CHECK: =extractu({{.*}},#0,#0) declare i64 @llvm.hexagon.S4.extractp(i64, i32, i32) define i64 @S2_extractp(i64 %a) { @@ -118,7 +118,7 @@ %z = call i64 @llvm.hexagon.S4.extractp(i64 %a, i32 0, i32 0) ret i64 %z } -; CHECK: = extract({{.*}}, #0, #0) +; CHECK: =extract({{.*}},#0,#0) declare i32 @llvm.hexagon.S2.extractu(i32, i32, i32) define i32 @S2_extractu(i32 %a) { @@ -125,7 +125,7 @@ %z = call i32 @llvm.hexagon.S2.extractu(i32 %a, i32 0, i32 0) ret i32 %z } -; CHECK: = extractu({{.*}}, #0, #0) +; CHECK: =extractu({{.*}},#0,#0) declare i32 @llvm.hexagon.S4.extract(i32, i32, i32) define i32 @S2_extract(i32 %a) { @@ -132,7 +132,7 @@ %z = call i32 @llvm.hexagon.S4.extract(i32 %a, i32 0, i32 0) ret i32 %z } -; CHECK: = extract({{.*}}, #0, #0) +; CHECK: =extract({{.*}},#0,#0) declare i64 @llvm.hexagon.S2.extractup.rp(i64, i64) define i64 
@S2_extractup_rp(i64 %a, i64 %b) { @@ -139,7 +139,7 @@ %z = call i64 @llvm.hexagon.S2.extractup.rp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = extractu({{.*}}, {{.*}}) +; CHECK: =extractu({{.*}},{{.*}}) declare i64 @llvm.hexagon.S4.extractp.rp(i64, i64) define i64 @S4_extractp_rp(i64 %a, i64 %b) { @@ -146,7 +146,7 @@ %z = call i64 @llvm.hexagon.S4.extractp.rp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = extract({{.*}}, {{.*}}) +; CHECK: =extract({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.extractu.rp(i32, i64) define i32 @S2_extractu_rp(i32 %a, i64 %b) { @@ -153,7 +153,7 @@ %z = call i32 @llvm.hexagon.S2.extractu.rp(i32 %a, i64 %b) ret i32 %z } -; CHECK: = extractu({{.*}}, {{.*}}) +; CHECK: =extractu({{.*}},{{.*}}) declare i32 @llvm.hexagon.S4.extract.rp(i32, i64) define i32 @S4_extract_rp(i32 %a, i64 %b) { @@ -160,7 +160,7 @@ %z = call i32 @llvm.hexagon.S4.extract.rp(i32 %a, i64 %b) ret i32 %z } -; CHECK: = extract({{.*}}, {{.*}}) +; CHECK: =extract({{.*}},{{.*}}) ; Insert bitfield declare i64 @llvm.hexagon.S2.insertp(i64, i64, i32, i32) @@ -168,7 +168,7 @@ %z = call i64 @llvm.hexagon.S2.insertp(i64 %a, i64 %b, i32 0, i32 0) ret i64 %z } -; CHECK: = insert({{.*}}, #0, #0) +; CHECK: =insert({{.*}},#0,#0) declare i32 @llvm.hexagon.S2.insert(i32, i32, i32, i32) define i32 @S2_insert(i32 %a, i32 %b) { @@ -175,7 +175,7 @@ %z = call i32 @llvm.hexagon.S2.insert(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: = insert({{.*}}, #0, #0) +; CHECK: =insert({{.*}},#0,#0) declare i32 @llvm.hexagon.S2.insert.rp(i32, i32, i64) define i32 @S2_insert_rp(i32 %a, i32 %b, i64 %c) { @@ -182,7 +182,7 @@ %z = call i32 @llvm.hexagon.S2.insert.rp(i32 %a, i32 %b, i64 %c) ret i32 %z } -; CHECK: = insert({{.*}}, {{.*}}) +; CHECK: =insert({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.insertp.rp(i64, i64, i64) define i64 @S2_insertp_rp(i64 %a, i64 %b, i64 %c) { @@ -189,7 +189,7 @@ %z = call i64 @llvm.hexagon.S2.insertp.rp(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: = insert({{.*}}, r5:4) +; CHECK: =insert({{.*}},r5:4) ; Interleave/deinterleave declare i64 @llvm.hexagon.S2.deinterleave(i64) @@ -197,7 +197,7 @@ %z = call i64 @llvm.hexagon.S2.deinterleave(i64 %a) ret i64 %z } -; CHECK: = deinterleave({{.*}}) +; CHECK: =deinterleave({{.*}}) declare i64 @llvm.hexagon.S2.interleave(i64) define i64 @S2_interleave(i64 %a) { @@ -204,7 +204,7 @@ %z = call i64 @llvm.hexagon.S2.interleave(i64 %a) ret i64 %z } -; CHECK: = interleave({{.*}}) +; CHECK: =interleave({{.*}}) ; Linear feedback-shift operation declare i64 @llvm.hexagon.S2.lfsp(i64, i64) @@ -212,7 +212,7 @@ %z = call i64 @llvm.hexagon.S2.lfsp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = lfs({{.*}}, {{.*}}) +; CHECK: =lfs({{.*}},{{.*}}) ; Masked parity declare i32 @llvm.hexagon.S2.parityp(i64, i64) @@ -220,7 +220,7 @@ %z = call i32 @llvm.hexagon.S2.parityp(i64 %a, i64 %b) ret i32 %z } -; CHECK: = parity({{.*}}, {{.*}}) +; CHECK: =parity({{.*}},{{.*}}) declare i32 @llvm.hexagon.S4.parity(i32, i32) define i32 @S4_parity(i32 %a, i32 %b) { @@ -227,7 +227,7 @@ %z = call i32 @llvm.hexagon.S4.parity(i32 %a, i32 %b) ret i32 %z } -; CHECK: = parity({{.*}}, {{.*}}) +; CHECK: =parity({{.*}},{{.*}}) ; Bit reverse declare i64 @llvm.hexagon.S2.brevp(i64) @@ -235,7 +235,7 @@ %z = call i64 @llvm.hexagon.S2.brevp(i64 %a) ret i64 %z } -; CHECK: = brev({{.*}}) +; CHECK: =brev({{.*}}) declare i32 @llvm.hexagon.S2.brev(i32) define i32 @S2_brev(i32 %a) { @@ -242,7 +242,7 @@ %z = call i32 @llvm.hexagon.S2.brev(i32 %a) ret i32 %z } -; CHECK: = brev({{.*}}) +; CHECK: =brev({{.*}}) ; Set/clear/toggle bit 
declare i32 @llvm.hexagon.S2.setbit.i(i32, i32) @@ -250,7 +250,7 @@ %z = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: = setbit({{.*}}, #0) +; CHECK: =setbit({{.*}},#0) declare i32 @llvm.hexagon.S2.clrbit.i(i32, i32) define i32 @S2_clrbit_i(i32 %a) { @@ -257,7 +257,7 @@ %z = call i32 @llvm.hexagon.S2.clrbit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: = clrbit({{.*}}, #0) +; CHECK: =clrbit({{.*}},#0) declare i32 @llvm.hexagon.S2.togglebit.i(i32, i32) define i32 @S2_togglebit_i(i32 %a) { @@ -264,7 +264,7 @@ %z = call i32 @llvm.hexagon.S2.togglebit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: = togglebit({{.*}}, #0) +; CHECK: =togglebit({{.*}},#0) declare i32 @llvm.hexagon.S2.setbit.r(i32, i32) define i32 @S2_setbit_r(i32 %a, i32 %b) { @@ -271,7 +271,7 @@ %z = call i32 @llvm.hexagon.S2.setbit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = setbit({{.*}}, {{.*}}) +; CHECK: =setbit({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.clrbit.r(i32, i32) define i32 @S2_clrbit_r(i32 %a, i32 %b) { @@ -278,7 +278,7 @@ %z = call i32 @llvm.hexagon.S2.clrbit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = clrbit({{.*}}, {{.*}}) +; CHECK: =clrbit({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.togglebit.r(i32, i32) define i32 @S2_togglebit_r(i32 %a, i32 %b) { @@ -285,7 +285,7 @@ %z = call i32 @llvm.hexagon.S2.togglebit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = togglebit({{.*}}, {{.*}}) +; CHECK: =togglebit({{.*}},{{.*}}) ; Split bitfield declare i64 @llvm.hexagon.A4.bitspliti(i32, i32) @@ -293,7 +293,7 @@ %z = call i64 @llvm.hexagon.A4.bitspliti(i32 %a, i32 0) ret i64 %z } -; CHECK: = bitsplit({{.*}}, #0) +; CHECK: =bitsplit({{.*}},#0) declare i64 @llvm.hexagon.A4.bitsplit(i32, i32) define i64 @A4_bitsplit(i32 %a, i32 %b) { @@ -300,7 +300,7 @@ %z = call i64 @llvm.hexagon.A4.bitsplit(i32 %a, i32 %b) ret i64 %z } -; CHECK: = bitsplit({{.*}}, {{.*}}) +; CHECK: =bitsplit({{.*}},{{.*}}) ; Table index declare i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32, i32, i32, i32) @@ -308,7 +308,7 @@ %z = call i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: = tableidxb({{.*}}, #0, #0) +; CHECK: =tableidxb({{.*}},#0,#0) declare i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32, i32, i32, i32) define i32 @S2_tableidxh_goodsyntax(i32 %a, i32 %b) { @@ -315,7 +315,7 @@ %z = call i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: = tableidxh({{.*}}, #0, #-1) +; CHECK: =tableidxh({{.*}},#0,#-1) declare i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32, i32, i32, i32) define i32 @S2_tableidxw_goodsyntax(i32 %a, i32 %b) { @@ -322,7 +322,7 @@ %z = call i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: = tableidxw({{.*}}, #0, #-2) +; CHECK: =tableidxw({{.*}},#0,#-2) declare i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32, i32, i32, i32) define i32 @S2_tableidxd_goodsyntax(i32 %a, i32 %b) { @@ -329,4 +329,4 @@ %z = call i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: = tableidxd({{.*}}, #0, #-3) +; CHECK: =tableidxd({{.*}},#0,#-3) Index: test/CodeGen/Hexagon/intrinsics/xtype_complex.ll =================================================================== --- test/CodeGen/Hexagon/intrinsics/xtype_complex.ll +++ test/CodeGen/Hexagon/intrinsics/xtype_complex.ll @@ -10,7 +10,7 @@ %z = call i64 @llvm.hexagon.S4.vxaddsubh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vxaddsubh({{.*}}, {{.*}}):sat +; CHECK: =vxaddsubh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.S4.vxsubaddh(i64, 
i64) define i64 @S4_vxsubaddh(i64 %a, i64 %b) { @@ -17,7 +17,7 @@ %z = call i64 @llvm.hexagon.S4.vxsubaddh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vxsubaddh({{.*}}, {{.*}}):sat +; CHECK: =vxsubaddh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64) define i64 @S4_vxaddsubhr(i64 %a, i64 %b) { @@ -24,7 +24,7 @@ %z = call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vxaddsubh({{.*}}, {{.*}}):rnd:>>1:sat +; CHECK: =vxaddsubh({{.*}},{{.*}}):rnd:>>1:sat declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64) define i64 @S4_vxsubaddhr(i64 %a, i64 %b) { @@ -31,7 +31,7 @@ %z = call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vxsubaddh({{.*}}, {{.*}}):rnd:>>1:sat +; CHECK: =vxsubaddh({{.*}},{{.*}}):rnd:>>1:sat ; Complex add/sub words declare i64 @llvm.hexagon.S4.vxaddsubw(i64, i64) @@ -39,7 +39,7 @@ %z = call i64 @llvm.hexagon.S4.vxaddsubw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vxaddsubw({{.*}}, {{.*}}):sat +; CHECK: =vxaddsubw({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.S4.vxsubaddw(i64, i64) define i64 @S4_vxsubaddw(i64 %a, i64 %b) { @@ -46,7 +46,7 @@ %z = call i64 @llvm.hexagon.S4.vxsubaddw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vxsubaddw({{.*}}, {{.*}}):sat +; CHECK: =vxsubaddw({{.*}},{{.*}}):sat ; Complex multiply declare i64 @llvm.hexagon.M2.cmpys.s0(i32, i32) @@ -54,7 +54,7 @@ %z = call i64 @llvm.hexagon.M2.cmpys.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = cmpy({{.*}}, {{.*}}):sat +; CHECK: =cmpy({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.cmpys.s1(i32, i32) define i64 @M2_cmpys_s1(i32 %a, i32 %b) { @@ -61,7 +61,7 @@ %z = call i64 @llvm.hexagon.M2.cmpys.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = cmpy({{.*}}, {{.*}}):<<1:sat +; CHECK: =cmpy({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.cmpysc.s0(i32, i32) define i64 @M2_cmpysc_s0(i32 %a, i32 %b) { @@ -68,7 +68,7 @@ %z = call i64 @llvm.hexagon.M2.cmpysc.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = cmpy({{.*}}, {{.*}}*):sat +; CHECK: =cmpy({{.*}},{{.*}}*):sat declare i64 @llvm.hexagon.M2.cmpysc.s1(i32, i32) define i64 @M2_cmpysc_s1(i32 %a, i32 %b) { @@ -75,7 +75,7 @@ %z = call i64 @llvm.hexagon.M2.cmpysc.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = cmpy({{.*}}, {{.*}}*):<<1:sat +; CHECK: =cmpy({{.*}},{{.*}}*):<<1:sat declare i64 @llvm.hexagon.M2.cmacs.s0(i64, i32, i32) define i64 @M2_cmacs_s0(i64 %a, i32 %b, i32 %c) { @@ -82,7 +82,7 @@ %z = call i64 @llvm.hexagon.M2.cmacs.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += cmpy({{.*}}, {{.*}}):sat +; CHECK: +=cmpy({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.cmacs.s1(i64, i32, i32) define i64 @M2_cmacs_s1(i64 %a, i32 %b, i32 %c) { @@ -89,7 +89,7 @@ %z = call i64 @llvm.hexagon.M2.cmacs.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += cmpy({{.*}}, {{.*}}):<<1:sat +; CHECK: +=cmpy({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.cnacs.s0(i64, i32, i32) define i64 @M2_cnacs_s0(i64 %a, i32 %b, i32 %c) { @@ -96,7 +96,7 @@ %z = call i64 @llvm.hexagon.M2.cnacs.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= cmpy({{.*}}, {{.*}}):sat +; CHECK: -=cmpy({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.cnacs.s1(i64, i32, i32) define i64 @M2_cnacs_s1(i64 %a, i32 %b, i32 %c) { @@ -103,7 +103,7 @@ %z = call i64 @llvm.hexagon.M2.cnacs.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= cmpy({{.*}}, {{.*}}):<<1:sat +; CHECK: -=cmpy({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.cmacsc.s0(i64, i32, i32) define i64 @M2_cmacsc_s0(i64 %a, i32 %b, i32 %c) { @@ -110,7 +110,7 @@ %z = call i64 
@llvm.hexagon.M2.cmacsc.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += cmpy({{.*}}, {{.*}}*):sat +; CHECK: +=cmpy({{.*}},{{.*}}*):sat declare i64 @llvm.hexagon.M2.cmacsc.s1(i64, i32, i32) define i64 @M2_cmacsc_s1(i64 %a, i32 %b, i32 %c) { @@ -117,7 +117,7 @@ %z = call i64 @llvm.hexagon.M2.cmacsc.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += cmpy({{.*}}, {{.*}}*):<<1:sat +; CHECK: +=cmpy({{.*}},{{.*}}*):<<1:sat declare i64 @llvm.hexagon.M2.cnacsc.s0(i64, i32, i32) define i64 @M2_cnacsc_s0(i64 %a, i32 %b, i32 %c) { @@ -124,7 +124,7 @@ %z = call i64 @llvm.hexagon.M2.cnacsc.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= cmpy({{.*}}, {{.*}}*):sat +; CHECK: -=cmpy({{.*}},{{.*}}*):sat declare i64 @llvm.hexagon.M2.cnacsc.s1(i64, i32, i32) define i64 @M2_cnacsc_s1(i64 %a, i32 %b, i32 %c) { @@ -131,7 +131,7 @@ %z = call i64 @llvm.hexagon.M2.cnacsc.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= cmpy({{.*}}, {{.*}}*):<<1:sat +; CHECK: -=cmpy({{.*}},{{.*}}*):<<1:sat ; Complex multiply real or imaginary declare i64 @llvm.hexagon.M2.cmpyi.s0(i32, i32) @@ -139,7 +139,7 @@ %z = call i64 @llvm.hexagon.M2.cmpyi.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = cmpyi({{.*}}, {{.*}}) +; CHECK: =cmpyi({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.cmpyr.s0(i32, i32) define i64 @M2_cmpyr_s0(i32 %a, i32 %b) { @@ -146,7 +146,7 @@ %z = call i64 @llvm.hexagon.M2.cmpyr.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = cmpyr({{.*}}, {{.*}}) +; CHECK: =cmpyr({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.cmaci.s0(i64, i32, i32) define i64 @M2_cmaci_s0(i64 %a, i32 %b, i32 %c) { @@ -153,7 +153,7 @@ %z = call i64 @llvm.hexagon.M2.cmaci.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += cmpyi({{.*}}, {{.*}}) +; CHECK: +=cmpyi({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.cmacr.s0(i64, i32, i32) define i64 @M2_cmacr_s0(i64 %a, i32 %b, i32 %c) { @@ -160,7 +160,7 @@ %z = call i64 @llvm.hexagon.M2.cmacr.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += cmpyr({{.*}}, {{.*}}) +; CHECK: +=cmpyr({{.*}},{{.*}}) ; Complex multiply with round and pack declare i32 @llvm.hexagon.M2.cmpyrs.s0(i32, i32) @@ -168,7 +168,7 @@ %z = call i32 @llvm.hexagon.M2.cmpyrs.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpy({{.*}}, {{.*}}):rnd:sat +; CHECK: =cmpy({{.*}},{{.*}}):rnd:sat declare i32 @llvm.hexagon.M2.cmpyrs.s1(i32, i32) define i32 @M2_cmpyrs_s1(i32 %a, i32 %b) { @@ -175,7 +175,7 @@ %z = call i32 @llvm.hexagon.M2.cmpyrs.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpy({{.*}}, {{.*}}):<<1:rnd:sat +; CHECK: =cmpy({{.*}},{{.*}}):<<1:rnd:sat declare i32 @llvm.hexagon.M2.cmpyrsc.s0(i32, i32) define i32 @M2_cmpyrsc_s0(i32 %a, i32 %b) { @@ -182,7 +182,7 @@ %z = call i32 @llvm.hexagon.M2.cmpyrsc.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpy({{.*}}, {{.*}}*):rnd:sat +; CHECK: =cmpy({{.*}},{{.*}}*):rnd:sat declare i32 @llvm.hexagon.M2.cmpyrsc.s1(i32, i32) define i32 @M2_cmpyrsc_s1(i32 %a, i32 %b) { @@ -189,7 +189,7 @@ %z = call i32 @llvm.hexagon.M2.cmpyrsc.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpy({{.*}}, {{.*}}*):<<1:rnd:sat +; CHECK: =cmpy({{.*}},{{.*}}*):<<1:rnd:sat ; Complex multiply 32x16 declare i32 @llvm.hexagon.M4.cmpyi.wh(i64, i32) @@ -197,7 +197,7 @@ %z = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 %a, i32 %b) ret i32 %z } -; CHECK: = cmpyiwh({{.*}}, {{.*}}):<<1:rnd:sat +; CHECK: =cmpyiwh({{.*}},{{.*}}):<<1:rnd:sat declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32) define i32 @M4_cmpyi_whc(i64 %a, i32 %b) { @@ -204,7 +204,7 @@ %z = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %a, i32 %b) ret i32 %z } -; CHECK: = cmpyiwh({{.*}}, 
{{.*}}*):<<1:rnd:sat +; CHECK: =cmpyiwh({{.*}},{{.*}}*):<<1:rnd:sat declare i32 @llvm.hexagon.M4.cmpyr.wh(i64, i32) define i32 @M4_cmpyr_wh(i64 %a, i32 %b) { @@ -211,7 +211,7 @@ %z = call i32 @llvm.hexagon.M4.cmpyr.wh(i64 %a, i32 %b) ret i32 %z } -; CHECK: = cmpyrwh({{.*}}, {{.*}}):<<1:rnd:sat +; CHECK: =cmpyrwh({{.*}},{{.*}}):<<1:rnd:sat declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32) define i32 @M4_cmpyr_whc(i64 %a, i32 %b) { @@ -218,7 +218,7 @@ %z = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %a, i32 %b) ret i32 %z } -; CHECK: = cmpyrwh({{.*}}, {{.*}}*):<<1:rnd:sat +; CHECK: =cmpyrwh({{.*}},{{.*}}*):<<1:rnd:sat ; Vector complex multiply real or imaginary declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64, i64) @@ -226,7 +226,7 @@ %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vcmpyr({{.*}}, {{.*}}):sat +; CHECK: =vcmpyr({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64, i64) define i64 @M2_vcmpy_s1_sat_r(i64 %a, i64 %b) { @@ -233,7 +233,7 @@ %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vcmpyr({{.*}}, {{.*}}):<<1:sat +; CHECK: =vcmpyr({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64, i64) define i64 @M2_vcmpy_s0_sat_i(i64 %a, i64 %b) { @@ -240,7 +240,7 @@ %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vcmpyi({{.*}}, {{.*}}):sat +; CHECK: =vcmpyi({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64, i64) define i64 @M2_vcmpy_s1_sat_i(i64 %a, i64 %b) { @@ -247,7 +247,7 @@ %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vcmpyi({{.*}}, {{.*}}):<<1:sat +; CHECK: =vcmpyi({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64, i64, i64) define i64 @M2_vcmac_s0_sat_r(i64 %a, i64 %b, i64 %c) { @@ -254,7 +254,7 @@ %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vcmpyr({{.*}}, r5:4):sat +; CHECK: +=vcmpyr({{.*}},r5:4):sat declare i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64, i64, i64) define i64 @M2_vcmac_s0_sat_i(i64 %a, i64 %b, i64 %c) { @@ -261,7 +261,7 @@ %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vcmpyi({{.*}}, r5:4):sat +; CHECK: +=vcmpyi({{.*}},r5:4):sat ; Vector complex conjugate declare i64 @llvm.hexagon.A2.vconj(i64) @@ -269,7 +269,7 @@ %z = call i64 @llvm.hexagon.A2.vconj(i64 %a) ret i64 %z } -; CHECK: = vconj({{.*}}):sat +; CHECK: =vconj({{.*}}):sat ; Vector complex rotate declare i64 @llvm.hexagon.S2.vcrotate(i64, i32) @@ -277,7 +277,7 @@ %z = call i64 @llvm.hexagon.S2.vcrotate(i64 %a, i32 %b) ret i64 %z } -; CHECK: = vcrotate({{.*}}, {{.*}}) +; CHECK: =vcrotate({{.*}},{{.*}}) ; Vector reduce complex multiply real or imaginary declare i64 @llvm.hexagon.M2.vrcmpyi.s0(i64, i64) @@ -285,7 +285,7 @@ %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrcmpyi({{.*}}, {{.*}}) +; CHECK: =vrcmpyi({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.vrcmpyr.s0(i64, i64) define i64 @M2_vrcmpyr_s0(i64 %a, i64 %b) { @@ -292,7 +292,7 @@ %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrcmpyr({{.*}}, {{.*}}) +; CHECK: =vrcmpyr({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64, i64) define i64 @M2_vrcmpyi_s0c(i64 %a, i64 %b) { @@ -299,7 +299,7 @@ %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrcmpyi({{.*}}, {{.*}}*) +; CHECK: =vrcmpyi({{.*}},{{.*}}*) declare i64 
@llvm.hexagon.M2.vrcmpyr.s0c(i64, i64) define i64 @M2_vrcmpyr_s0c(i64 %a, i64 %b) { @@ -306,7 +306,7 @@ %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrcmpyr({{.*}}, {{.*}}*) +; CHECK: =vrcmpyr({{.*}},{{.*}}*) declare i64 @llvm.hexagon.M2.vrcmaci.s0(i64, i64, i64) define i64 @M2_vrcmaci_s0(i64 %a, i64 %b, i64 %c) { @@ -313,7 +313,7 @@ %z = call i64 @llvm.hexagon.M2.vrcmaci.s0(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrcmpyi({{.*}}, r5:4) +; CHECK: +=vrcmpyi({{.*}},r5:4) declare i64 @llvm.hexagon.M2.vrcmacr.s0(i64, i64, i64) define i64 @M2_vrcmacr_s0(i64 %a, i64 %b, i64 %c) { @@ -320,7 +320,7 @@ %z = call i64 @llvm.hexagon.M2.vrcmacr.s0(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrcmpyr({{.*}}, r5:4) +; CHECK: +=vrcmpyr({{.*}},r5:4) declare i64 @llvm.hexagon.M2.vrcmaci.s0c(i64, i64, i64) define i64 @M2_vrcmaci_s0c(i64 %a, i64 %b, i64 %c) { @@ -327,7 +327,7 @@ %z = call i64 @llvm.hexagon.M2.vrcmaci.s0c(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrcmpyi({{.*}}, r5:4*) +; CHECK: +=vrcmpyi({{.*}},r5:4*) declare i64 @llvm.hexagon.M2.vrcmacr.s0c(i64, i64, i64) define i64 @M2_vrcmacr_s0c(i64 %a, i64 %b, i64 %c) { @@ -334,7 +334,7 @@ %z = call i64 @llvm.hexagon.M2.vrcmacr.s0c(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrcmpyr({{.*}}, r5:4*) +; CHECK: +=vrcmpyr({{.*}},r5:4*) ; Vector reduce complex rotate declare i64 @llvm.hexagon.S4.vrcrotate(i64, i32, i32) @@ -342,7 +342,7 @@ %z = call i64 @llvm.hexagon.S4.vrcrotate(i64 %a, i32 %b, i32 0) ret i64 %z } -; CHECK: = vrcrotate({{.*}}, {{.*}}, #0) +; CHECK: =vrcrotate({{.*}},{{.*}},#0) declare i64 @llvm.hexagon.S4.vrcrotate.acc(i64, i64, i32, i32) define i64 @S4_vrcrotate_acc(i64 %a, i64 %b, i32 %c) { @@ -349,4 +349,4 @@ %z = call i64 @llvm.hexagon.S4.vrcrotate.acc(i64 %a, i64 %b, i32 %c, i32 0) ret i64 %z } -; CHECK: += vrcrotate({{.*}}, {{.*}}, #0) +; CHECK: +=vrcrotate({{.*}},{{.*}},#0) Index: test/CodeGen/Hexagon/intrinsics/xtype_fp.ll =================================================================== --- test/CodeGen/Hexagon/intrinsics/xtype_fp.ll +++ test/CodeGen/Hexagon/intrinsics/xtype_fp.ll @@ -11,7 +11,7 @@ %z = call float @llvm.hexagon.F2.sfadd(float %a, float %b) ret float %z } -; CHECK: = sfadd({{.*}}, {{.*}}) +; CHECK: =sfadd({{.*}},{{.*}}) ; Classify floating-point value declare i32 @llvm.hexagon.F2.sfclass(float, i32) @@ -19,7 +19,7 @@ %z = call i32 @llvm.hexagon.F2.sfclass(float %a, i32 0) ret i32 %z } -; CHECK: = sfclass({{.*}}, #0) +; CHECK: =sfclass({{.*}},#0) declare i32 @llvm.hexagon.F2.dfclass(double, i32) define i32 @F2_dfclass(double %a) { @@ -26,7 +26,7 @@ %z = call i32 @llvm.hexagon.F2.dfclass(double %a, i32 0) ret i32 %z } -; CHECK: = dfclass({{.*}}, #0) +; CHECK: =dfclass({{.*}},#0) ; Compare floating-point value declare i32 @llvm.hexagon.F2.sfcmpge(float, float) @@ -34,7 +34,7 @@ %z = call i32 @llvm.hexagon.F2.sfcmpge(float %a, float %b) ret i32 %z } -; CHECK: = sfcmp.ge({{.*}}, {{.*}}) +; CHECK: =sfcmp.ge({{.*}},{{.*}}) declare i32 @llvm.hexagon.F2.sfcmpuo(float, float) define i32 @F2_sfcmpuo(float %a, float %b) { @@ -41,7 +41,7 @@ %z = call i32 @llvm.hexagon.F2.sfcmpuo(float %a, float %b) ret i32 %z } -; CHECK: = sfcmp.uo({{.*}}, {{.*}}) +; CHECK: =sfcmp.uo({{.*}},{{.*}}) declare i32 @llvm.hexagon.F2.sfcmpeq(float, float) define i32 @F2_sfcmpeq(float %a, float %b) { @@ -48,7 +48,7 @@ %z = call i32 @llvm.hexagon.F2.sfcmpeq(float %a, float %b) ret i32 %z } -; CHECK: = sfcmp.eq({{.*}}, {{.*}}) +; CHECK: =sfcmp.eq({{.*}},{{.*}}) declare i32 
@llvm.hexagon.F2.sfcmpgt(float, float) define i32 @F2_sfcmpgt(float %a, float %b) { @@ -55,7 +55,7 @@ %z = call i32 @llvm.hexagon.F2.sfcmpgt(float %a, float %b) ret i32 %z } -; CHECK: = sfcmp.gt({{.*}}, {{.*}}) +; CHECK: =sfcmp.gt({{.*}},{{.*}}) declare i32 @llvm.hexagon.F2.dfcmpge(double, double) define i32 @F2_dfcmpge(double %a, double %b) { @@ -62,7 +62,7 @@ %z = call i32 @llvm.hexagon.F2.dfcmpge(double %a, double %b) ret i32 %z } -; CHECK: = dfcmp.ge({{.*}}, {{.*}}) +; CHECK: =dfcmp.ge({{.*}},{{.*}}) declare i32 @llvm.hexagon.F2.dfcmpuo(double, double) define i32 @F2_dfcmpuo(double %a, double %b) { @@ -69,7 +69,7 @@ %z = call i32 @llvm.hexagon.F2.dfcmpuo(double %a, double %b) ret i32 %z } -; CHECK: = dfcmp.uo({{.*}}, {{.*}}) +; CHECK: =dfcmp.uo({{.*}},{{.*}}) declare i32 @llvm.hexagon.F2.dfcmpeq(double, double) define i32 @F2_dfcmpeq(double %a, double %b) { @@ -76,7 +76,7 @@ %z = call i32 @llvm.hexagon.F2.dfcmpeq(double %a, double %b) ret i32 %z } -; CHECK: = dfcmp.eq({{.*}}, {{.*}}) +; CHECK: =dfcmp.eq({{.*}},{{.*}}) declare i32 @llvm.hexagon.F2.dfcmpgt(double, double) define i32 @F2_dfcmpgt(double %a, double %b) { @@ -83,7 +83,7 @@ %z = call i32 @llvm.hexagon.F2.dfcmpgt(double %a, double %b) ret i32 %z } -; CHECK: = dfcmp.gt({{.*}}, {{.*}}) +; CHECK: =dfcmp.gt({{.*}},{{.*}}) ; Convert floating-point value to other format declare double @llvm.hexagon.F2.conv.sf2df(float) @@ -91,7 +91,7 @@ %z = call double @llvm.hexagon.F2.conv.sf2df(float %a) ret double %z } -; CHECK: = convert_sf2df({{.*}}) +; CHECK: =convert_sf2df({{.*}}) declare float @llvm.hexagon.F2.conv.df2sf(double) define float @F2_conv_df2sf(double %a) { @@ -98,7 +98,7 @@ %z = call float @llvm.hexagon.F2.conv.df2sf(double %a) ret float %z } -; CHECK: = convert_df2sf({{.*}}) +; CHECK: =convert_df2sf({{.*}}) ; Convert integer to floating-point value declare double @llvm.hexagon.F2.conv.ud2df(i64) @@ -106,7 +106,7 @@ %z = call double @llvm.hexagon.F2.conv.ud2df(i64 %a) ret double %z } -; CHECK: = convert_ud2df({{.*}}) +; CHECK: =convert_ud2df({{.*}}) declare double @llvm.hexagon.F2.conv.d2df(i64) define double @F2_conv_d2df(i64 %a) { @@ -113,7 +113,7 @@ %z = call double @llvm.hexagon.F2.conv.d2df(i64 %a) ret double %z } -; CHECK: = convert_d2df({{.*}}) +; CHECK: =convert_d2df({{.*}}) declare double @llvm.hexagon.F2.conv.uw2df(i32) define double @F2_conv_uw2df(i32 %a) { @@ -120,7 +120,7 @@ %z = call double @llvm.hexagon.F2.conv.uw2df(i32 %a) ret double %z } -; CHECK: = convert_uw2df({{.*}}) +; CHECK: =convert_uw2df({{.*}}) declare double @llvm.hexagon.F2.conv.w2df(i32) define double @F2_conv_w2df(i32 %a) { @@ -127,7 +127,7 @@ %z = call double @llvm.hexagon.F2.conv.w2df(i32 %a) ret double %z } -; CHECK: = convert_w2df({{.*}}) +; CHECK: =convert_w2df({{.*}}) declare float @llvm.hexagon.F2.conv.ud2sf(i64) define float @F2_conv_ud2sf(i64 %a) { @@ -134,7 +134,7 @@ %z = call float @llvm.hexagon.F2.conv.ud2sf(i64 %a) ret float %z } -; CHECK: = convert_ud2sf({{.*}}) +; CHECK: =convert_ud2sf({{.*}}) declare float @llvm.hexagon.F2.conv.d2sf(i64) define float @F2_conv_d2sf(i64 %a) { @@ -141,7 +141,7 @@ %z = call float @llvm.hexagon.F2.conv.d2sf(i64 %a) ret float %z } -; CHECK: = convert_d2sf({{.*}}) +; CHECK: =convert_d2sf({{.*}}) declare float @llvm.hexagon.F2.conv.uw2sf(i32) define float @F2_conv_uw2sf(i32 %a) { @@ -148,7 +148,7 @@ %z = call float @llvm.hexagon.F2.conv.uw2sf(i32 %a) ret float %z } -; CHECK: = convert_uw2sf({{.*}}) +; CHECK: =convert_uw2sf({{.*}}) declare float @llvm.hexagon.F2.conv.w2sf(i32) define float @F2_conv_w2sf(i32 
%a) { @@ -155,7 +155,7 @@ %z = call float @llvm.hexagon.F2.conv.w2sf(i32 %a) ret float %z } -; CHECK: = convert_w2sf({{.*}}) +; CHECK: =convert_w2sf({{.*}}) ; Convert floating-point value to integer declare i64 @llvm.hexagon.F2.conv.df2d(double) @@ -163,7 +163,7 @@ %z = call i64 @llvm.hexagon.F2.conv.df2d(double %a) ret i64 %z } -; CHECK: = convert_df2d({{.*}}) +; CHECK: =convert_df2d({{.*}}) declare i64 @llvm.hexagon.F2.conv.df2ud(double) define i64 @F2_conv_df2ud(double %a) { @@ -170,7 +170,7 @@ %z = call i64 @llvm.hexagon.F2.conv.df2ud(double %a) ret i64 %z } -; CHECK: {{.*}} = convert_df2ud({{.*}}) +; CHECK: {{.*}}=convert_df2ud({{.*}}) declare i64 @llvm.hexagon.F2.conv.df2d.chop(double) define i64 @F2_conv_df2d_chop(double %a) { @@ -177,7 +177,7 @@ %z = call i64 @llvm.hexagon.F2.conv.df2d.chop(double %a) ret i64 %z } -; CHECK: = convert_df2d({{.*}}):chop +; CHECK: =convert_df2d({{.*}}):chop declare i64 @llvm.hexagon.F2.conv.df2ud.chop(double) define i64 @F2_conv_df2ud_chop(double %a) { @@ -184,7 +184,7 @@ %z = call i64 @llvm.hexagon.F2.conv.df2ud.chop(double %a) ret i64 %z } -; CHECK: = convert_df2ud({{.*}}):chop +; CHECK: =convert_df2ud({{.*}}):chop declare i64 @llvm.hexagon.F2.conv.sf2ud(float) define i64 @F2_conv_sf2ud(float %a) { @@ -191,7 +191,7 @@ %z = call i64 @llvm.hexagon.F2.conv.sf2ud(float %a) ret i64 %z } -; CHECK: = convert_sf2ud({{.*}}) +; CHECK: =convert_sf2ud({{.*}}) declare i64 @llvm.hexagon.F2.conv.sf2d(float) define i64 @F2_conv_sf2d(float %a) { @@ -198,7 +198,7 @@ %z = call i64 @llvm.hexagon.F2.conv.sf2d(float %a) ret i64 %z } -; CHECK: = convert_sf2d({{.*}}) +; CHECK: =convert_sf2d({{.*}}) declare i64 @llvm.hexagon.F2.conv.sf2d.chop(float) define i64 @F2_conv_sf2d_chop(float %a) { @@ -205,7 +205,7 @@ %z = call i64 @llvm.hexagon.F2.conv.sf2d.chop(float %a) ret i64 %z } -; CHECK: = convert_sf2d({{.*}}):chop +; CHECK: =convert_sf2d({{.*}}):chop declare i64 @llvm.hexagon.F2.conv.sf2ud.chop(float) define i64 @F2_conv_sf2ud_chop(float %a) { @@ -212,7 +212,7 @@ %z = call i64 @llvm.hexagon.F2.conv.sf2ud.chop(float %a) ret i64 %z } -; CHECK: = convert_sf2ud({{.*}}):chop +; CHECK: =convert_sf2ud({{.*}}):chop declare i32 @llvm.hexagon.F2.conv.df2uw(double) define i32 @F2_conv_df2uw(double %a) { @@ -219,7 +219,7 @@ %z = call i32 @llvm.hexagon.F2.conv.df2uw(double %a) ret i32 %z } -; CHECK: = convert_df2uw({{.*}}) +; CHECK: =convert_df2uw({{.*}}) declare i32 @llvm.hexagon.F2.conv.df2w(double) define i32 @F2_conv_df2w(double %a) { @@ -226,7 +226,7 @@ %z = call i32 @llvm.hexagon.F2.conv.df2w(double %a) ret i32 %z } -; CHECK: = convert_df2w({{.*}}) +; CHECK: =convert_df2w({{.*}}) declare i32 @llvm.hexagon.F2.conv.df2w.chop(double) define i32 @F2_conv_df2w_chop(double %a) { @@ -233,7 +233,7 @@ %z = call i32 @llvm.hexagon.F2.conv.df2w.chop(double %a) ret i32 %z } -; CHECK: = convert_df2w({{.*}}):chop +; CHECK: =convert_df2w({{.*}}):chop declare i32 @llvm.hexagon.F2.conv.df2uw.chop(double) define i32 @F2_conv_df2uw_chop(double %a) { @@ -240,7 +240,7 @@ %z = call i32 @llvm.hexagon.F2.conv.df2uw.chop(double %a) ret i32 %z } -; CHECK: = convert_df2uw({{.*}}):chop +; CHECK: =convert_df2uw({{.*}}):chop declare i32 @llvm.hexagon.F2.conv.sf2uw(float) define i32 @F2_conv_sf2uw(float %a) { @@ -247,7 +247,7 @@ %z = call i32 @llvm.hexagon.F2.conv.sf2uw(float %a) ret i32 %z } -; CHECK: = convert_sf2uw({{.*}}) +; CHECK: =convert_sf2uw({{.*}}) declare i32 @llvm.hexagon.F2.conv.sf2uw.chop(float) define i32 @F2_conv_sf2uw_chop(float %a) { @@ -254,7 +254,7 @@ %z = call i32 
@llvm.hexagon.F2.conv.sf2uw.chop(float %a) ret i32 %z } -; CHECK: = convert_sf2uw({{.*}}):chop +; CHECK: =convert_sf2uw({{.*}}):chop declare i32 @llvm.hexagon.F2.conv.sf2w(float) define i32 @F2_conv_sf2w(float %a) { @@ -261,7 +261,7 @@ %z = call i32 @llvm.hexagon.F2.conv.sf2w(float %a) ret i32 %z } -; CHECK: = convert_sf2w({{.*}}) +; CHECK: =convert_sf2w({{.*}}) declare i32 @llvm.hexagon.F2.conv.sf2w.chop(float) define i32 @F2_conv_sf2w_chop(float %a) { @@ -268,7 +268,7 @@ %z = call i32 @llvm.hexagon.F2.conv.sf2w.chop(float %a) ret i32 %z } -; CHECK: = convert_sf2w({{.*}}):chop +; CHECK: =convert_sf2w({{.*}}):chop ; Floating point extreme value assistance declare float @llvm.hexagon.F2.sffixupr(float) @@ -276,7 +276,7 @@ %z = call float @llvm.hexagon.F2.sffixupr(float %a) ret float %z } -; CHECK: = sffixupr({{.*}}) +; CHECK: =sffixupr({{.*}}) declare float @llvm.hexagon.F2.sffixupn(float, float) define float @F2_sffixupn(float %a, float %b) { @@ -283,7 +283,7 @@ %z = call float @llvm.hexagon.F2.sffixupn(float %a, float %b) ret float %z } -; CHECK: = sffixupn({{.*}}, {{.*}}) +; CHECK: =sffixupn({{.*}},{{.*}}) declare float @llvm.hexagon.F2.sffixupd(float, float) define float @F2_sffixupd(float %a, float %b) { @@ -290,7 +290,7 @@ %z = call float @llvm.hexagon.F2.sffixupd(float %a, float %b) ret float %z } -; CHECK: = sffixupd({{.*}}, {{.*}}) +; CHECK: =sffixupd({{.*}},{{.*}}) ; Floating point fused multiply-add declare float @llvm.hexagon.F2.sffma(float, float, float) @@ -298,7 +298,7 @@ %z = call float @llvm.hexagon.F2.sffma(float %a, float %b, float %c) ret float %z } -; CHECK: += sfmpy({{.*}}, {{.*}}) +; CHECK: +=sfmpy({{.*}},{{.*}}) declare float @llvm.hexagon.F2.sffms(float, float, float) define float @F2_sffms(float %a, float %b, float %c) { @@ -305,7 +305,7 @@ %z = call float @llvm.hexagon.F2.sffms(float %a, float %b, float %c) ret float %z } -; CHECK: -= sfmpy({{.*}}, {{.*}}) +; CHECK: -=sfmpy({{.*}},{{.*}}) ; Floating point fused multiply-add with scaling declare float @llvm.hexagon.F2.sffma.sc(float, float, float, i32) @@ -313,7 +313,7 @@ %z = call float @llvm.hexagon.F2.sffma.sc(float %a, float %b, float %c, i32 %d) ret float %z } -; CHECK: += sfmpy({{.*}}, {{.*}}, {{.*}}):scale +; CHECK: +=sfmpy({{.*}},{{.*}},{{.*}}):scale ; Floating point fused multiply-add for library routines declare float @llvm.hexagon.F2.sffma.lib(float, float, float) @@ -321,7 +321,7 @@ %z = call float @llvm.hexagon.F2.sffma.lib(float %a, float %b, float %c) ret float %z } -; CHECK: += sfmpy({{.*}}, {{.*}}):lib +; CHECK: +=sfmpy({{.*}},{{.*}}):lib declare float @llvm.hexagon.F2.sffms.lib(float, float, float) define float @F2_sffms_lib(float %a, float %b, float %c) { @@ -328,7 +328,7 @@ %z = call float @llvm.hexagon.F2.sffms.lib(float %a, float %b, float %c) ret float %z } -; CHECK: -= sfmpy({{.*}}, {{.*}}):lib +; CHECK: -=sfmpy({{.*}},{{.*}}):lib ; Create floating-point constant declare float @llvm.hexagon.F2.sfimm.p(i32) @@ -336,7 +336,7 @@ %z = call float @llvm.hexagon.F2.sfimm.p(i32 0) ret float %z } -; CHECK: = sfmake(#0):pos +; CHECK: =sfmake(#0):pos declare float @llvm.hexagon.F2.sfimm.n(i32) define float @F2_sfimm_n() { @@ -343,7 +343,7 @@ %z = call float @llvm.hexagon.F2.sfimm.n(i32 0) ret float %z } -; CHECK: = sfmake(#0):neg +; CHECK: =sfmake(#0):neg declare double @llvm.hexagon.F2.dfimm.p(i32) define double @F2_dfimm_p() { @@ -350,7 +350,7 @@ %z = call double @llvm.hexagon.F2.dfimm.p(i32 0) ret double %z } -; CHECK: = dfmake(#0):pos +; CHECK: =dfmake(#0):pos declare double 
@llvm.hexagon.F2.dfimm.n(i32) define double @F2_dfimm_n() { @@ -357,7 +357,7 @@ %z = call double @llvm.hexagon.F2.dfimm.n(i32 0) ret double %z } -; CHECK: = dfmake(#0):neg +; CHECK: =dfmake(#0):neg ; Floating point maximum declare float @llvm.hexagon.F2.sfmax(float, float) @@ -365,7 +365,7 @@ %z = call float @llvm.hexagon.F2.sfmax(float %a, float %b) ret float %z } -; CHECK: = sfmax({{.*}}, {{.*}}) +; CHECK: =sfmax({{.*}},{{.*}}) ; Floating point minimum declare float @llvm.hexagon.F2.sfmin(float, float) @@ -373,7 +373,7 @@ %z = call float @llvm.hexagon.F2.sfmin(float %a, float %b) ret float %z } -; CHECK: = sfmin({{.*}}, {{.*}}) +; CHECK: =sfmin({{.*}},{{.*}}) ; Floating point multiply declare float @llvm.hexagon.F2.sfmpy(float, float) @@ -381,7 +381,7 @@ %z = call float @llvm.hexagon.F2.sfmpy(float %a, float %b) ret float %z } -; CHECK: = sfmpy({{.*}}, {{.*}}) +; CHECK: =sfmpy({{.*}},{{.*}}) ; Floating point subtraction declare float @llvm.hexagon.F2.sfsub(float, float) @@ -389,4 +389,4 @@ %z = call float @llvm.hexagon.F2.sfsub(float %a, float %b) ret float %z } -; CHECK: = sfsub({{.*}}, {{.*}}) +; CHECK: =sfsub({{.*}},{{.*}}) Index: test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll =================================================================== --- test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll +++ test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll @@ -8,24 +8,24 @@ ; Multiply and use lower result declare i32 @llvm.hexagon.M4.mpyrr.addi(i32, i32, i32) define i32 @M4_mpyrr_addi(i32 %a, i32 %b) { - %z = call i32 @llvm.hexagon.M4.mpyrr.addi(i32 0, i32 %a, i32 %b) + %z = call i32 @llvm.hexagon.M4.mpyrr.addi(i32 0,i32 %a, i32 %b) ret i32 %z } -; CHECK: = add(#0, mpyi({{.*}}, {{.*}})) +; CHECK: =add(#0,mpyi({{.*}},{{.*}})) declare i32 @llvm.hexagon.M4.mpyri.addi(i32, i32, i32) define i32 @M4_mpyri_addi(i32 %a) { - %z = call i32 @llvm.hexagon.M4.mpyri.addi(i32 0, i32 %a, i32 0) + %z = call i32 @llvm.hexagon.M4.mpyri.addi(i32 0,i32 %a, i32 0) ret i32 %z } -; CHECK: = add(#0, mpyi({{.*}}, #0)) +; CHECK: =add(#0,mpyi({{.*}},#0)) declare i32 @llvm.hexagon.M4.mpyri.addr.u2(i32, i32, i32) define i32 @M4_mpyri_addr_u2(i32 %a, i32 %b) { - %z = call i32 @llvm.hexagon.M4.mpyri.addr.u2(i32 %a, i32 0, i32 %b) + %z = call i32 @llvm.hexagon.M4.mpyri.addr.u2(i32 %a, i32 0,i32 %b) ret i32 %z } -; CHECK: = add({{.*}}, mpyi(#0, {{.*}})) +; CHECK: =add({{.*}},mpyi(#0,{{.*}})) declare i32 @llvm.hexagon.M4.mpyri.addr(i32, i32, i32) define i32 @M4_mpyri_addr(i32 %a, i32 %b) { @@ -32,7 +32,7 @@ %z = call i32 @llvm.hexagon.M4.mpyri.addr(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: = add({{.*}}, mpyi({{.*}}, #0)) +; CHECK: =add({{.*}},mpyi({{.*}},#0)) declare i32 @llvm.hexagon.M4.mpyrr.addr(i32, i32, i32) define i32 @M4_mpyrr_addr(i32 %a, i32 %b, i32 %c) { @@ -39,7 +39,7 @@ %z = call i32 @llvm.hexagon.M4.mpyrr.addr(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = add({{.*}}, mpyi({{.*}}, {{.*}})) +; CHECK: =add({{.*}},mpyi({{.*}},{{.*}})) ; Vector multiply word by signed half (32x16) declare i64 @llvm.hexagon.M2.mmpyl.s0(i64, i64) @@ -47,7 +47,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyl.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpyweh({{.*}}, {{.*}}):sat +; CHECK: =vmpyweh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.mmpyl.s1(i64, i64) define i64 @M2_mmpyl_s1(i64 %a, i64 %b) { @@ -54,7 +54,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyl.s1(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpyweh({{.*}}, {{.*}}):<<1:sat +; CHECK: =vmpyweh({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.mmpyh.s0(i64, i64) define i64 @M2_mmpyh_s0(i64 
%a, i64 %b) { @@ -61,7 +61,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpywoh({{.*}}, {{.*}}):sat +; CHECK: =vmpywoh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.mmpyh.s1(i64, i64) define i64 @M2_mmpyh_s1(i64 %a, i64 %b) { @@ -68,7 +68,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyh.s1(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpywoh({{.*}}, {{.*}}):<<1:sat +; CHECK: =vmpywoh({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.mmpyl.rs0(i64, i64) define i64 @M2_mmpyl_rs0(i64 %a, i64 %b) { @@ -75,7 +75,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyl.rs0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpyweh({{.*}}, {{.*}}):rnd:sat +; CHECK: =vmpyweh({{.*}},{{.*}}):rnd:sat declare i64 @llvm.hexagon.M2.mmpyl.rs1(i64, i64) define i64 @M2_mmpyl_rs1(i64 %a, i64 %b) { @@ -82,7 +82,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyl.rs1(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpyweh({{.*}}, {{.*}}):<<1:rnd:sat +; CHECK: =vmpyweh({{.*}},{{.*}}):<<1:rnd:sat declare i64 @llvm.hexagon.M2.mmpyh.rs0(i64, i64) define i64 @M2_mmpyh_rs0(i64 %a, i64 %b) { @@ -89,7 +89,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyh.rs0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpywoh({{.*}}, {{.*}}):rnd:sat +; CHECK: =vmpywoh({{.*}},{{.*}}):rnd:sat declare i64 @llvm.hexagon.M2.mmpyh.rs1(i64, i64) define i64 @M2_mmpyh_rs1(i64 %a, i64 %b) { @@ -96,7 +96,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyh.rs1(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpywoh({{.*}}, {{.*}}):<<1:rnd:sat +; CHECK: =vmpywoh({{.*}},{{.*}}):<<1:rnd:sat ; Vector multiply word by unsigned half (32x16) declare i64 @llvm.hexagon.M2.mmpyul.s0(i64, i64) @@ -104,7 +104,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyul.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpyweuh({{.*}}, {{.*}}):sat +; CHECK: =vmpyweuh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.mmpyul.s1(i64, i64) define i64 @M2_mmpyul_s1(i64 %a, i64 %b) { @@ -111,7 +111,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyul.s1(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpyweuh({{.*}}, {{.*}}):<<1:sat +; CHECK: =vmpyweuh({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.mmpyuh.s0(i64, i64) define i64 @M2_mmpyuh_s0(i64 %a, i64 %b) { @@ -118,7 +118,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyuh.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpywouh({{.*}}, {{.*}}):sat +; CHECK: =vmpywouh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.mmpyuh.s1(i64, i64) define i64 @M2_mmpyuh_s1(i64 %a, i64 %b) { @@ -125,7 +125,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyuh.s1(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpywouh({{.*}}, {{.*}}):<<1:sat +; CHECK: =vmpywouh({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.mmpyul.rs0(i64, i64) define i64 @M2_mmpyul_rs0(i64 %a, i64 %b) { @@ -132,7 +132,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyul.rs0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpyweuh({{.*}}, {{.*}}):rnd:sat +; CHECK: =vmpyweuh({{.*}},{{.*}}):rnd:sat declare i64 @llvm.hexagon.M2.mmpyul.rs1(i64, i64) define i64 @M2_mmpyul_rs1(i64 %a, i64 %b) { @@ -139,7 +139,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyul.rs1(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpyweuh({{.*}}, {{.*}}):<<1:rnd:sat +; CHECK: =vmpyweuh({{.*}},{{.*}}):<<1:rnd:sat declare i64 @llvm.hexagon.M2.mmpyuh.rs0(i64, i64) define i64 @M2_mmpyuh_rs0(i64 %a, i64 %b) { @@ -146,7 +146,7 @@ %z = call i64 @llvm.hexagon.M2.mmpyuh.rs0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpywouh({{.*}}, {{.*}}):rnd:sat +; CHECK: =vmpywouh({{.*}},{{.*}}):rnd:sat declare i64 @llvm.hexagon.M2.mmpyuh.rs1(i64, i64) define i64 @M2_mmpyuh_rs1(i64 %a, i64 %b) { @@ -153,7 +153,7 @@ %z = call i64 
@llvm.hexagon.M2.mmpyuh.rs1(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpywouh({{.*}}, {{.*}}):<<1:rnd:sat +; CHECK: =vmpywouh({{.*}},{{.*}}):<<1:rnd:sat ; Multiply signed halfwords declare i64 @llvm.hexagon.M2.mpyd.ll.s0(i32, i32) @@ -161,7 +161,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.ll.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.l) +; CHECK: =mpy({{.*}}.l,{{.*}}.l) declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32) define i64 @M2_mpyd_ll_s1(i32 %a, i32 %b) { @@ -168,7 +168,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.ll.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1 +; CHECK: =mpy({{.*}}.l,{{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyd.lh.s0(i32, i32) define i64 @M2_mpyd_lh_s0(i32 %a, i32 %b) { @@ -175,7 +175,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.lh.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.h) +; CHECK: =mpy({{.*}}.l,{{.*}}.h) declare i64 @llvm.hexagon.M2.mpyd.lh.s1(i32, i32) define i64 @M2_mpyd_lh_s1(i32 %a, i32 %b) { @@ -182,7 +182,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.lh.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1 +; CHECK: =mpy({{.*}}.l,{{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyd.hl.s0(i32, i32) define i64 @M2_mpyd_hl_s0(i32 %a, i32 %b) { @@ -189,7 +189,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.hl.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.l) +; CHECK: =mpy({{.*}}.h,{{.*}}.l) declare i64 @llvm.hexagon.M2.mpyd.hl.s1(i32, i32) define i64 @M2_mpyd_hl_s1(i32 %a, i32 %b) { @@ -196,7 +196,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.hl.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1 +; CHECK: =mpy({{.*}}.h,{{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyd.hh.s0(i32, i32) define i64 @M2_mpyd_hh_s0(i32 %a, i32 %b) { @@ -203,7 +203,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.hh.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.h) +; CHECK: =mpy({{.*}}.h,{{.*}}.h) declare i64 @llvm.hexagon.M2.mpyd.hh.s1(i32, i32) define i64 @M2_mpyd_hh_s1(i32 %a, i32 %b) { @@ -210,7 +210,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.hh.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1 +; CHECK: =mpy({{.*}}.h,{{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32, i32) define i64 @M2_mpyd_rnd_ll_s0(i32 %a, i32 %b) { @@ -217,7 +217,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.l):rnd +; CHECK: =mpy({{.*}}.l,{{.*}}.l):rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32, i32) define i64 @M2_mpyd_rnd_ll_s1(i32 %a, i32 %b) { @@ -224,7 +224,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:rnd +; CHECK: =mpy({{.*}}.l,{{.*}}.l):<<1:rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32, i32) define i64 @M2_mpyd_rnd_lh_s0(i32 %a, i32 %b) { @@ -231,7 +231,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.h):rnd +; CHECK: =mpy({{.*}}.l,{{.*}}.h):rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32, i32) define i64 @M2_mpyd_rnd_lh_s1(i32 %a, i32 %b) { @@ -238,7 +238,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:rnd +; CHECK: =mpy({{.*}}.l,{{.*}}.h):<<1:rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32, i32) define i64 @M2_mpyd_rnd_hl_s0(i32 %a, i32 %b) { @@ -245,7 +245,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.h, 
{{.*}}.l):rnd +; CHECK: =mpy({{.*}}.h,{{.*}}.l):rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32, i32) define i64 @M2_mpyd_rnd_hl_s1(i32 %a, i32 %b) { @@ -252,7 +252,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:rnd +; CHECK: =mpy({{.*}}.h,{{.*}}.l):<<1:rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32, i32) define i64 @M2_mpyd_rnd_hh_s0(i32 %a, i32 %b) { @@ -259,7 +259,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.h):rnd +; CHECK: =mpy({{.*}}.h,{{.*}}.h):rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32, i32) define i64 @M2_mpyd_rnd_hh_s1(i32 %a, i32 %b) { @@ -266,7 +266,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:rnd +; CHECK: =mpy({{.*}}.h,{{.*}}.h):<<1:rnd declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64, i32, i32) define i64 @M2_mpyd_acc_ll_s0(i64 %a, i32 %b, i32 %c) { @@ -273,7 +273,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpy({{.*}}.l, {{.*}}.l) +; CHECK: +=mpy({{.*}}.l,{{.*}}.l) declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64, i32, i32) define i64 @M2_mpyd_acc_ll_s1(i64 %a, i32 %b, i32 %c) { @@ -280,7 +280,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1 +; CHECK: +=mpy({{.*}}.l,{{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64, i32, i32) define i64 @M2_mpyd_acc_lh_s0(i64 %a, i32 %b, i32 %c) { @@ -287,7 +287,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpy({{.*}}.l, {{.*}}.h) +; CHECK: +=mpy({{.*}}.l,{{.*}}.h) declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64, i32, i32) define i64 @M2_mpyd_acc_lh_s1(i64 %a, i32 %b, i32 %c) { @@ -294,7 +294,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1 +; CHECK: +=mpy({{.*}}.l,{{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64, i32, i32) define i64 @M2_mpyd_acc_hl_s0(i64 %a, i32 %b, i32 %c) { @@ -301,7 +301,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpy({{.*}}.h, {{.*}}.l) +; CHECK: +=mpy({{.*}}.h,{{.*}}.l) declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64, i32, i32) define i64 @M2_mpyd_acc_hl_s1(i64 %a, i32 %b, i32 %c) { @@ -308,7 +308,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1 +; CHECK: +=mpy({{.*}}.h,{{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64, i32, i32) define i64 @M2_mpyd_acc_hh_s0(i64 %a, i32 %b, i32 %c) { @@ -315,7 +315,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpy({{.*}}.h, {{.*}}.h) +; CHECK: +=mpy({{.*}}.h,{{.*}}.h) declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64, i32, i32) define i64 @M2_mpyd_acc_hh_s1(i64 %a, i32 %b, i32 %c) { @@ -322,7 +322,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1 +; CHECK: +=mpy({{.*}}.h,{{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64, i32, i32) define i64 @M2_mpyd_nac_ll_s0(i64 %a, i32 %b, i32 %c) { @@ -329,7 +329,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpy({{.*}}.l, {{.*}}.l) +; CHECK: -=mpy({{.*}}.l,{{.*}}.l) declare i64 
@llvm.hexagon.M2.mpyd.nac.ll.s1(i64, i32, i32) define i64 @M2_mpyd_nac_ll_s1(i64 %a, i32 %b, i32 %c) { @@ -336,7 +336,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1 +; CHECK: -=mpy({{.*}}.l,{{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64, i32, i32) define i64 @M2_mpyd_nac_lh_s0(i64 %a, i32 %b, i32 %c) { @@ -343,7 +343,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpy({{.*}}.l, {{.*}}.h) +; CHECK: -=mpy({{.*}}.l,{{.*}}.h) declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64, i32, i32) define i64 @M2_mpyd_nac_lh_s1(i64 %a, i32 %b, i32 %c) { @@ -350,7 +350,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1 +; CHECK: -=mpy({{.*}}.l,{{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64, i32, i32) define i64 @M2_mpyd_nac_hl_s0(i64 %a, i32 %b, i32 %c) { @@ -357,7 +357,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpy({{.*}}.h, {{.*}}.l) +; CHECK: -=mpy({{.*}}.h,{{.*}}.l) declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64, i32, i32) define i64 @M2_mpyd_nac_hl_s1(i64 %a, i32 %b, i32 %c) { @@ -364,7 +364,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1 +; CHECK: -=mpy({{.*}}.h,{{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64, i32, i32) define i64 @M2_mpyd_nac_hh_s0(i64 %a, i32 %b, i32 %c) { @@ -371,7 +371,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpy({{.*}}.h, {{.*}}.h) +; CHECK: -=mpy({{.*}}.h,{{.*}}.h) declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64, i32, i32) define i64 @M2_mpyd_nac_hh_s1(i64 %a, i32 %b, i32 %c) { @@ -378,7 +378,7 @@ %z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1 +; CHECK: -=mpy({{.*}}.h,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpy.ll.s0(i32, i32) define i32 @M2_mpy_ll_s0(i32 %a, i32 %b) { @@ -385,7 +385,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.l) +; CHECK: =mpy({{.*}}.l,{{.*}}.l) declare i32 @llvm.hexagon.M2.mpy.ll.s1(i32, i32) define i32 @M2_mpy_ll_s1(i32 %a, i32 %b) { @@ -392,7 +392,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.ll.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1 +; CHECK: =mpy({{.*}}.l,{{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpy.lh.s0(i32, i32) define i32 @M2_mpy_lh_s0(i32 %a, i32 %b) { @@ -399,7 +399,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.lh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.h) +; CHECK: =mpy({{.*}}.l,{{.*}}.h) declare i32 @llvm.hexagon.M2.mpy.lh.s1(i32, i32) define i32 @M2_mpy_lh_s1(i32 %a, i32 %b) { @@ -406,7 +406,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.lh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1 +; CHECK: =mpy({{.*}}.l,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpy.hl.s0(i32, i32) define i32 @M2_mpy_hl_s0(i32 %a, i32 %b) { @@ -413,7 +413,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.hl.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.l) +; CHECK: =mpy({{.*}}.h,{{.*}}.l) declare i32 @llvm.hexagon.M2.mpy.hl.s1(i32, i32) define i32 @M2_mpy_hl_s1(i32 %a, i32 %b) { @@ -420,7 +420,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.hl.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1 +; 
CHECK: =mpy({{.*}}.h,{{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpy.hh.s0(i32, i32) define i32 @M2_mpy_hh_s0(i32 %a, i32 %b) { @@ -427,7 +427,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.hh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.h) +; CHECK: =mpy({{.*}}.h,{{.*}}.h) declare i32 @llvm.hexagon.M2.mpy.hh.s1(i32, i32) define i32 @M2_mpy_hh_s1(i32 %a, i32 %b) { @@ -434,7 +434,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.hh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1 +; CHECK: =mpy({{.*}}.h,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32, i32) define i32 @M2_mpy_sat_ll_s0(i32 %a, i32 %b) { @@ -441,7 +441,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.l):sat +; CHECK: =mpy({{.*}}.l,{{.*}}.l):sat declare i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32, i32) define i32 @M2_mpy_sat_ll_s1(i32 %a, i32 %b) { @@ -448,7 +448,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:sat +; CHECK: =mpy({{.*}}.l,{{.*}}.l):<<1:sat declare i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32, i32) define i32 @M2_mpy_sat_lh_s0(i32 %a, i32 %b) { @@ -455,7 +455,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.h):sat +; CHECK: =mpy({{.*}}.l,{{.*}}.h):sat declare i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32, i32) define i32 @M2_mpy_sat_lh_s1(i32 %a, i32 %b) { @@ -462,7 +462,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:sat +; CHECK: =mpy({{.*}}.l,{{.*}}.h):<<1:sat declare i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32, i32) define i32 @M2_mpy_sat_hl_s0(i32 %a, i32 %b) { @@ -469,7 +469,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.l):sat +; CHECK: =mpy({{.*}}.h,{{.*}}.l):sat declare i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32, i32) define i32 @M2_mpy_sat_hl_s1(i32 %a, i32 %b) { @@ -476,7 +476,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:sat +; CHECK: =mpy({{.*}}.h,{{.*}}.l):<<1:sat declare i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32, i32) define i32 @M2_mpy_sat_hh_s0(i32 %a, i32 %b) { @@ -483,7 +483,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.h):sat +; CHECK: =mpy({{.*}}.h,{{.*}}.h):sat declare i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32, i32) define i32 @M2_mpy_sat_hh_s1(i32 %a, i32 %b) { @@ -490,7 +490,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:sat +; CHECK: =mpy({{.*}}.h,{{.*}}.h):<<1:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32, i32) define i32 @M2_mpy_sat_rnd_ll_s0(i32 %a, i32 %b) { @@ -497,7 +497,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.l):rnd:sat +; CHECK: =mpy({{.*}}.l,{{.*}}.l):rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32, i32) define i32 @M2_mpy_sat_rnd_ll_s1(i32 %a, i32 %b) { @@ -504,7 +504,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:rnd:sat +; CHECK: =mpy({{.*}}.l,{{.*}}.l):<<1:rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32, i32) define i32 @M2_mpy_sat_rnd_lh_s0(i32 %a, i32 %b) { @@ -511,7 +511,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32 %a, i32 %b) ret i32 %z } 
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):rnd:sat +; CHECK: =mpy({{.*}}.l,{{.*}}.h):rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32, i32) define i32 @M2_mpy_sat_rnd_lh_s1(i32 %a, i32 %b) { @@ -518,7 +518,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:rnd:sat +; CHECK: =mpy({{.*}}.l,{{.*}}.h):<<1:rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32, i32) define i32 @M2_mpy_sat_rnd_hl_s0(i32 %a, i32 %b) { @@ -525,7 +525,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.l):rnd:sat +; CHECK: =mpy({{.*}}.h,{{.*}}.l):rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32, i32) define i32 @M2_mpy_sat_rnd_hl_s1(i32 %a, i32 %b) { @@ -532,7 +532,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:rnd:sat +; CHECK: =mpy({{.*}}.h,{{.*}}.l):<<1:rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32, i32) define i32 @M2_mpy_sat_rnd_hh_s0(i32 %a, i32 %b) { @@ -539,7 +539,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.h):rnd:sat +; CHECK: =mpy({{.*}}.h,{{.*}}.h):rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32, i32) define i32 @M2_mpy_sat_rnd_hh_s1(i32 %a, i32 %b) { @@ -546,7 +546,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:rnd:sat +; CHECK: =mpy({{.*}}.h,{{.*}}.h):<<1:rnd:sat declare i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32, i32, i32) define i32 @M2_mpy_acc_ll_s0(i32 %a, i32 %b, i32 %c) { @@ -553,7 +553,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.l, {{.*}}.l) +; CHECK: +=mpy({{.*}}.l,{{.*}}.l) declare i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32, i32, i32) define i32 @M2_mpy_acc_ll_s1(i32 %a, i32 %b, i32 %c) { @@ -560,7 +560,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1 +; CHECK: +=mpy({{.*}}.l,{{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32, i32, i32) define i32 @M2_mpy_acc_lh_s0(i32 %a, i32 %b, i32 %c) { @@ -567,7 +567,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.l, {{.*}}.h) +; CHECK: +=mpy({{.*}}.l,{{.*}}.h) declare i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32, i32, i32) define i32 @M2_mpy_acc_lh_s1(i32 %a, i32 %b, i32 %c) { @@ -574,7 +574,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1 +; CHECK: +=mpy({{.*}}.l,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32, i32, i32) define i32 @M2_mpy_acc_hl_s0(i32 %a, i32 %b, i32 %c) { @@ -581,7 +581,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.h, {{.*}}.l) +; CHECK: +=mpy({{.*}}.h,{{.*}}.l) declare i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32, i32, i32) define i32 @M2_mpy_acc_hl_s1(i32 %a, i32 %b, i32 %c) { @@ -588,7 +588,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1 +; CHECK: +=mpy({{.*}}.h,{{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32, i32, i32) define i32 @M2_mpy_acc_hh_s0(i32 %a, i32 %b, i32 %c) { @@ -595,7 +595,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += 
mpy({{.*}}.h, {{.*}}.h) +; CHECK: +=mpy({{.*}}.h,{{.*}}.h) declare i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32, i32, i32) define i32 @M2_mpy_acc_hh_s1(i32 %a, i32 %b, i32 %c) { @@ -602,7 +602,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1 +; CHECK: +=mpy({{.*}}.h,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32, i32, i32) define i32 @M2_mpy_acc_sat_ll_s0(i32 %a, i32 %b, i32 %c) { @@ -609,7 +609,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.l, {{.*}}.l):sat +; CHECK: +=mpy({{.*}}.l,{{.*}}.l):sat declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32) define i32 @M2_mpy_acc_sat_ll_s1(i32 %a, i32 %b, i32 %c) { @@ -616,7 +616,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1:sat +; CHECK: +=mpy({{.*}}.l,{{.*}}.l):<<1:sat declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32, i32, i32) define i32 @M2_mpy_acc_sat_lh_s0(i32 %a, i32 %b, i32 %c) { @@ -623,7 +623,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.l, {{.*}}.h):sat +; CHECK: +=mpy({{.*}}.l,{{.*}}.h):sat declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32, i32, i32) define i32 @M2_mpy_acc_sat_lh_s1(i32 %a, i32 %b, i32 %c) { @@ -630,7 +630,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1:sat +; CHECK: +=mpy({{.*}}.l,{{.*}}.h):<<1:sat declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32, i32, i32) define i32 @M2_mpy_acc_sat_hl_s0(i32 %a, i32 %b, i32 %c) { @@ -637,7 +637,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.h, {{.*}}.l):sat +; CHECK: +=mpy({{.*}}.h,{{.*}}.l):sat declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32, i32, i32) define i32 @M2_mpy_acc_sat_hl_s1(i32 %a, i32 %b, i32 %c) { @@ -644,7 +644,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1:sat +; CHECK: +=mpy({{.*}}.h,{{.*}}.l):<<1:sat declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32, i32, i32) define i32 @M2_mpy_acc_sat_hh_s0(i32 %a, i32 %b, i32 %c) { @@ -651,7 +651,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.h, {{.*}}.h):sat +; CHECK: +=mpy({{.*}}.h,{{.*}}.h):sat declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32, i32, i32) define i32 @M2_mpy_acc_sat_hh_s1(i32 %a, i32 %b, i32 %c) { @@ -658,7 +658,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1:sat +; CHECK: +=mpy({{.*}}.h,{{.*}}.h):<<1:sat declare i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32, i32, i32) define i32 @M2_mpy_nac_ll_s0(i32 %a, i32 %b, i32 %c) { @@ -665,7 +665,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.l, {{.*}}.l) +; CHECK: -=mpy({{.*}}.l,{{.*}}.l) declare i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32, i32, i32) define i32 @M2_mpy_nac_ll_s1(i32 %a, i32 %b, i32 %c) { @@ -672,7 +672,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1 +; CHECK: -=mpy({{.*}}.l,{{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32, i32, i32) define i32 @M2_mpy_nac_lh_s0(i32 %a, i32 %b, i32 %c) { @@ -679,7 +679,7 
@@ %z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.l, {{.*}}.h) +; CHECK: -=mpy({{.*}}.l,{{.*}}.h) declare i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32, i32, i32) define i32 @M2_mpy_nac_lh_s1(i32 %a, i32 %b, i32 %c) { @@ -686,7 +686,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1 +; CHECK: -=mpy({{.*}}.l,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32, i32, i32) define i32 @M2_mpy_nac_hl_s0(i32 %a, i32 %b, i32 %c) { @@ -693,7 +693,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.h, {{.*}}.l) +; CHECK: -=mpy({{.*}}.h,{{.*}}.l) declare i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32, i32, i32) define i32 @M2_mpy_nac_hl_s1(i32 %a, i32 %b, i32 %c) { @@ -700,7 +700,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1 +; CHECK: -=mpy({{.*}}.h,{{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32, i32, i32) define i32 @M2_mpy_nac_hh_s0(i32 %a, i32 %b, i32 %c) { @@ -707,7 +707,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.h, {{.*}}.h) +; CHECK: -=mpy({{.*}}.h,{{.*}}.h) declare i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32, i32, i32) define i32 @M2_mpy_nac_hh_s1(i32 %a, i32 %b, i32 %c) { @@ -714,7 +714,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1 +; CHECK: -=mpy({{.*}}.h,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32, i32, i32) define i32 @M2_mpy_nac_sat_ll_s0(i32 %a, i32 %b, i32 %c) { @@ -721,7 +721,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.l, {{.*}}.l):sat +; CHECK: -=mpy({{.*}}.l,{{.*}}.l):sat declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32, i32, i32) define i32 @M2_mpy_nac_sat_ll_s1(i32 %a, i32 %b, i32 %c) { @@ -728,7 +728,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1:sat +; CHECK: -=mpy({{.*}}.l,{{.*}}.l):<<1:sat declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32, i32, i32) define i32 @M2_mpy_nac_sat_lh_s0(i32 %a, i32 %b, i32 %c) { @@ -735,7 +735,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.l, {{.*}}.h):sat +; CHECK: -=mpy({{.*}}.l,{{.*}}.h):sat declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32, i32, i32) define i32 @M2_mpy_nac_sat_lh_s1(i32 %a, i32 %b, i32 %c) { @@ -742,7 +742,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1:sat +; CHECK: -=mpy({{.*}}.l,{{.*}}.h):<<1:sat declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32, i32, i32) define i32 @M2_mpy_nac_sat_hl_s0(i32 %a, i32 %b, i32 %c) { @@ -749,7 +749,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.h, {{.*}}.l):sat +; CHECK: -=mpy({{.*}}.h,{{.*}}.l):sat declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32, i32, i32) define i32 @M2_mpy_nac_sat_hl_s1(i32 %a, i32 %b, i32 %c) { @@ -756,7 +756,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1:sat +; CHECK: -=mpy({{.*}}.h,{{.*}}.l):<<1:sat declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32, i32, i32) define i32 
@M2_mpy_nac_sat_hh_s0(i32 %a, i32 %b, i32 %c) { @@ -763,7 +763,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.h, {{.*}}.h):sat +; CHECK: -=mpy({{.*}}.h,{{.*}}.h):sat declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32, i32, i32) define i32 @M2_mpy_nac_sat_hh_s1(i32 %a, i32 %b, i32 %c) { @@ -770,7 +770,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1:sat +; CHECK: -=mpy({{.*}}.h,{{.*}}.h):<<1:sat ; Multiply unsigned halfwords declare i64 @llvm.hexagon.M2.mpyud.ll.s0(i32, i32) @@ -778,7 +778,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.ll.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpyu({{.*}}.l, {{.*}}.l) +; CHECK: =mpyu({{.*}}.l,{{.*}}.l) declare i64 @llvm.hexagon.M2.mpyud.ll.s1(i32, i32) define i64 @M2_mpyud_ll_s1(i32 %a, i32 %b) { @@ -785,7 +785,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.ll.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpyu({{.*}}.l, {{.*}}.l):<<1 +; CHECK: =mpyu({{.*}}.l,{{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyud.lh.s0(i32, i32) define i64 @M2_mpyud_lh_s0(i32 %a, i32 %b) { @@ -792,7 +792,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.lh.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpyu({{.*}}.l, {{.*}}.h) +; CHECK: =mpyu({{.*}}.l,{{.*}}.h) declare i64 @llvm.hexagon.M2.mpyud.lh.s1(i32, i32) define i64 @M2_mpyud_lh_s1(i32 %a, i32 %b) { @@ -799,7 +799,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.lh.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpyu({{.*}}.l, {{.*}}.h):<<1 +; CHECK: =mpyu({{.*}}.l,{{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyud.hl.s0(i32, i32) define i64 @M2_mpyud_hl_s0(i32 %a, i32 %b) { @@ -806,7 +806,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.hl.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpyu({{.*}}.h, {{.*}}.l) +; CHECK: =mpyu({{.*}}.h,{{.*}}.l) declare i64 @llvm.hexagon.M2.mpyud.hl.s1(i32, i32) define i64 @M2_mpyud_hl_s1(i32 %a, i32 %b) { @@ -813,7 +813,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.hl.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpyu({{.*}}.h, {{.*}}.l):<<1 +; CHECK: =mpyu({{.*}}.h,{{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyud.hh.s0(i32, i32) define i64 @M2_mpyud_hh_s0(i32 %a, i32 %b) { @@ -820,7 +820,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.hh.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpyu({{.*}}.h, {{.*}}.h) +; CHECK: =mpyu({{.*}}.h,{{.*}}.h) declare i64 @llvm.hexagon.M2.mpyud.hh.s1(i32, i32) define i64 @M2_mpyud_hh_s1(i32 %a, i32 %b) { @@ -827,7 +827,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.hh.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpyu({{.*}}.h, {{.*}}.h):<<1 +; CHECK: =mpyu({{.*}}.h,{{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64, i32, i32) define i64 @M2_mpyud_acc_ll_s0(i64 %a, i32 %b, i32 %c) { @@ -834,7 +834,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpyu({{.*}}.l, {{.*}}.l) +; CHECK: +=mpyu({{.*}}.l,{{.*}}.l) declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64, i32, i32) define i64 @M2_mpyud_acc_ll_s1(i64 %a, i32 %b, i32 %c) { @@ -841,7 +841,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpyu({{.*}}.l, {{.*}}.l):<<1 +; CHECK: +=mpyu({{.*}}.l,{{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64, i32, i32) define i64 @M2_mpyud_acc_lh_s0(i64 %a, i32 %b, i32 %c) { @@ -848,7 +848,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpyu({{.*}}.l, {{.*}}.h) +; CHECK: +=mpyu({{.*}}.l,{{.*}}.h) declare i64 
@llvm.hexagon.M2.mpyud.acc.lh.s1(i64, i32, i32) define i64 @M2_mpyud_acc_lh_s1(i64 %a, i32 %b, i32 %c) { @@ -855,7 +855,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpyu({{.*}}.l, {{.*}}.h):<<1 +; CHECK: +=mpyu({{.*}}.l,{{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64, i32, i32) define i64 @M2_mpyud_acc_hl_s0(i64 %a, i32 %b, i32 %c) { @@ -862,7 +862,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpyu({{.*}}.h, {{.*}}.l) +; CHECK: +=mpyu({{.*}}.h,{{.*}}.l) declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64, i32, i32) define i64 @M2_mpyud_acc_hl_s1(i64 %a, i32 %b, i32 %c) { @@ -869,7 +869,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpyu({{.*}}.h, {{.*}}.l):<<1 +; CHECK: +=mpyu({{.*}}.h,{{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64, i32, i32) define i64 @M2_mpyud_acc_hh_s0(i64 %a, i32 %b, i32 %c) { @@ -876,7 +876,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpyu({{.*}}.h, {{.*}}.h) +; CHECK: +=mpyu({{.*}}.h,{{.*}}.h) declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64, i32, i32) define i64 @M2_mpyud_acc_hh_s1(i64 %a, i32 %b, i32 %c) { @@ -883,7 +883,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpyu({{.*}}.h, {{.*}}.h):<<1 +; CHECK: +=mpyu({{.*}}.h,{{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64, i32, i32) define i64 @M2_mpyud_nac_ll_s0(i64 %a, i32 %b, i32 %c) { @@ -890,7 +890,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpyu({{.*}}.l, {{.*}}.l) +; CHECK: -=mpyu({{.*}}.l,{{.*}}.l) declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64, i32, i32) define i64 @M2_mpyud_nac_ll_s1(i64 %a, i32 %b, i32 %c) { @@ -897,7 +897,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpyu({{.*}}.l, {{.*}}.l):<<1 +; CHECK: -=mpyu({{.*}}.l,{{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64, i32, i32) define i64 @M2_mpyud_nac_lh_s0(i64 %a, i32 %b, i32 %c) { @@ -904,7 +904,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpyu({{.*}}.l, {{.*}}.h) +; CHECK: -=mpyu({{.*}}.l,{{.*}}.h) declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64, i32, i32) define i64 @M2_mpyud_nac_lh_s1(i64 %a, i32 %b, i32 %c) { @@ -911,7 +911,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpyu({{.*}}.l, {{.*}}.h):<<1 +; CHECK: -=mpyu({{.*}}.l,{{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64, i32, i32) define i64 @M2_mpyud_nac_hl_s0(i64 %a, i32 %b, i32 %c) { @@ -918,7 +918,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpyu({{.*}}.h, {{.*}}.l) +; CHECK: -=mpyu({{.*}}.h,{{.*}}.l) declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64, i32, i32) define i64 @M2_mpyud_nac_hl_s1(i64 %a, i32 %b, i32 %c) { @@ -925,7 +925,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpyu({{.*}}.h, {{.*}}.l):<<1 +; CHECK: -=mpyu({{.*}}.h,{{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64, i32, i32) define i64 @M2_mpyud_nac_hh_s0(i64 %a, i32 %b, i32 %c) { @@ -932,7 +932,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpyu({{.*}}.h, {{.*}}.h) +; CHECK: 
-=mpyu({{.*}}.h,{{.*}}.h) declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64, i32, i32) define i64 @M2_mpyud_nac_hh_s1(i64 %a, i32 %b, i32 %c) { @@ -939,7 +939,7 @@ %z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpyu({{.*}}.h, {{.*}}.h):<<1 +; CHECK: -=mpyu({{.*}}.h,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpyu.ll.s0(i32, i32) define i32 @M2_mpyu_ll_s0(i32 %a, i32 %b) { @@ -946,7 +946,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.ll.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpyu({{.*}}.l, {{.*}}.l) +; CHECK: =mpyu({{.*}}.l,{{.*}}.l) declare i32 @llvm.hexagon.M2.mpyu.ll.s1(i32, i32) define i32 @M2_mpyu_ll_s1(i32 %a, i32 %b) { @@ -953,7 +953,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.ll.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpyu({{.*}}.l, {{.*}}.l):<<1 +; CHECK: =mpyu({{.*}}.l,{{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpyu.lh.s0(i32, i32) define i32 @M2_mpyu_lh_s0(i32 %a, i32 %b) { @@ -960,7 +960,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.lh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpyu({{.*}}.l, {{.*}}.h) +; CHECK: =mpyu({{.*}}.l,{{.*}}.h) declare i32 @llvm.hexagon.M2.mpyu.lh.s1(i32, i32) define i32 @M2_mpyu_lh_s1(i32 %a, i32 %b) { @@ -967,7 +967,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.lh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpyu({{.*}}.l, {{.*}}.h):<<1 +; CHECK: =mpyu({{.*}}.l,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpyu.hl.s0(i32, i32) define i32 @M2_mpyu_hl_s0(i32 %a, i32 %b) { @@ -974,7 +974,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.hl.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpyu({{.*}}.h, {{.*}}.l) +; CHECK: =mpyu({{.*}}.h,{{.*}}.l) declare i32 @llvm.hexagon.M2.mpyu.hl.s1(i32, i32) define i32 @M2_mpyu_hl_s1(i32 %a, i32 %b) { @@ -981,7 +981,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.hl.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpyu({{.*}}.h, {{.*}}.l):<<1 +; CHECK: =mpyu({{.*}}.h,{{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpyu.hh.s0(i32, i32) define i32 @M2_mpyu_hh_s0(i32 %a, i32 %b) { @@ -988,7 +988,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.hh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpyu({{.*}}.h, {{.*}}.h) +; CHECK: =mpyu({{.*}}.h,{{.*}}.h) declare i32 @llvm.hexagon.M2.mpyu.hh.s1(i32, i32) define i32 @M2_mpyu_hh_s1(i32 %a, i32 %b) { @@ -995,7 +995,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.hh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpyu({{.*}}.h, {{.*}}.h):<<1 +; CHECK: =mpyu({{.*}}.h,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32, i32, i32) define i32 @M2_mpyu_acc_ll_s0(i32 %a, i32 %b, i32 %c) { @@ -1002,7 +1002,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpyu({{.*}}.l, {{.*}}.l) +; CHECK: +=mpyu({{.*}}.l,{{.*}}.l) declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32, i32, i32) define i32 @M2_mpyu_acc_ll_s1(i32 %a, i32 %b, i32 %c) { @@ -1009,7 +1009,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpyu({{.*}}.l, {{.*}}.l):<<1 +; CHECK: +=mpyu({{.*}}.l,{{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32, i32, i32) define i32 @M2_mpyu_acc_lh_s0(i32 %a, i32 %b, i32 %c) { @@ -1016,7 +1016,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpyu({{.*}}.l, {{.*}}.h) +; CHECK: +=mpyu({{.*}}.l,{{.*}}.h) declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32, i32, i32) define i32 @M2_mpyu_acc_lh_s1(i32 %a, i32 %b, i32 %c) { @@ -1023,7 +1023,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpyu({{.*}}.l, 
{{.*}}.h):<<1 +; CHECK: +=mpyu({{.*}}.l,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32, i32, i32) define i32 @M2_mpyu_acc_hl_s0(i32 %a, i32 %b, i32 %c) { @@ -1030,7 +1030,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpyu({{.*}}.h, {{.*}}.l) +; CHECK: +=mpyu({{.*}}.h,{{.*}}.l) declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32, i32, i32) define i32 @M2_mpyu_acc_hl_s1(i32 %a, i32 %b, i32 %c) { @@ -1037,7 +1037,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpyu({{.*}}.h, {{.*}}.l):<<1 +; CHECK: +=mpyu({{.*}}.h,{{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32, i32, i32) define i32 @M2_mpyu_acc_hh_s0(i32 %a, i32 %b, i32 %c) { @@ -1044,7 +1044,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpyu({{.*}}.h, {{.*}}.h) +; CHECK: +=mpyu({{.*}}.h,{{.*}}.h) declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32, i32, i32) define i32 @M2_mpyu_acc_hh_s1(i32 %a, i32 %b, i32 %c) { @@ -1051,7 +1051,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpyu({{.*}}.h, {{.*}}.h):<<1 +; CHECK: +=mpyu({{.*}}.h,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32, i32, i32) define i32 @M2_mpyu_nac_ll_s0(i32 %a, i32 %b, i32 %c) { @@ -1058,7 +1058,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpyu({{.*}}.l, {{.*}}.l) +; CHECK: -=mpyu({{.*}}.l,{{.*}}.l) declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32, i32, i32) define i32 @M2_mpyu_nac_ll_s1(i32 %a, i32 %b, i32 %c) { @@ -1065,7 +1065,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpyu({{.*}}.l, {{.*}}.l):<<1 +; CHECK: -=mpyu({{.*}}.l,{{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32, i32, i32) define i32 @M2_mpyu_nac_lh_s0(i32 %a, i32 %b, i32 %c) { @@ -1072,7 +1072,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpyu({{.*}}.l, {{.*}}.h) +; CHECK: -=mpyu({{.*}}.l,{{.*}}.h) declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32, i32, i32) define i32 @M2_mpyu_nac_lh_s1(i32 %a, i32 %b, i32 %c) { @@ -1079,7 +1079,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpyu({{.*}}.l, {{.*}}.h):<<1 +; CHECK: -=mpyu({{.*}}.l,{{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32, i32, i32) define i32 @M2_mpyu_nac_hl_s0(i32 %a, i32 %b, i32 %c) { @@ -1086,7 +1086,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpyu({{.*}}.h, {{.*}}.l) +; CHECK: -=mpyu({{.*}}.h,{{.*}}.l) declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32, i32, i32) define i32 @M2_mpyu_nac_hl_s1(i32 %a, i32 %b, i32 %c) { @@ -1093,7 +1093,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpyu({{.*}}.h, {{.*}}.l):<<1 +; CHECK: -=mpyu({{.*}}.h,{{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32, i32, i32) define i32 @M2_mpyu_nac_hh_s0(i32 %a, i32 %b, i32 %c) { @@ -1100,7 +1100,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpyu({{.*}}.h, {{.*}}.h) +; CHECK: -=mpyu({{.*}}.h,{{.*}}.h) declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32, i32, i32) define i32 @M2_mpyu_nac_hh_s1(i32 %a, i32 %b, i32 %c) { @@ -1107,7 +1107,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; 
CHECK: -= mpyu({{.*}}.h, {{.*}}.h):<<1 +; CHECK: -=mpyu({{.*}}.h,{{.*}}.h):<<1 ; Polynomial multiply words declare i64 @llvm.hexagon.M4.pmpyw(i32, i32) @@ -1115,7 +1115,7 @@ %z = call i64 @llvm.hexagon.M4.pmpyw(i32 %a, i32 %b) ret i64 %z } -; CHECK: = pmpyw({{.*}}, {{.*}}) +; CHECK: =pmpyw({{.*}},{{.*}}) declare i64 @llvm.hexagon.M4.pmpyw.acc(i64, i32, i32) define i64 @M4_pmpyw_acc(i64 %a, i32 %b, i32 %c) { @@ -1122,7 +1122,7 @@ %z = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: ^= pmpyw({{.*}}, {{.*}}) +; CHECK: ^=pmpyw({{.*}},{{.*}}) ; Vector reduce multiply word by signed half declare i64 @llvm.hexagon.M4.vrmpyoh.s0(i64, i64) @@ -1130,7 +1130,7 @@ %z = call i64 @llvm.hexagon.M4.vrmpyoh.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrmpywoh({{.*}}, {{.*}}) +; CHECK: =vrmpywoh({{.*}},{{.*}}) declare i64 @llvm.hexagon.M4.vrmpyoh.s1(i64, i64) define i64 @M4_vrmpyoh_s1(i64 %a, i64 %b) { @@ -1137,7 +1137,7 @@ %z = call i64 @llvm.hexagon.M4.vrmpyoh.s1(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrmpywoh({{.*}}, {{.*}}):<<1 +; CHECK: =vrmpywoh({{.*}},{{.*}}):<<1 declare i64 @llvm.hexagon.M4.vrmpyeh.s0(i64, i64) define i64 @M4_vrmpyeh_s0(i64 %a, i64 %b) { @@ -1144,7 +1144,7 @@ %z = call i64 @llvm.hexagon.M4.vrmpyeh.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrmpyweh({{.*}}, {{.*}}) +; CHECK: =vrmpyweh({{.*}},{{.*}}) declare i64 @llvm.hexagon.M4.vrmpyeh.s1(i64, i64) define i64 @M4_vrmpyeh_s1(i64 %a, i64 %b) { @@ -1151,7 +1151,7 @@ %z = call i64 @llvm.hexagon.M4.vrmpyeh.s1(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrmpyweh({{.*}}, {{.*}}):<<1 +; CHECK: =vrmpyweh({{.*}},{{.*}}):<<1 declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64, i64, i64) define i64 @M4_vrmpyoh_acc_s0(i64 %a, i64 %b, i64 %c) { @@ -1158,7 +1158,7 @@ %z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrmpywoh({{.*}}, r5:4) +; CHECK: +=vrmpywoh({{.*}},r5:4) declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64, i64, i64) define i64 @M4_vrmpyoh_acc_s1(i64 %a, i64 %b, i64 %c) { @@ -1165,7 +1165,7 @@ %z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrmpywoh({{.*}}, r5:4):<<1 +; CHECK: +=vrmpywoh({{.*}},r5:4):<<1 declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64, i64, i64) define i64 @M4_vrmpyeh_acc_s0(i64 %a, i64 %b, i64 %c) { @@ -1172,7 +1172,7 @@ %z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrmpyweh({{.*}}, r5:4) +; CHECK: +=vrmpyweh({{.*}},r5:4) declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64, i64, i64) define i64 @M4_vrmpyeh_acc_s1(i64 %a, i64 %b, i64 %c) { @@ -1179,7 +1179,7 @@ %z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrmpyweh({{.*}}, r5:4):<<1 +; CHECK: +=vrmpyweh({{.*}},r5:4):<<1 ; Multiply and use upper result declare i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32, i32) @@ -1187,7 +1187,7 @@ %z = call i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}, {{.*}}):rnd +; CHECK: =mpy({{.*}},{{.*}}):rnd declare i32 @llvm.hexagon.M2.mpyu.up(i32, i32) define i32 @M2_mpyu_up(i32 %a, i32 %b) { @@ -1194,7 +1194,7 @@ %z = call i32 @llvm.hexagon.M2.mpyu.up(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpyu({{.*}}, {{.*}}) +; CHECK: =mpyu({{.*}},{{.*}}) declare i32 @llvm.hexagon.M2.mpysu.up(i32, i32) define i32 @M2_mpysu_up(i32 %a, i32 %b) { @@ -1201,7 +1201,7 @@ %z = call i32 @llvm.hexagon.M2.mpysu.up(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpysu({{.*}}, {{.*}}) +; CHECK: =mpysu({{.*}},{{.*}}) 
declare i32 @llvm.hexagon.M2.hmmpyh.s1(i32, i32) define i32 @M2_hmmpyh_s1(i32 %a, i32 %b) { @@ -1208,7 +1208,7 @@ %z = call i32 @llvm.hexagon.M2.hmmpyh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}, {{.*}}.h):<<1:sat +; CHECK: =mpy({{.*}},{{.*}}.h):<<1:sat declare i32 @llvm.hexagon.M2.hmmpyl.s1(i32, i32) define i32 @M2_hmmpyl_s1(i32 %a, i32 %b) { @@ -1215,7 +1215,7 @@ %z = call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}, {{.*}}.l):<<1:sat +; CHECK: =mpy({{.*}},{{.*}}.l):<<1:sat declare i32 @llvm.hexagon.M2.hmmpyh.rs1(i32, i32) define i32 @M2_hmmpyh_rs1(i32 %a, i32 %b) { @@ -1222,7 +1222,7 @@ %z = call i32 @llvm.hexagon.M2.hmmpyh.rs1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}, {{.*}}.h):<<1:rnd:sat +; CHECK: =mpy({{.*}},{{.*}}.h):<<1:rnd:sat declare i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32, i32) define i32 @M2_mpy_up_s1_sat(i32 %a, i32 %b) { @@ -1229,7 +1229,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}, {{.*}}):<<1:sat +; CHECK: =mpy({{.*}},{{.*}}):<<1:sat declare i32 @llvm.hexagon.M2.hmmpyl.rs1(i32, i32) define i32 @M2_hmmpyl_rs1(i32 %a, i32 %b) { @@ -1236,7 +1236,7 @@ %z = call i32 @llvm.hexagon.M2.hmmpyl.rs1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}, {{.*}}.l):<<1:rnd:sat +; CHECK: =mpy({{.*}},{{.*}}.l):<<1:rnd:sat declare i32 @llvm.hexagon.M2.mpy.up(i32, i32) define i32 @M2_mpy_up(i32 %a, i32 %b) { @@ -1243,7 +1243,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.up(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}, {{.*}}) +; CHECK: =mpy({{.*}},{{.*}}) declare i32 @llvm.hexagon.M2.mpy.up.s1(i32, i32) define i32 @M2_mpy_up_s1(i32 %a, i32 %b) { @@ -1250,7 +1250,7 @@ %z = call i32 @llvm.hexagon.M2.mpy.up.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = mpy({{.*}}, {{.*}}):<<1 +; CHECK: =mpy({{.*}},{{.*}}):<<1 declare i32 @llvm.hexagon.M4.mac.up.s1.sat(i32, i32, i32) define i32 @M4_mac_up_s1_sat(i32 %a, i32 %b, i32 %c) { @@ -1257,7 +1257,7 @@ %z = call i32 @llvm.hexagon.M4.mac.up.s1.sat(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += mpy({{.*}}, {{.*}}):<<1:sat +; CHECK: +=mpy({{.*}},{{.*}}):<<1:sat declare i32 @llvm.hexagon.M4.nac.up.s1.sat(i32, i32, i32) define i32 @M4_nac_up_s1_sat(i32 %a, i32 %b, i32 %c) { @@ -1264,7 +1264,7 @@ %z = call i32 @llvm.hexagon.M4.nac.up.s1.sat(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= mpy({{.*}}, {{.*}}):<<1:sat +; CHECK: -=mpy({{.*}},{{.*}}):<<1:sat ; Multiply and use full result declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32) @@ -1272,7 +1272,7 @@ %z = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpy({{.*}}, {{.*}}) +; CHECK: =mpy({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.dpmpyuu.s0(i32, i32) define i64 @M2_dpmpyuu_s0(i32 %a, i32 %b) { @@ -1279,7 +1279,7 @@ %z = call i64 @llvm.hexagon.M2.dpmpyuu.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = mpyu({{.*}}, {{.*}}) +; CHECK: =mpyu({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64, i32, i32) define i64 @M2_dpmpyss_acc_s0(i64 %a, i32 %b, i32 %c) { @@ -1286,7 +1286,7 @@ %z = call i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpy({{.*}}, {{.*}}) +; CHECK: +=mpy({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64, i32, i32) define i64 @M2_dpmpyss_nac_s0(i64 %a, i32 %b, i32 %c) { @@ -1293,7 +1293,7 @@ %z = call i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpy({{.*}}, {{.*}}) +; CHECK: -=mpy({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64, i32, 
i32) define i64 @M2_dpmpyuu_acc_s0(i64 %a, i32 %b, i32 %c) { @@ -1300,7 +1300,7 @@ %z = call i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += mpyu({{.*}}, {{.*}}) +; CHECK: +=mpyu({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64, i32, i32) define i64 @M2_dpmpyuu_nac_s0(i64 %a, i32 %b, i32 %c) { @@ -1307,7 +1307,7 @@ %z = call i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= mpyu({{.*}}, {{.*}}) +; CHECK: -=mpyu({{.*}},{{.*}}) ; Vector dual multiply declare i64 @llvm.hexagon.M2.vdmpys.s0(i64, i64) @@ -1315,7 +1315,7 @@ %z = call i64 @llvm.hexagon.M2.vdmpys.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vdmpy({{.*}}, {{.*}}):sat +; CHECK: =vdmpy({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vdmpys.s1(i64, i64) define i64 @M2_vdmpys_s1(i64 %a, i64 %b) { @@ -1322,7 +1322,7 @@ %z = call i64 @llvm.hexagon.M2.vdmpys.s1(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vdmpy({{.*}}, {{.*}}):<<1:sat +; CHECK: =vdmpy({{.*}},{{.*}}):<<1:sat ; Vector reduce multiply bytes declare i64 @llvm.hexagon.M5.vrmpybuu(i64, i64) @@ -1330,7 +1330,7 @@ %z = call i64 @llvm.hexagon.M5.vrmpybuu(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrmpybu({{.*}}, {{.*}}) +; CHECK: =vrmpybu({{.*}},{{.*}}) declare i64 @llvm.hexagon.M5.vrmpybsu(i64, i64) define i64 @M5_vrmpybsu(i64 %a, i64 %b) { @@ -1337,7 +1337,7 @@ %z = call i64 @llvm.hexagon.M5.vrmpybsu(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrmpybsu({{.*}}, {{.*}}) +; CHECK: =vrmpybsu({{.*}},{{.*}}) declare i64 @llvm.hexagon.M5.vrmacbuu(i64, i64, i64) define i64 @M5_vrmacbuu(i64 %a, i64 %b, i64 %c) { @@ -1344,7 +1344,7 @@ %z = call i64 @llvm.hexagon.M5.vrmacbuu(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrmpybu({{.*}}, r5:4) +; CHECK: +=vrmpybu({{.*}},r5:4) declare i64 @llvm.hexagon.M5.vrmacbsu(i64, i64, i64) define i64 @M5_vrmacbsu(i64 %a, i64 %b, i64 %c) { @@ -1351,7 +1351,7 @@ %z = call i64 @llvm.hexagon.M5.vrmacbsu(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrmpybsu({{.*}}, r5:4) +; CHECK: +=vrmpybsu({{.*}},r5:4) ; Vector dual multiply signed by unsigned bytes declare i64 @llvm.hexagon.M5.vdmpybsu(i64, i64) @@ -1359,7 +1359,7 @@ %z = call i64 @llvm.hexagon.M5.vdmpybsu(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vdmpybsu({{.*}}, {{.*}}):sat +; CHECK: =vdmpybsu({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M5.vdmacbsu(i64, i64, i64) define i64 @M5_vdmacbsu(i64 %a, i64 %b, i64 %c) { @@ -1366,7 +1366,7 @@ %z = call i64 @llvm.hexagon.M5.vdmacbsu(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vdmpybsu({{.*}}, r5:4):sat +; CHECK: +=vdmpybsu({{.*}},r5:4):sat ; Vector multiply even halfwords declare i64 @llvm.hexagon.M2.vmpy2es.s0(i64, i64) @@ -1374,7 +1374,7 @@ %z = call i64 @llvm.hexagon.M2.vmpy2es.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpyeh({{.*}}, {{.*}}):sat +; CHECK: =vmpyeh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vmpy2es.s1(i64, i64) define i64 @M2_vmpy2es_s1(i64 %a, i64 %b) { @@ -1381,7 +1381,7 @@ %z = call i64 @llvm.hexagon.M2.vmpy2es.s1(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmpyeh({{.*}}, {{.*}}):<<1:sat +; CHECK: =vmpyeh({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.vmac2es(i64, i64, i64) define i64 @M2_vmac2es(i64 %a, i64 %b, i64 %c) { @@ -1388,7 +1388,7 @@ %z = call i64 @llvm.hexagon.M2.vmac2es(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vmpyeh({{.*}}, r5:4) +; CHECK: +=vmpyeh({{.*}},r5:4) declare i64 @llvm.hexagon.M2.vmac2es.s0(i64, i64, i64) define i64 @M2_vmac2es_s0(i64 %a, i64 %b, i64 %c) { @@ -1395,7 +1395,7 @@ %z = call i64 
@llvm.hexagon.M2.vmac2es.s0(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vmpyeh({{.*}}, r5:4):sat +; CHECK: +=vmpyeh({{.*}},r5:4):sat declare i64 @llvm.hexagon.M2.vmac2es.s1(i64, i64, i64) define i64 @M2_vmac2es_s1(i64 %a, i64 %b, i64 %c) { @@ -1402,7 +1402,7 @@ %z = call i64 @llvm.hexagon.M2.vmac2es.s1(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vmpyeh({{.*}}, r5:4):<<1:sat +; CHECK: +=vmpyeh({{.*}},r5:4):<<1:sat ; Vector multiply halfwords declare i64 @llvm.hexagon.M2.vmpy2s.s0(i32, i32) @@ -1410,7 +1410,7 @@ %z = call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = vmpyh({{.*}}, {{.*}}):sat +; CHECK: =vmpyh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vmpy2s.s1(i32, i32) define i64 @M2_vmpy2s_s1(i32 %a, i32 %b) { @@ -1417,7 +1417,7 @@ %z = call i64 @llvm.hexagon.M2.vmpy2s.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = vmpyh({{.*}}, {{.*}}):<<1:sat +; CHECK: =vmpyh({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.vmac2(i64, i32, i32) define i64 @M2_vmac2(i64 %a, i32 %b, i32 %c) { @@ -1424,7 +1424,7 @@ %z = call i64 @llvm.hexagon.M2.vmac2(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpyh({{.*}}, {{.*}}) +; CHECK: +=vmpyh({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.vmac2s.s0(i64, i32, i32) define i64 @M2_vmac2s_s0(i64 %a, i32 %b, i32 %c) { @@ -1431,7 +1431,7 @@ %z = call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpyh({{.*}}, {{.*}}):sat +; CHECK: +=vmpyh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vmac2s.s1(i64, i32, i32) define i64 @M2_vmac2s_s1(i64 %a, i32 %b, i32 %c) { @@ -1438,7 +1438,7 @@ %z = call i64 @llvm.hexagon.M2.vmac2s.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpyh({{.*}}, {{.*}}):<<1:sat +; CHECK: +=vmpyh({{.*}},{{.*}}):<<1:sat ; Vector multiply halfwords signed by unsigned declare i64 @llvm.hexagon.M2.vmpy2su.s0(i32, i32) @@ -1446,7 +1446,7 @@ %z = call i64 @llvm.hexagon.M2.vmpy2su.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = vmpyhsu({{.*}}, {{.*}}):sat +; CHECK: =vmpyhsu({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vmpy2su.s1(i32, i32) define i64 @M2_vmpy2su_s1(i32 %a, i32 %b) { @@ -1453,7 +1453,7 @@ %z = call i64 @llvm.hexagon.M2.vmpy2su.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = vmpyhsu({{.*}}, {{.*}}):<<1:sat +; CHECK: =vmpyhsu({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.vmac2su.s0(i64, i32, i32) define i64 @M2_vmac2su_s0(i64 %a, i32 %b, i32 %c) { @@ -1460,7 +1460,7 @@ %z = call i64 @llvm.hexagon.M2.vmac2su.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpyhsu({{.*}}, {{.*}}):sat +; CHECK: +=vmpyhsu({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vmac2su.s1(i64, i32, i32) define i64 @M2_vmac2su_s1(i64 %a, i32 %b, i32 %c) { @@ -1467,7 +1467,7 @@ %z = call i64 @llvm.hexagon.M2.vmac2su.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpyhsu({{.*}}, {{.*}}):<<1:sat +; CHECK: +=vmpyhsu({{.*}},{{.*}}):<<1:sat ; Vector reduce multiply halfwords declare i64 @llvm.hexagon.M2.vrmpy.s0(i64, i64) @@ -1475,7 +1475,7 @@ %z = call i64 @llvm.hexagon.M2.vrmpy.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrmpyh({{.*}}, {{.*}}) +; CHECK: =vrmpyh({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.vrmac.s0(i64, i64, i64) define i64 @M2_vrmac_s0(i64 %a, i64 %b, i64 %c) { @@ -1482,7 +1482,7 @@ %z = call i64 @llvm.hexagon.M2.vrmac.s0(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrmpyh({{.*}}, r5:4) +; CHECK: +=vrmpyh({{.*}},r5:4) ; Vector multiply bytes declare i64 @llvm.hexagon.M5.vmpybsu(i32, i32) @@ -1490,7 +1490,7 @@ %z = call i64 @llvm.hexagon.M5.vmpybsu(i32 %a, i32 %b) 
ret i64 %z } -; CHECK: = vmpybsu({{.*}}, {{.*}}) +; CHECK: =vmpybsu({{.*}},{{.*}}) declare i64 @llvm.hexagon.M5.vmpybuu(i32, i32) define i64 @M2_vmpybuu(i32 %a, i32 %b) { @@ -1497,7 +1497,7 @@ %z = call i64 @llvm.hexagon.M5.vmpybuu(i32 %a, i32 %b) ret i64 %z } -; CHECK: = vmpybu({{.*}}, {{.*}}) +; CHECK: =vmpybu({{.*}},{{.*}}) declare i64 @llvm.hexagon.M5.vmacbuu(i64, i32, i32) define i64 @M2_vmacbuu(i64 %a, i32 %b, i32 %c) { @@ -1504,7 +1504,7 @@ %z = call i64 @llvm.hexagon.M5.vmacbuu(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpybu({{.*}}, {{.*}}) +; CHECK: +=vmpybu({{.*}},{{.*}}) declare i64 @llvm.hexagon.M5.vmacbsu(i64, i32, i32) define i64 @M2_vmacbsu(i64 %a, i32 %b, i32 %c) { @@ -1511,7 +1511,7 @@ %z = call i64 @llvm.hexagon.M5.vmacbsu(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpybsu({{.*}}, {{.*}}) +; CHECK: +=vmpybsu({{.*}},{{.*}}) ; Vector polynomial multiply halfwords declare i64 @llvm.hexagon.M4.vpmpyh(i32, i32) @@ -1519,7 +1519,7 @@ %z = call i64 @llvm.hexagon.M4.vpmpyh(i32 %a, i32 %b) ret i64 %z } -; CHECK: = vpmpyh({{.*}}, {{.*}}) +; CHECK: =vpmpyh({{.*}},{{.*}}) declare i64 @llvm.hexagon.M4.vpmpyh.acc(i64, i32, i32) define i64 @M4_vpmpyh_acc(i64 %a, i32 %b, i32 %c) { @@ -1526,4 +1526,4 @@ %z = call i64 @llvm.hexagon.M4.vpmpyh.acc(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: ^= vpmpyh({{.*}}, {{.*}}) +; CHECK: ^=vpmpyh({{.*}},{{.*}}) Index: test/CodeGen/Hexagon/intrinsics/xtype_perm.ll =================================================================== --- test/CodeGen/Hexagon/intrinsics/xtype_perm.ll +++ test/CodeGen/Hexagon/intrinsics/xtype_perm.ll @@ -10,7 +10,7 @@ %z = call i32 @llvm.hexagon.A2.sat(i64 %a) ret i32 %z } -; CHECK: = sat({{.*}}) +; CHECK: =sat({{.*}}) declare i32 @llvm.hexagon.A2.sath(i32) define i32 @A2_sath(i32 %a) { @@ -17,7 +17,7 @@ %z = call i32 @llvm.hexagon.A2.sath(i32 %a) ret i32 %z } -; CHECK: = sath({{.*}}) +; CHECK: =sath({{.*}}) declare i32 @llvm.hexagon.A2.satuh(i32) define i32 @A2_satuh(i32 %a) { @@ -24,7 +24,7 @@ %z = call i32 @llvm.hexagon.A2.satuh(i32 %a) ret i32 %z } -; CHECK: = satuh({{.*}}) +; CHECK: =satuh({{.*}}) declare i32 @llvm.hexagon.A2.satub(i32) define i32 @A2_satub(i32 %a) { @@ -31,7 +31,7 @@ %z = call i32 @llvm.hexagon.A2.satub(i32 %a) ret i32 %z } -; CHECK: = satub({{.*}}) +; CHECK: =satub({{.*}}) declare i32 @llvm.hexagon.A2.satb(i32) define i32 @A2_satb(i32 %a) { @@ -38,7 +38,7 @@ %z = call i32 @llvm.hexagon.A2.satb(i32 %a) ret i32 %z } -; CHECK: = satb({{.*}}) +; CHECK: =satb({{.*}}) ; Swizzle bytes declare i32 @llvm.hexagon.A2.swiz(i32) @@ -46,7 +46,7 @@ %z = call i32 @llvm.hexagon.A2.swiz(i32 %a) ret i32 %z } -; CHECK: = swiz({{.*}}) +; CHECK: =swiz({{.*}}) ; Vector round and pack declare i32 @llvm.hexagon.S2.vrndpackwh(i64) @@ -54,7 +54,7 @@ %z = call i32 @llvm.hexagon.S2.vrndpackwh(i64 %a) ret i32 %z } -; CHECK: = vrndwh({{.*}}) +; CHECK: =vrndwh({{.*}}) declare i32 @llvm.hexagon.S2.vrndpackwhs(i64) define i32 @S2_vrndpackwhs(i64 %a) { @@ -61,7 +61,7 @@ %z = call i32 @llvm.hexagon.S2.vrndpackwhs(i64 %a) ret i32 %z } -; CHECK: = vrndwh({{.*}}):sat +; CHECK: =vrndwh({{.*}}):sat ; Vector saturate and pack declare i32 @llvm.hexagon.S2.vsathub(i64) @@ -69,7 +69,7 @@ %z = call i32 @llvm.hexagon.S2.vsathub(i64 %a) ret i32 %z } -; CHECK: = vsathub({{.*}}) +; CHECK: =vsathub({{.*}}) declare i32 @llvm.hexagon.S2.vsatwh(i64) define i32 @S2_vsatwh(i64 %a) { @@ -76,7 +76,7 @@ %z = call i32 @llvm.hexagon.S2.vsatwh(i64 %a) ret i32 %z } -; CHECK: = vsatwh({{.*}}) +; CHECK: =vsatwh({{.*}}) declare i32 
@llvm.hexagon.S2.vsatwuh(i64) define i32 @S2_vsatwuh(i64 %a) { @@ -83,7 +83,7 @@ %z = call i32 @llvm.hexagon.S2.vsatwuh(i64 %a) ret i32 %z } -; CHECK: = vsatwuh({{.*}}) +; CHECK: =vsatwuh({{.*}}) declare i32 @llvm.hexagon.S2.vsathb(i64) define i32 @S2_vsathb(i64 %a) { @@ -90,7 +90,7 @@ %z = call i32 @llvm.hexagon.S2.vsathb(i64 %a) ret i32 %z } -; CHECK: = vsathb({{.*}}) +; CHECK: =vsathb({{.*}}) declare i32 @llvm.hexagon.S2.svsathb(i32) define i32 @S2_svsathb(i32 %a) { @@ -97,7 +97,7 @@ %z = call i32 @llvm.hexagon.S2.svsathb(i32 %a) ret i32 %z } -; CHECK: = vsathb({{.*}}) +; CHECK: =vsathb({{.*}}) declare i32 @llvm.hexagon.S2.svsathub(i32) define i32 @S2_svsathub(i32 %a) { @@ -104,7 +104,7 @@ %z = call i32 @llvm.hexagon.S2.svsathub(i32 %a) ret i32 %z } -; CHECK: = vsathub({{.*}}) +; CHECK: =vsathub({{.*}}) ; Vector saturate without pack declare i64 @llvm.hexagon.S2.vsathub.nopack(i64) @@ -112,7 +112,7 @@ %z = call i64 @llvm.hexagon.S2.vsathub.nopack(i64 %a) ret i64 %z } -; CHECK: = vsathub({{.*}}) +; CHECK: =vsathub({{.*}}) declare i64 @llvm.hexagon.S2.vsatwuh.nopack(i64) define i64 @S2_vsatwuh_nopack(i64 %a) { @@ -119,7 +119,7 @@ %z = call i64 @llvm.hexagon.S2.vsatwuh.nopack(i64 %a) ret i64 %z } -; CHECK: = vsatwuh({{.*}}) +; CHECK: =vsatwuh({{.*}}) declare i64 @llvm.hexagon.S2.vsatwh.nopack(i64) define i64 @S2_vsatwh_nopack(i64 %a) { @@ -126,7 +126,7 @@ %z = call i64 @llvm.hexagon.S2.vsatwh.nopack(i64 %a) ret i64 %z } -; CHECK: = vsatwh({{.*}}) +; CHECK: =vsatwh({{.*}}) declare i64 @llvm.hexagon.S2.vsathb.nopack(i64) define i64 @S2_vsathb_nopack(i64 %a) { @@ -133,7 +133,7 @@ %z = call i64 @llvm.hexagon.S2.vsathb.nopack(i64 %a) ret i64 %z } -; CHECK: = vsathb({{.*}}) +; CHECK: =vsathb({{.*}}) ; Vector shuffle declare i64 @llvm.hexagon.S2.shuffeb(i64, i64) @@ -141,7 +141,7 @@ %z = call i64 @llvm.hexagon.S2.shuffeb(i64 %a, i64 %b) ret i64 %z } -; CHECK: = shuffeb({{.*}}, {{.*}}) +; CHECK: =shuffeb({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.shuffob(i64, i64) define i64 @S2_shuffob(i64 %a, i64 %b) { @@ -148,7 +148,7 @@ %z = call i64 @llvm.hexagon.S2.shuffob(i64 %a, i64 %b) ret i64 %z } -; CHECK: = shuffob({{.*}}, {{.*}}) +; CHECK: =shuffob({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.shuffeh(i64, i64) define i64 @S2_shuffeh(i64 %a, i64 %b) { @@ -155,7 +155,7 @@ %z = call i64 @llvm.hexagon.S2.shuffeh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = shuffeh({{.*}}, {{.*}}) +; CHECK: =shuffeh({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.shuffoh(i64, i64) define i64 @S2_shuffoh(i64 %a, i64 %b) { @@ -162,7 +162,7 @@ %z = call i64 @llvm.hexagon.S2.shuffoh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = shuffoh({{.*}}, {{.*}}) +; CHECK: =shuffoh({{.*}},{{.*}}) ; Vector splat bytes declare i32 @llvm.hexagon.S2.vsplatrb(i32) @@ -170,7 +170,7 @@ %z = call i32 @llvm.hexagon.S2.vsplatrb(i32 %a) ret i32 %z } -; CHECK: = vsplatb({{.*}}) +; CHECK: =vsplatb({{.*}}) ; Vector splat halfwords declare i64 @llvm.hexagon.S2.vsplatrh(i32) @@ -178,7 +178,7 @@ %z = call i64 @llvm.hexagon.S2.vsplatrh(i32 %a) ret i64 %z } -; CHECK: = vsplath({{.*}}) +; CHECK: =vsplath({{.*}}) ; Vector splice declare i64 @llvm.hexagon.S2.vspliceib(i64, i64, i32) @@ -186,7 +186,7 @@ %z = call i64 @llvm.hexagon.S2.vspliceib(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: = vspliceb({{.*}}, {{.*}}, #0) +; CHECK: =vspliceb({{.*}},{{.*}},#0) declare i64 @llvm.hexagon.S2.vsplicerb(i64, i64, i32) define i64 @S2_vsplicerb(i64 %a, i64 %b, i32 %c) { @@ -193,7 +193,7 @@ %z = call i64 @llvm.hexagon.S2.vsplicerb(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = 
vspliceb({{.*}}, {{.*}}, {{.*}}) +; CHECK: =vspliceb({{.*}},{{.*}},{{.*}}) ; Vector sign extend declare i64 @llvm.hexagon.S2.vsxtbh(i32) @@ -201,7 +201,7 @@ %z = call i64 @llvm.hexagon.S2.vsxtbh(i32 %a) ret i64 %z } -; CHECK: = vsxtbh({{.*}}) +; CHECK: =vsxtbh({{.*}}) declare i64 @llvm.hexagon.S2.vsxthw(i32) define i64 @S2_vsxthw(i32 %a) { @@ -208,7 +208,7 @@ %z = call i64 @llvm.hexagon.S2.vsxthw(i32 %a) ret i64 %z } -; CHECK: = vsxthw({{.*}}) +; CHECK: =vsxthw({{.*}}) ; Vector truncate declare i32 @llvm.hexagon.S2.vtrunohb(i64) @@ -216,7 +216,7 @@ %z = call i32 @llvm.hexagon.S2.vtrunohb(i64 %a) ret i32 %z } -; CHECK: = vtrunohb({{.*}}) +; CHECK: =vtrunohb({{.*}}) declare i32 @llvm.hexagon.S2.vtrunehb(i64) define i32 @S2_vtrunehb(i64 %a) { @@ -223,7 +223,7 @@ %z = call i32 @llvm.hexagon.S2.vtrunehb(i64 %a) ret i32 %z } -; CHECK: = vtrunehb({{.*}}) +; CHECK: =vtrunehb({{.*}}) declare i64 @llvm.hexagon.S2.vtrunowh(i64, i64) define i64 @S2_vtrunowh(i64 %a, i64 %b) { @@ -230,7 +230,7 @@ %z = call i64 @llvm.hexagon.S2.vtrunowh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vtrunowh({{.*}}, {{.*}}) +; CHECK: =vtrunowh({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.vtrunewh(i64, i64) define i64 @S2_vtrunewh(i64 %a, i64 %b) { @@ -237,7 +237,7 @@ %z = call i64 @llvm.hexagon.S2.vtrunewh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vtrunewh({{.*}}, {{.*}}) +; CHECK: =vtrunewh({{.*}},{{.*}}) ; Vector zero extend declare i64 @llvm.hexagon.S2.vzxtbh(i32) @@ -245,7 +245,7 @@ %z = call i64 @llvm.hexagon.S2.vzxtbh(i32 %a) ret i64 %z } -; CHECK: = vzxtbh({{.*}}) +; CHECK: =vzxtbh({{.*}}) declare i64 @llvm.hexagon.S2.vzxthw(i32) define i64 @S2_vzxthw(i32 %a) { @@ -252,4 +252,4 @@ %z = call i64 @llvm.hexagon.S2.vzxthw(i32 %a) ret i64 %z } -; CHECK: = vzxthw({{.*}}) +; CHECK: =vzxthw({{.*}}) Index: test/CodeGen/Hexagon/intrinsics/xtype_pred.ll =================================================================== --- test/CodeGen/Hexagon/intrinsics/xtype_pred.ll +++ test/CodeGen/Hexagon/intrinsics/xtype_pred.ll @@ -10,7 +10,7 @@ %z = call i32 @llvm.hexagon.A4.cmpbgt(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpb.gt({{.*}}, {{.*}}) +; CHECK: =cmpb.gt({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.cmpbeq(i32, i32) define i32 @A4_cmpbeq(i32 %a, i32 %b) { @@ -17,7 +17,7 @@ %z = call i32 @llvm.hexagon.A4.cmpbeq(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpb.eq({{.*}}, {{.*}}) +; CHECK: =cmpb.eq({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.cmpbgtu(i32, i32) define i32 @A4_cmpbgtu(i32 %a, i32 %b) { @@ -24,7 +24,7 @@ %z = call i32 @llvm.hexagon.A4.cmpbgtu(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpb.gtu({{.*}}, {{.*}}) +; CHECK: =cmpb.gtu({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.cmpbgti(i32, i32) define i32 @A4_cmpbgti(i32 %a) { @@ -31,7 +31,7 @@ %z = call i32 @llvm.hexagon.A4.cmpbgti(i32 %a, i32 0) ret i32 %z } -; CHECK: = cmpb.gt({{.*}}, #0) +; CHECK: =cmpb.gt({{.*}},#0) declare i32 @llvm.hexagon.A4.cmpbeqi(i32, i32) define i32 @A4_cmpbeqi(i32 %a) { @@ -38,7 +38,7 @@ %z = call i32 @llvm.hexagon.A4.cmpbeqi(i32 %a, i32 0) ret i32 %z } -; CHECK: = cmpb.eq({{.*}}, #0) +; CHECK: =cmpb.eq({{.*}},#0) declare i32 @llvm.hexagon.A4.cmpbgtui(i32, i32) define i32 @A4_cmpbgtui(i32 %a) { @@ -45,7 +45,7 @@ %z = call i32 @llvm.hexagon.A4.cmpbgtui(i32 %a, i32 0) ret i32 %z } -; CHECK: = cmpb.gtu({{.*}}, #0) +; CHECK: =cmpb.gtu({{.*}},#0) ; Compare half declare i32 @llvm.hexagon.A4.cmphgt(i32, i32) @@ -53,7 +53,7 @@ %z = call i32 @llvm.hexagon.A4.cmphgt(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmph.gt({{.*}}, {{.*}}) +; CHECK: =cmph.gt({{.*}},{{.*}}) 
declare i32 @llvm.hexagon.A4.cmpheq(i32, i32) define i32 @A4_cmpheq(i32 %a, i32 %b) { @@ -60,7 +60,7 @@ %z = call i32 @llvm.hexagon.A4.cmpheq(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmph.eq({{.*}}, {{.*}}) +; CHECK: =cmph.eq({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.cmphgtu(i32, i32) define i32 @A4_cmphgtu(i32 %a, i32 %b) { @@ -67,7 +67,7 @@ %z = call i32 @llvm.hexagon.A4.cmphgtu(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmph.gtu({{.*}}, {{.*}}) +; CHECK: =cmph.gtu({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.cmphgti(i32, i32) define i32 @A4_cmphgti(i32 %a) { @@ -74,7 +74,7 @@ %z = call i32 @llvm.hexagon.A4.cmphgti(i32 %a, i32 0) ret i32 %z } -; CHECK: = cmph.gt({{.*}}, #0) +; CHECK: =cmph.gt({{.*}},#0) declare i32 @llvm.hexagon.A4.cmpheqi(i32, i32) define i32 @A4_cmpheqi(i32 %a) { @@ -81,7 +81,7 @@ %z = call i32 @llvm.hexagon.A4.cmpheqi(i32 %a, i32 0) ret i32 %z } -; CHECK: = cmph.eq({{.*}}, #0) +; CHECK: =cmph.eq({{.*}},#0) declare i32 @llvm.hexagon.A4.cmphgtui(i32, i32) define i32 @A4_cmphgtui(i32 %a) { @@ -88,7 +88,7 @@ %z = call i32 @llvm.hexagon.A4.cmphgtui(i32 %a, i32 0) ret i32 %z } -; CHECK: = cmph.gtu({{.*}}, #0) +; CHECK: =cmph.gtu({{.*}},#0) ; Compare doublewords declare i32 @llvm.hexagon.C2.cmpgtp(i64, i64) @@ -96,7 +96,7 @@ %z = call i32 @llvm.hexagon.C2.cmpgtp(i64 %a, i64 %b) ret i32 %z } -; CHECK: = cmp.gt({{.*}}, {{.*}}) +; CHECK: =cmp.gt({{.*}},{{.*}}) declare i32 @llvm.hexagon.C2.cmpeqp(i64, i64) define i32 @C2_cmpeqp(i64 %a, i64 %b) { @@ -103,7 +103,7 @@ %z = call i32 @llvm.hexagon.C2.cmpeqp(i64 %a, i64 %b) ret i32 %z } -; CHECK: = cmp.eq({{.*}}, {{.*}}) +; CHECK: =cmp.eq({{.*}},{{.*}}) declare i32 @llvm.hexagon.C2.cmpgtup(i64, i64) define i32 @C2_cmpgtup(i64 %a, i64 %b) { @@ -110,7 +110,7 @@ %z = call i32 @llvm.hexagon.C2.cmpgtup(i64 %a, i64 %b) ret i32 %z } -; CHECK: = cmp.gtu({{.*}}, {{.*}}) +; CHECK: =cmp.gtu({{.*}},{{.*}}) ; Compare bitmask declare i32 @llvm.hexagon.C2.bitsclri(i32, i32) @@ -118,7 +118,7 @@ %z = call i32 @llvm.hexagon.C2.bitsclri(i32 %a, i32 0) ret i32 %z } -; CHECK: = bitsclr({{.*}}, #0) +; CHECK: =bitsclr({{.*}},#0) declare i32 @llvm.hexagon.C4.nbitsclri(i32, i32) define i32 @C4_nbitsclri(i32 %a) { @@ -125,7 +125,7 @@ %z = call i32 @llvm.hexagon.C4.nbitsclri(i32 %a, i32 0) ret i32 %z } -; CHECK: = !bitsclr({{.*}}, #0) +; CHECK: =!bitsclr({{.*}},#0) declare i32 @llvm.hexagon.C2.bitsset(i32, i32) define i32 @C2_bitsset(i32 %a, i32 %b) { @@ -132,7 +132,7 @@ %z = call i32 @llvm.hexagon.C2.bitsset(i32 %a, i32 %b) ret i32 %z } -; CHECK: = bitsset({{.*}}, {{.*}}) +; CHECK: =bitsset({{.*}},{{.*}}) declare i32 @llvm.hexagon.C4.nbitsset(i32, i32) define i32 @C4_nbitsset(i32 %a, i32 %b) { @@ -139,7 +139,7 @@ %z = call i32 @llvm.hexagon.C4.nbitsset(i32 %a, i32 %b) ret i32 %z } -; CHECK: = !bitsset({{.*}}, {{.*}}) +; CHECK: =!bitsset({{.*}},{{.*}}) declare i32 @llvm.hexagon.C2.bitsclr(i32, i32) define i32 @C2_bitsclr(i32 %a, i32 %b) { @@ -146,7 +146,7 @@ %z = call i32 @llvm.hexagon.C2.bitsclr(i32 %a, i32 %b) ret i32 %z } -; CHECK: = bitsclr({{.*}}, {{.*}}) +; CHECK: =bitsclr({{.*}},{{.*}}) declare i32 @llvm.hexagon.C4.nbitsclr(i32, i32) define i32 @C4_nbitsclr(i32 %a, i32 %b) { @@ -153,7 +153,7 @@ %z = call i32 @llvm.hexagon.C4.nbitsclr(i32 %a, i32 %b) ret i32 %z } -; CHECK: = !bitsclr({{.*}}, {{.*}}) +; CHECK: =!bitsclr({{.*}},{{.*}}) ; Mask generate from predicate declare i64 @llvm.hexagon.C2.mask(i32) @@ -161,7 +161,7 @@ %z = call i64 @llvm.hexagon.C2.mask(i32 %a) ret i64 %z } -; CHECK: = mask({{.*}}) +; CHECK: =mask({{.*}}) ; Check for TLB match declare 
i32 @llvm.hexagon.A4.tlbmatch(i64, i32) @@ -169,7 +169,7 @@ %z = call i32 @llvm.hexagon.A4.tlbmatch(i64 %a, i32 %b) ret i32 %z } -; CHECK: = tlbmatch({{.*}}, {{.*}}) +; CHECK: =tlbmatch({{.*}},{{.*}}) ; Test bit declare i32 @llvm.hexagon.S2.tstbit.i(i32, i32) @@ -177,7 +177,7 @@ %z = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: = tstbit({{.*}}, #0) +; CHECK: =tstbit({{.*}},#0) declare i32 @llvm.hexagon.S4.ntstbit.i(i32, i32) define i32 @S4_ntstbit_i(i32 %a) { @@ -184,7 +184,7 @@ %z = call i32 @llvm.hexagon.S4.ntstbit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: = !tstbit({{.*}}, #0) +; CHECK: =!tstbit({{.*}},#0) declare i32 @llvm.hexagon.S2.tstbit.r(i32, i32) define i32 @S2_tstbit_r(i32 %a, i32 %b) { @@ -191,7 +191,7 @@ %z = call i32 @llvm.hexagon.S2.tstbit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = tstbit({{.*}}, {{.*}}) +; CHECK: =tstbit({{.*}},{{.*}}) declare i32 @llvm.hexagon.S4.ntstbit.r(i32, i32) define i32 @S4_ntstbit_r(i32 %a, i32 %b) { @@ -198,7 +198,7 @@ %z = call i32 @llvm.hexagon.S4.ntstbit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = !tstbit({{.*}}, {{.*}}) +; CHECK: =!tstbit({{.*}},{{.*}}) ; Vector compare halfwords declare i32 @llvm.hexagon.A2.vcmpheq(i64, i64) @@ -206,7 +206,7 @@ %z = call i32 @llvm.hexagon.A2.vcmpheq(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmph.eq({{.*}}, {{.*}}) +; CHECK: =vcmph.eq({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.vcmphgt(i64, i64) define i32 @A2_vcmphgt(i64 %a, i64 %b) { @@ -213,7 +213,7 @@ %z = call i32 @llvm.hexagon.A2.vcmphgt(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmph.gt({{.*}}, {{.*}}) +; CHECK: =vcmph.gt({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.vcmphgtu(i64, i64) define i32 @A2_vcmphgtu(i64 %a, i64 %b) { @@ -220,7 +220,7 @@ %z = call i32 @llvm.hexagon.A2.vcmphgtu(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmph.gtu({{.*}}, {{.*}}) +; CHECK: =vcmph.gtu({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.vcmpheqi(i64, i32) define i32 @A4_vcmpheqi(i64 %a) { @@ -227,7 +227,7 @@ %z = call i32 @llvm.hexagon.A4.vcmpheqi(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmph.eq({{.*}}, #0) +; CHECK: =vcmph.eq({{.*}},#0) declare i32 @llvm.hexagon.A4.vcmphgti(i64, i32) define i32 @A4_vcmphgti(i64 %a) { @@ -234,7 +234,7 @@ %z = call i32 @llvm.hexagon.A4.vcmphgti(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmph.gt({{.*}}, #0) +; CHECK: =vcmph.gt({{.*}},#0) declare i32 @llvm.hexagon.A4.vcmphgtui(i64, i32) define i32 @A4_vcmphgtui(i64 %a) { @@ -241,7 +241,7 @@ %z = call i32 @llvm.hexagon.A4.vcmphgtui(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmph.gtu({{.*}}, #0) +; CHECK: =vcmph.gtu({{.*}},#0) ; Vector compare bytes for any match declare i32 @llvm.hexagon.A4.vcmpbeq.any(i64, i64) @@ -249,7 +249,7 @@ %z = call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %a, i64 %b) ret i32 %z } -; CHECK: = any8(vcmpb.eq({{.*}}, {{.*}})) +; CHECK: =any8(vcmpb.eq({{.*}},{{.*}})) ; Vector compare bytes declare i32 @llvm.hexagon.A2.vcmpbeq(i64, i64) @@ -257,7 +257,7 @@ %z = call i32 @llvm.hexagon.A2.vcmpbeq(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmpb.eq({{.*}}, {{.*}}) +; CHECK: =vcmpb.eq({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.vcmpbgtu(i64, i64) define i32 @A2_vcmpbgtu(i64 %a, i64 %b) { @@ -264,7 +264,7 @@ %z = call i32 @llvm.hexagon.A2.vcmpbgtu(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmpb.gtu({{.*}}, {{.*}}) +; CHECK: =vcmpb.gtu({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.vcmpbgt(i64, i64) define i32 @A4_vcmpbgt(i64 %a, i64 %b) { @@ -271,7 +271,7 @@ %z = call i32 @llvm.hexagon.A4.vcmpbgt(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmpb.gt({{.*}}, {{.*}}) +; CHECK: 
=vcmpb.gt({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.vcmpbeqi(i64, i32) define i32 @A4_vcmpbeqi(i64 %a) { @@ -278,7 +278,7 @@ %z = call i32 @llvm.hexagon.A4.vcmpbeqi(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmpb.eq({{.*}}, #0) +; CHECK: =vcmpb.eq({{.*}},#0) declare i32 @llvm.hexagon.A4.vcmpbgti(i64, i32) define i32 @A4_vcmpbgti(i64 %a) { @@ -285,7 +285,7 @@ %z = call i32 @llvm.hexagon.A4.vcmpbgti(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmpb.gt({{.*}}, #0) +; CHECK: =vcmpb.gt({{.*}},#0) declare i32 @llvm.hexagon.A4.vcmpbgtui(i64, i32) define i32 @A4_vcmpbgtui(i64 %a) { @@ -292,7 +292,7 @@ %z = call i32 @llvm.hexagon.A4.vcmpbgtui(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmpb.gtu({{.*}}, #0) +; CHECK: =vcmpb.gtu({{.*}},#0) ; Vector compare words declare i32 @llvm.hexagon.A2.vcmpweq(i64, i64) @@ -300,7 +300,7 @@ %z = call i32 @llvm.hexagon.A2.vcmpweq(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmpw.eq({{.*}}, {{.*}}) +; CHECK: =vcmpw.eq({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.vcmpwgt(i64, i64) define i32 @A2_vcmpwgt(i64 %a, i64 %b) { @@ -307,7 +307,7 @@ %z = call i32 @llvm.hexagon.A2.vcmpwgt(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmpw.gt({{.*}}, {{.*}}) +; CHECK: =vcmpw.gt({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.vcmpwgtu(i64, i64) define i32 @A2_vcmpwgtu(i64 %a, i64 %b) { @@ -314,7 +314,7 @@ %z = call i32 @llvm.hexagon.A2.vcmpwgtu(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmpw.gtu({{.*}}, {{.*}}) +; CHECK: =vcmpw.gtu({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.vcmpweqi(i64, i32) define i32 @A4_vcmpweqi(i64 %a) { @@ -321,7 +321,7 @@ %z = call i32 @llvm.hexagon.A4.vcmpweqi(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmpw.eq({{.*}}, #0) +; CHECK: =vcmpw.eq({{.*}},#0) declare i32 @llvm.hexagon.A4.vcmpwgti(i64, i32) define i32 @A4_vcmpwgti(i64 %a) { @@ -328,7 +328,7 @@ %z = call i32 @llvm.hexagon.A4.vcmpwgti(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmpw.gt({{.*}}, #0) +; CHECK: =vcmpw.gt({{.*}},#0) declare i32 @llvm.hexagon.A4.vcmpwgtui(i64, i32) define i32 @A4_vcmpwgtui(i64 %a) { @@ -335,7 +335,7 @@ %z = call i32 @llvm.hexagon.A4.vcmpwgtui(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmpw.gtu({{.*}}, #0) +; CHECK: =vcmpw.gtu({{.*}},#0) ; Viterbi pack even and odd predicate bitsclr declare i32 @llvm.hexagon.C2.vitpack(i32, i32) @@ -343,7 +343,7 @@ %z = call i32 @llvm.hexagon.C2.vitpack(i32 %a, i32 %b) ret i32 %z } -; CHECK: = vitpack({{.*}}, {{.*}}) +; CHECK: =vitpack({{.*}},{{.*}}) ; Vector mux declare i64 @llvm.hexagon.C2.vmux(i32, i64, i64) @@ -351,4 +351,4 @@ %z = call i64 @llvm.hexagon.C2.vmux(i32 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: = vmux({{.*}}, {{.*}}, {{.*}}) +; CHECK: =vmux({{.*}},{{.*}},{{.*}}) Index: test/CodeGen/Hexagon/intrinsics/xtype_shift.ll =================================================================== --- test/CodeGen/Hexagon/intrinsics/xtype_shift.ll +++ test/CodeGen/Hexagon/intrinsics/xtype_shift.ll @@ -10,7 +10,7 @@ %z = call i64 @llvm.hexagon.S2.asr.i.p(i64 %a, i32 0) ret i64 %z } -; CHECK: = asr({{.*}}, #0) +; CHECK: =asr({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32) define i64 @S2_lsr_i_p(i64 %a) { @@ -17,7 +17,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %a, i32 0) ret i64 %z } -; CHECK: = lsr({{.*}}, #0) +; CHECK: =lsr({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32) define i64 @S2_asl_i_p(i64 %a) { @@ -24,7 +24,7 @@ %z = call i64 @llvm.hexagon.S2.asl.i.p(i64 %a, i32 0) ret i64 %z } -; CHECK: = asl({{.*}}, #0) +; CHECK: =asl({{.*}},#0) declare i32 @llvm.hexagon.S2.asr.i.r(i32, i32) define i32 @S2_asr_i_r(i32 %a) { @@ -31,7 +31,7 @@ 
%z = call i32 @llvm.hexagon.S2.asr.i.r(i32 %a, i32 0) ret i32 %z } -; CHECK: = asr({{.*}}, #0) +; CHECK: =asr({{.*}},#0) declare i32 @llvm.hexagon.S2.lsr.i.r(i32, i32) define i32 @S2_lsr_i_r(i32 %a) { @@ -38,7 +38,7 @@ %z = call i32 @llvm.hexagon.S2.lsr.i.r(i32 %a, i32 0) ret i32 %z } -; CHECK: = lsr({{.*}}, #0) +; CHECK: =lsr({{.*}},#0) declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32) define i32 @S2_asl_i_r(i32 %a) { @@ -45,7 +45,7 @@ %z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 0) ret i32 %z } -; CHECK: = asl({{.*}}, #0) +; CHECK: =asl({{.*}},#0) ; Shift by immediate and accumulate declare i64 @llvm.hexagon.S2.asr.i.p.nac(i64, i64, i32) @@ -53,7 +53,7 @@ %z = call i64 @llvm.hexagon.S2.asr.i.p.nac(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: -= asr({{.*}}, #0) +; CHECK: -=asr({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.p.nac(i64, i64, i32) define i64 @S2_lsr_i_p_nac(i64 %a, i64 %b) { @@ -60,7 +60,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.i.p.nac(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: -= lsr({{.*}}, #0) +; CHECK: -=lsr({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.p.nac(i64, i64, i32) define i64 @S2_asl_i_p_nac(i64 %a, i64 %b) { @@ -67,7 +67,7 @@ %z = call i64 @llvm.hexagon.S2.asl.i.p.nac(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: -= asl({{.*}}, #0) +; CHECK: -=asl({{.*}},#0) declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32) define i64 @S2_asr_i_p_acc(i64 %a, i64 %b) { @@ -74,7 +74,7 @@ %z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: += asr({{.*}}, #0) +; CHECK: +=asr({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.p.acc(i64, i64, i32) define i64 @S2_lsr_i_p_acc(i64 %a, i64 %b) { @@ -81,7 +81,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.i.p.acc(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: += lsr({{.*}}, #0) +; CHECK: +=lsr({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.p.acc(i64, i64, i32) define i64 @S2_asl_i_p_acc(i64 %a, i64 %b) { @@ -88,7 +88,7 @@ %z = call i64 @llvm.hexagon.S2.asl.i.p.acc(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: += asl({{.*}}, #0) +; CHECK: +=asl({{.*}},#0) declare i32 @llvm.hexagon.S2.asr.i.r.nac(i32, i32, i32) define i32 @S2_asr_i_r_nac(i32 %a, i32 %b) { @@ -95,7 +95,7 @@ %z = call i32 @llvm.hexagon.S2.asr.i.r.nac(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: -= asr({{.*}}, #0) +; CHECK: -=asr({{.*}},#0) declare i32 @llvm.hexagon.S2.lsr.i.r.nac(i32, i32, i32) define i32 @S2_lsr_i_r_nac(i32 %a, i32 %b) { @@ -102,7 +102,7 @@ %z = call i32 @llvm.hexagon.S2.lsr.i.r.nac(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: -= lsr({{.*}}, #0) +; CHECK: -=lsr({{.*}},#0) declare i32 @llvm.hexagon.S2.asl.i.r.nac(i32, i32, i32) define i32 @S2_asl_i_r_nac(i32 %a, i32 %b) { @@ -109,7 +109,7 @@ %z = call i32 @llvm.hexagon.S2.asl.i.r.nac(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: -= asl({{.*}}, #0) +; CHECK: -=asl({{.*}},#0) declare i32 @llvm.hexagon.S2.asr.i.r.acc(i32, i32, i32) define i32 @S2_asr_i_r_acc(i32 %a, i32 %b) { @@ -116,7 +116,7 @@ %z = call i32 @llvm.hexagon.S2.asr.i.r.acc(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: += asr({{.*}}, #0) +; CHECK: +=asr({{.*}},#0) declare i32 @llvm.hexagon.S2.lsr.i.r.acc(i32, i32, i32) define i32 @S2_lsr_i_r_acc(i32 %a, i32 %b) { @@ -123,7 +123,7 @@ %z = call i32 @llvm.hexagon.S2.lsr.i.r.acc(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: += lsr({{.*}}, #0) +; CHECK: +=lsr({{.*}},#0) declare i32 @llvm.hexagon.S2.asl.i.r.acc(i32, i32, i32) define i32 @S2_asl_i_r_acc(i32 %a, i32 %b) { @@ -130,36 +130,36 @@ %z = call i32 @llvm.hexagon.S2.asl.i.r.acc(i32 %a, i32 %b, i32 0) ret i32 %z } 
-; CHECK: += asl({{.*}}, #0) +; CHECK: +=asl({{.*}},#0) ; Shift by immediate and add declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32) define i32 @S4_addi_asl_ri(i32 %a) { - %z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0, i32 %a, i32 0) + %z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0,i32 %a, i32 0) ret i32 %z } -; CHECK: = add(#0, asl({{.*}}, #0)) +; CHECK: =add(#0,asl({{.*}},#0)) declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32) define i32 @S4_subi_asl_ri(i32 %a) { - %z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0, i32 %a, i32 0) + %z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0,i32 %a, i32 0) ret i32 %z } -; CHECK: = sub(#0, asl({{.*}}, #0)) +; CHECK: =sub(#0,asl({{.*}},#0)) declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32) define i32 @S4_addi_lsr_ri(i32 %a) { - %z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0, i32 %a, i32 0) + %z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0,i32 %a, i32 0) ret i32 %z } -; CHECK: = add(#0, lsr({{.*}}, #0)) +; CHECK: =add(#0,lsr({{.*}},#0)) declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32) define i32 @S4_subi_lsr_ri(i32 %a) { - %z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0, i32 %a, i32 0) + %z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0,i32 %a, i32 0) ret i32 %z } -; CHECK: = sub(#0, lsr({{.*}}, #0)) +; CHECK: =sub(#0,lsr({{.*}},#0)) declare i32 @llvm.hexagon.S2.addasl.rrri(i32, i32, i32) define i32 @S2_addasl_rrri(i32 %a, i32 %b) { @@ -166,7 +166,7 @@ %z = call i32 @llvm.hexagon.S2.addasl.rrri(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: = addasl({{.*}}, {{.*}}, #0) +; CHECK: =addasl({{.*}},{{.*}},#0) ; Shift by immediate and logical declare i64 @llvm.hexagon.S2.asr.i.p.and(i64, i64, i32) @@ -174,7 +174,7 @@ %z = call i64 @llvm.hexagon.S2.asr.i.p.and(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: &= asr({{.*}}, #0) +; CHECK: &=asr({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.p.and(i64, i64, i32) define i64 @S2_lsr_i_p_and(i64 %a, i64 %b) { @@ -181,7 +181,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.i.p.and(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: {{.*}} &= lsr({{.*}}, #0) +; CHECK: {{.*}}&=lsr({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.p.and(i64, i64, i32) define i64 @S2_asl_i_p_and(i64 %a, i64 %b) { @@ -188,7 +188,7 @@ %z = call i64 @llvm.hexagon.S2.asl.i.p.and(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: &= asl({{.*}}, #0) +; CHECK: &=asl({{.*}},#0) declare i64 @llvm.hexagon.S2.asr.i.p.or(i64, i64, i32) define i64 @S2_asr_i_p_or(i64 %a, i64 %b) { @@ -195,7 +195,7 @@ %z = call i64 @llvm.hexagon.S2.asr.i.p.or(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: |= asr({{.*}}, #0) +; CHECK: |=asr({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.p.or(i64, i64, i32) define i64 @S2_lsr_i_p_or(i64 %a, i64 %b) { @@ -202,7 +202,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.i.p.or(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: |= lsr({{.*}}, #0) +; CHECK: |=lsr({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.p.or(i64, i64, i32) define i64 @S2_asl_i_p_or(i64 %a, i64 %b) { @@ -209,7 +209,7 @@ %z = call i64 @llvm.hexagon.S2.asl.i.p.or(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: |= asl({{.*}}, #0) +; CHECK: |=asl({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64, i64, i32) define i64 @S2_lsr_i_p_xacc(i64 %a, i64 %b) { @@ -216,7 +216,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: ^= lsr({{.*}}, #0) +; CHECK: ^=lsr({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.p.xacc(i64, i64, i32) define i64 @S2_asl_i_p_xacc(i64 %a, i64 %b) { @@ -223,7 +223,7 @@ %z = call i64 
@llvm.hexagon.S2.asl.i.p.xacc(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: ^= asl({{.*}}, #0) +; CHECK: ^=asl({{.*}},#0) declare i32 @llvm.hexagon.S2.asr.i.r.and(i32, i32, i32) define i32 @S2_asr_i_r_and(i32 %a, i32 %b) { @@ -230,7 +230,7 @@ %z = call i32 @llvm.hexagon.S2.asr.i.r.and(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: &= asr({{.*}}, #0) +; CHECK: &=asr({{.*}},#0) declare i32 @llvm.hexagon.S2.lsr.i.r.and(i32, i32, i32) define i32 @S2_lsr_i_r_and(i32 %a, i32 %b) { @@ -237,7 +237,7 @@ %z = call i32 @llvm.hexagon.S2.lsr.i.r.and(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: &= lsr({{.*}}, #0) +; CHECK: &=lsr({{.*}},#0) declare i32 @llvm.hexagon.S2.asl.i.r.and(i32, i32, i32) define i32 @S2_asl_i_r_and(i32 %a, i32 %b) { @@ -244,7 +244,7 @@ %z = call i32 @llvm.hexagon.S2.asl.i.r.and(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: &= asl({{.*}}, #0) +; CHECK: &=asl({{.*}},#0) declare i32 @llvm.hexagon.S2.asr.i.r.or(i32, i32, i32) define i32 @S2_asr_i_r_or(i32 %a, i32 %b) { @@ -251,7 +251,7 @@ %z = call i32 @llvm.hexagon.S2.asr.i.r.or(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: |= asr({{.*}}, #0) +; CHECK: |=asr({{.*}},#0) declare i32 @llvm.hexagon.S2.lsr.i.r.or(i32, i32, i32) define i32 @S2_lsr_i_r_or(i32 %a, i32 %b) { @@ -258,7 +258,7 @@ %z = call i32 @llvm.hexagon.S2.lsr.i.r.or(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: |= lsr({{.*}}, #0) +; CHECK: |=lsr({{.*}},#0) declare i32 @llvm.hexagon.S2.asl.i.r.or(i32, i32, i32) define i32 @S2_asl_i_r_or(i32%a, i32 %b) { @@ -265,7 +265,7 @@ %z = call i32 @llvm.hexagon.S2.asl.i.r.or(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: |= asl({{.*}}, #0) +; CHECK: |=asl({{.*}},#0) declare i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32, i32, i32) define i32 @S2_lsr_i_r_xacc(i32 %a, i32 %b) { @@ -272,7 +272,7 @@ %z = call i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32%a, i32 %b, i32 0) ret i32 %z } -; CHECK: ^= lsr({{.*}}, #0) +; CHECK: ^=lsr({{.*}},#0) declare i32 @llvm.hexagon.S2.asl.i.r.xacc(i32, i32, i32) define i32 @S2_asl_i_r_xacc(i32 %a, i32 %b) { @@ -279,35 +279,35 @@ %z = call i32 @llvm.hexagon.S2.asl.i.r.xacc(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: ^= asl({{.*}}, #0) +; CHECK: ^=asl({{.*}},#0) declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32) define i32 @S4_andi_asl_ri(i32 %a) { - %z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0, i32 %a, i32 0) + %z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0,i32 %a, i32 0) ret i32 %z } -; CHECK: = and(#0, asl({{.*}}, #0)) +; CHECK: =and(#0,asl({{.*}},#0)) declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32) define i32 @S4_ori_asl_ri(i32 %a) { - %z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0, i32 %a, i32 0) + %z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0,i32 %a, i32 0) ret i32 %z } -; CHECK: = or(#0, asl({{.*}}, #0)) +; CHECK: =or(#0,asl({{.*}},#0)) declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32) define i32 @S4_andi_lsr_ri(i32 %a) { - %z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0, i32 %a, i32 0) + %z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0,i32 %a, i32 0) ret i32 %z } -; CHECK: = and(#0, lsr({{.*}}, #0)) +; CHECK: =and(#0,lsr({{.*}},#0)) declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32) define i32 @S4_ori_lsr_ri(i32 %a) { - %z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0, i32 %a, i32 0) + %z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0,i32 %a, i32 0) ret i32 %z } -; CHECK: = or(#0, lsr({{.*}}, #0)) +; CHECK: =or(#0,lsr({{.*}},#0)) ; Shift right by immediate with rounding declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32) @@ -315,7 +315,7 @@ %z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 
%a, i32 0) ret i64 %z } -; CHECK: = asr({{.*}}, #0):rnd +; CHECK: =asr({{.*}},#0):rnd declare i32 @llvm.hexagon.S2.asr.i.r.rnd(i32, i32) define i32 @S2_asr_i_r_rnd(i32 %a) { @@ -322,7 +322,7 @@ %z = call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 %a, i32 0) ret i32 %z } -; CHECK: = asr({{.*}}, #0):rnd +; CHECK: =asr({{.*}},#0):rnd ; Shift left by immediate with saturation declare i32 @llvm.hexagon.S2.asl.i.r.sat(i32, i32) @@ -330,7 +330,7 @@ %z = call i32 @llvm.hexagon.S2.asl.i.r.sat(i32 %a, i32 0) ret i32 %z } -; CHECK: = asl({{.*}}, #0):sat +; CHECK: =asl({{.*}},#0):sat ; Shift by register declare i64 @llvm.hexagon.S2.asr.r.p(i64, i32) @@ -338,7 +338,7 @@ %z = call i64 @llvm.hexagon.S2.asr.r.p(i64 %a, i32 %b) ret i64 %z } -; CHECK: = asr({{.*}}, {{.*}}) +; CHECK: =asr({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.lsr.r.p(i64, i32) define i64 @S2_lsr_r_p(i64 %a, i32 %b) { @@ -345,7 +345,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.r.p(i64 %a, i32 %b) ret i64 %z } -; CHECK: = lsr({{.*}}, {{.*}}) +; CHECK: =lsr({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32) define i64 @S2_asl_r_p(i64 %a, i32 %b) { @@ -352,7 +352,7 @@ %z = call i64 @llvm.hexagon.S2.asl.r.p(i64 %a, i32 %b) ret i64 %z } -; CHECK: = asl({{.*}}, {{.*}}) +; CHECK: =asl({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.lsl.r.p(i64, i32) define i64 @S2_lsl_r_p(i64 %a, i32 %b) { @@ -359,7 +359,7 @@ %z = call i64 @llvm.hexagon.S2.lsl.r.p(i64 %a, i32 %b) ret i64 %z } -; CHECK: = lsl({{.*}}, {{.*}}) +; CHECK: =lsl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asr.r.r(i32, i32) define i32 @S2_asr_r_r(i32 %a, i32 %b) { @@ -366,7 +366,7 @@ %z = call i32 @llvm.hexagon.S2.asr.r.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = asr({{.*}}, {{.*}}) +; CHECK: =asr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32) define i32 @S2_lsr_r_r(i32 %a, i32 %b) { @@ -373,7 +373,7 @@ %z = call i32 @llvm.hexagon.S2.lsr.r.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = lsr({{.*}}, {{.*}}) +; CHECK: =lsr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32) define i32 @S2_asl_r_r(i32 %a, i32 %b) { @@ -380,7 +380,7 @@ %z = call i32 @llvm.hexagon.S2.asl.r.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = asl({{.*}}, {{.*}}) +; CHECK: =asl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r(i32, i32) define i32 @S2_lsl_r_r(i32 %a, i32 %b) { @@ -387,14 +387,14 @@ %z = call i32 @llvm.hexagon.S2.lsl.r.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = lsl({{.*}}, {{.*}}) +; CHECK: =lsl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S4.lsli(i32, i32) define i32 @S4_lsli(i32 %a) { - %z = call i32 @llvm.hexagon.S4.lsli(i32 0, i32 %a) + %z = call i32 @llvm.hexagon.S4.lsli(i32 0,i32 %a) ret i32 %z } -; CHECK: = lsl(#0, {{.*}}) +; CHECK: =lsl(#0,{{.*}}) ; Shift by register and accumulate declare i64 @llvm.hexagon.S2.asr.r.p.nac(i64, i64, i32) @@ -402,7 +402,7 @@ %z = call i64 @llvm.hexagon.S2.asr.r.p.nac(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: -= asr({{.*}}, r4) +; CHECK: -=asr({{.*}},r4) declare i64 @llvm.hexagon.S2.lsr.r.p.nac(i64, i64, i32) define i64 @S2_lsr_r_p_nac(i64 %a, i64 %b, i32 %c) { @@ -409,7 +409,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.r.p.nac(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: -= lsr({{.*}}, r4) +; CHECK: -=lsr({{.*}},r4) declare i64 @llvm.hexagon.S2.asl.r.p.nac(i64, i64, i32) define i64 @S2_asl_r_p_nac(i64 %a, i64 %b, i32 %c) { @@ -416,7 +416,7 @@ %z = call i64 @llvm.hexagon.S2.asl.r.p.nac(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: -= asl({{.*}}, r4) +; CHECK: -=asl({{.*}},r4) declare i64 @llvm.hexagon.S2.lsl.r.p.nac(i64, i64, i32) define i64 
@S2_lsl_r_p_nac(i64 %a, i64 %b, i32 %c) { @@ -423,7 +423,7 @@ %z = call i64 @llvm.hexagon.S2.lsl.r.p.nac(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: -= lsl({{.*}}, r4) +; CHECK: -=lsl({{.*}},r4) declare i64 @llvm.hexagon.S2.asr.r.p.acc(i64, i64, i32) define i64 @S2_asr_r_p_acc(i64 %a, i64 %b, i32 %c) { @@ -430,7 +430,7 @@ %z = call i64 @llvm.hexagon.S2.asr.r.p.acc(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: += asr({{.*}}, r4) +; CHECK: +=asr({{.*}},r4) declare i64 @llvm.hexagon.S2.lsr.r.p.acc(i64, i64, i32) define i64 @S2_lsr_r_p_acc(i64 %a, i64 %b, i32 %c) { @@ -437,7 +437,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.r.p.acc(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: += lsr({{.*}}, r4) +; CHECK: +=lsr({{.*}},r4) declare i64 @llvm.hexagon.S2.asl.r.p.acc(i64, i64, i32) define i64 @S2_asl_r_p_acc(i64 %a, i64 %b, i32 %c) { @@ -444,7 +444,7 @@ %z = call i64 @llvm.hexagon.S2.asl.r.p.acc(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: += asl({{.*}}, r4) +; CHECK: +=asl({{.*}},r4) declare i64 @llvm.hexagon.S2.lsl.r.p.acc(i64, i64, i32) define i64 @S2_lsl_r_p_acc(i64 %a, i64 %b, i32 %c) { @@ -451,7 +451,7 @@ %z = call i64 @llvm.hexagon.S2.lsl.r.p.acc(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: += lsl({{.*}}, r4) +; CHECK: +=lsl({{.*}},r4) declare i32 @llvm.hexagon.S2.asr.r.r.nac(i32, i32, i32) define i32 @S2_asr_r_r_nac(i32 %a, i32 %b, i32 %c) { @@ -458,7 +458,7 @@ %z = call i32 @llvm.hexagon.S2.asr.r.r.nac(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= asr({{.*}}, {{.*}}) +; CHECK: -=asr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r.nac(i32, i32, i32) define i32 @S2_lsr_r_r_nac(i32 %a, i32 %b, i32 %c) { @@ -465,7 +465,7 @@ %z = call i32 @llvm.hexagon.S2.lsr.r.r.nac(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= lsr({{.*}}, {{.*}}) +; CHECK: -=lsr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r.nac(i32, i32, i32) define i32 @S2_asl_r_r_nac(i32 %a, i32 %b, i32 %c) { @@ -472,7 +472,7 @@ %z = call i32 @llvm.hexagon.S2.asl.r.r.nac(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= asl({{.*}}, {{.*}}) +; CHECK: -=asl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r.nac(i32, i32, i32) define i32 @S2_lsl_r_r_nac(i32 %a, i32 %b, i32 %c) { @@ -479,7 +479,7 @@ %z = call i32 @llvm.hexagon.S2.lsl.r.r.nac(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= lsl({{.*}}, {{.*}}) +; CHECK: -=lsl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asr.r.r.acc(i32, i32, i32) define i32 @S2_asr_r_r_acc(i32 %a, i32 %b, i32 %c) { @@ -486,7 +486,7 @@ %z = call i32 @llvm.hexagon.S2.asr.r.r.acc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += asr({{.*}}, {{.*}}) +; CHECK: +=asr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r.acc(i32, i32, i32) define i32 @S2_lsr_r_r_acc(i32 %a, i32 %b, i32 %c) { @@ -493,7 +493,7 @@ %z = call i32 @llvm.hexagon.S2.lsr.r.r.acc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += lsr({{.*}}, {{.*}}) +; CHECK: +=lsr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r.acc(i32, i32, i32) define i32 @S2_asl_r_r_acc(i32 %a, i32 %b, i32 %c) { @@ -500,7 +500,7 @@ %z = call i32 @llvm.hexagon.S2.asl.r.r.acc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += asl({{.*}}, {{.*}}) +; CHECK: +=asl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r.acc(i32, i32, i32) define i32 @S2_lsl_r_r_acc(i32 %a, i32 %b, i32 %c) { @@ -507,7 +507,7 @@ %z = call i32 @llvm.hexagon.S2.lsl.r.r.acc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += lsl({{.*}}, {{.*}}) +; CHECK: +=lsl({{.*}},{{.*}}) ; Shift by register and logical declare i64 @llvm.hexagon.S2.asr.r.p.or(i64, i64, i32) @@ -515,7 +515,7 @@ %z = 
call i64 @llvm.hexagon.S2.asr.r.p.or(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: |= asr({{.*}}, r4) +; CHECK: |=asr({{.*}},r4) declare i64 @llvm.hexagon.S2.lsr.r.p.or(i64, i64, i32) define i64 @S2_lsr_r_p_or(i64 %a, i64 %b, i32 %c) { @@ -522,7 +522,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.r.p.or(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: |= lsr({{.*}}, r4) +; CHECK: |=lsr({{.*}},r4) declare i64 @llvm.hexagon.S2.asl.r.p.or(i64, i64, i32) define i64 @S2_asl_r_p_or(i64 %a, i64 %b, i32 %c) { @@ -529,7 +529,7 @@ %z = call i64 @llvm.hexagon.S2.asl.r.p.or(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: |= asl({{.*}}, r4) +; CHECK: |=asl({{.*}},r4) declare i64 @llvm.hexagon.S2.lsl.r.p.or(i64, i64, i32) define i64 @S2_lsl_r_p_or(i64 %a, i64 %b, i32 %c) { @@ -536,7 +536,7 @@ %z = call i64 @llvm.hexagon.S2.lsl.r.p.or(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: |= lsl({{.*}}, r4) +; CHECK: |=lsl({{.*}},r4) declare i64 @llvm.hexagon.S2.asr.r.p.and(i64, i64, i32) define i64 @S2_asr_r_p_and(i64 %a, i64 %b, i32 %c) { @@ -543,7 +543,7 @@ %z = call i64 @llvm.hexagon.S2.asr.r.p.and(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: &= asr({{.*}}, r4) +; CHECK: &=asr({{.*}},r4) declare i64 @llvm.hexagon.S2.lsr.r.p.and(i64, i64, i32) define i64 @S2_lsr_r_p_and(i64 %a, i64 %b, i32 %c) { @@ -550,7 +550,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.r.p.and(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: &= lsr({{.*}}, r4) +; CHECK: &=lsr({{.*}},r4) declare i64 @llvm.hexagon.S2.asl.r.p.and(i64, i64, i32) define i64 @S2_asl_r_p_and(i64 %a, i64 %b, i32 %c) { @@ -557,7 +557,7 @@ %z = call i64 @llvm.hexagon.S2.asl.r.p.and(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: &= asl({{.*}}, r4) +; CHECK: &=asl({{.*}},r4) declare i64 @llvm.hexagon.S2.lsl.r.p.and(i64, i64, i32) define i64 @S2_lsl_r_p_and(i64 %a, i64 %b, i32 %c) { @@ -564,7 +564,7 @@ %z = call i64 @llvm.hexagon.S2.lsl.r.p.and(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: &= lsl({{.*}}, r4) +; CHECK: &=lsl({{.*}},r4) declare i32 @llvm.hexagon.S2.asr.r.r.or(i32, i32, i32) define i32 @S2_asr_r_r_or(i32 %a, i32 %b, i32 %c) { @@ -571,7 +571,7 @@ %z = call i32 @llvm.hexagon.S2.asr.r.r.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= asr({{.*}}, {{.*}}) +; CHECK: |=asr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r.or(i32, i32, i32) define i32 @S2_lsr_r_r_or(i32 %a, i32 %b, i32 %c) { @@ -578,7 +578,7 @@ %z = call i32 @llvm.hexagon.S2.lsr.r.r.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= lsr({{.*}}, {{.*}}) +; CHECK: |=lsr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r.or(i32, i32, i32) define i32 @S2_asl_r_r_or(i32%a, i32 %b, i32 %c) { @@ -585,7 +585,7 @@ %z = call i32 @llvm.hexagon.S2.asl.r.r.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= asl({{.*}}, {{.*}}) +; CHECK: |=asl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r.or(i32, i32, i32) define i32 @S2_lsl_r_r_or(i32%a, i32 %b, i32 %c) { @@ -592,7 +592,7 @@ %z = call i32 @llvm.hexagon.S2.lsl.r.r.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= lsl({{.*}}, {{.*}}) +; CHECK: |=lsl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asr.r.r.and(i32, i32, i32) define i32 @S2_asr_r_r_and(i32 %a, i32 %b, i32 %c) { @@ -599,7 +599,7 @@ %z = call i32 @llvm.hexagon.S2.asr.r.r.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= asr({{.*}}, {{.*}}) +; CHECK: &=asr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r.and(i32, i32, i32) define i32 @S2_lsr_r_r_and(i32 %a, i32 %b, i32 %c) { @@ -606,7 +606,7 @@ %z = call i32 @llvm.hexagon.S2.lsr.r.r.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= 
lsr({{.*}}, {{.*}}) +; CHECK: &=lsr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r.and(i32, i32, i32) define i32 @S2_asl_r_r_and(i32 %a, i32 %b, i32 %c) { @@ -613,7 +613,7 @@ %z = call i32 @llvm.hexagon.S2.asl.r.r.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= asl({{.*}}, {{.*}}) +; CHECK: &=asl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r.and(i32, i32, i32) define i32 @S2_lsl_r_r_and(i32 %a, i32 %b, i32 %c) { @@ -620,7 +620,7 @@ %z = call i32 @llvm.hexagon.S2.lsl.r.r.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= lsl({{.*}}, {{.*}}) +; CHECK: &=lsl({{.*}},{{.*}}) ; Shift by register with saturation declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) @@ -628,7 +628,7 @@ %z = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %a, i32 %b) ret i32 %z } -; CHECK: = asr({{.*}}, {{.*}}):sat +; CHECK: =asr({{.*}},{{.*}}):sat declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32) define i32 @S2_asl_r_r_sat(i32 %a, i32 %b) { @@ -635,7 +635,7 @@ %z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %a, i32 %b) ret i32 %z } -; CHECK: = asl({{.*}}, {{.*}}):sat +; CHECK: =asl({{.*}},{{.*}}):sat ; Vector shift halfwords by immediate declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32) @@ -643,7 +643,7 @@ %z = call i64 @llvm.hexagon.S2.asr.i.vh(i64 %a, i32 0) ret i64 %z } -; CHECK: = vasrh({{.*}}, #0) +; CHECK: =vasrh({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32) define i64 @S2_lsr_i_vh(i64 %a) { @@ -650,7 +650,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %a, i32 0) ret i64 %z } -; CHECK: = vlsrh({{.*}}, #0) +; CHECK: =vlsrh({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32) define i64 @S2_asl_i_vh(i64 %a) { @@ -657,7 +657,7 @@ %z = call i64 @llvm.hexagon.S2.asl.i.vh(i64 %a, i32 0) ret i64 %z } -; CHECK: = vaslh({{.*}}, #0) +; CHECK: =vaslh({{.*}},#0) ; Vector shift halfwords by register declare i64 @llvm.hexagon.S2.asr.r.vh(i64, i32) @@ -665,7 +665,7 @@ %z = call i64 @llvm.hexagon.S2.asr.r.vh(i64 %a, i32 %b) ret i64 %z } -; CHECK: = vasrh({{.*}}, {{.*}}) +; CHECK: =vasrh({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.lsr.r.vh(i64, i32) define i64 @S2_lsr_r_vh(i64 %a, i32 %b) { @@ -672,7 +672,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.r.vh(i64 %a, i32 %b) ret i64 %z } -; CHECK: = vlsrh({{.*}}, {{.*}}) +; CHECK: =vlsrh({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.asl.r.vh(i64, i32) define i64 @S2_asl_r_vh(i64 %a, i32 %b) { @@ -679,7 +679,7 @@ %z = call i64 @llvm.hexagon.S2.asl.r.vh(i64 %a, i32 %b) ret i64 %z } -; CHECK: = vaslh({{.*}}, {{.*}}) +; CHECK: =vaslh({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.lsl.r.vh(i64, i32) define i64 @S2_lsl_r_vh(i64 %a, i32 %b) { @@ -686,7 +686,7 @@ %z = call i64 @llvm.hexagon.S2.lsl.r.vh(i64 %a, i32 %b) ret i64 %z } -; CHECK: = vlslh({{.*}}, {{.*}}) +; CHECK: =vlslh({{.*}},{{.*}}) ; Vector shift words by immediate declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32) @@ -694,7 +694,7 @@ %z = call i64 @llvm.hexagon.S2.asr.i.vw(i64 %a, i32 0) ret i64 %z } -; CHECK: = vasrw({{.*}}, #0) +; CHECK: =vasrw({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32) define i64 @S2_lsr_i_vw(i64 %a) { @@ -701,7 +701,7 @@ %z = call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %a, i32 0) ret i64 %z } -; CHECK: = vlsrw({{.*}}, #0) +; CHECK: =vlsrw({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32) define i64 @S2_asl_i_vw(i64 %a) { @@ -708,7 +708,7 @@ %z = call i64 @llvm.hexagon.S2.asl.i.vw(i64 %a, i32 0) ret i64 %z } -; CHECK: = vaslw({{.*}}, #0) +; CHECK: =vaslw({{.*}},#0) ; Vector shift words by with truncate and pack declare i32 @llvm.hexagon.S2.asr.i.svw.trun(i64, i32) @@ 
-716,7 +716,7 @@ %z = call i32 @llvm.hexagon.S2.asr.i.svw.trun(i64 %a, i32 0) ret i32 %z } -; CHECK: = vasrw({{.*}}, #0) +; CHECK: =vasrw({{.*}},#0) declare i32 @llvm.hexagon.S2.asr.r.svw.trun(i64, i32) define i32 @S2_asr_r_svw_trun(i64 %a, i32 %b) { @@ -723,4 +723,4 @@ %z = call i32 @llvm.hexagon.S2.asr.r.svw.trun(i64 %a, i32 %b) ret i32 %z } -; CHECK: = vasrw({{.*}}, {{.*}}) +; CHECK: =vasrw({{.*}},{{.*}}) Index: test/CodeGen/Hexagon/macint.ll =================================================================== --- test/CodeGen/Hexagon/macint.ll +++ test/CodeGen/Hexagon/macint.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s ; Check that we generate integer multiply accumulate. -; CHECK: r{{[0-9]+}} {{\+|\-}}= mpyi(r{{[0-9]+}}, +; CHECK: r{{[0-9]+}}{{\+|\-}}=mpyi(r{{[0-9]+}}, define i32 @main(i32* %a, i32* %b) nounwind { entry: Index: test/CodeGen/Hexagon/memops-stack.ll =================================================================== --- test/CodeGen/Hexagon/memops-stack.ll +++ test/CodeGen/Hexagon/memops-stack.ll @@ -4,7 +4,7 @@ target triple = "hexagon" ; CHECK-LABEL: test0 -; CHECK: memw(r29+#{{[0-9]+}}) += #1 +; CHECK: memw(r29+#{{[0-9]+}})+=#1 define void @test0() #0 { entry: %x = alloca i32, align 4 @@ -20,7 +20,7 @@ } ; CHECK-LABEL: test1 -; CHECK: memw(r29+#{{[0-9]+}}) -= #1 +; CHECK: memw(r29+#{{[0-9]+}})-=#1 define void @test1() #0 { entry: %x = alloca i32, align 4 @@ -36,7 +36,7 @@ } ; CHECK-LABEL: test2 -; CHECK: memw(r29+#{{[0-9]+}}) = setbit(#0) +; CHECK: memw(r29+#{{[0-9]+}})=setbit(#0) define void @test2() #0 { entry: %x = alloca i32, align 4 @@ -52,7 +52,7 @@ } ; CHECK-LABEL: test3 -; CHECK: memw(r29+#{{[0-9]+}}) = clrbit(#0) +; CHECK: memw(r29+#{{[0-9]+}})=clrbit(#0) define void @test3() #0 { entry: %x = alloca i32, align 4 @@ -68,7 +68,7 @@ } ; CHECK-LABEL: test4 -; CHECK: memw(r29+#{{[0-9]+}}) += r +; CHECK: memw(r29+#{{[0-9]+}})+=r define void @test4(i32 %a) #0 { entry: %x = alloca i32, align 4 @@ -84,7 +84,7 @@ } ; CHECK-LABEL: test5 -; CHECK: memw(r29+#{{[0-9]+}}) -= r +; CHECK: memw(r29+#{{[0-9]+}})-=r define void @test5(i32 %a) #0 { entry: %x = alloca i32, align 4 @@ -100,7 +100,7 @@ } ; CHECK-LABEL: test6 -; CHECK: memw(r29+#{{[0-9]+}}) |= r +; CHECK: memw(r29+#{{[0-9]+}})|=r define void @test6(i32 %a) #0 { entry: %x = alloca i32, align 4 @@ -116,7 +116,7 @@ } ; CHECK-LABEL: test7 -; CHECK: memw(r29+#{{[0-9]+}}) &= r +; CHECK: memw(r29+#{{[0-9]+}})&=r define void @test7(i32 %a) #0 { entry: %x = alloca i32, align 4 Index: test/CodeGen/Hexagon/memops.ll =================================================================== --- test/CodeGen/Hexagon/memops.ll +++ test/CodeGen/Hexagon/memops.ll @@ -4,7 +4,7 @@ define void @memop_unsigned_char_add5(i8* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_add5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 +; CHECK: memb(r{{[0-9]+}}+#0)+=#5 %0 = load i8, i8* %p, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 5 @@ -16,7 +16,7 @@ define void @memop_unsigned_char_add(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_add: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)+=r{{[0-9]+}} %conv = zext i8 %x to i32 %0 = load i8, i8* %p, align 1 %conv1 = zext i8 %0 to i32 @@ -29,7 +29,7 @@ define void @memop_unsigned_char_sub(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_sub: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ 
*}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)-=r{{[0-9]+}} %conv = zext i8 %x to i32 %0 = load i8, i8* %p, align 1 %conv1 = zext i8 %0 to i32 @@ -42,7 +42,7 @@ define void @memop_unsigned_char_or(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_or: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)|=r{{[0-9]+}} %0 = load i8, i8* %p, align 1 %or3 = or i8 %0, %x store i8 %or3, i8* %p, align 1 @@ -52,7 +52,7 @@ define void @memop_unsigned_char_and(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_and: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)&=r{{[0-9]+}} %0 = load i8, i8* %p, align 1 %and3 = and i8 %0, %x store i8 %and3, i8* %p, align 1 @@ -62,7 +62,7 @@ define void @memop_unsigned_char_clrbit(i8* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_clrbit: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memb(r{{[0-9]+}}+#0)=clrbit(#5) %0 = load i8, i8* %p, align 1 %conv = zext i8 %0 to i32 %and = and i32 %conv, 223 @@ -74,7 +74,7 @@ define void @memop_unsigned_char_setbit(i8* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_setbit: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memb(r{{[0-9]+}}+#0)=setbit(#7) %0 = load i8, i8* %p, align 1 %conv = zext i8 %0 to i32 %or = or i32 %conv, 128 @@ -86,7 +86,7 @@ define void @memop_unsigned_char_add5_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_add5_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 +; CHECK: memb(r{{[0-9]+}}+#0)+=#5 %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 @@ -99,7 +99,7 @@ define void @memop_unsigned_char_add_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_add_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)+=r{{[0-9]+}} %conv = zext i8 %x to i32 %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 @@ -113,7 +113,7 @@ define void @memop_unsigned_char_sub_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_sub_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)-=r{{[0-9]+}} %conv = zext i8 %x to i32 %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 @@ -127,7 +127,7 @@ define void @memop_unsigned_char_or_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_or_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)|=r{{[0-9]+}} %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 %or3 = or i8 %0, %x @@ -138,7 +138,7 @@ define void @memop_unsigned_char_and_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_and_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)&=r{{[0-9]+}} %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 %and3 = and i8 %0, %x @@ -149,7 +149,7 @@ define void @memop_unsigned_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: 
memop_unsigned_char_clrbit_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memb(r{{[0-9]+}}+#0)=clrbit(#5) %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 @@ -162,7 +162,7 @@ define void @memop_unsigned_char_setbit_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_setbit_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memb(r{{[0-9]+}}+#0)=setbit(#7) %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 @@ -175,7 +175,7 @@ define void @memop_unsigned_char_add5_index5(i8* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_add5_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5 +; CHECK: memb(r{{[0-9]+}}+#5)+=#5 %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 @@ -188,7 +188,7 @@ define void @memop_unsigned_char_add_index5(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_add_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#5)+=r{{[0-9]+}} %conv = zext i8 %x to i32 %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 @@ -202,7 +202,7 @@ define void @memop_unsigned_char_sub_index5(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_sub_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#5)-=r{{[0-9]+}} %conv = zext i8 %x to i32 %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 @@ -216,7 +216,7 @@ define void @memop_unsigned_char_or_index5(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_or_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#5)|=r{{[0-9]+}} %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 %or3 = or i8 %0, %x @@ -227,7 +227,7 @@ define void @memop_unsigned_char_and_index5(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_and_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#5)&=r{{[0-9]+}} %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 %and3 = and i8 %0, %x @@ -238,7 +238,7 @@ define void @memop_unsigned_char_clrbit_index5(i8* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_clrbit_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memb(r{{[0-9]+}}+#5)=clrbit(#5) %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 @@ -251,7 +251,7 @@ define void @memop_unsigned_char_setbit_index5(i8* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_char_setbit_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memb(r{{[0-9]+}}+#5)=setbit(#7) %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 @@ -264,7 +264,7 @@ define void @memop_signed_char_add5(i8* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_char_add5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 +; CHECK: 
memb(r{{[0-9]+}}+#0)+=#5 %0 = load i8, i8* %p, align 1 %conv2 = zext i8 %0 to i32 %add = add nsw i32 %conv2, 5 @@ -276,7 +276,7 @@ define void @memop_signed_char_add(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_char_add: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)+=r{{[0-9]+}} %conv4 = zext i8 %x to i32 %0 = load i8, i8* %p, align 1 %conv13 = zext i8 %0 to i32 @@ -289,7 +289,7 @@ define void @memop_signed_char_sub(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_char_sub: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)-=r{{[0-9]+}} %conv4 = zext i8 %x to i32 %0 = load i8, i8* %p, align 1 %conv13 = zext i8 %0 to i32 @@ -302,7 +302,7 @@ define void @memop_signed_char_or(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_char_or: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)|=r{{[0-9]+}} %0 = load i8, i8* %p, align 1 %or3 = or i8 %0, %x store i8 %or3, i8* %p, align 1 @@ -312,7 +312,7 @@ define void @memop_signed_char_and(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_char_and: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)&=r{{[0-9]+}} %0 = load i8, i8* %p, align 1 %and3 = and i8 %0, %x store i8 %and3, i8* %p, align 1 @@ -322,7 +322,7 @@ define void @memop_signed_char_clrbit(i8* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_char_clrbit: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memb(r{{[0-9]+}}+#0)=clrbit(#5) %0 = load i8, i8* %p, align 1 %conv2 = zext i8 %0 to i32 %and = and i32 %conv2, 223 @@ -334,7 +334,7 @@ define void @memop_signed_char_setbit(i8* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_char_setbit: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memb(r{{[0-9]+}}+#0)=setbit(#7) %0 = load i8, i8* %p, align 1 %conv2 = zext i8 %0 to i32 %or = or i32 %conv2, 128 @@ -346,7 +346,7 @@ define void @memop_signed_char_add5_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_signed_char_add5_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 +; CHECK: memb(r{{[0-9]+}}+#0)+=#5 %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 %conv2 = zext i8 %0 to i32 @@ -359,7 +359,7 @@ define void @memop_signed_char_add_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_char_add_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)+=r{{[0-9]+}} %conv4 = zext i8 %x to i32 %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 @@ -373,7 +373,7 @@ define void @memop_signed_char_sub_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_char_sub_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)-=r{{[0-9]+}} %conv4 = zext i8 %x to i32 %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 @@ -387,7 +387,7 @@ define void @memop_signed_char_or_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_char_or_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: 
memb(r{{[0-9]+}}+#0)|=r{{[0-9]+}} %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 %or3 = or i8 %0, %x @@ -398,7 +398,7 @@ define void @memop_signed_char_and_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_char_and_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#0)&=r{{[0-9]+}} %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 %and3 = and i8 %0, %x @@ -409,7 +409,7 @@ define void @memop_signed_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_signed_char_clrbit_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memb(r{{[0-9]+}}+#0)=clrbit(#5) %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 %conv2 = zext i8 %0 to i32 @@ -422,7 +422,7 @@ define void @memop_signed_char_setbit_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_signed_char_setbit_index: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memb(r{{[0-9]+}}+#0)=setbit(#7) %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i %0 = load i8, i8* %add.ptr, align 1 %conv2 = zext i8 %0 to i32 @@ -435,7 +435,7 @@ define void @memop_signed_char_add5_index5(i8* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_char_add5_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5 +; CHECK: memb(r{{[0-9]+}}+#5)+=#5 %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 %conv2 = zext i8 %0 to i32 @@ -448,7 +448,7 @@ define void @memop_signed_char_add_index5(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_char_add_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#5)+=r{{[0-9]+}} %conv4 = zext i8 %x to i32 %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 @@ -462,7 +462,7 @@ define void @memop_signed_char_sub_index5(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_char_sub_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#5)-=r{{[0-9]+}} %conv4 = zext i8 %x to i32 %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 @@ -476,7 +476,7 @@ define void @memop_signed_char_or_index5(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_char_or_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#5)|=r{{[0-9]+}} %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 %or3 = or i8 %0, %x @@ -487,7 +487,7 @@ define void @memop_signed_char_and_index5(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_char_and_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memb(r{{[0-9]+}}+#5)&=r{{[0-9]+}} %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 %and3 = and i8 %0, %x @@ -498,7 +498,7 @@ define void @memop_signed_char_clrbit_index5(i8* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_char_clrbit_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memb(r{{[0-9]+}}+#5)=clrbit(#5) %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 
%conv2 = zext i8 %0 to i32 @@ -511,7 +511,7 @@ define void @memop_signed_char_setbit_index5(i8* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_char_setbit_index5: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memb(r{{[0-9]+}}+#5)=setbit(#7) %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 %0 = load i8, i8* %add.ptr, align 1 %conv2 = zext i8 %0 to i32 @@ -524,7 +524,7 @@ define void @memop_unsigned_short_add5(i16* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_add5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 +; CHECK: memh(r{{[0-9]+}}+#0)+=#5 %0 = load i16, i16* %p, align 2 %conv = zext i16 %0 to i32 %add = add nsw i32 %conv, 5 @@ -536,7 +536,7 @@ define void @memop_unsigned_short_add(i16* nocapture %p, i16 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_add: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)+=r{{[0-9]+}} %conv = zext i16 %x to i32 %0 = load i16, i16* %p, align 2 %conv1 = zext i16 %0 to i32 @@ -549,7 +549,7 @@ define void @memop_unsigned_short_sub(i16* nocapture %p, i16 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_sub: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)-=r{{[0-9]+}} %conv = zext i16 %x to i32 %0 = load i16, i16* %p, align 2 %conv1 = zext i16 %0 to i32 @@ -562,7 +562,7 @@ define void @memop_unsigned_short_or(i16* nocapture %p, i16 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_or: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)|=r{{[0-9]+}} %0 = load i16, i16* %p, align 2 %or3 = or i16 %0, %x store i16 %or3, i16* %p, align 2 @@ -572,7 +572,7 @@ define void @memop_unsigned_short_and(i16* nocapture %p, i16 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_and: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)&=r{{[0-9]+}} %0 = load i16, i16* %p, align 2 %and3 = and i16 %0, %x store i16 %and3, i16* %p, align 2 @@ -582,7 +582,7 @@ define void @memop_unsigned_short_clrbit(i16* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_clrbit: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memh(r{{[0-9]+}}+#0)=clrbit(#5) %0 = load i16, i16* %p, align 2 %conv = zext i16 %0 to i32 %and = and i32 %conv, 65503 @@ -594,7 +594,7 @@ define void @memop_unsigned_short_setbit(i16* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_setbit: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memh(r{{[0-9]+}}+#0)=setbit(#7) %0 = load i16, i16* %p, align 2 %conv = zext i16 %0 to i32 %or = or i32 %conv, 128 @@ -606,7 +606,7 @@ define void @memop_unsigned_short_add5_index(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_add5_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 +; CHECK: memh(r{{[0-9]+}}+#0)+=#5 %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 %conv = zext i16 %0 to i32 @@ -619,7 +619,7 @@ define void @memop_unsigned_short_add_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_add_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)+=r{{[0-9]+}} %conv = zext i16 %x to i32 %add.ptr = getelementptr inbounds i16, i16* 
%p, i32 %i %0 = load i16, i16* %add.ptr, align 2 @@ -633,7 +633,7 @@ define void @memop_unsigned_short_sub_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_sub_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)-=r{{[0-9]+}} %conv = zext i16 %x to i32 %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 @@ -647,7 +647,7 @@ define void @memop_unsigned_short_or_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_or_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)|=r{{[0-9]+}} %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 %or3 = or i16 %0, %x @@ -658,7 +658,7 @@ define void @memop_unsigned_short_and_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_and_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)&=r{{[0-9]+}} %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 %and3 = and i16 %0, %x @@ -669,7 +669,7 @@ define void @memop_unsigned_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_clrbit_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memh(r{{[0-9]+}}+#0)=clrbit(#5) %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 %conv = zext i16 %0 to i32 @@ -682,7 +682,7 @@ define void @memop_unsigned_short_setbit_index(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_setbit_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memh(r{{[0-9]+}}+#0)=setbit(#7) %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 %conv = zext i16 %0 to i32 @@ -695,7 +695,7 @@ define void @memop_unsigned_short_add5_index5(i16* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_add5_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5 +; CHECK: memh(r{{[0-9]+}}+#10)+=#5 %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 %conv = zext i16 %0 to i32 @@ -708,7 +708,7 @@ define void @memop_unsigned_short_add_index5(i16* nocapture %p, i16 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_add_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#10)+=r{{[0-9]+}} %conv = zext i16 %x to i32 %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 @@ -722,7 +722,7 @@ define void @memop_unsigned_short_sub_index5(i16* nocapture %p, i16 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_sub_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#10)-=r{{[0-9]+}} %conv = zext i16 %x to i32 %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 @@ -736,7 +736,7 @@ define void @memop_unsigned_short_or_index5(i16* nocapture %p, i16 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_or_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#10)|=r{{[0-9]+}} %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = 
load i16, i16* %add.ptr, align 2 %or3 = or i16 %0, %x @@ -747,7 +747,7 @@ define void @memop_unsigned_short_and_index5(i16* nocapture %p, i16 zeroext %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_and_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#10)&=r{{[0-9]+}} %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 %and3 = and i16 %0, %x @@ -758,7 +758,7 @@ define void @memop_unsigned_short_clrbit_index5(i16* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_clrbit_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memh(r{{[0-9]+}}+#10)=clrbit(#5) %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 %conv = zext i16 %0 to i32 @@ -771,7 +771,7 @@ define void @memop_unsigned_short_setbit_index5(i16* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_short_setbit_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memh(r{{[0-9]+}}+#10)=setbit(#7) %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 %conv = zext i16 %0 to i32 @@ -784,7 +784,7 @@ define void @memop_signed_short_add5(i16* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_short_add5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 +; CHECK: memh(r{{[0-9]+}}+#0)+=#5 %0 = load i16, i16* %p, align 2 %conv2 = zext i16 %0 to i32 %add = add nsw i32 %conv2, 5 @@ -796,7 +796,7 @@ define void @memop_signed_short_add(i16* nocapture %p, i16 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_short_add: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)+=r{{[0-9]+}} %conv4 = zext i16 %x to i32 %0 = load i16, i16* %p, align 2 %conv13 = zext i16 %0 to i32 @@ -809,7 +809,7 @@ define void @memop_signed_short_sub(i16* nocapture %p, i16 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_short_sub: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)-=r{{[0-9]+}} %conv4 = zext i16 %x to i32 %0 = load i16, i16* %p, align 2 %conv13 = zext i16 %0 to i32 @@ -822,7 +822,7 @@ define void @memop_signed_short_or(i16* nocapture %p, i16 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_short_or: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)|=r{{[0-9]+}} %0 = load i16, i16* %p, align 2 %or3 = or i16 %0, %x store i16 %or3, i16* %p, align 2 @@ -832,7 +832,7 @@ define void @memop_signed_short_and(i16* nocapture %p, i16 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_short_and: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)&=r{{[0-9]+}} %0 = load i16, i16* %p, align 2 %and3 = and i16 %0, %x store i16 %and3, i16* %p, align 2 @@ -842,7 +842,7 @@ define void @memop_signed_short_clrbit(i16* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_short_clrbit: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memh(r{{[0-9]+}}+#0)=clrbit(#5) %0 = load i16, i16* %p, align 2 %conv2 = zext i16 %0 to i32 %and = and i32 %conv2, 65503 @@ -854,7 +854,7 @@ define void @memop_signed_short_setbit(i16* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_short_setbit: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: 
memh(r{{[0-9]+}}+#0)=setbit(#7) %0 = load i16, i16* %p, align 2 %conv2 = zext i16 %0 to i32 %or = or i32 %conv2, 128 @@ -866,7 +866,7 @@ define void @memop_signed_short_add5_index(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_signed_short_add5_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 +; CHECK: memh(r{{[0-9]+}}+#0)+=#5 %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 @@ -879,7 +879,7 @@ define void @memop_signed_short_add_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_short_add_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)+=r{{[0-9]+}} %conv4 = zext i16 %x to i32 %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 @@ -893,7 +893,7 @@ define void @memop_signed_short_sub_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_short_sub_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)-=r{{[0-9]+}} %conv4 = zext i16 %x to i32 %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 @@ -907,7 +907,7 @@ define void @memop_signed_short_or_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_short_or_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)|=r{{[0-9]+}} %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 %or3 = or i16 %0, %x @@ -918,7 +918,7 @@ define void @memop_signed_short_and_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_short_and_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#0)&=r{{[0-9]+}} %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 %and3 = and i16 %0, %x @@ -929,7 +929,7 @@ define void @memop_signed_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_signed_short_clrbit_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memh(r{{[0-9]+}}+#0)=clrbit(#5) %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 @@ -942,7 +942,7 @@ define void @memop_signed_short_setbit_index(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_signed_short_setbit_index: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memh(r{{[0-9]+}}+#0)=setbit(#7) %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 @@ -955,7 +955,7 @@ define void @memop_signed_short_add5_index5(i16* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_short_add5_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5 +; CHECK: memh(r{{[0-9]+}}+#10)+=#5 %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 @@ -968,7 +968,7 @@ define void @memop_signed_short_add_index5(i16* nocapture %p, i16 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_short_add_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#10)+=r{{[0-9]+}} %conv4 = zext i16 %x to i32 %add.ptr = 
getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 @@ -982,7 +982,7 @@ define void @memop_signed_short_sub_index5(i16* nocapture %p, i16 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_short_sub_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#10)-=r{{[0-9]+}} %conv4 = zext i16 %x to i32 %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 @@ -996,7 +996,7 @@ define void @memop_signed_short_or_index5(i16* nocapture %p, i16 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_short_or_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#10)|=r{{[0-9]+}} %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 %or3 = or i16 %0, %x @@ -1007,7 +1007,7 @@ define void @memop_signed_short_and_index5(i16* nocapture %p, i16 signext %x) nounwind { entry: ; CHECK-LABEL: memop_signed_short_and_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memh(r{{[0-9]+}}+#10)&=r{{[0-9]+}} %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 %and3 = and i16 %0, %x @@ -1018,7 +1018,7 @@ define void @memop_signed_short_clrbit_index5(i16* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_short_clrbit_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memh(r{{[0-9]+}}+#10)=clrbit(#5) %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 @@ -1031,7 +1031,7 @@ define void @memop_signed_short_setbit_index5(i16* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_short_setbit_index5: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memh(r{{[0-9]+}}+#10)=setbit(#7) %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 @@ -1044,7 +1044,7 @@ define void @memop_signed_int_add5(i32* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_int_add5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 +; CHECK: memw(r{{[0-9]+}}+#0)+=#5 %0 = load i32, i32* %p, align 4 %add = add i32 %0, 5 store i32 %add, i32* %p, align 4 @@ -1054,7 +1054,7 @@ define void @memop_signed_int_add(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_signed_int_add: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)+=r{{[0-9]+}} %0 = load i32, i32* %p, align 4 %add = add i32 %0, %x store i32 %add, i32* %p, align 4 @@ -1064,7 +1064,7 @@ define void @memop_signed_int_sub(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_signed_int_sub: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)-=r{{[0-9]+}} %0 = load i32, i32* %p, align 4 %sub = sub i32 %0, %x store i32 %sub, i32* %p, align 4 @@ -1074,7 +1074,7 @@ define void @memop_signed_int_or(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_signed_int_or: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)|=r{{[0-9]+}} %0 = load i32, i32* %p, align 4 %or = or i32 %0, %x store i32 %or, i32* %p, align 4 @@ -1084,7 +1084,7 @@ define void @memop_signed_int_and(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_signed_int_and: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ 
*}}&={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)&=r{{[0-9]+}} %0 = load i32, i32* %p, align 4 %and = and i32 %0, %x store i32 %and, i32* %p, align 4 @@ -1094,7 +1094,7 @@ define void @memop_signed_int_clrbit(i32* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_int_clrbit: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memw(r{{[0-9]+}}+#0)=clrbit(#5) %0 = load i32, i32* %p, align 4 %and = and i32 %0, -33 store i32 %and, i32* %p, align 4 @@ -1104,7 +1104,7 @@ define void @memop_signed_int_setbit(i32* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_int_setbit: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memw(r{{[0-9]+}}+#0)=setbit(#7) %0 = load i32, i32* %p, align 4 %or = or i32 %0, 128 store i32 %or, i32* %p, align 4 @@ -1114,7 +1114,7 @@ define void @memop_signed_int_add5_index(i32* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_signed_int_add5_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 +; CHECK: memw(r{{[0-9]+}}+#0)+=#5 %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %add = add i32 %0, 5 @@ -1125,7 +1125,7 @@ define void @memop_signed_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_signed_int_add_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)+=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %add = add i32 %0, %x @@ -1136,7 +1136,7 @@ define void @memop_signed_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_signed_int_sub_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)-=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %sub = sub i32 %0, %x @@ -1147,7 +1147,7 @@ define void @memop_signed_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_signed_int_or_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)|=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, %x @@ -1158,7 +1158,7 @@ define void @memop_signed_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_signed_int_and_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)&=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, %x @@ -1169,7 +1169,7 @@ define void @memop_signed_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_signed_int_clrbit_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memw(r{{[0-9]+}}+#0)=clrbit(#5) %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, -33 @@ -1180,7 +1180,7 @@ define void @memop_signed_int_setbit_index(i32* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_signed_int_setbit_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memw(r{{[0-9]+}}+#0)=setbit(#7) %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, 128 @@ -1191,7 +1191,7 @@ define void 
@memop_signed_int_add5_index5(i32* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_int_add5_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5 +; CHECK: memw(r{{[0-9]+}}+#20)+=#5 %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %add = add i32 %0, 5 @@ -1202,7 +1202,7 @@ define void @memop_signed_int_add_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_signed_int_add_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#20)+=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %add = add i32 %0, %x @@ -1213,7 +1213,7 @@ define void @memop_signed_int_sub_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_signed_int_sub_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#20)-=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %sub = sub i32 %0, %x @@ -1224,7 +1224,7 @@ define void @memop_signed_int_or_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_signed_int_or_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#20)|=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, %x @@ -1235,7 +1235,7 @@ define void @memop_signed_int_and_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_signed_int_and_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#20)&=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, %x @@ -1246,7 +1246,7 @@ define void @memop_signed_int_clrbit_index5(i32* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_int_clrbit_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memw(r{{[0-9]+}}+#20)=clrbit(#5) %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, -33 @@ -1257,7 +1257,7 @@ define void @memop_signed_int_setbit_index5(i32* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_signed_int_setbit_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memw(r{{[0-9]+}}+#20)=setbit(#7) %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, 128 @@ -1268,7 +1268,7 @@ define void @memop_unsigned_int_add5(i32* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_add5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 +; CHECK: memw(r{{[0-9]+}}+#0)+=#5 %0 = load i32, i32* %p, align 4 %add = add nsw i32 %0, 5 store i32 %add, i32* %p, align 4 @@ -1278,7 +1278,7 @@ define void @memop_unsigned_int_add(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_add: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)+=r{{[0-9]+}} %0 = load i32, i32* %p, align 4 %add = add nsw i32 %0, %x store i32 %add, i32* %p, align 4 @@ -1288,7 +1288,7 @@ define void @memop_unsigned_int_sub(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_sub: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)-=r{{[0-9]+}} %0 = load i32, i32* %p, 
align 4 %sub = sub nsw i32 %0, %x store i32 %sub, i32* %p, align 4 @@ -1298,7 +1298,7 @@ define void @memop_unsigned_int_or(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_or: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)|=r{{[0-9]+}} %0 = load i32, i32* %p, align 4 %or = or i32 %0, %x store i32 %or, i32* %p, align 4 @@ -1308,7 +1308,7 @@ define void @memop_unsigned_int_and(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_and: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)&=r{{[0-9]+}} %0 = load i32, i32* %p, align 4 %and = and i32 %0, %x store i32 %and, i32* %p, align 4 @@ -1318,7 +1318,7 @@ define void @memop_unsigned_int_clrbit(i32* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_clrbit: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memw(r{{[0-9]+}}+#0)=clrbit(#5) %0 = load i32, i32* %p, align 4 %and = and i32 %0, -33 store i32 %and, i32* %p, align 4 @@ -1328,7 +1328,7 @@ define void @memop_unsigned_int_setbit(i32* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_setbit: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memw(r{{[0-9]+}}+#0)=setbit(#7) %0 = load i32, i32* %p, align 4 %or = or i32 %0, 128 store i32 %or, i32* %p, align 4 @@ -1338,7 +1338,7 @@ define void @memop_unsigned_int_add5_index(i32* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_add5_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 +; CHECK: memw(r{{[0-9]+}}+#0)+=#5 %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %add = add nsw i32 %0, 5 @@ -1349,7 +1349,7 @@ define void @memop_unsigned_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_add_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)+=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %add = add nsw i32 %0, %x @@ -1360,7 +1360,7 @@ define void @memop_unsigned_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_sub_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)-=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %sub = sub nsw i32 %0, %x @@ -1371,7 +1371,7 @@ define void @memop_unsigned_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_or_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)|=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, %x @@ -1382,7 +1382,7 @@ define void @memop_unsigned_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_and_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#0)&=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, %x @@ -1393,7 +1393,7 @@ define void @memop_unsigned_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_clrbit_index: -; CHECK: memw(r{{[0-9]+}}{{ 
*}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memw(r{{[0-9]+}}+#0)=clrbit(#5) %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, -33 @@ -1404,7 +1404,7 @@ define void @memop_unsigned_int_setbit_index(i32* nocapture %p, i32 %i) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_setbit_index: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memw(r{{[0-9]+}}+#0)=setbit(#7) %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, 128 @@ -1415,7 +1415,7 @@ define void @memop_unsigned_int_add5_index5(i32* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_add5_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5 +; CHECK: memw(r{{[0-9]+}}+#20)+=#5 %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %add = add nsw i32 %0, 5 @@ -1426,7 +1426,7 @@ define void @memop_unsigned_int_add_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_add_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#20)+=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %add = add nsw i32 %0, %x @@ -1437,7 +1437,7 @@ define void @memop_unsigned_int_sub_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_sub_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#20)-=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %sub = sub nsw i32 %0, %x @@ -1448,7 +1448,7 @@ define void @memop_unsigned_int_or_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_or_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#20)|=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, %x @@ -1459,7 +1459,7 @@ define void @memop_unsigned_int_and_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_and_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}+#20)&=r{{[0-9]+}} %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, %x @@ -1470,7 +1470,7 @@ define void @memop_unsigned_int_clrbit_index5(i32* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_clrbit_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) +; CHECK: memw(r{{[0-9]+}}+#20)=clrbit(#5) %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, -33 @@ -1481,7 +1481,7 @@ define void @memop_unsigned_int_setbit_index5(i32* nocapture %p) nounwind { entry: ; CHECK-LABEL: memop_unsigned_int_setbit_index5: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) +; CHECK: memw(r{{[0-9]+}}+#20)=setbit(#7) %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, 128 Index: test/CodeGen/Hexagon/memops1.ll =================================================================== --- test/CodeGen/Hexagon/memops1.ll +++ test/CodeGen/Hexagon/memops1.ll @@ -4,7 +4,7 @@ define void @f(i32* %p) nounwind { entry: -; CHECK: memw(r{{[0-9]+}}{{ 
*}}+{{ *}}#40){{ *}}-={{ *}}#1 +; CHECK: memw(r{{[0-9]+}}+#40)-=#1 %p.addr = alloca i32*, align 4 store i32* %p, i32** %p.addr, align 4 %0 = load i32*, i32** %p.addr, align 4 @@ -17,7 +17,7 @@ define void @g(i32* %p, i32 %i) nounwind { entry: -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#40){{ *}}-={{ *}}#1 +; CHECK: memw(r{{[0-9]+}}+#40)-=#1 %p.addr = alloca i32*, align 4 %i.addr = alloca i32, align 4 store i32* %p, i32** %p.addr, align 4 Index: test/CodeGen/Hexagon/memops2.ll =================================================================== --- test/CodeGen/Hexagon/memops2.ll +++ test/CodeGen/Hexagon/memops2.ll @@ -4,7 +4,7 @@ define void @f(i16* nocapture %p) nounwind { entry: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1 +; CHECK: memh(r{{[0-9]+}}+#20)-=#1 %add.ptr = getelementptr inbounds i16, i16* %p, i32 10 %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 @@ -16,7 +16,7 @@ define void @g(i16* nocapture %p, i32 %i) nounwind { entry: -; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1 +; CHECK: memh(r{{[0-9]+}}+#20)-=#1 %add.ptr.sum = add i32 %i, 10 %add.ptr1 = getelementptr inbounds i16, i16* %p, i32 %add.ptr.sum %0 = load i16, i16* %add.ptr1, align 2 Index: test/CodeGen/Hexagon/memops3.ll =================================================================== --- test/CodeGen/Hexagon/memops3.ll +++ test/CodeGen/Hexagon/memops3.ll @@ -4,7 +4,7 @@ define void @f(i8* nocapture %p) nounwind { entry: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1 +; CHECK: memb(r{{[0-9]+}}+#10)-=#1 %add.ptr = getelementptr inbounds i8, i8* %p, i32 10 %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 @@ -16,7 +16,7 @@ define void @g(i8* nocapture %p, i32 %i) nounwind { entry: -; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1 +; CHECK: memb(r{{[0-9]+}}+#10)-=#1 %add.ptr.sum = add i32 %i, 10 %add.ptr1 = getelementptr inbounds i8, i8* %p, i32 %add.ptr.sum %0 = load i8, i8* %add.ptr1, align 1 Index: test/CodeGen/Hexagon/mpy.ll =================================================================== --- test/CodeGen/Hexagon/mpy.ll +++ test/CodeGen/Hexagon/mpy.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s -; CHECK: += mpyi +; CHECK: +=mpyi define void @foo(i32 %acc, i32 %num, i32 %num2) nounwind { entry: Index: test/CodeGen/Hexagon/newvalueSameReg.ll =================================================================== --- test/CodeGen/Hexagon/newvalueSameReg.ll +++ test/CodeGen/Hexagon/newvalueSameReg.ll @@ -12,8 +12,8 @@ ; Test that we don't generate a new value compare if the operands are ; the same register. -; CHECK-NOT: cmp.eq([[REG0:(r[0-9]+)]].new, [[REG0]]) -; CHECK: cmp.eq([[REG1:(r[0-9]+)]], [[REG1]]) +; CHECK-NOT: cmp.eq([[REG0:(r[0-9]+)]].new,[[REG0]]) +; CHECK: cmp.eq([[REG1:(r[0-9]+)]],[[REG1]]) ; Function Attrs: nounwind declare void @fprintf(%struct._Dnk_filet.1* nocapture, i8* nocapture readonly, ...) 
#1 Index: test/CodeGen/Hexagon/newvaluejump.ll =================================================================== --- test/CodeGen/Hexagon/newvaluejump.ll +++ test/CodeGen/Hexagon/newvaluejump.ll @@ -6,7 +6,7 @@ define i32 @foo(i32 %a) nounwind { entry: -; CHECK: if (cmp.eq(r{{[0-9]+}}.new, #0)) jump{{.}} +; CHECK: if (cmp.eq(r{{[0-9]+}}.new,#0)) jump{{.}} %addr1 = alloca i32, align 4 %addr2 = alloca i32, align 4 %0 = load i32, i32* @i, align 4 Index: test/CodeGen/Hexagon/newvaluejump2.ll =================================================================== --- test/CodeGen/Hexagon/newvaluejump2.ll +++ test/CodeGen/Hexagon/newvaluejump2.ll @@ -6,7 +6,7 @@ @Reg = common global i32 0, align 4 define i32 @main() nounwind { entry: -; CHECK: if (cmp.gt(r{{[0-9]+}}, r{{[0-9]+}}.new)) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}} +; CHECK: if (cmp.gt(r{{[0-9]+}},r{{[0-9]+}}.new)) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}} %Reg2 = alloca i32, align 4 %0 = load i32, i32* %Reg2, align 4 %1 = load i32, i32* @Reg, align 4 Index: test/CodeGen/Hexagon/newvaluestore.ll =================================================================== --- test/CodeGen/Hexagon/newvaluestore.ll +++ test/CodeGen/Hexagon/newvaluestore.ll @@ -5,7 +5,7 @@ define i32 @main(i32 %x, i32* %p) nounwind { entry: -; CHECK: memw(r{{[0-9]+}}+#{{[0-9]+}}) = r{{[0-9]+}}.new +; CHECK: memw(r{{[0-9]+}}+#{{[0-9]+}})=r{{[0-9]+}}.new %t0 = load i32, i32* @i, align 4 store i32 %t0, i32* %p, align 4 ret i32 %x Index: test/CodeGen/Hexagon/opt-addr-mode.ll =================================================================== --- test/CodeGen/Hexagon/opt-addr-mode.ll +++ test/CodeGen/Hexagon/opt-addr-mode.ll @@ -1,11 +1,11 @@ ; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 -disable-hexagon-amodeopt < %s | FileCheck %s --check-prefix=CHECK-NO-AMODE ; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 -disable-hexagon-amodeopt=0 -hexagon-amode-growth-limit=4 < %s | FileCheck %s --check-prefix=CHECK-AMODE -; CHECK-NO-AMODE: [[REG0:(r[0-9]+)]] = ##global_2 -; CHECK-NO-AMODE: memw([[REG0]] + {{.*}}<<#2) = +; CHECK-NO-AMODE: [[REG0:(r[0-9]+)]]=##global_2 +; CHECK-NO-AMODE: memw([[REG0]]+{{.*}}<<#2)= -; CHECK-AMODE: [[REG1:(r[0-9]+)]] = memw(##global_1) -; CHECK-AMODE: memw([[REG1]]<<#2 + ##global_2) = +; CHECK-AMODE: [[REG1:(r[0-9]+)]]=memw(##global_1) +; CHECK-AMODE: memw([[REG1]]<<#2+##global_2)= @global_1 = external global i32, align 4 @global_2 = external global [128 x i32], align 8 Index: test/CodeGen/Hexagon/opt-fabs.ll =================================================================== --- test/CodeGen/Hexagon/opt-fabs.ll +++ test/CodeGen/Hexagon/opt-fabs.ll @@ -1,7 +1,7 @@ ; RUN: llc -mtriple=hexagon-unknown-elf -mcpu=hexagonv5 -hexagon-bit=0 < %s | FileCheck %s ; Optimize fabsf to clrbit in V5. 
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31) +; CHECK: r{{[0-9]+}}=clrbit(r{{[0-9]+}},#31) define float @my_fabsf(float %x) nounwind { entry: Index: test/CodeGen/Hexagon/opt-fneg.ll =================================================================== --- test/CodeGen/Hexagon/opt-fneg.ll +++ test/CodeGen/Hexagon/opt-fneg.ll @@ -3,7 +3,7 @@ define float @foo(float %x) nounwind { entry: -; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31) +; CHECK: r{{[0-9]+}}=togglebit(r{{[0-9]+}},#31) %x.addr = alloca float, align 4 store float %x, float* %x.addr, align 4 %0 = load float, float* %x.addr, align 4 @@ -13,7 +13,7 @@ define float @bar(float %x) nounwind { entry: -; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31) +; CHECK: r{{[0-9]+}}=togglebit(r{{[0-9]+}},#31) %sub = fsub float -0.000000e+00, %x ret float %sub } @@ -20,7 +20,7 @@ define float @baz(float %x) nounwind { entry: -; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31) +; CHECK: r{{[0-9]+}}=togglebit(r{{[0-9]+}},#31) %conv1 = fmul float %x, -1.000000e+00 ret float %conv1 } Index: test/CodeGen/Hexagon/opt-spill-volatile.ll =================================================================== --- test/CodeGen/Hexagon/opt-spill-volatile.ll +++ test/CodeGen/Hexagon/opt-spill-volatile.ll @@ -5,8 +5,8 @@ target triple = "hexagon" ; CHECK-LABEL: foo -; CHECK: memw(r29+#4) = -; CHECK: = memw(r29 + #4) +; CHECK: memw(r29+#4)= +; CHECK: =memw(r29+#4) define i32 @foo(i32 %a) #0 { entry: %x = alloca i32, align 4 Index: test/CodeGen/Hexagon/packetize-return-arg.ll =================================================================== --- test/CodeGen/Hexagon/packetize-return-arg.ll +++ test/CodeGen/Hexagon/packetize-return-arg.ll @@ -1,6 +1,6 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s ; Check that "r0 = rN" is packetized together with dealloc_return. -; CHECK: r0 = r +; CHECK: r0=r ; CHECK-NOT: { ; CHECK: dealloc_return Index: test/CodeGen/Hexagon/pic-jumptables.ll =================================================================== --- test/CodeGen/Hexagon/pic-jumptables.ll +++ test/CodeGen/Hexagon/pic-jumptables.ll @@ -1,8 +1,8 @@ ; RUN: llc -march=hexagon -relocation-model=pic < %s | FileCheck %s -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}add({{pc|PC}}{{ *}},{{ *}}## -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#2) -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}) +; CHECK: r{{[0-9]+}}=add({{pc|PC}},## +; CHECK: r{{[0-9]+}}=memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2) +; CHECK: r{{[0-9]+}}=add(r{{[0-9]+}},r{{[0-9]+}}) define i32 @test(i32 %y) nounwind { Index: test/CodeGen/Hexagon/pic-local.ll =================================================================== --- test/CodeGen/Hexagon/pic-local.ll +++ test/CodeGen/Hexagon/pic-local.ll @@ -9,11 +9,11 @@ } define void()* @get_f1() { - ; CHECK: r0 = add(pc, ##.Lf1@PCREL) + ; CHECK: r0=add(pc,##.Lf1@PCREL) ret void()* @f1 } define void()* @get_f2() { - ; CHECK: r0 = add(pc, ##f2@PCREL) + ; CHECK: r0=add(pc,##f2@PCREL) ret void()* @f2 } Index: test/CodeGen/Hexagon/pic-regusage.ll =================================================================== --- test/CodeGen/Hexagon/pic-regusage.ll +++ test/CodeGen/Hexagon/pic-regusage.ll @@ -5,7 +5,7 @@ ; R14, R15 and R28). ; CHECK: call __save_r16_through_r27 ; CHECK: } -; CHECK: r14{{ *}}= +; CHECK: r14= @.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1 @@ -29,7 +29,7 @@ ; Same as above for R15. 
; CHECK: call __save_r16_through_r27 ; CHECK: } -; CHECK: r15{{ *}}= +; CHECK: r15= ; Function Attrs: nounwind optsize define i32 @_Z7testR15Pi(i32* nocapture %res) #0 { @@ -48,7 +48,7 @@ ; Same as above for R28. ; CHECK: call __save_r16_through_r27 ; CHECK: } -; CHECK: r28{{ *}}= +; CHECK: r28= ; Function Attrs: nounwind optsize define i32 @_Z7testR28Pi(i32* nocapture %res) #0 { Index: test/CodeGen/Hexagon/pic-simple.ll =================================================================== --- test/CodeGen/Hexagon/pic-simple.ll +++ test/CodeGen/Hexagon/pic-simple.ll @@ -1,8 +1,8 @@ ; RUN: llc -mtriple=hexagon-- -mcpu=hexagonv5 -relocation-model=pic < %s | FileCheck %s -; CHECK: r{{[0-9]+}} = add({{pc|PC}}, ##_GLOBAL_OFFSET_TABLE_@PCREL) -; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{.*}}+{{.*}}##src@GOT) -; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{.*}}+{{.*}}##dst@GOT) +; CHECK: r{{[0-9]+}}=add({{pc|PC}},##_GLOBAL_OFFSET_TABLE_@PCREL) +; CHECK: r{{[0-9]+}}=memw(r{{[0-9]+}}+##src@GOT) +; CHECK: r{{[0-9]+}}=memw(r{{[0-9]+}}+##dst@GOT) @dst = external global i32 @src = external global i32 Index: test/CodeGen/Hexagon/pic-static.ll =================================================================== --- test/CodeGen/Hexagon/pic-static.ll +++ test/CodeGen/Hexagon/pic-static.ll @@ -1,8 +1,8 @@ ; RUN: llc -mtriple=hexagon-- -mcpu=hexagonv5 -relocation-model=pic < %s | FileCheck %s -; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}}, ##_GLOBAL_OFFSET_TABLE_@PCREL) -; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}}, ##x@PCREL) -; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{.*}}+{{.*}}##bar@GOT) +; CHECK-DAG: r{{[0-9]+}}=add({{pc|PC}},##_GLOBAL_OFFSET_TABLE_@PCREL) +; CHECK-DAG: r{{[0-9]+}}=add({{pc|PC}},##x@PCREL) +; CHECK: r{{[0-9]+}}=memw(r{{[0-9]+}}+##bar@GOT) @x = internal global i32 9, align 4 @bar = external global i32* Index: test/CodeGen/Hexagon/postinc-offset.ll =================================================================== --- test/CodeGen/Hexagon/postinc-offset.ll +++ test/CodeGen/Hexagon/postinc-offset.ll @@ -2,8 +2,8 @@ ; RUN: < %s | FileCheck %s ; CHECK: { -; CHECK: ={{ *}}memd([[REG0:(r[0-9]+)]]{{ *}}++{{ *}}#8) -; CHECK-NOT: memw([[REG0]]{{ *}}+{{ *}}#0){{ *}}= +; CHECK: =memd([[REG0:(r[0-9]+)]]++#8) +; CHECK-NOT: memw([[REG0]]+#0)= ; CHECK: } define void @main() #0 { Index: test/CodeGen/Hexagon/postinc-store.ll =================================================================== --- test/CodeGen/Hexagon/postinc-store.ll +++ test/CodeGen/Hexagon/postinc-store.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s ; Check that post-increment store instructions are being generated. -; CHECK: memw(r{{[0-9]+}}{{ *}}++{{ *}}#4{{ *}}){{ *}}={{ *}}r{{[0-9]+}} +; CHECK: memw(r{{[0-9]+}}++#4)=r{{[0-9]+}} define i32 @sum(i32* nocapture %a, i16* nocapture %b, i32 %n) nounwind { entry: Index: test/CodeGen/Hexagon/pred-absolute-store.ll =================================================================== --- test/CodeGen/Hexagon/pred-absolute-store.ll +++ test/CodeGen/Hexagon/pred-absolute-store.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s ; Check that we are able to predicate instructions with absolute ; addressing mode.
-; CHECK: if ({{!*}}p{{[0-2]}}.new) memw(##gvar) = r{{[0-9]+}} +; CHECK: if ({{!*}}p{{[0-2]}}.new) memw(##gvar)=r{{[0-9]+}} @gvar = external global i32 define i32 @test2(i32 %a, i32 %b) nounwind { Index: test/CodeGen/Hexagon/pred-gp.ll =================================================================== --- test/CodeGen/Hexagon/pred-gp.ll +++ test/CodeGen/Hexagon/pred-gp.ll @@ -7,8 +7,8 @@ ; Function Attrs: nounwind define i32 @test2(i8 zeroext %a, i8 zeroext %b) #0 { -; CHECK: if{{ *}}({{!*}}p{{[0-3]+}}{{[.new]*}}){{ *}}r{{[0-9]+}}{{ *}}={{ *}}memw(##{{[cd]}}) -; CHECK: if{{ *}}({{!*}}p{{[0-3]+}}){{ *}}r{{[0-9]+}}{{ *}}={{ *}}memw(##{{[cd]}}) +; CHECK: if ({{!*}}p{{[0-3]+}}{{[.new]*}}) r{{[0-9]+}}=memw(##{{[cd]}}) +; CHECK: if ({{!*}}p{{[0-3]+}}) r{{[0-9]+}}=memw(##{{[cd]}}) entry: %cmp = icmp eq i8 %a, %b br i1 %cmp, label %if.then, label %entry.if.end_crit_edge Index: test/CodeGen/Hexagon/pred-instrs.ll =================================================================== --- test/CodeGen/Hexagon/pred-instrs.ll +++ test/CodeGen/Hexagon/pred-instrs.ll @@ -1,8 +1,8 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s ; Check that we are able to predicate instructions. -; CHECK: if{{ *}}({{!*}}p{{[0-3]}}{{[.new]*}}){{ *}}r{{[0-9]+}}{{ *}}={{ *}}{{and|aslh}} -; CHECK: if{{ *}}({{!*}}p{{[0-3]}}{{[.new]*}}){{ *}}r{{[0-9]+}}{{ *}}={{ *}}{{and|aslh}} +; CHECK: if ({{!*}}p{{[0-3]}}{{[.new]*}}) r{{[0-9]+}}={{and|aslh}} +; CHECK: if ({{!*}}p{{[0-3]}}{{[.new]*}}) r{{[0-9]+}}={{and|aslh}} @a = external global i32 @d = external global i32 Index: test/CodeGen/Hexagon/predicate-copy.ll =================================================================== --- test/CodeGen/Hexagon/predicate-copy.ll +++ test/CodeGen/Hexagon/predicate-copy.ll @@ -1,6 +1,6 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv4 -O3 < %s | FileCheck %s -; CHECK: r{{[0-9]+}} = p{{[0-9]+}} +; CHECK: r{{[0-9]+}}=p{{[0-9]+}} define i1 @foo() { entry: ret i1 false Index: test/CodeGen/Hexagon/predicate-logical.ll =================================================================== --- test/CodeGen/Hexagon/predicate-logical.ll +++ test/CodeGen/Hexagon/predicate-logical.ll @@ -1,5 +1,5 @@ ; RUN: llc -O2 -march=hexagon < %s | FileCheck %s -; CHECK: p{{[0-9]}} = or(p{{[0-9]}}, and(p{{[0-9]}}, p{{[0-9]}})) +; CHECK: p{{[0-9]}}=or(p{{[0-9]}},and(p{{[0-9]}},p{{[0-9]}})) target triple = "hexagon" Index: test/CodeGen/Hexagon/predicate-rcmp.ll =================================================================== --- test/CodeGen/Hexagon/predicate-rcmp.ll +++ test/CodeGen/Hexagon/predicate-rcmp.ll @@ -1,5 +1,5 @@ ; RUN: llc -O2 -march=hexagon < %s | FileCheck %s -; CHECK: cmp.eq(r{{[0-9]+}}, #0) +; CHECK: cmp.eq(r{{[0-9]+}},#0) ; Check that the result of the builtin is not stored directly, i.e. that ; there is an instruction that converts it to {0,1} from {0,-1}. Right now ; the instruction is "r4 = !cmp.eq(r0, #0)". Index: test/CodeGen/Hexagon/rdf-copy.ll =================================================================== --- test/CodeGen/Hexagon/rdf-copy.ll +++ test/CodeGen/Hexagon/rdf-copy.ll @@ -14,7 +14,7 @@ ; } ; ; CHECK-LABEL: LBB0_1 -; CHECK: [[DST:r[0-9]+]] = [[SRC:r[0-9]+]] +; CHECK: [[DST:r[0-9]+]]=[[SRC:r[0-9]+]] ; CHECK-DAG: memw([[SRC]] ; CHECK-NOT: memw([[DST]] ; CHECK: %if.end Index: test/CodeGen/Hexagon/restore-single-reg.ll =================================================================== --- test/CodeGen/Hexagon/restore-single-reg.ll +++ test/CodeGen/Hexagon/restore-single-reg.ll @@ -7,8 +7,8 @@ ; with "optsize" attribute. 
; CHECK-LABEL: fred_os -; CHECK-DAG: memd{{.*}} = r17:16 -; CHECK-DAG: r17:16 = memd{{.*}} +; CHECK-DAG: memd{{.*}}=r17:16 +; CHECK-DAG: r17:16=memd{{.*}} ; CHECK-DAG: deallocframe ; CHECK-NOT: call __restore @@ -23,8 +23,8 @@ ; with "minsize" attribute. ; CHECK-LABEL: fred_oz -; CHECK-DAG: memd{{.*}} = r17:16 -; CHECK-NOT: r17:16 = memd{{.*}} +; CHECK-DAG: memd{{.*}}=r17:16 +; CHECK-NOT: r17:16=memd{{.*}} ; CHECK-DAG: call __restore define i32 @fred_oz(i32 %x) #1 { Index: test/CodeGen/Hexagon/ret-struct-by-val.ll =================================================================== --- test/CodeGen/Hexagon/ret-struct-by-val.ll +++ test/CodeGen/Hexagon/ret-struct-by-val.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s -; CHECK: r0 = add(r0, r1) +; CHECK: r0=add(r0,r1) ; Allow simple structures to be returned by value. Index: test/CodeGen/Hexagon/select-instr-align.ll =================================================================== --- test/CodeGen/Hexagon/select-instr-align.ll +++ test/CodeGen/Hexagon/select-instr-align.ll @@ -1,12 +1,12 @@ ; RUN: llc -march=hexagon -enable-hexagon-hvx < %s | FileCheck %s ; CHECK-LABEL: aligned_load: -; CHECK: = vmem({{.*}}) +; CHECK: =vmem({{.*}}) ; CHECK-LABEL: aligned_store: -; CHECK: vmem({{.*}}) = +; CHECK: vmem({{.*}})= ; CHECK-LABEL: unaligned_load: -; CHECK: = vmemu({{.*}}) +; CHECK: =vmemu({{.*}}) ; CHECK-LABEL: unaligned_store: -; CHECK: vmemu({{.*}}) = +; CHECK: vmemu({{.*}})= define <16 x i32> @aligned_load(<16 x i32>* %p, <16 x i32> %a) { %v = load <16 x i32>, <16 x i32>* %p, align 64 Index: test/CodeGen/Hexagon/sffms.ll =================================================================== --- test/CodeGen/Hexagon/sffms.ll +++ test/CodeGen/Hexagon/sffms.ll @@ -2,7 +2,7 @@ ; Check that "Rx-=sfmpy(Rs,Rt)" is being generated for "fsub(fmul(..))" -; CHECK: r{{[0-9]+}} -= sfmpy +; CHECK: r{{[0-9]+}}-=sfmpy %struct.matrix_params = type { float** } Index: test/CodeGen/Hexagon/signed_immediates.ll =================================================================== --- test/CodeGen/Hexagon/signed_immediates.ll +++ test/CodeGen/Hexagon/signed_immediates.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s ; s4_0Imm -; CHECK: memb(r0++#-1) = r1 +; CHECK: memb(r0++#-1)=r1 define i8* @foo1(i8* %a, i8 %b) { store i8 %b, i8* %a %c = getelementptr i8, i8* %a, i32 -1 @@ -9,7 +9,7 @@ } ; s4_1Imm -; CHECK: memh(r0++#-2) = r1 +; CHECK: memh(r0++#-2)=r1 define i16* @foo2(i16* %a, i16 %b) { store i16 %b, i16* %a %c = getelementptr i16, i16* %a, i32 -1 @@ -17,7 +17,7 @@ } ; s4_2Imm -; CHECK: memw(r0++#-4) = r1 +; CHECK: memw(r0++#-4)=r1 define i32* @foo3(i32* %a, i32 %b) { store i32 %b, i32* %a %c = getelementptr i32, i32* %a, i32 -1 @@ -25,7 +25,7 @@ } ; s4_3Imm -; CHECK: memd(r0++#-8) = r3:2 +; CHECK: memd(r0++#-8)=r3:2 define i64* @foo4(i64* %a, i64 %b) { store i64 %b, i64* %a %c = getelementptr i64, i64* %a, i32 -1 @@ -44,7 +44,7 @@ } ; s10Ext -; CHECK: p0 = cmp.eq(r0, #-1) +; CHECK: p0=cmp.eq(r0,#-1) define i1 @foo7(i32 %a) { %b = icmp eq i32 %a, -1 ret i1 %b @@ -51,7 +51,7 @@ } ; s11_0Ext -; CHECK: memb(r0+#-1) = r1 +; CHECK: memb(r0+#-1)=r1 define void @foo8(i8* %a, i8 %b) { %c = getelementptr i8, i8* %a, i32 -1 store i8 %b, i8* %c @@ -59,7 +59,7 @@ } ; s11_1Ext -; CHECK: memh(r0+#-2) = r1 +; CHECK: memh(r0+#-2)=r1 define void @foo9(i16* %a, i16 %b) { %c = getelementptr i16, i16* %a, i32 -1 store i16 %b, i16* %c @@ -67,7 +67,7 @@ } ; s11_2Ext -; CHECK: memw(r0+#-4) = r1 +; CHECK: memw(r0+#-4)=r1 define void @foo10(i32* %a, i32 %b) { 
%c = getelementptr i32, i32* %a, i32 -1 store i32 %b, i32* %c @@ -75,7 +75,7 @@ } ; s11_3Ext -; CHECK: memd(r0+#-8) = r3:2 +; CHECK: memd(r0+#-8)=r3:2 define void @foo11(i64* %a, i64 %b) { %c = getelementptr i64, i64* %a, i32 -1 store i64 %b, i64* %c @@ -83,7 +83,7 @@ } ; s12Ext -; CHECK: if (p0.new) r0 = #-1 +; CHECK: if (p0.new) r0=#-1 define i32 @foo12(i32 %a, i1 %b) { br i1 %b, label %x, label %y x: @@ -93,7 +93,7 @@ } ; s16Ext -; CHECK: r0 = #-2 +; CHECK: r0=#-2 define i32 @foo13() { ret i32 -2 -} \ No newline at end of file +} Index: test/CodeGen/Hexagon/stack-align1.ll =================================================================== --- test/CodeGen/Hexagon/stack-align1.ll +++ test/CodeGen/Hexagon/stack-align1.ll @@ -1,7 +1,7 @@ ; RUN: llc -O0 -march=hexagon < %s | FileCheck %s -; CHECK: and(r29, #-32) -; CHECK-DAG: add(r29, #0) -; CHECK-DAG: add(r29, #28) +; CHECK: and(r29,#-32) +; CHECK-DAG: add(r29,#0) +; CHECK-DAG: add(r29,#28) target triple = "hexagon-unknown-unknown" Index: test/CodeGen/Hexagon/stack-align2.ll =================================================================== --- test/CodeGen/Hexagon/stack-align2.ll +++ test/CodeGen/Hexagon/stack-align2.ll @@ -1,9 +1,9 @@ ; RUN: llc -O0 -march=hexagon < %s | FileCheck %s -; CHECK: and(r29, #-128) -; CHECK-DAG: add(r29, #0) -; CHECK-DAG: add(r29, #64) -; CHECK-DAG: add(r29, #96) -; CHECK-DAG: add(r29, #124) +; CHECK: and(r29,#-128) +; CHECK-DAG: add(r29,#0) +; CHECK-DAG: add(r29,#64) +; CHECK-DAG: add(r29,#96) +; CHECK-DAG: add(r29,#124) target triple = "hexagon-unknown-unknown" Index: test/CodeGen/Hexagon/stack-alloca1.ll =================================================================== --- test/CodeGen/Hexagon/stack-alloca1.ll +++ test/CodeGen/Hexagon/stack-alloca1.ll @@ -1,6 +1,6 @@ ; RUN: llc -O0 -march=hexagon < %s | FileCheck %s -; CHECK: sub(r29, r[[REG:[0-9]+]]) -; CHECK: r29 = r[[REG]] +; CHECK: sub(r29,r[[REG:[0-9]+]]) +; CHECK: r29=r[[REG]] target triple = "hexagon-unknown-unknown" Index: test/CodeGen/Hexagon/stack-alloca2.ll =================================================================== --- test/CodeGen/Hexagon/stack-alloca2.ll +++ test/CodeGen/Hexagon/stack-alloca2.ll @@ -1,9 +1,9 @@ ; RUN: llc -O0 -march=hexagon < %s | FileCheck %s -; CHECK-DAG: r[[AP:[0-9]+]] = and(r30, #-32) -; CHECK-DAG: r1 = add(r[[AP]], #-32) +; CHECK-DAG: r[[AP:[0-9]+]]=and(r30,#-32) +; CHECK-DAG: r1=add(r[[AP]],#-32) -; CHECK-DAG: sub(r29, r[[SP:[0-9]+]]) -; CHECK-DAG: r29 = r[[SP]] +; CHECK-DAG: sub(r29,r[[SP:[0-9]+]]) +; CHECK-DAG: r29=r[[SP]] target triple = "hexagon-unknown-unknown" Index: test/CodeGen/Hexagon/store-shift.ll =================================================================== --- test/CodeGen/Hexagon/store-shift.ll +++ test/CodeGen/Hexagon/store-shift.ll @@ -1,12 +1,12 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s -; CHECK-DAG: r[[BASE:[0-9]+]] += add -; CHECK-DAG: r[[IDX0:[0-9]+]] = add(r2, #5) -; CHECK-DAG: r[[IDX1:[0-9]+]] = add(r2, #6) -; CHECK-DAG: memw(r0 + r[[IDX0]]<<#2) = r3 -; CHECK-DAG: memw(r0 + r[[IDX1]]<<#2) = r3 -; CHECK-DAG: memw(r[[BASE]] + r[[IDX0]]<<#2) = r[[IDX0]] -; CHECK-DAG: memw(r[[BASE]] + r[[IDX1]]<<#2) = r[[IDX0]] +; CHECK-DAG: r[[BASE:[0-9]+]]+=add +; CHECK-DAG: r[[IDX0:[0-9]+]]=add(r2,#5) +; CHECK-DAG: r[[IDX1:[0-9]+]]=add(r2,#6) +; CHECK-DAG: memw(r0+r[[IDX0]]<<#2)=r3 +; CHECK-DAG: memw(r0+r[[IDX1]]<<#2)=r3 +; CHECK-DAG: memw(r[[BASE]]+r[[IDX0]]<<#2)=r[[IDX0]] +; CHECK-DAG: memw(r[[BASE]]+r[[IDX1]]<<#2)=r[[IDX0]] target triple = "hexagon" Index: 
test/CodeGen/Hexagon/storerinewabs.ll =================================================================== --- test/CodeGen/Hexagon/storerinewabs.ll +++ test/CodeGen/Hexagon/storerinewabs.ll @@ -4,7 +4,7 @@ ; There was a bug causing ### to be printed. Make sure we print ## instead. ; CHECK-LABEL: foo -; CHECK: memw(##global) = +; CHECK: memw(##global)= define void @foo(i32 %x) #0 { entry: Index: test/CodeGen/Hexagon/struct_args.ll =================================================================== --- test/CodeGen/Hexagon/struct_args.ll +++ test/CodeGen/Hexagon/struct_args.ll @@ -1,6 +1,6 @@ ; RUN: llc -march=hexagon -disable-hsdr < %s | FileCheck %s -; CHECK-DAG: r0 = memw -; CHECK-DAG: r1 = memw +; CHECK-DAG: r0=memw +; CHECK-DAG: r1=memw %struct.small = type { i32, i32 } Index: test/CodeGen/Hexagon/sube.ll =================================================================== --- test/CodeGen/Hexagon/sube.ll +++ test/CodeGen/Hexagon/sube.ll @@ -1,13 +1,13 @@ ; RUN: llc -march=hexagon -disable-hsdr -hexagon-expand-condsets=0 -hexagon-bit=0 -disable-post-ra < %s | FileCheck %s -; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #0) -; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #1) -; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}}) -; CHECK: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}}) -; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}) -; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}) -; CHECK: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}}) -; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}}) +; CHECK: r{{[0-9]+:[0-9]+}}=combine(#0,#0) +; CHECK: r{{[0-9]+:[0-9]+}}=combine(#0,#1) +; CHECK: p{{[0-9]+}}=cmp.gtu(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}}) +; CHECK: r{{[0-9]+:[0-9]+}}=sub(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}}) +; CHECK: r{{[0-9]+}}=mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}}) +; CHECK: r{{[0-9]+}}=mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}}) +; CHECK: r{{[0-9]+:[0-9]+}}=sub(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}}) +; CHECK: r{{[0-9]+:[0-9]+}}=combine(r{{[0-9]+}},r{{[0-9]+}}) define void @check_sube_subc(i64 %AL, i64 %AH, i64 %BL, i64 %BH, i64* %RL, i64* %RH) { entry: Index: test/CodeGen/Hexagon/subi-asl.ll =================================================================== --- test/CodeGen/Hexagon/subi-asl.ll +++ test/CodeGen/Hexagon/subi-asl.ll @@ -3,11 +3,11 @@ ; Check if S4_subi_asl_ri is being generated correctly. ; CHECK-LABEL: yes_sub_asl -; CHECK: [[REG1:(r[0-9]+)]] = sub(#0, asl([[REG1]], #1)) +; CHECK: [[REG1:(r[0-9]+)]]=sub(#0,asl([[REG1]],#1)) ; CHECK-LABEL: no_sub_asl -; CHECK: [[REG2:(r[0-9]+)]] = asl(r{{[0-9]+}}, #1) -; CHECK: r{{[0-9]+}} = sub([[REG2]], r{{[0-9]+}}) +; CHECK: [[REG2:(r[0-9]+)]]=asl(r{{[0-9]+}},#1) +; CHECK: r{{[0-9]+}}=sub([[REG2]],r{{[0-9]+}}) %struct.rtx_def = type { i16, i8 } Index: test/CodeGen/Hexagon/swp-const-tc.ll =================================================================== --- test/CodeGen/Hexagon/swp-const-tc.ll +++ test/CodeGen/Hexagon/swp-const-tc.ll @@ -4,7 +4,7 @@ ; of computing a new LC0 value. 
; CHECK-LABEL: @test -; CHECK: loop0(.LBB0_1, #998) +; CHECK: loop0(.LBB0_1,#998) define i32 @test(i32* %A, i32* %B, i32 %count) { entry: Index: test/CodeGen/Hexagon/swp-matmul-bitext.ll =================================================================== --- test/CodeGen/Hexagon/swp-matmul-bitext.ll +++ test/CodeGen/Hexagon/swp-matmul-bitext.ll @@ -6,12 +6,12 @@ ; CHECK: loop0(.LBB0_[[LOOP:.]], ; CHECK: .LBB0_[[LOOP]]: -; CHECK: = extractu([[REG2:(r[0-9]+)]], -; CHECK: = extractu([[REG2]], -; CHECK: [[REG0:(r[0-9]+)]] = memh -; CHECK: [[REG1:(r[0-9]+)]] = memh -; CHECK: += mpyi -; CHECK: [[REG2]] = mpyi([[REG0]], [[REG1]]) +; CHECK: =extractu([[REG2:(r[0-9]+)]], +; CHECK: =extractu([[REG2]], +; CHECK: [[REG0:(r[0-9]+)]]=memh +; CHECK: [[REG1:(r[0-9]+)]]=memh +; CHECK: +=mpyi +; CHECK: [[REG2]]=mpyi([[REG0]],[[REG1]]) ; CHECK: endloop0 %union_h2_sem_t = type { i32 } Index: test/CodeGen/Hexagon/swp-max.ll =================================================================== --- test/CodeGen/Hexagon/swp-max.ll +++ test/CodeGen/Hexagon/swp-max.ll @@ -15,9 +15,9 @@ ; CHECK: loop0(.LBB0_[[LOOP:.]], ; CHECK: .LBB0_[[LOOP]]: -; CHECK: [[REG1:(r[0-9]+)]] = max(r{{[0-9]+}}, [[REG1]]) -; CHECK: [[REG0:(r[0-9]+)]] = add([[REG2:(r[0-9]+)]], [[REG0]]) -; CHECK: [[REG2]] = memw +; CHECK: [[REG1:(r[0-9]+)]]=max(r{{[0-9]+}},[[REG1]]) +; CHECK: [[REG0:(r[0-9]+)]]=add([[REG2:(r[0-9]+)]],[[REG0]]) +; CHECK: [[REG2]]=memw ; CHECK: endloop0 for.body: Index: test/CodeGen/Hexagon/swp-multi-loops.ll =================================================================== --- test/CodeGen/Hexagon/swp-multi-loops.ll +++ test/CodeGen/Hexagon/swp-multi-loops.ll @@ -5,7 +5,7 @@ ; Check if the first loop is pipelined. ; CHECK: loop0(.LBB0_[[LOOP:.]], ; CHECK: .LBB0_[[LOOP]]: -; CHECK: add(r{{[0-9]+}}, r{{[0-9]+}}) +; CHECK: add(r{{[0-9]+}},r{{[0-9]+}}) ; CHECK-NEXT: memw(r{{[0-9]+}}{{.*}}++{{.*}}#4) ; CHECK-NEXT: endloop0 @@ -12,7 +12,7 @@ ; Check if the second loop is pipelined. ; CHECK: loop0(.LBB0_[[LOOP:.]], ; CHECK: .LBB0_[[LOOP]]: -; CHECK: add(r{{[0-9]+}}, r{{[0-9]+}}) +; CHECK: add(r{{[0-9]+}},r{{[0-9]+}}) ; CHECK-NEXT: memw(r{{[0-9]+}}{{.*}}++{{.*}}#4) ; CHECK-NEXT: endloop0 Index: test/CodeGen/Hexagon/swp-vect-dotprod.ll =================================================================== --- test/CodeGen/Hexagon/swp-vect-dotprod.ll +++ test/CodeGen/Hexagon/swp-vect-dotprod.ll @@ -5,11 +5,11 @@ ; Check that we pipeline a vectorized dot product in a single packet. 
; ; CHECK: { -; CHECK: += mpyi -; CHECK: += mpyi +; CHECK: +=mpyi +; CHECK: +=mpyi ; CHECK: memd ; CHECK: memd -; CHECK: } :endloop0 +; CHECK: } :endloop0 @a = common global [5000 x i32] zeroinitializer, align 8 @b = common global [5000 x i32] zeroinitializer, align 8 Index: test/CodeGen/Hexagon/swp-vmult.ll =================================================================== --- test/CodeGen/Hexagon/swp-vmult.ll +++ test/CodeGen/Hexagon/swp-vmult.ll @@ -2,10 +2,10 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 -O3 < %s | FileCheck %s ; Multiply and accumulate -; CHECK: mpyi([[REG0:r([0-9]+)]], [[REG1:r([0-9]+)]]) -; CHECK-NEXT: add(r{{[0-9]+}}, #4) -; CHECK-NEXT: [[REG0]] = memw(r{{[0-9]+}} + r{{[0-9]+}}<<#0) -; CHECK-NEXT: [[REG1]] = memw(r{{[0-9]+}} + r{{[0-9]+}}<<#0) +; CHECK: mpyi([[REG0:r([0-9]+)]],[[REG1:r([0-9]+)]]) +; CHECK-NEXT: add(r{{[0-9]+}},#4) +; CHECK-NEXT: [[REG0]]=memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0) +; CHECK-NEXT: [[REG1]]=memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0) ; CHECK-NEXT: endloop0 define i32 @foo(i32* %a, i32* %b, i32 %n) { Index: test/CodeGen/Hexagon/swp-vsum.ll =================================================================== --- test/CodeGen/Hexagon/swp-vsum.ll +++ test/CodeGen/Hexagon/swp-vsum.ll @@ -4,9 +4,9 @@ ; Simple vector total. ; CHECK: loop0(.LBB0_[[LOOP:.]], ; CHECK: .LBB0_[[LOOP]]: -; CHECK: add([[REG:r([0-9]+)]], r{{[0-9]+}}) -; CHECK-NEXT: add(r{{[0-9]+}}, #4) -; CHECK-NEXT: [[REG]] = memw(r{{[0-9]+}} + r{{[0-9]+}}<<#0) +; CHECK: add([[REG:r([0-9]+)]],r{{[0-9]+}}) +; CHECK-NEXT: add(r{{[0-9]+}},#4) +; CHECK-NEXT: [[REG]]=memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0) ; CHECK-NEXT: endloop0 define i32 @foo(i32* %a, i32 %n) { Index: test/CodeGen/Hexagon/tail-dup-subreg-map.ll =================================================================== --- test/CodeGen/Hexagon/tail-dup-subreg-map.ll +++ test/CodeGen/Hexagon/tail-dup-subreg-map.ll @@ -5,7 +5,7 @@ ; subregisters were dropped by the tail duplicator, resulting in invalid ; COPY instructions being generated. -; CHECK: = extractu(r{{[0-9]+}}, #15, #17) +; CHECK: =extractu(r{{[0-9]+}},#15,#17) target triple = "hexagon" Index: test/CodeGen/Hexagon/tfr-to-combine.ll =================================================================== --- test/CodeGen/Hexagon/tfr-to-combine.ll +++ test/CodeGen/Hexagon/tfr-to-combine.ll @@ -8,7 +8,7 @@ ; Function Attrs: nounwind define i64 @test1() #0 { -; CHECK: combine(#10, #0) +; CHECK: combine(#10,#0) entry: store i16 0, i16* @a, align 2 store i16 10, i16* @b, align 2 @@ -17,7 +17,7 @@ ; Function Attrs: nounwind define i64 @test2() #0 { -; CHECK: combine(#0, r{{[0-9]+}}) +; CHECK: combine(#0,r{{[0-9]+}}) entry: store i16 0, i16* @a, align 2 %0 = load i16, i16* @c, align 2 @@ -27,7 +27,7 @@ ; Function Attrs: nounwind define i64 @test4() #0 { -; CHECK: combine(#0, #100) +; CHECK: combine(#0,#100) entry: store i16 100, i16* @b, align 2 store i16 0, i16* @a, align 2 Index: test/CodeGen/Hexagon/tls_pic.ll =================================================================== --- test/CodeGen/Hexagon/tls_pic.ll +++ test/CodeGen/Hexagon/tls_pic.ll @@ -4,9 +4,9 @@ @src_ie = thread_local(initialexec) global i32 0, align 4 ; CHECK-LABEL: test_initial_exec -; CHECK-DAG: = add(pc, ##_GLOBAL_OFFSET_TABLE_@PCREL) -; CHECK-DAG: = ##src_ie@IEGOT -; CHECK-DAG: = ##dst_ie@IEGOT +; CHECK-DAG: =add(pc,##_GLOBAL_OFFSET_TABLE_@PCREL) +; CHECK-DAG: =##src_ie@IEGOT +; CHECK-DAG: =##dst_ie@IEGOT ; CHECK-NOT: call define i32 @test_initial_exec() nounwind { entry: @@ -22,9 +22,9 @@ ; general-dynamic model. 
; CHECK-LABEL: test_dynamic -; CHECK-DAG: = add(pc, ##_GLOBAL_OFFSET_TABLE_@PCREL) -; CHECK-DAG: = ##src_gd@GDGOT -; CHECK-DAG: = ##dst_gd@GDGOT +; CHECK-DAG: =add(pc,##_GLOBAL_OFFSET_TABLE_@PCREL) +; CHECK-DAG: =##src_gd@GDGOT +; CHECK-DAG: =##dst_gd@GDGOT ; CHECK-DAG: call src_gd@GDPLT ; CHECK-DAG: call dst_gd@GDPLT Index: test/CodeGen/Hexagon/tls_static.ll =================================================================== --- test/CodeGen/Hexagon/tls_static.ll +++ test/CodeGen/Hexagon/tls_static.ll @@ -4,8 +4,8 @@ @src_le = thread_local global i32 0, align 4 ; CHECK-LABEL: test_local_exec -; CHECK-DAG: = ##src_le@TPREL -; CHECK-DAG: = ##dst_le@TPREL +; CHECK-DAG: =##src_le@TPREL +; CHECK-DAG: =##dst_le@TPREL define i32 @test_local_exec() nounwind { entry: %0 = load i32, i32* @src_le, align 4 @@ -17,8 +17,8 @@ @src_ie = external thread_local global i32 ; CHECK-LABEL: test_initial_exec: -; CHECK-DAG: = memw(##src_ie@IE) -; CHECK-DAG: = memw(##dst_ie@IE) +; CHECK-DAG: =memw(##src_ie@IE) +; CHECK-DAG: =memw(##dst_ie@IE) define i32 @test_initial_exec() nounwind { entry: %0 = load i32, i32* @src_ie, align 4 Index: test/CodeGen/Hexagon/two-crash.ll =================================================================== --- test/CodeGen/Hexagon/two-crash.ll +++ test/CodeGen/Hexagon/two-crash.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s ; This testcase crashed, because we propagated a reg:sub into a tied use. ; The two-address pass rewrote it in a way that generated incorrect code. -; CHECK: r{{[0-9]+}} += lsr(r{{[0-9]+}}, #16) +; CHECK: r{{[0-9]+}}+=lsr(r{{[0-9]+}},#16) target triple = "hexagon" Index: test/CodeGen/Hexagon/v60-cur.ll =================================================================== --- test/CodeGen/Hexagon/v60-cur.ll +++ test/CodeGen/Hexagon/v60-cur.ll @@ -2,8 +2,8 @@ ; Test that we generate a .cur -; CHECK: v{{[0-9]*}}.cur{{ *}} -; CHECK: v{{[0-9]*}}.cur{{ *}} +; CHECK: v{{[0-9]*}}.cur +; CHECK: v{{[0-9]*}}.cur define void @conv3x3_i(i8* noalias nocapture readonly %iptr0, i32 %shift, i32 %width) #0 { entry: Index: test/CodeGen/Hexagon/v60-vsel1.ll =================================================================== --- test/CodeGen/Hexagon/v60-vsel1.ll +++ test/CodeGen/Hexagon/v60-vsel1.ll @@ -1,6 +1,6 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s -; CHECK: if (p{{[0-3]}}) v{{[0-9]+}} = v{{[0-9]+}} +; CHECK: if (p{{[0-3]}}) v{{[0-9]+}}=v{{[0-9]+}} target triple = "hexagon" Index: test/CodeGen/Hexagon/v60Intrins.ll =================================================================== --- test/CodeGen/Hexagon/v60Intrins.ll +++ test/CodeGen/Hexagon/v60Intrins.ll @@ -1,355 +1,355 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv60 -O2 -disable-post-ra < %s | FileCheck %s -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vsetq(r{{[0-9]*}}) -; CHECK: q{{[0-3]}} |= vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vcmp.eq(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vcmp.eq(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vcmp.eq(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} &= vcmp.eq(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} &= 
vcmp.eq(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} &= vcmp.eq(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} |= vcmp.eq(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} |= vcmp.eq(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} |= vcmp.eq(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} ^= vcmp.eq(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} ^= vcmp.eq(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} ^= vcmp.eq(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vcmp.gt(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vcmp.gt(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vcmp.gt(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vcmp.gt(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vcmp.gt(v{{[0-9]*}}.uw,v{{[0-9]*}}.uw) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vcmp.gt(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} &= vcmp.gt(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} &= vcmp.gt(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} &= vcmp.gt(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} &= vcmp.gt(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} &= vcmp.gt(v{{[0-9]*}}.uw,v{{[0-9]*}}.uw) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} &= vcmp.gt(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} |= vcmp.gt(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} |= vcmp.gt(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} |= vcmp.gt(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} |= 
vcmp.gt(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} |= vcmp.gt(v{{[0-9]*}}.uw,v{{[0-9]*}}.uw) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} |= vcmp.gt(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} ^= vcmp.gt(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} ^= vcmp.gt(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} ^= vcmp.gt(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} ^= vcmp.gt(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} ^= vcmp.gt(v{{[0-9]*}}.uw,v{{[0-9]*}}.uw) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} ^= vcmp.gt(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = xor{{[0-9]*}}(q{{[0-3]}},q{{[0-3]}}) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: v{{[0-9]*}} = v -; CHECK: v{{[0-9]*}} = valign(v{{[0-9]*}},v{{[0-9]*}},#0) -; CHECK: v{{[0-9]*}} = valign(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: v{{[0-9]*}} = vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: v{{[0-9]*}} = vand(v{{[0-9]*}},v{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: v{{[0-9]*}} |= vand(q{{[0-3]}},r{{[0-9]*}}) -; CHECK: v{{[0-9]*}} = vdelta(v{{[0-9]*}},v{{[0-9]*}}) -; CHECK: v{{[0-9]*}} = vlalign(v{{[0-9]*}},v{{[0-9]*}},#0) -; CHECK: v{{[0-9]*}} = vlalign(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: v{{[0-9]*}} = vmux(q{{[0-3]}},v{{[0-9]*}},v{{[0-9]*}}) -; CHECK: v{{[0-9]*}} = vnot(v{{[0-9]*}}) -; CHECK: v{{[0-9]*}} = vor{{[0-9]*}}(v{{[0-9]*}},v{{[0-9]*}}) -; CHECK: v{{[0-9]*}} = vr{{[0-9]*}}delta(v{{[0-9]*}},v{{[0-9]*}}) -; CHECK: v{{[0-9]*}} = vr{{[0-9]*}}or{{[0-9]*}}(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: v{{[0-9]*}} = vxor{{[0-9]*}}(v{{[0-9]*}},v{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.b = vadd(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.b = vasr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h,r{{[0-9]*}}):{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.b = vdeal(v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.b = vdeale(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.b = vlut32(v{{[0-9]*}}.b,v{{[0-9]*}}.b,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.b |= vlut32(v{{[0-9]*}}.b,v{{[0-9]*}}.b,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.b = vnav{{[0-9]*}}g(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}.b = vpack(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.b = vpacke(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.b = vpacko(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.b = vr{{[0-9]*}}ound(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.b = vshuff(v{{[0-9]*}}.b) -; CHECK: 
v{{[0-9]*}}.b = vshuffe(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.b = vshuffo(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.b = vsub(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.h = vabs(v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vabs(v{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.h = vadd(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vadd(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.h = vasl(v{{[0-9]*}}.h,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.h = vasl(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vasr{{[0-9]*}}(v{{[0-9]*}}.h,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.h = vasr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.h = vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w,r{{[0-9]*}}):{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.h = vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w,r{{[0-9]*}}):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.h = vav{{[0-9]*}}g(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vav{{[0-9]*}}g(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}r{{[0-9]*}}nd -; CHECK: v{{[0-9]*}}.h = vdeal(v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vdmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.h += vdmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.h = vlsr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vmax(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vmin(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.h = vmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.h = vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.h = vmpyi(v{{[0-9]*}}.h,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.h = vmpyi(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h += vmpyi(v{{[0-9]*}}.h,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.h += vmpyi(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vnav{{[0-9]*}}g(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vnor{{[0-9]*}}mamt(v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vpack(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.h = vpacke(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.h = vpacko(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.h = vpopcount(v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vr{{[0-9]*}}ound(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.h = vsat(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.h = vshuff(v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vshuffe(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vshuffo(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vsub(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.h = vsub(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.ub = vabsdiff(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}.ub = vadd(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.ub = vasr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h,r{{[0-9]*}}):{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.ub = vasr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h,r{{[0-9]*}}):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.ub = vav{{[0-9]*}}g(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}.ub = vav{{[0-9]*}}g(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub):{{[0-9]*}}r{{[0-9]*}}nd -; CHECK: v{{[0-9]*}}.ub = vmax(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}.ub = vmin(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; 
CHECK: v{{[0-9]*}}.ub = vpack(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.ub = vr{{[0-9]*}}ound(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.ub = vsat(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.ub = vsub(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.uh = vabsdiff(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.uh = vabsdiff(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}.uh = vadd(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.uh = vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w,r{{[0-9]*}}):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.uh = vav{{[0-9]*}}g(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}.uh = vav{{[0-9]*}}g(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh):{{[0-9]*}}r{{[0-9]*}}nd -; CHECK: v{{[0-9]*}}.uh = vcl0(v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}.uh = vlsr{{[0-9]*}}(v{{[0-9]*}}.uh,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.uh = vmax(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}.uh = vmin(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}.uh = vpack(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.uh = vr{{[0-9]*}}ound(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.uh = vsub(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.uw = vabsdiff(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.uw = vcl0(v{{[0-9]*}}.uw) -; CHECK: v{{[0-9]*}}.uw = vlsr{{[0-9]*}}(v{{[0-9]*}}.uw,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.uw = vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}.uw = vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}.uw += vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}.uw += vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}.w = vabs(v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.w = vabs(v{{[0-9]*}}.w):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w = vadd(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.w = vadd(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w = vasl(v{{[0-9]*}}.w,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.w = vasl(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.w += vasl(v{{[0-9]*}}.w,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.w = vasr{{[0-9]*}}(v{{[0-9]*}}.w,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.w = vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.w += vasr{{[0-9]*}}(v{{[0-9]*}}.w,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.w = vav{{[0-9]*}}g(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.w = vav{{[0-9]*}}g(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}r{{[0-9]*}}nd -; CHECK: v{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.uh):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.uh,#1):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.uh):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.uh,#1):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w = vinser{{[0-9]*}}t(r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.w = 
vinser{{[0-9]*}}t(r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.w = vinser{{[0-9]*}}t(r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}.w = vlsr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.w = vmax(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.w = vmin(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.w = vmpye(v{{[0-9]*}}.w,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}.w = vmpyi(v{{[0-9]*}}.w,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.w = vmpyi(v{{[0-9]*}}.w,r{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.w += vmpyi(v{{[0-9]*}}.w,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.w += vmpyi(v{{[0-9]*}}.w,r{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.w = vmpyie(v{{[0-9]*}}.w,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}.w += vmpyie(v{{[0-9]*}}.w,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.w += vmpyie(v{{[0-9]*}}.w,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}.w = vmpyieo(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.w = vmpyio(v{{[0-9]*}}.w,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}.w = vmpyo(v{{[0-9]*}}.w,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w = vmpyo(v{{[0-9]*}}.w,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}.w += vmpyo(v{{[0-9]*}}.w,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat:{{[0-9]*}}shift -; CHECK: v{{[0-9]*}}.w += vmpyo(v{{[0-9]*}}.w,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}sat:{{[0-9]*}}shift -; CHECK: v{{[0-9]*}}.w = vnav{{[0-9]*}}g(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.w = vnor{{[0-9]*}}mamt(v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.w = vr{{[0-9]*}}mpy(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.w = vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.w = vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.w += vr{{[0-9]*}}mpy(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.w += vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.w += vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}.w = vsub(v{{[0-9]*}}.w,v{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}.w = vsub(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vcombine(v{{[0-9]*}},v{{[0-9]*}}) -; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vdeal(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vshuff(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vshuff(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vshuff(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vswap(q{{[0-3]}},v{{[0-9]*}},v{{[0-9]*}}) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.b = vadd(v{{[0-9]*}}:{{[0-9]*}}.b,v{{[0-9]*}}:{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.b = vshuffoe(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.b = vsub(v{{[0-9]*}}:{{[0-9]*}}.b,v{{[0-9]*}}:{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vadd(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vadd(v{{[0-9]*}}:{{[0-9]*}}.h,v{{[0-9]*}}:{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vadd(v{{[0-9]*}}:{{[0-9]*}}.h,v{{[0-9]*}}:{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vdmpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vdmpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vlut16(v{{[0-9]*}}.b,v{{[0-9]*}}.h,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h |= vlut16(v{{[0-9]*}}.b,v{{[0-9]*}}.h,r{{[0-9]*}}) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vmpa(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = 
vmpa(v{{[0-9]*}}:{{[0-9]*}}.ub,v{{[0-9]*}}:{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vmpa(v{{[0-9]*}}:{{[0-9]*}}.ub,v{{[0-9]*}}:{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vmpa(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vmpy(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vmpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vmpy(v{{[0-9]*}}.b,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vmpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vshuffoe(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vsub(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vsub(v{{[0-9]*}}:{{[0-9]*}}.h,v{{[0-9]*}}:{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vsub(v{{[0-9]*}}:{{[0-9]*}}.h,v{{[0-9]*}}:{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vsxt(v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vtmpy(v{{[0-9]*}}:{{[0-9]*}}.b,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vtmpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vtmpy(v{{[0-9]*}}:{{[0-9]*}}.b,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h += vtmpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h = vunpack(v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h |= vunpacko(v{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.ub = vadd(v{{[0-9]*}}:{{[0-9]*}}.ub,v{{[0-9]*}}:{{[0-9]*}}.ub):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.ub = vsub(v{{[0-9]*}}:{{[0-9]*}}.ub,v{{[0-9]*}}:{{[0-9]*}}.ub):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh = vadd(v{{[0-9]*}}:{{[0-9]*}}.uh,v{{[0-9]*}}:{{[0-9]*}}.uh):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh = vmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh = vmpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh += vmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh += vmpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh = vsub(v{{[0-9]*}}:{{[0-9]*}}.uh,v{{[0-9]*}}:{{[0-9]*}}.uh):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh = vunpack(v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh = vzxt(v{{[0-9]*}}.ub) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vdsad(v{{[0-9]*}}:{{[0-9]*}}.uh,r{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw += vdsad(v{{[0-9]*}}:{{[0-9]*}}.uh,r{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vmpy(v{{[0-9]*}}.uh,r{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vmpy(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw += vmpy(v{{[0-9]*}}.uh,r{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw += vmpy(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vr{{[0-9]*}}mpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.ub,#0) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw += vr{{[0-9]*}}mpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.ub,#0) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vr{{[0-9]*}}sad(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.ub,#0) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw += vr{{[0-9]*}}sad(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.ub,#0) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vunpack(v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw = vzxt(v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vadd(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vadd(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) -; CHECK: 
v{{[0-9]*}}:{{[0-9]*}}.w = vadd(v{{[0-9]*}}:{{[0-9]*}}.w,v{{[0-9]*}}:{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vadd(v{{[0-9]*}}:{{[0-9]*}}.w,v{{[0-9]*}}:{{[0-9]*}}.w):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vmpa(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vmpa(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vr{{[0-9]*}}mpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b,#0) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vr{{[0-9]*}}mpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b,#0) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vsub(v{{[0-9]*}}.h,v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vsub(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vsub(v{{[0-9]*}}:{{[0-9]*}}.w,v{{[0-9]*}}:{{[0-9]*}}.w) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vsub(v{{[0-9]*}}:{{[0-9]*}}.w,v{{[0-9]*}}:{{[0-9]*}}.w):{{[0-9]*}}sat -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vsxt(v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vtmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w += vtmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w = vunpack(v{{[0-9]*}}.h) -; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w |= vunpacko(v{{[0-9]*}}.h) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vsetq(r{{[0-9]*}}) +; CHECK: q{{[0-3]}}|=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vcmp.eq(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vcmp.eq(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vcmp.eq(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}&=vcmp.eq(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}&=vcmp.eq(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}&=vcmp.eq(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}|=vcmp.eq(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}|=vcmp.eq(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}|=vcmp.eq(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}^=vcmp.eq(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; 
CHECK: q{{[0-3]}}^=vcmp.eq(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}^=vcmp.eq(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vcmp.gt(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vcmp.gt(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vcmp.gt(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vcmp.gt(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vcmp.gt(v{{[0-9]*}}.uw,v{{[0-9]*}}.uw) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vcmp.gt(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}&=vcmp.gt(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}&=vcmp.gt(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}&=vcmp.gt(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}&=vcmp.gt(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}&=vcmp.gt(v{{[0-9]*}}.uw,v{{[0-9]*}}.uw) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}&=vcmp.gt(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}|=vcmp.gt(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}|=vcmp.gt(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}|=vcmp.gt(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}|=vcmp.gt(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}|=vcmp.gt(v{{[0-9]*}}.uw,v{{[0-9]*}}.uw) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}|=vcmp.gt(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}^=vcmp.gt(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}^=vcmp.gt(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}^=vcmp.gt(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}^=vcmp.gt(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: 
q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}^=vcmp.gt(v{{[0-9]*}}.uw,v{{[0-9]*}}.uw) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}^=vcmp.gt(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=xor{{[0-9]*}}(q{{[0-3]}},q{{[0-3]}}) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}=v +; CHECK: v{{[0-9]*}}=valign(v{{[0-9]*}},v{{[0-9]*}},#0) +; CHECK: v{{[0-9]*}}=valign(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}=vand(v{{[0-9]*}},v{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}|=vand(q{{[0-3]}},r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}=vdelta(v{{[0-9]*}},v{{[0-9]*}}) +; CHECK: v{{[0-9]*}}=vlalign(v{{[0-9]*}},v{{[0-9]*}},#0) +; CHECK: v{{[0-9]*}}=vlalign(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}=vmux(q{{[0-3]}},v{{[0-9]*}},v{{[0-9]*}}) +; CHECK: v{{[0-9]*}}=vnot(v{{[0-9]*}}) +; CHECK: v{{[0-9]*}}=vor{{[0-9]*}}(v{{[0-9]*}},v{{[0-9]*}}) +; CHECK: v{{[0-9]*}}=vr{{[0-9]*}}delta(v{{[0-9]*}},v{{[0-9]*}}) +; CHECK: v{{[0-9]*}}=vr{{[0-9]*}}or{{[0-9]*}}(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}=vxor{{[0-9]*}}(v{{[0-9]*}},v{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.b=vadd(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.b=vasr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h,r{{[0-9]*}}):{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.b=vdeal(v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.b=vdeale(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.b=vlut32(v{{[0-9]*}}.b,v{{[0-9]*}}.b,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.b|=vlut32(v{{[0-9]*}}.b,v{{[0-9]*}}.b,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.b=vnav{{[0-9]*}}g(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}.b=vpack(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.b=vpacke(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.b=vpacko(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.b=vr{{[0-9]*}}ound(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.b=vshuff(v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.b=vshuffe(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.b=vshuffo(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.b=vsub(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.h=vabs(v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vabs(v{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.h=vadd(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vadd(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.h=vasl(v{{[0-9]*}}.h,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.h=vasl(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vasr{{[0-9]*}}(v{{[0-9]*}}.h,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.h=vasr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.h=vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w,r{{[0-9]*}}):{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.h=vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w,r{{[0-9]*}}):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.h=vav{{[0-9]*}}g(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vav{{[0-9]*}}g(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}r{{[0-9]*}}nd +; CHECK: v{{[0-9]*}}.h=vdeal(v{{[0-9]*}}.h) +; CHECK: 
v{{[0-9]*}}.h=vdmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.h+=vdmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.h=vlsr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vmax(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vmin(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.h=vmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.h=vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.h=vmpyi(v{{[0-9]*}}.h,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.h=vmpyi(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h+=vmpyi(v{{[0-9]*}}.h,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.h+=vmpyi(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vnav{{[0-9]*}}g(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vnor{{[0-9]*}}mamt(v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vpack(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.h=vpacke(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.h=vpacko(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.h=vpopcount(v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vr{{[0-9]*}}ound(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.h=vsat(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.h=vshuff(v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vshuffe(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vshuffo(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vsub(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.h=vsub(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.ub=vabsdiff(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}.ub=vadd(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.ub=vasr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h,r{{[0-9]*}}):{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.ub=vasr{{[0-9]*}}(v{{[0-9]*}}.h,v{{[0-9]*}}.h,r{{[0-9]*}}):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.ub=vav{{[0-9]*}}g(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}.ub=vav{{[0-9]*}}g(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub):{{[0-9]*}}r{{[0-9]*}}nd +; CHECK: v{{[0-9]*}}.ub=vmax(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}.ub=vmin(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}.ub=vpack(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.ub=vr{{[0-9]*}}ound(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.ub=vsat(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.ub=vsub(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.uh=vabsdiff(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.uh=vabsdiff(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}.uh=vadd(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.uh=vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w,r{{[0-9]*}}):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.uh=vav{{[0-9]*}}g(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}.uh=vav{{[0-9]*}}g(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh):{{[0-9]*}}r{{[0-9]*}}nd +; CHECK: v{{[0-9]*}}.uh=vcl0(v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}.uh=vlsr{{[0-9]*}}(v{{[0-9]*}}.uh,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.uh=vmax(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}.uh=vmin(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}.uh=vpack(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.uh=vr{{[0-9]*}}ound(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.uh=vsub(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh):{{[0-9]*}}sat +; CHECK: 
v{{[0-9]*}}.uw=vabsdiff(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.uw=vcl0(v{{[0-9]*}}.uw) +; CHECK: v{{[0-9]*}}.uw=vlsr{{[0-9]*}}(v{{[0-9]*}}.uw,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.uw=vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}.uw=vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}.uw+=vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}.uw+=vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}.w=vabs(v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.w=vabs(v{{[0-9]*}}.w):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w=vadd(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.w=vadd(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w=vasl(v{{[0-9]*}}.w,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.w=vasl(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.w+=vasl(v{{[0-9]*}}.w,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.w=vasr{{[0-9]*}}(v{{[0-9]*}}.w,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.w=vasr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.w+=vasr{{[0-9]*}}(v{{[0-9]*}}.w,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.w=vav{{[0-9]*}}g(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.w=vav{{[0-9]*}}g(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}r{{[0-9]*}}nd +; CHECK: v{{[0-9]*}}.w=vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.w=vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w=vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.uh):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w=vdmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w=vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w=vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.uh,#1):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w+=vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.w+=vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w+=vdmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.uh):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w+=vdmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w+=vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w+=vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.uh,#1):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w=vinser{{[0-9]*}}t(r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.w=vinser{{[0-9]*}}t(r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.w=vinser{{[0-9]*}}t(r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}.w=vlsr{{[0-9]*}}(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.w=vmax(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.w=vmin(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.w=vmpye(v{{[0-9]*}}.w,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}.w=vmpyi(v{{[0-9]*}}.w,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.w=vmpyi(v{{[0-9]*}}.w,r{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.w+=vmpyi(v{{[0-9]*}}.w,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.w+=vmpyi(v{{[0-9]*}}.w,r{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.w=vmpyie(v{{[0-9]*}}.w,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}.w+=vmpyie(v{{[0-9]*}}.w,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.w+=vmpyie(v{{[0-9]*}}.w,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}.w=vmpyieo(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.w=vmpyio(v{{[0-9]*}}.w,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}.w=vmpyo(v{{[0-9]*}}.w,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w=vmpyo(v{{[0-9]*}}.w,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}.w+=vmpyo(v{{[0-9]*}}.w,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}r{{[0-9]*}}nd:{{[0-9]*}}sat:{{[0-9]*}}shift +; CHECK: v{{[0-9]*}}.w+=vmpyo(v{{[0-9]*}}.w,v{{[0-9]*}}.h):{{[0-9]*}}<<1:{{[0-9]*}}sat:{{[0-9]*}}shift +; CHECK: 
v{{[0-9]*}}.w=vnav{{[0-9]*}}g(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.w=vnor{{[0-9]*}}mamt(v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.w=vr{{[0-9]*}}mpy(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.w=vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.w=vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.w+=vr{{[0-9]*}}mpy(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.w+=vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.w+=vr{{[0-9]*}}mpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}.w=vsub(v{{[0-9]*}}.w,v{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}.w=vsub(v{{[0-9]*}}.w,v{{[0-9]*}}.w):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}:{{[0-9]*}}=vcombine(v{{[0-9]*}},v{{[0-9]*}}) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}=vdeal(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}=vshuff(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}=vshuff(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}=vshuff(v{{[0-9]*}},v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}=vswap(q{{[0-3]}},v{{[0-9]*}},v{{[0-9]*}}) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.b=vadd(v{{[0-9]*}}:{{[0-9]*}}.b,v{{[0-9]*}}:{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.b=vshuffoe(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.b=vsub(v{{[0-9]*}}:{{[0-9]*}}.b,v{{[0-9]*}}:{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vadd(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vadd(v{{[0-9]*}}:{{[0-9]*}}.h,v{{[0-9]*}}:{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vadd(v{{[0-9]*}}:{{[0-9]*}}.h,v{{[0-9]*}}:{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vdmpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h+=vdmpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vlut16(v{{[0-9]*}}.b,v{{[0-9]*}}.h,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h|=vlut16(v{{[0-9]*}}.b,v{{[0-9]*}}.h,r{{[0-9]*}}) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vmpa(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vmpa(v{{[0-9]*}}:{{[0-9]*}}.ub,v{{[0-9]*}}:{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vmpa(v{{[0-9]*}}:{{[0-9]*}}.ub,v{{[0-9]*}}:{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h+=vmpa(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vmpy(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vmpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h+=vmpy(v{{[0-9]*}}.b,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h+=vmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h+=vmpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vshuffoe(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vsub(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vsub(v{{[0-9]*}}:{{[0-9]*}}.h,v{{[0-9]*}}:{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vsub(v{{[0-9]*}}:{{[0-9]*}}.h,v{{[0-9]*}}:{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vsxt(v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vtmpy(v{{[0-9]*}}:{{[0-9]*}}.b,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h=vtmpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h+=vtmpy(v{{[0-9]*}}:{{[0-9]*}}.b,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h+=vtmpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b) +; CHECK: 
v{{[0-9]*}}:{{[0-9]*}}.h=vunpack(v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.h|=vunpacko(v{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.ub=vadd(v{{[0-9]*}}:{{[0-9]*}}.ub,v{{[0-9]*}}:{{[0-9]*}}.ub):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.ub=vsub(v{{[0-9]*}}:{{[0-9]*}}.ub,v{{[0-9]*}}:{{[0-9]*}}.ub):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh=vadd(v{{[0-9]*}}:{{[0-9]*}}.uh,v{{[0-9]*}}:{{[0-9]*}}.uh):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh=vmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh=vmpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh+=vmpy(v{{[0-9]*}}.ub,r{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh+=vmpy(v{{[0-9]*}}.ub,v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh=vsub(v{{[0-9]*}}:{{[0-9]*}}.uh,v{{[0-9]*}}:{{[0-9]*}}.uh):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh=vunpack(v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uh=vzxt(v{{[0-9]*}}.ub) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw=vdsad(v{{[0-9]*}}:{{[0-9]*}}.uh,r{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw+=vdsad(v{{[0-9]*}}:{{[0-9]*}}.uh,r{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw=vmpy(v{{[0-9]*}}.uh,r{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw=vmpy(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw+=vmpy(v{{[0-9]*}}.uh,r{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw+=vmpy(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw=vr{{[0-9]*}}mpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.ub,#0) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw+=vr{{[0-9]*}}mpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.ub,#0) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw=vr{{[0-9]*}}sad(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.ub,#0) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw+=vr{{[0-9]*}}sad(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.ub,#0) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw=vunpack(v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.uw=vzxt(v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vadd(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vadd(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vadd(v{{[0-9]*}}:{{[0-9]*}}.w,v{{[0-9]*}}:{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vadd(v{{[0-9]*}}:{{[0-9]*}}.w,v{{[0-9]*}}:{{[0-9]*}}.w):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w+=vdmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vmpa(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w+=vmpa(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w+=vmpy(v{{[0-9]*}}.h,r{{[0-9]*}}.h):{{[0-9]*}}sat +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w+=vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w+=vmpy(v{{[0-9]*}}.h,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vr{{[0-9]*}}mpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b,#0) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w+=vr{{[0-9]*}}mpy(v{{[0-9]*}}:{{[0-9]*}}.ub,r{{[0-9]*}}.b,#0) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vsub(v{{[0-9]*}}.h,v{{[0-9]*}}.h) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vsub(v{{[0-9]*}}.uh,v{{[0-9]*}}.uh) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vsub(v{{[0-9]*}}:{{[0-9]*}}.w,v{{[0-9]*}}:{{[0-9]*}}.w) +; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vsub(v{{[0-9]*}}:{{[0-9]*}}.w,v{{[0-9]*}}:{{[0-9]*}}.w):{{[0-9]*}}sat +; CHECK: 
v{{[0-9]*}}:{{[0-9]*}}.w=vsxt(v{{[0-9]*}}.h)
+; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vtmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b)
+; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w+=vtmpy(v{{[0-9]*}}:{{[0-9]*}}.h,r{{[0-9]*}}.b)
+; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w=vunpack(v{{[0-9]*}}.h)
+; CHECK: v{{[0-9]*}}:{{[0-9]*}}.w|=vunpacko(v{{[0-9]*}}.h)
 target datalayout = "e-m:e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a:0-n16:32"
 target triple = "hexagon"
Index: test/CodeGen/Hexagon/v60small.ll
===================================================================
--- test/CodeGen/Hexagon/v60small.ll
+++ test/CodeGen/Hexagon/v60small.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -march=hexagon -O2 -mcpu=hexagonv60 < %s | FileCheck %s
-; CHECK: q{{[0-3]}} = v{{[0-9]*}}and(v{{[0-9]*}},r{{[0-9]*}})
+; CHECK: q{{[0-3]}}=vand(v{{[0-9]*}},r{{[0-9]*}})
 target datalayout = "e-m:e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a:0-n16:32"
 target triple = "hexagon"
Index: test/CodeGen/Hexagon/vaddh.ll
===================================================================
--- test/CodeGen/Hexagon/vaddh.ll
+++ test/CodeGen/Hexagon/vaddh.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-; CHECK: vaddh(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: vaddh(r{{[0-9]+}},r{{[0-9]+}})
 @j = external global i32
 @k = external global i32
Index: test/CodeGen/Hexagon/vec-pred-spill1.ll
===================================================================
--- test/CodeGen/Hexagon/vec-pred-spill1.ll
+++ test/CodeGen/Hexagon/vec-pred-spill1.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv60 -O2 -enable-hexagon-hvx < %s | FileCheck %s
-; CHECK: vmem(r{{[0-9]+}}+#3) = v{{[0-9]+}}
+; CHECK: vmem(r{{[0-9]+}}+#3)=v{{[0-9]+}}
 ; CHECK: call puts
 ; CHECK: call print_vecpred
 ; CHECK: v{{[0-9]+}}{{ *}}={{ *}}vmem(r{{[0-9]+}}+#3)
Index: test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
===================================================================
--- test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
+++ test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
 ; This one should generate a combine with two immediates.
-; CHECK: combine(#7, #7)
+; CHECK: combine(#7,#7)
 @B = common global [400 x i32] zeroinitializer, align 8
 @A = common global [400 x i32] zeroinitializer, align 8
 @C = common global [400 x i32] zeroinitializer, align 8
Index: test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
===================================================================
--- test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
+++ test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
 ; Check that store is post-incremented.
-; CHECK: memuh(r{{[0-9]+}} + {{ *}}#6{{ *}})
-; CHECK: combine(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}{{ *}})
+; CHECK: memuh(r{{[0-9]+}}+#6)
+; CHECK: combine(r{{[0-9]+}},r{{[0-9]+}})
 ; CHECK: vaddh
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
Index: test/CodeGen/Hexagon/vect/vect-shift-imm.ll
===================================================================
--- test/CodeGen/Hexagon/vect/vect-shift-imm.ll
+++ test/CodeGen/Hexagon/vect/vect-shift-imm.ll
@@ -6,12 +6,12 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s --check-prefix=CHECK-LSRH
 ;
 ; Make sure that the instructions with immediate operands are generated.
-; CHECK-ASLW: vaslw({{.*}}, #9)
-; CHECK-ASRW: vasrw({{.*}}, #8)
-; CHECK-LSRW: vlsrw({{.*}}, #7)
-; CHECK-ASLH: vaslh({{.*}}, #6)
-; CHECK-ASRH: vasrh({{.*}}, #5)
-; CHECK-LSRH: vlsrh({{.*}}, #4)
+; CHECK-ASLW: vaslw({{.*}},#9)
+; CHECK-ASRW: vasrw({{.*}},#8)
+; CHECK-LSRW: vlsrw({{.*}},#7)
+; CHECK-ASLH: vaslh({{.*}},#6)
+; CHECK-ASRH: vasrh({{.*}},#5)
+; CHECK-LSRH: vlsrh({{.*}},#4)
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
Index: test/CodeGen/Hexagon/vect/vect-vshifts.ll
===================================================================
--- test/CodeGen/Hexagon/vect/vect-vshifts.ll
+++ test/CodeGen/Hexagon/vect/vect-vshifts.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
 ; Check that store is post-incremented.
-; CHECK: r{{[0-9]+:[0-9]+}} = vasrw(r{{[0-9]+:[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = vaslw(r{{[0-9]+:[0-9]+}}, r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}}=vasrw(r{{[0-9]+:[0-9]+}},r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}}=vaslw(r{{[0-9]+:[0-9]+}},r{{[0-9]+}})
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
Index: test/CodeGen/Hexagon/vect/vect-xor.ll
===================================================================
--- test/CodeGen/Hexagon/vect/vect-xor.ll
+++ test/CodeGen/Hexagon/vect/vect-xor.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
 ; Check that the parsing succeeded.
-; CHECK: r{{[0-9]+:[0-9]+}} = xor(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}}=xor(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
Index: test/CodeGen/Hexagon/vload-postinc-sel.ll
===================================================================
--- test/CodeGen/Hexagon/vload-postinc-sel.ll
+++ test/CodeGen/Hexagon/vload-postinc-sel.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: = vmem(r{{[0-9]+}}++#1)
+; CHECK: =vmem(r{{[0-9]+}}++#1)
 target triple = "hexagon-unknown--elf"
Index: test/CodeGen/Hexagon/zextloadi1.ll
===================================================================
--- test/CodeGen/Hexagon/zextloadi1.ll
+++ test/CodeGen/Hexagon/zextloadi1.ll
@@ -6,12 +6,12 @@
 @i129_s = external global i129
 ; CHECK-LABEL: i129_ls
-; CHECK-DAG: r[[REG0:[0-9:]+]] = memd(##i129_l)
-; CHECK-DAG: r[[REG1:[0-9:]+]] = memd(##i129_l+8)
-; CHECK-DAG: r[[REG2:[0-9]+]] = memub(##i129_l+16)
-; CHECK-DAG: memb(##i129_s+16) = r[[REG2]]
-; CHECK-DAG: memd(##i129_s+8) = r[[REG1]]
-; CHECK-DAG: memd(##i129_s) = r[[REG0]]
+; CHECK-DAG: r[[REG0:[0-9:]+]]=memd(##i129_l)
+; CHECK-DAG: r[[REG1:[0-9:]+]]=memd(##i129_l+8)
+; CHECK-DAG: r[[REG2:[0-9]+]]=memub(##i129_l+16)
+; CHECK-DAG: memb(##i129_s+16)=r[[REG2]]
+; CHECK-DAG: memd(##i129_s+8)=r[[REG1]]
+; CHECK-DAG: memd(##i129_s)=r[[REG0]]
 define void @i129_ls() nounwind {
 %tmp = load i129, i129* @i129_l
 store i129 %tmp, i129* @i129_s
@@ -19,10 +19,10 @@
 }
 ; CHECK-LABEL: i65_ls
-; CHECK-DAG: r[[REG0:[0-9:]+]] = memd(##i65_l)
-; CHECK-DAG: r[[REG1:[0-9]+]] = memub(##i65_l+8)
-; CHECK-DAG: memd(##i65_s) = r[[REG0]]
-; CHECK-DAG: memb(##i65_s+8) = r[[REG1]]
+; CHECK-DAG: r[[REG0:[0-9:]+]]=memd(##i65_l)
+; CHECK-DAG: r[[REG1:[0-9]+]]=memub(##i65_l+8)
+; CHECK-DAG:
memd(##i65_s)=r[[REG0]]
+; CHECK-DAG: memb(##i65_s+8)=r[[REG1]]
 define void @i65_ls() nounwind {
 %tmp = load i65, i65* @i65_l
 store i65 %tmp, i65* @i65_s
Index: test/MC/Disassembler/Hexagon/alu32_alu.txt
===================================================================
--- test/MC/Disassembler/Hexagon/alu32_alu.txt
+++ test/MC/Disassembler/Hexagon/alu32_alu.txt
@@ -3,27 +3,27 @@
 # Add
 0xf1 0xc3 0x15 0xb0
-# CHECK: r17 = add(r21, #31)
+# CHECK: r17=add(r21,#31)
 0x11 0xdf 0x15 0xf3
-# CHECK: r17 = add(r21, r31)
+# CHECK: r17=add(r21,r31)
 0x11 0xdf 0x55 0xf6
-# CHECK: r17 = add(r21, r31):sat
+# CHECK: r17=add(r21,r31):sat
 # And
 0xf1 0xc3 0x15 0x76
-# CHECK: r17 = and(r21, #31)
+# CHECK: r17=and(r21,#31)
 0xf1 0xc3 0x95 0x76
-# CHECK: r17 = or(r21, #31)
+# CHECK: r17=or(r21,#31)
 0x11 0xdf 0x15 0xf1
-# CHECK: r17 = and(r21, r31)
+# CHECK: r17=and(r21,r31)
 0x11 0xdf 0x35 0xf1
-# CHECK: r17 = or(r21, r31)
+# CHECK: r17=or(r21,r31)
 0x11 0xdf 0x75 0xf1
-# CHECK: r17 = xor(r21, r31)
+# CHECK: r17=xor(r21,r31)
 0x11 0xd5 0x9f 0xf1
-# CHECK: r17 = and(r21, ~r31)
+# CHECK: r17=and(r21,~r31)
 0x11 0xd5 0xbf 0xf1
-# CHECK: r17 = or(r21, ~r31)
+# CHECK: r17=or(r21,~r31)
 # Nop
 0x00 0xc0 0x00 0x7f
@@ -31,54 +31,54 @@
 # Subtract
 0xb1 0xc2 0x5f 0x76
-# CHECK: r17 = sub(#21, r31)
+# CHECK: r17=sub(#21,r31)
 0x11 0xdf 0x35 0xf3
-# CHECK: r17 = sub(r31, r21)
+# CHECK: r17=sub(r31,r21)
 0x11 0xdf 0xd5 0xf6
-# CHECK: r17 = sub(r31, r21):sat
+# CHECK: r17=sub(r31,r21):sat
 # Sign extend
 0x11 0xc0 0xbf 0x70
-# CHECK: r17 = sxtb(r31)
+# CHECK: r17=sxtb(r31)
 # Transfer immediate
 0x15 0xc0 0x31 0x72
-# CHECK: r17.h = #21
+# CHECK: r17.h=#21
 0x15 0xc0 0x31 0x71
-# CHECK: r17.l = #21
+# CHECK: r17.l=#21
 0xf1 0xff 0x5f 0x78
-# CHECK: r17 = #32767
+# CHECK: r17=#32767
 0xf1 0xff 0xdf 0x78
-# CHECK: r17 = #-1
+# CHECK: r17=#-1
 # Transfer register
 0x11 0xc0 0x75 0x70
-# CHECK: r17 = r21
+# CHECK: r17=r21
 # Vector add halfwords
 0x11 0xdf 0x15 0xf6
-# CHECK: r17 = vaddh(r21, r31)
+# CHECK: r17=vaddh(r21,r31)
 0x11 0xdf 0x35 0xf6
-# CHECK: r17 = vaddh(r21, r31):sat
+# CHECK: r17=vaddh(r21,r31):sat
 0x11 0xdf 0x75 0xf6
-# CHECK: r17 = vadduh(r21, r31):sat
+# CHECK: r17=vadduh(r21,r31):sat
 # Vector average halfwords
 0x11 0xdf 0x15 0xf7
-# CHECK: r17 = vavgh(r21, r31)
+# CHECK: r17=vavgh(r21,r31)
 0x11 0xdf 0x35 0xf7
-# CHECK: r17 = vavgh(r21, r31):rnd
+# CHECK: r17=vavgh(r21,r31):rnd
 0x11 0xdf 0x75 0xf7
-# CHECK: r17 = vnavgh(r31, r21)
+# CHECK: r17=vnavgh(r31,r21)
 # Vector subtract halfwords
 0x11 0xdf 0x95 0xf6
-# CHECK: r17 = vsubh(r31, r21)
+# CHECK: r17=vsubh(r31,r21)
 0x11 0xdf 0xb5 0xf6
-# CHECK: r17 = vsubh(r31, r21):sat
+# CHECK: r17=vsubh(r31,r21):sat
 0x11 0xdf 0xf5 0xf6
-# CHECK: r17 = vsubuh(r31, r21):sat
+# CHECK: r17=vsubuh(r31,r21):sat
 # Zero extend
 0x11 0xc0 0xd5 0x70
-# CHECK: r17 = zxth(r21)
+# CHECK: r17=zxth(r21)
Index: test/MC/Disassembler/Hexagon/alu32_perm.txt
===================================================================
--- test/MC/Disassembler/Hexagon/alu32_perm.txt
+++ test/MC/Disassembler/Hexagon/alu32_perm.txt
@@ -3,38 +3,38 @@
 # Combine words in to doublewords
 0x11 0xdf 0x95 0xf3
-# CHECK: r17 = combine(r31.h, r21.h)
+# CHECK: r17=combine(r31.h,r21.h)
 0x11 0xdf 0xb5 0xf3
-# CHECK: r17 = combine(r31.h, r21.l)
+# CHECK: r17=combine(r31.h,r21.l)
 0x11 0xdf 0xd5 0xf3
-# CHECK: r17 = combine(r31.l, r21.h)
+# CHECK: r17=combine(r31.l,r21.h)
 0x11 0xdf 0xf5 0xf3
-# CHECK: r17 = combine(r31.l, r21.l)
+# CHECK: r17=combine(r31.l,r21.l)
 0xb0 0xe2 0x0f 0x7c
-# CHECK: r17:16 = combine(#21, #31)
+# CHECK:
r17:16=combine(#21,#31) 0xb0 0xe2 0x3f 0x73 -# CHECK: r17:16 = combine(#21, r31) +# CHECK: r17:16=combine(#21,r31) 0xf0 0xe3 0x15 0x73 -# CHECK: r17:16 = combine(r21, #31) +# CHECK: r17:16=combine(r21,#31) 0x10 0xdf 0x15 0xf5 -# CHECK: r17:16 = combine(r21, r31) +# CHECK: r17:16=combine(r21,r31) # Mux 0xf1 0xc3 0x75 0x73 -# CHECK: r17 = mux(p3, r21, #31) +# CHECK: r17=mux(p3,r21,#31) 0xb1 0xc2 0xff 0x73 -# CHECK: r17 = mux(p3, #21, r31) +# CHECK: r17=mux(p3,#21,r31) 0xb1 0xe2 0x8f 0x7b -# CHECK: r17 = mux(p3, #21, #31) +# CHECK: r17=mux(p3,#21,#31) 0x71 0xdf 0x15 0xf4 -# CHECK: r17 = mux(p3, r21, r31) +# CHECK: r17=mux(p3,r21,r31) # Shift word by 16 0x11 0xc0 0x15 0x70 -# CHECK: r17 = aslh(r21) +# CHECK: r17=aslh(r21) 0x11 0xc0 0x35 0x70 -# CHECK: r17 = asrh(r21) +# CHECK: r17=asrh(r21) # Pack high and low halfwords 0x10 0xdf 0x95 0xf5 -# CHECK: r17:16 = packhl(r21, r31) +# CHECK: r17:16=packhl(r21,r31) Index: test/MC/Disassembler/Hexagon/alu32_pred.txt =================================================================== --- test/MC/Disassembler/Hexagon/alu32_pred.txt +++ test/MC/Disassembler/Hexagon/alu32_pred.txt @@ -3,192 +3,192 @@ # Conditional add 0xf1 0xc3 0x75 0x74 -# CHECK: if (p3) r17 = add(r21, #31) +# CHECK: if (p3) r17=add(r21,#31) 0x03 0x40 0x45 0x85 0xf1 0xe3 0x75 0x74 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = add(r21, #31) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=add(r21,#31) 0xf1 0xc3 0xf5 0x74 -# CHECK: if (!p3) r17 = add(r21, #31) +# CHECK: if (!p3) r17=add(r21,#31) 0x03 0x40 0x45 0x85 0xf1 0xe3 0xf5 0x74 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = add(r21, #31) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=add(r21,#31) 0x71 0xdf 0x15 0xfb -# CHECK: if (p3) r17 = add(r21, r31) +# CHECK: if (p3) r17=add(r21,r31) 0x03 0x40 0x45 0x85 0x71 0xff 0x15 0xfb -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = add(r21, r31) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=add(r21,r31) 0xf1 0xdf 0x15 0xfb -# CHECK: if (!p3) r17 = add(r21, r31) +# CHECK: if (!p3) r17=add(r21,r31) 0x03 0x40 0x45 0x85 0xf1 0xff 0x15 0xfb -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = add(r21, r31) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=add(r21,r31) # Conditional shift halfword 0x11 0xe3 0x15 0x70 -# CHECK: if (p3) r17 = aslh(r21) +# CHECK: if (p3) r17=aslh(r21) 0x03 0x40 0x45 0x85 0x11 0xe7 0x15 0x70 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = aslh(r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=aslh(r21) 0x11 0xeb 0x15 0x70 -# CHECK: if (!p3) r17 = aslh(r21) +# CHECK: if (!p3) r17=aslh(r21) 0x03 0x40 0x45 0x85 0x11 0xef 0x15 0x70 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = aslh(r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=aslh(r21) 0x11 0xe3 0x35 0x70 -# CHECK: if (p3) r17 = asrh(r21) +# CHECK: if (p3) r17=asrh(r21) 0x03 0x40 0x45 0x85 0x11 0xe7 0x35 0x70 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = asrh(r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=asrh(r21) 0x11 0xeb 0x35 0x70 -# CHECK: if (!p3) r17 = asrh(r21) +# CHECK: if (!p3) r17=asrh(r21) 0x03 0x40 0x45 0x85 0x11 0xef 0x35 0x70 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = asrh(r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=asrh(r21) # Conditional combine 0x70 0xdf 0x15 0xfd -# CHECK: if (p3) r17:16 = combine(r21, r31) +# CHECK: if (p3) r17:16=combine(r21,r31) 0xf0 0xdf 0x15 0xfd -# CHECK: if (!p3) r17:16 = combine(r21, r31) +# CHECK: if (!p3) r17:16=combine(r21,r31) 0x03 0x40 0x45 0x85 0x70 0xff 0x15 0xfd -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17:16 = 
combine(r21, r31) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17:16=combine(r21,r31) 0x03 0x40 0x45 0x85 0xf0 0xff 0x15 0xfd -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17:16 = combine(r21, r31) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17:16=combine(r21,r31) # Conditional logical operations 0x71 0xdf 0x15 0xf9 -# CHECK: if (p3) r17 = and(r21, r31) +# CHECK: if (p3) r17=and(r21,r31) 0xf1 0xdf 0x15 0xf9 -# CHECK: if (!p3) r17 = and(r21, r31) +# CHECK: if (!p3) r17=and(r21,r31) 0x03 0x40 0x45 0x85 0x71 0xff 0x15 0xf9 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = and(r21, r31) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=and(r21,r31) 0x03 0x40 0x45 0x85 0xf1 0xff 0x15 0xf9 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = and(r21, r31) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=and(r21,r31) 0x71 0xdf 0x35 0xf9 -# CHECK: if (p3) r17 = or(r21, r31) +# CHECK: if (p3) r17=or(r21,r31) 0xf1 0xdf 0x35 0xf9 -# CHECK: if (!p3) r17 = or(r21, r31) +# CHECK: if (!p3) r17=or(r21,r31) 0x03 0x40 0x45 0x85 0x71 0xff 0x35 0xf9 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = or(r21, r31) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=or(r21,r31) 0x03 0x40 0x45 0x85 0xf1 0xff 0x35 0xf9 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = or(r21, r31) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=or(r21,r31) 0x71 0xdf 0x75 0xf9 -# CHECK: if (p3) r17 = xor(r21, r31) +# CHECK: if (p3) r17=xor(r21,r31) 0xf1 0xdf 0x75 0xf9 -# CHECK: if (!p3) r17 = xor(r21, r31) +# CHECK: if (!p3) r17=xor(r21,r31) 0x03 0x40 0x45 0x85 0x71 0xff 0x75 0xf9 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = xor(r21, r31) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=xor(r21,r31) 0x03 0x40 0x45 0x85 0xf1 0xff 0x75 0xf9 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = xor(r21, r31) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=xor(r21,r31) # Conditional subtract 0x71 0xdf 0x35 0xfb -# CHECK: if (p3) r17 = sub(r31, r21) +# CHECK: if (p3) r17=sub(r31,r21) 0xf1 0xdf 0x35 0xfb -# CHECK: if (!p3) r17 = sub(r31, r21) +# CHECK: if (!p3) r17=sub(r31,r21) 0x03 0x40 0x45 0x85 0x71 0xff 0x35 0xfb -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = sub(r31, r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=sub(r31,r21) 0x03 0x40 0x45 0x85 0xf1 0xff 0x35 0xfb -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = sub(r31, r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=sub(r31,r21) # Conditional sign extend 0x11 0xe3 0xb5 0x70 -# CHECK: if (p3) r17 = sxtb(r21) +# CHECK: if (p3) r17=sxtb(r21) 0x11 0xeb 0xb5 0x70 -# CHECK: if (!p3) r17 = sxtb(r21) +# CHECK: if (!p3) r17=sxtb(r21) 0x03 0x40 0x45 0x85 0x11 0xe7 0xb5 0x70 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = sxtb(r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=sxtb(r21) 0x03 0x40 0x45 0x85 0x11 0xef 0xb5 0x70 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = sxtb(r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=sxtb(r21) 0x11 0xe3 0xf5 0x70 -# CHECK: if (p3) r17 = sxth(r21) +# CHECK: if (p3) r17=sxth(r21) 0x11 0xeb 0xf5 0x70 -# CHECK: if (!p3) r17 = sxth(r21) +# CHECK: if (!p3) r17=sxth(r21) 0x03 0x40 0x45 0x85 0x11 0xe7 0xf5 0x70 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = sxth(r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=sxth(r21) 0x03 0x40 0x45 0x85 0x11 0xef 0xf5 0x70 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = sxth(r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=sxth(r21) # Conditional transfer 0xb1 0xc2 0x60 0x7e -# CHECK: if (p3) r17 = #21 +# CHECK: if (p3) r17=#21 0xb1 0xc2 0xe0 0x7e -# CHECK: if (!p3) r17 = #21 +# 
CHECK: if (!p3) r17=#21 0x03 0x40 0x45 0x85 0xb1 0xe2 0x60 0x7e -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = #21 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=#21 0x03 0x40 0x45 0x85 0xb1 0xe2 0xe0 0x7e -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = #21 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=#21 # Conditional zero extend 0x11 0xe3 0x95 0x70 -# CHECK: if (p3) r17 = zxtb(r21) +# CHECK: if (p3) r17=zxtb(r21) 0x11 0xeb 0x95 0x70 -# CHECK: if (!p3) r17 = zxtb(r21) +# CHECK: if (!p3) r17=zxtb(r21) 0x03 0x40 0x45 0x85 0x11 0xe7 0x95 0x70 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = zxtb(r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=zxtb(r21) 0x03 0x40 0x45 0x85 0x11 0xef 0x95 0x70 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = zxtb(r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=zxtb(r21) 0x11 0xe3 0xd5 0x70 -# CHECK: if (p3) r17 = zxth(r21) +# CHECK: if (p3) r17=zxth(r21) 0x11 0xeb 0xd5 0x70 -# CHECK: if (!p3) r17 = zxth(r21) +# CHECK: if (!p3) r17=zxth(r21) 0x03 0x40 0x45 0x85 0x11 0xe7 0xd5 0x70 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = zxth(r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=zxth(r21) 0x03 0x40 0x45 0x85 0x11 0xef 0xd5 0x70 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = zxth(r21) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=zxth(r21) # Compare 0xe3 0xc3 0x15 0x75 -# CHECK: p3 = cmp.eq(r21, #31) +# CHECK: p3=cmp.eq(r21,#31) 0xf3 0xc3 0x15 0x75 -# CHECK: p3 = !cmp.eq(r21, #31) +# CHECK: p3=!cmp.eq(r21,#31) 0xe3 0xc3 0x55 0x75 -# CHECK: p3 = cmp.gt(r21, #31) +# CHECK: p3=cmp.gt(r21,#31) 0xf3 0xc3 0x55 0x75 -# CHECK: p3 = !cmp.gt(r21, #31) +# CHECK: p3=!cmp.gt(r21,#31) 0xe3 0xc3 0x95 0x75 -# CHECK: p3 = cmp.gtu(r21, #31) +# CHECK: p3=cmp.gtu(r21,#31) 0xf3 0xc3 0x95 0x75 -# CHECK: p3 = !cmp.gtu(r21, #31) +# CHECK: p3=!cmp.gtu(r21,#31) 0x03 0xdf 0x15 0xf2 -# CHECK: p3 = cmp.eq(r21, r31) +# CHECK: p3=cmp.eq(r21,r31) 0x13 0xdf 0x15 0xf2 -# CHECK: p3 = !cmp.eq(r21, r31) +# CHECK: p3=!cmp.eq(r21,r31) 0x03 0xdf 0x55 0xf2 -# CHECK: p3 = cmp.gt(r21, r31) +# CHECK: p3=cmp.gt(r21,r31) 0x13 0xdf 0x55 0xf2 -# CHECK: p3 = !cmp.gt(r21, r31) +# CHECK: p3=!cmp.gt(r21,r31) 0x03 0xdf 0x75 0xf2 -# CHECK: p3 = cmp.gtu(r21, r31) +# CHECK: p3=cmp.gtu(r21,r31) 0x13 0xdf 0x75 0xf2 -# CHECK: p3 = !cmp.gtu(r21, r31) +# CHECK: p3=!cmp.gtu(r21,r31) # Compare to general register 0xf1 0xe3 0x55 0x73 -# CHECK: r17 = cmp.eq(r21, #31) +# CHECK: r17=cmp.eq(r21,#31) 0xf1 0xe3 0x75 0x73 -# CHECK: r17 = !cmp.eq(r21, #31) +# CHECK: r17=!cmp.eq(r21,#31) 0x11 0xdf 0x55 0xf3 -# CHECK: r17 = cmp.eq(r21, r31) +# CHECK: r17=cmp.eq(r21,r31) 0x11 0xdf 0x75 0xf3 -# CHECK: r17 = !cmp.eq(r21, r31) +# CHECK: r17=!cmp.eq(r21,r31) Index: test/MC/Disassembler/Hexagon/cr.txt =================================================================== --- test/MC/Disassembler/Hexagon/cr.txt +++ test/MC/Disassembler/Hexagon/cr.txt @@ -3,15 +3,15 @@ # Corner detection acceleration 0x93 0xe1 0x12 0x6b -# CHECK: p3 = !fastcorner9(p2, p1) +# CHECK: p3=!fastcorner9(p2,p1) 0x91 0xe3 0x02 0x6b -# CHECK: p1 = fastcorner9(p2, p3) +# CHECK: p1=fastcorner9(p2,p3) # Logical reductions on predicates 0x01 0xc0 0x82 0x6b -# CHECK: p1 = any8(p2) +# CHECK: p1=any8(p2) 0x01 0xc0 0xa2 0x6b -# CHECK: p1 = all8(p2) +# CHECK: p1=all8(p2) # Looping instructions 0x08 0xc4 0x15 0x60 @@ -25,54 +25,54 @@ # Add to PC 0x91 0xca 0x49 0x6a -# CHECK: r17 = add(pc, #21) +# CHECK: r17=add(pc,#21) # Pipelined loop instructions 0x08 0xc4 0xb5 0x60 -# CHECK: p3 = sp1loop0 +# CHECK: p3=sp1loop0 0x08 0xc4 0xd5 0x60 -# 
CHECK: p3 = sp2loop0 +# CHECK: p3=sp2loop0 0x08 0xc4 0xf5 0x60 -# CHECK: p3 = sp3loop0 +# CHECK: p3=sp3loop0 0xa9 0xc4 0xa0 0x69 -# CHECK: p3 = sp1loop0 +# CHECK: p3=sp1loop0 0xa9 0xc4 0xc0 0x69 -# CHECK: p3 = sp2loop0 +# CHECK: p3=sp2loop0 0xa9 0xc4 0xe0 0x69 -# CHECK: p3 = sp3loop0 +# CHECK: p3=sp3loop0 # Logical operations on predicates 0x01 0xc3 0x02 0x6b -# CHECK: p1 = and(p3, p2) +# CHECK: p1=and(p3,p2) 0xc1 0xc3 0x12 0x6b -# CHECK: p1 = and(p2, and(p3, p3)) +# CHECK: p1=and(p2,and(p3,p3)) 0x01 0xc3 0x22 0x6b -# CHECK: p1 = or(p3, p2) +# CHECK: p1=or(p3,p2) 0xc1 0xc3 0x32 0x6b -# CHECK: p1 = and(p2, or(p3, p3)) +# CHECK: p1=and(p2,or(p3,p3)) 0x01 0xc3 0x42 0x6b -# CHECK: p1 = xor(p2, p3) +# CHECK: p1=xor(p2,p3) 0xc1 0xc3 0x52 0x6b -# CHECK: p1 = or(p2, and(p3, p3)) +# CHECK: p1=or(p2,and(p3,p3)) 0x01 0xc2 0x63 0x6b -# CHECK: p1 = and(p2, !p3) +# CHECK: p1=and(p2,!p3) 0xc1 0xc3 0x72 0x6b -# CHECK: p1 = or(p2, or(p3, p3)) +# CHECK: p1=or(p2,or(p3,p3)) 0xc1 0xc3 0x92 0x6b -# CHECK: p1 = and(p2, and(p3, !p3)) +# CHECK: p1=and(p2,and(p3,!p3)) 0xc1 0xc3 0xb2 0x6b -# CHECK: p1 = and(p2, or(p3, !p3)) +# CHECK: p1=and(p2,or(p3,!p3)) 0x01 0xc0 0xc2 0x6b -# CHECK: p1 = not(p2) +# CHECK: p1=not(p2) 0xc1 0xc3 0xd2 0x6b -# CHECK: p1 = or(p2, and(p3, !p3)) +# CHECK: p1=or(p2,and(p3,!p3)) 0x01 0xc2 0xe3 0x6b -# CHECK: p1 = or(p2, !p3) +# CHECK: p1=or(p2,!p3) 0xc1 0xc3 0xf2 0x6b -# CHECK: p1 = or(p2, or(p3, !p3)) +# CHECK: p1=or(p2,or(p3,!p3)) # User control register transfer 0x0d 0xc0 0x35 0x62 -# CHECK: cs1 = r21 +# CHECK: cs1=r21 0x11 0xc0 0x0d 0x6a -# CHECK: r17 = cs1 +# CHECK: r17=cs1 Index: test/MC/Disassembler/Hexagon/j.txt =================================================================== --- test/MC/Disassembler/Hexagon/j.txt +++ test/MC/Disassembler/Hexagon/j.txt @@ -11,149 +11,149 @@ # Compare and jump 0x00 0xc0 0x89 0x11 -# CHECK: p0 = cmp.eq(r17,#-1); if (p0.new) jump:nt +# CHECK: p0=cmp.eq(r17,#-1); if (p0.new) jump:nt 0x00 0xc1 0x89 0x11 -# CHECK: p0 = cmp.gt(r17,#-1); if (p0.new) jump:nt +# CHECK: p0=cmp.gt(r17,#-1); if (p0.new) jump:nt 0x00 0xc3 0x89 0x11 -# CHECK: p0 = tstbit(r17, #0); if (p0.new) jump:nt +# CHECK: p0=tstbit(r17,#0); if (p0.new) jump:nt 0x00 0xe0 0x89 0x11 -# CHECK: p0 = cmp.eq(r17,#-1); if (p0.new) jump:t +# CHECK: p0=cmp.eq(r17,#-1); if (p0.new) jump:t 0x00 0xe1 0x89 0x11 -# CHECK: p0 = cmp.gt(r17,#-1); if (p0.new) jump:t +# CHECK: p0=cmp.gt(r17,#-1); if (p0.new) jump:t 0x00 0xe3 0x89 0x11 -# CHECK: p0 = tstbit(r17, #0); if (p0.new) jump:t +# CHECK: p0=tstbit(r17,#0); if (p0.new) jump:t 0x00 0xc0 0xc9 0x11 -# CHECK: p0 = cmp.eq(r17,#-1); if (!p0.new) jump:nt +# CHECK: p0=cmp.eq(r17,#-1); if (!p0.new) jump:nt 0x00 0xc1 0xc9 0x11 -# CHECK: p0 = cmp.gt(r17,#-1); if (!p0.new) jump:nt +# CHECK: p0=cmp.gt(r17,#-1); if (!p0.new) jump:nt 0x00 0xc3 0xc9 0x11 -# CHECK: p0 = tstbit(r17, #0); if (!p0.new) jump:nt +# CHECK: p0=tstbit(r17,#0); if (!p0.new) jump:nt 0x00 0xe0 0xc9 0x11 -# CHECK: p0 = cmp.eq(r17,#-1); if (!p0.new) jump:t +# CHECK: p0=cmp.eq(r17,#-1); if (!p0.new) jump:t 0x00 0xe1 0xc9 0x11 -# CHECK: p0 = cmp.gt(r17,#-1); if (!p0.new) jump:t +# CHECK: p0=cmp.gt(r17,#-1); if (!p0.new) jump:t 0x00 0xe3 0xc9 0x11 -# CHECK: p0 = tstbit(r17, #0); if (!p0.new) jump:t +# CHECK: p0=tstbit(r17,#0); if (!p0.new) jump:t 0x00 0xd5 0x09 0x10 -# CHECK: p0 = cmp.eq(r17, #21); if (p0.new) jump:nt +# CHECK: p0=cmp.eq(r17,#21); if (p0.new) jump:nt 0x00 0xf5 0x09 0x10 -# CHECK: p0 = cmp.eq(r17, #21); if (p0.new) jump:t +# CHECK: p0=cmp.eq(r17,#21); if (p0.new) jump:t 0x00 0xd5 0x49 
0x10 -# CHECK: p0 = cmp.eq(r17, #21); if (!p0.new) jump:nt +# CHECK: p0=cmp.eq(r17,#21); if (!p0.new) jump:nt 0x00 0xf5 0x49 0x10 -# CHECK: p0 = cmp.eq(r17, #21); if (!p0.new) jump:t +# CHECK: p0=cmp.eq(r17,#21); if (!p0.new) jump:t 0x00 0xd5 0x89 0x10 -# CHECK: p0 = cmp.gt(r17, #21); if (p0.new) jump:nt +# CHECK: p0=cmp.gt(r17,#21); if (p0.new) jump:nt 0x00 0xf5 0x89 0x10 -# CHECK: p0 = cmp.gt(r17, #21); if (p0.new) jump:t +# CHECK: p0=cmp.gt(r17,#21); if (p0.new) jump:t 0x00 0xd5 0xc9 0x10 -# CHECK: p0 = cmp.gt(r17, #21); if (!p0.new) jump:nt +# CHECK: p0=cmp.gt(r17,#21); if (!p0.new) jump:nt 0x00 0xf5 0xc9 0x10 -# CHECK: p0 = cmp.gt(r17, #21); if (!p0.new) jump:t +# CHECK: p0=cmp.gt(r17,#21); if (!p0.new) jump:t 0x00 0xd5 0x09 0x11 -# CHECK: p0 = cmp.gtu(r17, #21); if (p0.new) jump:nt +# CHECK: p0=cmp.gtu(r17,#21); if (p0.new) jump:nt 0x00 0xf5 0x09 0x11 -# CHECK: p0 = cmp.gtu(r17, #21); if (p0.new) jump:t +# CHECK: p0=cmp.gtu(r17,#21); if (p0.new) jump:t 0x00 0xd5 0x49 0x11 -# CHECK: p0 = cmp.gtu(r17, #21); if (!p0.new) jump:nt +# CHECK: p0=cmp.gtu(r17,#21); if (!p0.new) jump:nt 0x00 0xf5 0x49 0x11 -# CHECK: p0 = cmp.gtu(r17, #21); if (!p0.new) jump:t +# CHECK: p0=cmp.gtu(r17,#21); if (!p0.new) jump:t 0x00 0xc0 0x89 0x13 -# CHECK: p1 = cmp.eq(r17,#-1); if (p1.new) jump:nt +# CHECK: p1=cmp.eq(r17,#-1); if (p1.new) jump:nt 0x00 0xc1 0x89 0x13 -# CHECK: p1 = cmp.gt(r17,#-1); if (p1.new) jump:nt +# CHECK: p1=cmp.gt(r17,#-1); if (p1.new) jump:nt 0x00 0xc3 0x89 0x13 -# CHECK: p1 = tstbit(r17, #0); if (p1.new) jump:nt +# CHECK: p1=tstbit(r17,#0); if (p1.new) jump:nt 0x00 0xe0 0x89 0x13 -# CHECK: p1 = cmp.eq(r17,#-1); if (p1.new) jump:t +# CHECK: p1=cmp.eq(r17,#-1); if (p1.new) jump:t 0x00 0xe1 0x89 0x13 -# CHECK: p1 = cmp.gt(r17,#-1); if (p1.new) jump:t +# CHECK: p1=cmp.gt(r17,#-1); if (p1.new) jump:t 0x00 0xe3 0x89 0x13 -# CHECK: p1 = tstbit(r17, #0); if (p1.new) jump:t +# CHECK: p1=tstbit(r17,#0); if (p1.new) jump:t 0x00 0xc0 0xc9 0x13 -# CHECK: p1 = cmp.eq(r17,#-1); if (!p1.new) jump:nt +# CHECK: p1=cmp.eq(r17,#-1); if (!p1.new) jump:nt 0x00 0xc1 0xc9 0x13 -# CHECK: p1 = cmp.gt(r17,#-1); if (!p1.new) jump:nt +# CHECK: p1=cmp.gt(r17,#-1); if (!p1.new) jump:nt 0x00 0xc3 0xc9 0x13 -# CHECK: p1 = tstbit(r17, #0); if (!p1.new) jump:nt +# CHECK: p1=tstbit(r17,#0); if (!p1.new) jump:nt 0x00 0xe0 0xc9 0x13 -# CHECK: p1 = cmp.eq(r17,#-1); if (!p1.new) jump:t +# CHECK: p1=cmp.eq(r17,#-1); if (!p1.new) jump:t 0x00 0xe1 0xc9 0x13 -# CHECK: p1 = cmp.gt(r17,#-1); if (!p1.new) jump:t +# CHECK: p1=cmp.gt(r17,#-1); if (!p1.new) jump:t 0x00 0xe3 0xc9 0x13 -# CHECK: p1 = tstbit(r17, #0); if (!p1.new) jump:t +# CHECK: p1=tstbit(r17,#0); if (!p1.new) jump:t 0x00 0xd5 0x09 0x12 -# CHECK: p1 = cmp.eq(r17, #21); if (p1.new) jump:nt +# CHECK: p1=cmp.eq(r17,#21); if (p1.new) jump:nt 0x00 0xf5 0x09 0x12 -# CHECK: p1 = cmp.eq(r17, #21); if (p1.new) jump:t +# CHECK: p1=cmp.eq(r17,#21); if (p1.new) jump:t 0x00 0xd5 0x49 0x12 -# CHECK: p1 = cmp.eq(r17, #21); if (!p1.new) jump:nt +# CHECK: p1=cmp.eq(r17,#21); if (!p1.new) jump:nt 0x00 0xf5 0x49 0x12 -# CHECK: p1 = cmp.eq(r17, #21); if (!p1.new) jump:t +# CHECK: p1=cmp.eq(r17,#21); if (!p1.new) jump:t 0x00 0xd5 0x89 0x12 -# CHECK: p1 = cmp.gt(r17, #21); if (p1.new) jump:nt +# CHECK: p1=cmp.gt(r17,#21); if (p1.new) jump:nt 0x00 0xf5 0x89 0x12 -# CHECK: p1 = cmp.gt(r17, #21); if (p1.new) jump:t +# CHECK: p1=cmp.gt(r17,#21); if (p1.new) jump:t 0x00 0xd5 0xc9 0x12 -# CHECK: p1 = cmp.gt(r17, #21); if (!p1.new) jump:nt +# CHECK: p1=cmp.gt(r17,#21); if (!p1.new) jump:nt 0x00 
0xf5 0xc9 0x12 -# CHECK: p1 = cmp.gt(r17, #21); if (!p1.new) jump:t +# CHECK: p1=cmp.gt(r17,#21); if (!p1.new) jump:t 0x00 0xd5 0x09 0x13 -# CHECK: p1 = cmp.gtu(r17, #21); if (p1.new) jump:nt +# CHECK: p1=cmp.gtu(r17,#21); if (p1.new) jump:nt 0x00 0xf5 0x09 0x13 -# CHECK: p1 = cmp.gtu(r17, #21); if (p1.new) jump:t +# CHECK: p1=cmp.gtu(r17,#21); if (p1.new) jump:t 0x00 0xd5 0x49 0x13 -# CHECK: p1 = cmp.gtu(r17, #21); if (!p1.new) jump:nt +# CHECK: p1=cmp.gtu(r17,#21); if (!p1.new) jump:nt 0x00 0xf5 0x49 0x13 -# CHECK: p1 = cmp.gtu(r17, #21); if (!p1.new) jump:t +# CHECK: p1=cmp.gtu(r17,#21); if (!p1.new) jump:t 0x00 0xcd 0x09 0x14 -# CHECK: p0 = cmp.eq(r17, r21); if (p0.new) jump:nt +# CHECK: p0=cmp.eq(r17,r21); if (p0.new) jump:nt 0x00 0xdd 0x09 0x14 -# CHECK: p1 = cmp.eq(r17, r21); if (p1.new) jump:nt +# CHECK: p1=cmp.eq(r17,r21); if (p1.new) jump:nt 0x00 0xed 0x09 0x14 -# CHECK: p0 = cmp.eq(r17, r21); if (p0.new) jump:t +# CHECK: p0=cmp.eq(r17,r21); if (p0.new) jump:t 0x00 0xfd 0x09 0x14 -# CHECK: p1 = cmp.eq(r17, r21); if (p1.new) jump:t +# CHECK: p1=cmp.eq(r17,r21); if (p1.new) jump:t 0x00 0xcd 0x49 0x14 -# CHECK: p0 = cmp.eq(r17, r21); if (!p0.new) jump:nt +# CHECK: p0=cmp.eq(r17,r21); if (!p0.new) jump:nt 0x00 0xdd 0x49 0x14 -# CHECK: p1 = cmp.eq(r17, r21); if (!p1.new) jump:nt +# CHECK: p1=cmp.eq(r17,r21); if (!p1.new) jump:nt 0x00 0xed 0x49 0x14 -# CHECK: p0 = cmp.eq(r17, r21); if (!p0.new) jump:t +# CHECK: p0=cmp.eq(r17,r21); if (!p0.new) jump:t 0x00 0xfd 0x49 0x14 -# CHECK: p1 = cmp.eq(r17, r21); if (!p1.new) jump:t +# CHECK: p1=cmp.eq(r17,r21); if (!p1.new) jump:t 0x00 0xcd 0x89 0x14 -# CHECK: p0 = cmp.gt(r17, r21); if (p0.new) jump:nt +# CHECK: p0=cmp.gt(r17,r21); if (p0.new) jump:nt 0x00 0xdd 0x89 0x14 -# CHECK: p1 = cmp.gt(r17, r21); if (p1.new) jump:nt +# CHECK: p1=cmp.gt(r17,r21); if (p1.new) jump:nt 0x00 0xed 0x89 0x14 -# CHECK: p0 = cmp.gt(r17, r21); if (p0.new) jump:t +# CHECK: p0=cmp.gt(r17,r21); if (p0.new) jump:t 0x00 0xfd 0x89 0x14 -# CHECK: p1 = cmp.gt(r17, r21); if (p1.new) jump:t +# CHECK: p1=cmp.gt(r17,r21); if (p1.new) jump:t 0x00 0xcd 0xc9 0x14 -# CHECK: p0 = cmp.gt(r17, r21); if (!p0.new) jump:nt +# CHECK: p0=cmp.gt(r17,r21); if (!p0.new) jump:nt 0x00 0xdd 0xc9 0x14 -# CHECK: p1 = cmp.gt(r17, r21); if (!p1.new) jump:nt +# CHECK: p1=cmp.gt(r17,r21); if (!p1.new) jump:nt 0x00 0xed 0xc9 0x14 -# CHECK: p0 = cmp.gt(r17, r21); if (!p0.new) jump:t +# CHECK: p0=cmp.gt(r17,r21); if (!p0.new) jump:t 0x00 0xfd 0xc9 0x14 -# CHECK: p1 = cmp.gt(r17, r21); if (!p1.new) jump:t +# CHECK: p1=cmp.gt(r17,r21); if (!p1.new) jump:t 0x00 0xcd 0x09 0x15 -# CHECK: p0 = cmp.gtu(r17, r21); if (p0.new) jump:nt +# CHECK: p0=cmp.gtu(r17,r21); if (p0.new) jump:nt 0x00 0xdd 0x09 0x15 -# CHECK: p1 = cmp.gtu(r17, r21); if (p1.new) jump:nt +# CHECK: p1=cmp.gtu(r17,r21); if (p1.new) jump:nt 0x00 0xed 0x09 0x15 -# CHECK: p0 = cmp.gtu(r17, r21); if (p0.new) jump:t +# CHECK: p0=cmp.gtu(r17,r21); if (p0.new) jump:t 0x00 0xfd 0x09 0x15 -# CHECK: p1 = cmp.gtu(r17, r21); if (p1.new) jump:t +# CHECK: p1=cmp.gtu(r17,r21); if (p1.new) jump:t 0x00 0xcd 0x49 0x15 -# CHECK: p0 = cmp.gtu(r17, r21); if (!p0.new) jump:nt +# CHECK: p0=cmp.gtu(r17,r21); if (!p0.new) jump:nt 0x00 0xdd 0x49 0x15 -# CHECK: p1 = cmp.gtu(r17, r21); if (!p1.new) jump:nt +# CHECK: p1=cmp.gtu(r17,r21); if (!p1.new) jump:nt 0x00 0xed 0x49 0x15 -# CHECK: p0 = cmp.gtu(r17, r21); if (!p0.new) jump:t +# CHECK: p0=cmp.gtu(r17,r21); if (!p0.new) jump:t 0x00 0xfd 0x49 0x15 -# CHECK: p1 = cmp.gtu(r17, r21); if (!p1.new) jump:t +# CHECK: 
p1=cmp.gtu(r17,r21); if (!p1.new) jump:t # Jump to address 0x22 0xc0 0x00 0x58 @@ -165,16 +165,16 @@ # Jump to address conditioned on new predicate 0x03 0x40 0x45 0x85 0x00 0xcb 0x00 0x5c -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (p3.new) jump:nt 0x03 0x40 0x45 0x85 0x00 0xdb 0x00 0x5c -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (p3.new) jump:t 0x03 0x40 0x45 0x85 0x00 0xcb 0x20 0x5c -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (!p3.new) jump:nt 0x03 0x40 0x45 0x85 0x00 0xdb 0x20 0x5c -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (!p3.new) jump:t # Jump to address conditioned on register value @@ -197,6 +197,6 @@ # Transfer and jump 0x00 0xd5 0x09 0x16 -# CHECK: r17 = #21 ; jump +# CHECK: r17=#21 ; jump 0x00 0xc9 0x0d 0x17 -# CHECK: r17 = r21 ; jump +# CHECK: r17=r21 ; jump Index: test/MC/Disassembler/Hexagon/jr.txt =================================================================== --- test/MC/Disassembler/Hexagon/jr.txt +++ test/MC/Disassembler/Hexagon/jr.txt @@ -19,16 +19,16 @@ 0x00 0xc1 0x55 0x53 # CHECK: if (p1) jumpr:nt r21 0x03 0x40 0x45 0x85 0x00 0xcb 0x55 0x53 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (p3.new) jumpr:nt r21 0x03 0x40 0x45 0x85 0x00 0xdb 0x55 0x53 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (p3.new) jumpr:t r21 0x00 0xc3 0x75 0x53 # CHECK: if (!p3) jumpr:nt r21 0x03 0x40 0x45 0x85 0x00 0xcb 0x75 0x53 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (!p3.new) jumpr:nt r21 0x03 0x40 0x45 0x85 0x00 0xdb 0x75 0x53 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (!p3.new) jumpr:t r21 Index: test/MC/Disassembler/Hexagon/ld.txt =================================================================== --- test/MC/Disassembler/Hexagon/ld.txt +++ test/MC/Disassembler/Hexagon/ld.txt @@ -3,343 +3,343 @@ # Load doubleword 0x90 0xff 0xd5 0x3a -# CHECK: r17:16 = memd(r21 + r31<<#3) +# CHECK: r17:16=memd(r21+r31<<#3) 0xb0 0xc2 0xc0 0x49 -# CHECK: r17:16 = memd(#168) +# CHECK: r17:16=memd(#168) 0x02 0x40 0x00 0x00 0x10 0xc5 0xc0 0x49 -# CHECK: r17:16 = memd(##168) +# CHECK: r17:16=memd(##168) 0xd0 0xc0 0xd5 0x91 -# CHECK: r17:16 = memd(r21 + #48) +# CHECK: r17:16=memd(r21+#48) 0xb0 0xe0 0xd5 0x99 -# CHECK: r17:16 = memd(r21 ++ #40:circ(m1)) +# CHECK: r17:16=memd(r21++#40:circ(m1)) 0x10 0xe2 0xd5 0x99 -# CHECK: r17:16 = memd(r21 ++ I:circ(m1)) +# CHECK: r17:16=memd(r21++I:circ(m1)) 0x00 0x40 0x00 0x00 0x70 0xd7 0xd5 0x9b -# CHECK: r17:16 = memd(r21 = ##31) +# CHECK: r17:16=memd(r21=##31) 0xb0 0xc0 0xd5 0x9b -# CHECK: r17:16 = memd(r21++#40) +# CHECK: r17:16=memd(r21++#40) 0x10 0xe0 0xd5 0x9d -# CHECK: r17:16 = memd(r21++m1) +# CHECK: r17:16=memd(r21++m1) 0x10 0xe0 0xd5 0x9f -# CHECK: r17:16 = memd(r21 ++ m1:brev) +# CHECK: r17:16=memd(r21++m1:brev) # Load doubleword conditionally 0xf0 0xff 0xd5 0x30 -# CHECK: if (p3) r17:16 = memd(r21+r31<<#3) +# CHECK: if (p3) r17:16=memd(r21+r31<<#3) 0xf0 0xff 0xd5 0x31 -# CHECK: if (!p3) r17:16 = memd(r21+r31<<#3) +# CHECK: if (!p3) r17:16=memd(r21+r31<<#3) 0x03 0x40 0x45 0x85 0xf0 0xff 0xd5 0x32 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17:16 = memd(r21+r31<<#3) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17:16=memd(r21+r31<<#3) 0x03 0x40 0x45 0x85 0xf0 0xff 0xd5 0x33 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17:16 = memd(r21+r31<<#3) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17:16=memd(r21+r31<<#3) 0x70 0xd8 0xd5 0x41 -# CHECK: if (p3) r17:16 = memd(r21 + #24) +# CHECK: if (p3) r17:16=memd(r21+#24) 0x03 0x40 0x45 0x85 0x70 0xd8 0xd5 0x43 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17:16 = 
memd(r21 + #24) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17:16=memd(r21+#24) 0x70 0xd8 0xd5 0x45 -# CHECK: if (!p3) r17:16 = memd(r21 + #24) +# CHECK: if (!p3) r17:16=memd(r21+#24) 0x03 0x40 0x45 0x85 0x70 0xd8 0xd5 0x47 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17:16 = memd(r21 + #24) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17:16=memd(r21+#24) 0xb0 0xe6 0xd5 0x9b -# CHECK: if (p3) r17:16 = memd(r21++#40) +# CHECK: if (p3) r17:16=memd(r21++#40) 0xb0 0xee 0xd5 0x9b -# CHECK: if (!p3) r17:16 = memd(r21++#40) +# CHECK: if (!p3) r17:16=memd(r21++#40) 0x03 0x40 0x45 0x85 0xb0 0xf6 0xd5 0x9b -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17:16 = memd(r21++#40) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17:16=memd(r21++#40) 0x03 0x40 0x45 0x85 0xb0 0xfe 0xd5 0x9b -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17:16 = memd(r21++#40) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17:16=memd(r21++#40) # Load byte 0x91 0xff 0x15 0x3a -# CHECK: r17 = memb(r21 + r31<<#3) +# CHECK: r17=memb(r21+r31<<#3) 0xb1 0xc2 0x00 0x49 -# CHECK: r17 = memb(#21) +# CHECK: r17=memb(#21) 0x00 0x40 0x00 0x00 0xb1 0xc2 0x00 0x49 -# CHECK: r17 = memb(##21) +# CHECK: r17=memb(##21) 0xf1 0xc3 0x15 0x91 -# CHECK: r17 = memb(r21 + #31) +# CHECK: r17=memb(r21+#31) 0xb1 0xe0 0x15 0x99 -# CHECK: r17 = memb(r21 ++ #5:circ(m1)) +# CHECK: r17=memb(r21++#5:circ(m1)) 0x11 0xe2 0x15 0x99 -# CHECK: r17 = memb(r21 ++ I:circ(m1)) +# CHECK: r17=memb(r21++I:circ(m1)) 0x00 0x40 0x00 0x00 0x71 0xd7 0x15 0x9b -# CHECK: r17 = memb(r21 = ##31) +# CHECK: r17=memb(r21=##31) 0xb1 0xc0 0x15 0x9b -# CHECK: r17 = memb(r21++#5) +# CHECK: r17=memb(r21++#5) 0x11 0xe0 0x15 0x9d -# CHECK: r17 = memb(r21++m1) +# CHECK: r17=memb(r21++m1) 0x11 0xe0 0x15 0x9f -# CHECK: r17 = memb(r21 ++ m1:brev) +# CHECK: r17=memb(r21++m1:brev) # Load byte conditionally 0xf1 0xff 0x15 0x30 -# CHECK: if (p3) r17 = memb(r21+r31<<#3) +# CHECK: if (p3) r17=memb(r21+r31<<#3) 0xf1 0xff 0x15 0x31 -# CHECK: if (!p3) r17 = memb(r21+r31<<#3) +# CHECK: if (!p3) r17=memb(r21+r31<<#3) 0x03 0x40 0x45 0x85 0xf1 0xff 0x15 0x32 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memb(r21+r31<<#3) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memb(r21+r31<<#3) 0x03 0x40 0x45 0x85 0xf1 0xff 0x15 0x33 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memb(r21+r31<<#3) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memb(r21+r31<<#3) 0x91 0xdd 0x15 0x41 -# CHECK: if (p3) r17 = memb(r21 + #44) +# CHECK: if (p3) r17=memb(r21+#44) 0x03 0x40 0x45 0x85 0x91 0xdd 0x15 0x43 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memb(r21 + #44) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memb(r21+#44) 0x91 0xdd 0x15 0x45 -# CHECK: if (!p3) r17 = memb(r21 + #44) +# CHECK: if (!p3) r17=memb(r21+#44) 0x03 0x40 0x45 0x85 0x91 0xdd 0x15 0x47 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memb(r21 + #44) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memb(r21+#44) 0xb1 0xe6 0x15 0x9b -# CHECK: if (p3) r17 = memb(r21++#5) +# CHECK: if (p3) r17=memb(r21++#5) 0xb1 0xee 0x15 0x9b -# CHECK: if (!p3) r17 = memb(r21++#5) +# CHECK: if (!p3) r17=memb(r21++#5) 0x03 0x40 0x45 0x85 0xb1 0xf6 0x15 0x9b -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memb(r21++#5) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memb(r21++#5) 0x03 0x40 0x45 0x85 0xb1 0xfe 0x15 0x9b -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memb(r21++#5) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memb(r21++#5) # Load byte into shifted vector 0xf0 0xc3 0x95 0x90 -# CHECK: r17:16 = memb_fifo(r21 + #31) +# CHECK: 
r17:16=memb_fifo(r21+#31) 0xb0 0xe0 0x95 0x98 -# CHECK: r17:16 = memb_fifo(r21 ++ #5:circ(m1)) +# CHECK: r17:16=memb_fifo(r21++#5:circ(m1)) 0x10 0xe2 0x95 0x98 -# CHECK: r17:16 = memb_fifo(r21 ++ I:circ(m1)) +# CHECK: r17:16=memb_fifo(r21++I:circ(m1)) # Load half into shifted vector 0xf0 0xc3 0x55 0x90 -# CHECK: r17:16 = memh_fifo(r21 + #62) +# CHECK: r17:16=memh_fifo(r21+#62) 0xb0 0xe0 0x55 0x98 -# CHECK: r17:16 = memh_fifo(r21 ++ #10:circ(m1)) +# CHECK: r17:16=memh_fifo(r21++#10:circ(m1)) 0x10 0xe2 0x55 0x98 -# CHECK: r17:16 = memh_fifo(r21 ++ I:circ(m1)) +# CHECK: r17:16=memh_fifo(r21++I:circ(m1)) # Load halfword 0x91 0xff 0x55 0x3a -# CHECK: r17 = memh(r21 + r31<<#3) +# CHECK: r17=memh(r21+r31<<#3) 0xb1 0xc2 0x40 0x49 -# CHECK: r17 = memh(#42) +# CHECK: r17=memh(#42) 0x00 0x40 0x00 0x00 0x51 0xc5 0x40 0x49 -# CHECK: r17 = memh(##42) +# CHECK: r17=memh(##42) 0xf1 0xc3 0x55 0x91 -# CHECK: r17 = memh(r21 + #62) +# CHECK: r17=memh(r21+#62) 0xb1 0xe0 0x55 0x99 -# CHECK: r17 = memh(r21 ++ #10:circ(m1)) +# CHECK: r17=memh(r21++#10:circ(m1)) 0x11 0xe2 0x55 0x99 -# CHECK: r17 = memh(r21 ++ I:circ(m1)) +# CHECK: r17=memh(r21++I:circ(m1)) 0x00 0x40 0x00 0x00 0x71 0xd7 0x55 0x9b -# CHECK: r17 = memh(r21 = ##31) +# CHECK: r17=memh(r21=##31) 0xb1 0xc0 0x55 0x9b -# CHECK: r17 = memh(r21++#10) +# CHECK: r17=memh(r21++#10) 0x11 0xe0 0x55 0x9d -# CHECK: r17 = memh(r21++m1) +# CHECK: r17=memh(r21++m1) 0x11 0xe0 0x55 0x9f -# CHECK: r17 = memh(r21 ++ m1:brev) +# CHECK: r17=memh(r21++m1:brev) # Load halfword conditionally 0xf1 0xff 0x55 0x30 -# CHECK: if (p3) r17 = memh(r21+r31<<#3) +# CHECK: if (p3) r17=memh(r21+r31<<#3) 0xf1 0xff 0x55 0x31 -# CHECK: if (!p3) r17 = memh(r21+r31<<#3) +# CHECK: if (!p3) r17=memh(r21+r31<<#3) 0x03 0x40 0x45 0x85 0xf1 0xff 0x55 0x32 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memh(r21+r31<<#3) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memh(r21+r31<<#3) 0x03 0x40 0x45 0x85 0xf1 0xff 0x55 0x33 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memh(r21+r31<<#3) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memh(r21+r31<<#3) 0xb1 0xe6 0x55 0x9b -# CHECK: if (p3) r17 = memh(r21++#10) +# CHECK: if (p3) r17=memh(r21++#10) 0xb1 0xee 0x55 0x9b -# CHECK: if (!p3) r17 = memh(r21++#10) +# CHECK: if (!p3) r17=memh(r21++#10) 0x03 0x40 0x45 0x85 0xb1 0xf6 0x55 0x9b -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memh(r21++#10) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memh(r21++#10) 0x03 0x40 0x45 0x85 0xb1 0xfe 0x55 0x9b -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memh(r21++#10) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memh(r21++#10) 0xf1 0xdb 0x55 0x41 -# CHECK: if (p3) r17 = memh(r21 + #62) +# CHECK: if (p3) r17=memh(r21+#62) 0xf1 0xdb 0x55 0x45 -# CHECK: if (!p3) r17 = memh(r21 + #62) +# CHECK: if (!p3) r17=memh(r21+#62) 0x03 0x40 0x45 0x85 0xf1 0xdb 0x55 0x43 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memh(r21 + #62) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memh(r21+#62) 0x03 0x40 0x45 0x85 0xf1 0xdb 0x55 0x47 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memh(r21 + #62) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memh(r21+#62) # Load unsigned byte 0x91 0xff 0x35 0x3a -# CHECK: r17 = memub(r21 + r31<<#3) +# CHECK: r17=memub(r21+r31<<#3) 0xb1 0xc2 0x20 0x49 -# CHECK: r17 = memub(#21) +# CHECK: r17=memub(#21) 0x00 0x40 0x00 0x00 0xb1 0xc2 0x20 0x49 -# CHECK: r17 = memub(##21) +# CHECK: r17=memub(##21) 0xf1 0xc3 0x35 0x91 -# CHECK: r17 = memub(r21 + #31) +# CHECK: r17=memub(r21+#31) 0xb1 0xe0 0x35 0x99 -# CHECK: r17 = memub(r21 
++ #5:circ(m1)) +# CHECK: r17=memub(r21++#5:circ(m1)) 0x11 0xe2 0x35 0x99 -# CHECK: r17 = memub(r21 ++ I:circ(m1)) +# CHECK: r17=memub(r21++I:circ(m1)) 0x00 0x40 0x00 0x00 0x71 0xd7 0x35 0x9b -# CHECK: r17 = memub(r21 = ##31) +# CHECK: r17=memub(r21=##31) 0xb1 0xc0 0x35 0x9b -# CHECK: r17 = memub(r21++#5) +# CHECK: r17=memub(r21++#5) 0x11 0xe0 0x35 0x9d -# CHECK: r17 = memub(r21++m1) +# CHECK: r17=memub(r21++m1) 0x11 0xe0 0x35 0x9f -# CHECK: r17 = memub(r21 ++ m1:brev) +# CHECK: r17=memub(r21++m1:brev) # Load unsigned byte conditionally 0xf1 0xff 0x35 0x30 -# CHECK: if (p3) r17 = memub(r21+r31<<#3) +# CHECK: if (p3) r17=memub(r21+r31<<#3) 0xf1 0xff 0x35 0x31 -# CHECK: if (!p3) r17 = memub(r21+r31<<#3) +# CHECK: if (!p3) r17=memub(r21+r31<<#3) 0x03 0x40 0x45 0x85 0xf1 0xff 0x35 0x32 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memub(r21+r31<<#3) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memub(r21+r31<<#3) 0x03 0x40 0x45 0x85 0xf1 0xff 0x35 0x33 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memub(r21+r31<<#3) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memub(r21+r31<<#3) 0xf1 0xdb 0x35 0x41 -# CHECK: if (p3) r17 = memub(r21 + #31) +# CHECK: if (p3) r17=memub(r21+#31) 0x03 0x40 0x45 0x85 0xf1 0xdb 0x35 0x43 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memub(r21 + #31) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memub(r21+#31) 0xf1 0xdb 0x35 0x45 -# CHECK: if (!p3) r17 = memub(r21 + #31) +# CHECK: if (!p3) r17=memub(r21+#31) 0x03 0x40 0x45 0x85 0xf1 0xdb 0x35 0x47 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memub(r21 + #31) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memub(r21+#31) 0xb1 0xe6 0x35 0x9b -# CHECK: if (p3) r17 = memub(r21++#5) +# CHECK: if (p3) r17=memub(r21++#5) 0xb1 0xee 0x35 0x9b -# CHECK: if (!p3) r17 = memub(r21++#5) +# CHECK: if (!p3) r17=memub(r21++#5) 0x03 0x40 0x45 0x85 0xb1 0xf6 0x35 0x9b -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memub(r21++#5) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memub(r21++#5) 0x03 0x40 0x45 0x85 0xb1 0xfe 0x35 0x9b -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memub(r21++#5) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memub(r21++#5) # Load unsigned halfword 0x91 0xff 0x75 0x3a -# CHECK: r17 = memuh(r21 + r31<<#3) +# CHECK: r17=memuh(r21+r31<<#3) 0xb1 0xc2 0x60 0x49 -# CHECK: r17 = memuh(#42) +# CHECK: r17=memuh(#42) 0x00 0x40 0x00 0x00 0x51 0xc5 0x60 0x49 -# CHECK: r17 = memuh(##42) +# CHECK: r17=memuh(##42) 0xb1 0xc2 0x75 0x91 -# CHECK: r17 = memuh(r21 + #42) +# CHECK: r17=memuh(r21+#42) 0xb1 0xe0 0x75 0x99 -# CHECK: r17 = memuh(r21 ++ #10:circ(m1)) +# CHECK: r17=memuh(r21++#10:circ(m1)) 0x11 0xe2 0x75 0x99 -# CHECK: r17 = memuh(r21 ++ I:circ(m1)) +# CHECK: r17=memuh(r21++I:circ(m1)) 0x00 0x40 0x00 0x00 0x71 0xd7 0x75 0x9b -# CHECK: r17 = memuh(r21 = ##31) +# CHECK: r17=memuh(r21=##31) 0xb1 0xc0 0x75 0x9b -# CHECK: r17 = memuh(r21++#10) +# CHECK: r17=memuh(r21++#10) 0x11 0xe0 0x75 0x9d -# CHECK: r17 = memuh(r21++m1) +# CHECK: r17=memuh(r21++m1) 0x11 0xe0 0x75 0x9f -# CHECK: r17 = memuh(r21 ++ m1:brev) +# CHECK: r17=memuh(r21++m1:brev) # Load unsigned halfword conditionally 0xf1 0xff 0x75 0x30 -# CHECK: if (p3) r17 = memuh(r21+r31<<#3) +# CHECK: if (p3) r17=memuh(r21+r31<<#3) 0xf1 0xff 0x75 0x31 -# CHECK: if (!p3) r17 = memuh(r21+r31<<#3) +# CHECK: if (!p3) r17=memuh(r21+r31<<#3) 0x03 0x40 0x45 0x85 0xf1 0xff 0x75 0x32 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memuh(r21+r31<<#3) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memuh(r21+r31<<#3) 0x03 0x40 0x45 0x85 0xf1 0xff 
0x75 0x33 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memuh(r21+r31<<#3) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memuh(r21+r31<<#3) 0xb1 0xda 0x75 0x41 -# CHECK: if (p3) r17 = memuh(r21 + #42) +# CHECK: if (p3) r17=memuh(r21+#42) 0xb1 0xda 0x75 0x45 -# CHECK: if (!p3) r17 = memuh(r21 + #42) +# CHECK: if (!p3) r17=memuh(r21+#42) 0x03 0x40 0x45 0x85 0xb1 0xda 0x75 0x43 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memuh(r21 + #42) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memuh(r21+#42) 0x03 0x40 0x45 0x85 0xb1 0xda 0x75 0x47 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memuh(r21 + #42) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memuh(r21+#42) 0xb1 0xe6 0x75 0x9b -# CHECK: if (p3) r17 = memuh(r21++#10) +# CHECK: if (p3) r17=memuh(r21++#10) 0xb1 0xee 0x75 0x9b -# CHECK: if (!p3) r17 = memuh(r21++#10) +# CHECK: if (!p3) r17=memuh(r21++#10) 0x03 0x40 0x45 0x85 0xb1 0xf6 0x75 0x9b -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memuh(r21++#10) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memuh(r21++#10) 0x03 0x40 0x45 0x85 0xb1 0xfe 0x75 0x9b -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memuh(r21++#10) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memuh(r21++#10) # Load word 0x91 0xff 0x95 0x3a -# CHECK: r17 = memw(r21 + r31<<#3) +# CHECK: r17=memw(r21+r31<<#3) 0xb1 0xc2 0x80 0x49 -# CHECK: r17 = memw(#84) +# CHECK: r17=memw(#84) 0x01 0x40 0x00 0x00 0x91 0xc2 0x80 0x49 -# CHECK: r17 = memw(##84) +# CHECK: r17=memw(##84) 0xb1 0xc2 0x95 0x91 -# CHECK: r17 = memw(r21 + #84) +# CHECK: r17=memw(r21+#84) 0xb1 0xe0 0x95 0x99 -# CHECK: r17 = memw(r21 ++ #20:circ(m1)) +# CHECK: r17=memw(r21++#20:circ(m1)) 0x11 0xe2 0x95 0x99 -# CHECK: r17 = memw(r21 ++ I:circ(m1)) +# CHECK: r17=memw(r21++I:circ(m1)) 0x00 0x40 0x00 0x00 0x71 0xd7 0x95 0x9b -# CHECK: r17 = memw(r21 = ##31) +# CHECK: r17=memw(r21=##31) 0xb1 0xc0 0x95 0x9b -# CHECK: r17 = memw(r21++#20) +# CHECK: r17=memw(r21++#20) 0x11 0xe0 0x95 0x9d -# CHECK: r17 = memw(r21++m1) +# CHECK: r17=memw(r21++m1) 0x11 0xe0 0x95 0x9f -# CHECK: r17 = memw(r21 ++ m1:brev) +# CHECK: r17=memw(r21++m1:brev) # Load word conditionally 0xf1 0xff 0x95 0x30 -# CHECK: if (p3) r17 = memw(r21+r31<<#3) +# CHECK: if (p3) r17=memw(r21+r31<<#3) 0xf1 0xff 0x95 0x31 -# CHECK: if (!p3) r17 = memw(r21+r31<<#3) +# CHECK: if (!p3) r17=memw(r21+r31<<#3) 0x03 0x40 0x45 0x85 0xf1 0xff 0x95 0x32 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memw(r21+r31<<#3) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memw(r21+r31<<#3) 0x03 0x40 0x45 0x85 0xf1 0xff 0x95 0x33 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memw(r21+r31<<#3) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memw(r21+r31<<#3) 0xb1 0xda 0x95 0x41 -# CHECK: if (p3) r17 = memw(r21 + #84) +# CHECK: if (p3) r17=memw(r21+#84) 0xb1 0xda 0x95 0x45 -# CHECK: if (!p3) r17 = memw(r21 + #84) +# CHECK: if (!p3) r17=memw(r21+#84) 0x03 0x40 0x45 0x85 0xb1 0xda 0x95 0x43 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memw(r21 + #84) +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) r17=memw(r21+#84) 0x03 0x40 0x45 0x85 0xb1 0xda 0x95 0x47 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memw(r21 + #84) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memw(r21+#84) 0xb1 0xe6 0x95 0x9b -# CHECK: if (p3) r17 = memw(r21++#20) +# CHECK: if (p3) r17=memw(r21++#20) 0xb1 0xee 0x95 0x9b -# CHECK: if (!p3) r17 = memw(r21++#20) +# CHECK: if (!p3) r17=memw(r21++#20) 0x03 0x40 0x45 0x85 0xb1 0xf6 0x95 0x9b -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) r17 = memw(r21++#20) +# CHECK: p3=r5 +# 
CHECK-NEXT: if (p3.new) r17=memw(r21++#20) 0x03 0x40 0x45 0x85 0xb1 0xfe 0x95 0x9b -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) r17 = memw(r21++#20) +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) r17=memw(r21++#20) # Deallocate stack frame 0x1e 0xc0 0x1e 0x90 @@ -349,90 +349,90 @@ 0x1e 0xc0 0x1e 0x96 # CHECK: dealloc_return 0x03 0x40 0x45 0x85 0x1e 0xcb 0x1e 0x96 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (p3.new) dealloc_return:nt 0x1e 0xd3 0x1e 0x96 # CHECK: if (p3) dealloc_return 0x03 0x40 0x45 0x85 0x1e 0xdb 0x1e 0x96 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (p3.new) dealloc_return:t 0x03 0x40 0x45 0x85 0x1e 0xeb 0x1e 0x96 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (!p3.new) dealloc_return:nt 0x1e 0xf3 0x1e 0x96 # CHECK: if (!p3) dealloc_return 0x03 0x40 0x45 0x85 0x1e 0xfb 0x1e 0x96 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (!p3.new) dealloc_return:t # Load and unpack bytes to halfwords 0xf1 0xc3 0x35 0x90 -# CHECK: r17 = membh(r21 + #62) +# CHECK: r17=membh(r21+#62) 0xf1 0xc3 0x75 0x90 -# CHECK: r17 = memubh(r21 + #62) +# CHECK: r17=memubh(r21+#62) 0xf0 0xc3 0xb5 0x90 -# CHECK: r17:16 = memubh(r21 + #124) +# CHECK: r17:16=memubh(r21+#124) 0xf0 0xc3 0xf5 0x90 -# CHECK: r17:16 = membh(r21 + #124) +# CHECK: r17:16=membh(r21+#124) 0xb1 0xe0 0x35 0x98 -# CHECK: r17 = membh(r21 ++ #10:circ(m1)) +# CHECK: r17=membh(r21++#10:circ(m1)) 0x11 0xe2 0x35 0x98 -# CHECK: r17 = membh(r21 ++ I:circ(m1)) +# CHECK: r17=membh(r21++I:circ(m1)) 0xb1 0xe0 0x75 0x98 -# CHECK: r17 = memubh(r21 ++ #10:circ(m1)) +# CHECK: r17=memubh(r21++#10:circ(m1)) 0x11 0xe2 0x75 0x98 -# CHECK: r17 = memubh(r21 ++ I:circ(m1)) +# CHECK: r17=memubh(r21++I:circ(m1)) 0xb0 0xe0 0xf5 0x98 -# CHECK: r17:16 = membh(r21 ++ #20:circ(m1)) +# CHECK: r17:16=membh(r21++#20:circ(m1)) 0x10 0xe2 0xf5 0x98 -# CHECK: r17:16 = membh(r21 ++ I:circ(m1)) +# CHECK: r17:16=membh(r21++I:circ(m1)) 0xb0 0xe0 0xb5 0x98 -# CHECK: r17:16 = memubh(r21 ++ #20:circ(m1)) +# CHECK: r17:16=memubh(r21++#20:circ(m1)) 0x10 0xe2 0xb5 0x98 -# CHECK: r17:16 = memubh(r21 ++ I:circ(m1)) +# CHECK: r17:16=memubh(r21++I:circ(m1)) 0x00 0x40 0x00 0x00 0x71 0xd7 0x35 0x9a -# CHECK: r17 = membh(r21 = ##31) +# CHECK: r17=membh(r21=##31) 0xb1 0xc0 0x35 0x9a -# CHECK: r17 = membh(r21++#10) +# CHECK: r17=membh(r21++#10) 0x00 0x40 0x00 0x00 0x71 0xd7 0x75 0x9a -# CHECK: r17 = memubh(r21 = ##31) +# CHECK: r17=memubh(r21=##31) 0xb1 0xc0 0x75 0x9a -# CHECK: r17 = memubh(r21++#10) +# CHECK: r17=memubh(r21++#10) 0x00 0x40 0x00 0x00 0x70 0xd7 0xb5 0x9a -# CHECK: r17:16 = memubh(r21 = ##31) +# CHECK: r17:16=memubh(r21=##31) 0xb0 0xc0 0xb5 0x9a -# CHECK: r17:16 = memubh(r21++#20) +# CHECK: r17:16=memubh(r21++#20) 0x00 0x40 0x00 0x00 0x70 0xd7 0xf5 0x9a -# CHECK: r17:16 = membh(r21 = ##31) +# CHECK: r17:16=membh(r21=##31) 0xb0 0xc0 0xf5 0x9a -# CHECK: r17:16 = membh(r21++#20) +# CHECK: r17:16=membh(r21++#20) 0x00 0x40 0x00 0x00 0xf1 0xf7 0x35 0x9c -# CHECK: r17 = membh(r21<<#3 + ##31) +# CHECK: r17=membh(r21<<#3+##31) 0x11 0xe0 0x35 0x9c -# CHECK: r17 = membh(r21++m1) +# CHECK: r17=membh(r21++m1) 0x00 0x40 0x00 0x00 0xf1 0xf7 0x75 0x9c -# CHECK: r17 = memubh(r21<<#3 + ##31) +# CHECK: r17=memubh(r21<<#3+##31) 0x11 0xe0 0x75 0x9c -# CHECK: r17 = memubh(r21++m1) +# CHECK: r17=memubh(r21++m1) 0x00 0x40 0x00 0x00 0xf0 0xf7 0xf5 0x9c -# CHECK: r17:16 = membh(r21<<#3 + ##31) +# CHECK: r17:16=membh(r21<<#3+##31) 0x10 0xe0 0xf5 0x9c -# CHECK: r17:16 = membh(r21++m1) +# CHECK: r17:16=membh(r21++m1) 0x00 0x40 0x00 0x00 0xf0 0xf7 0xb5 0x9c -# CHECK: r17:16 = 
memubh(r21<<#3 + ##31) +# CHECK: r17:16=memubh(r21<<#3+##31) 0x11 0xe0 0x35 0x9c -# CHECK: r17 = membh(r21++m1) +# CHECK: r17=membh(r21++m1) 0x11 0xe0 0x75 0x9c -# CHECK: r17 = memubh(r21++m1) +# CHECK: r17=memubh(r21++m1) 0x10 0xe0 0xf5 0x9c -# CHECK: r17:16 = membh(r21++m1) +# CHECK: r17:16=membh(r21++m1) 0x10 0xe0 0xb5 0x9c -# CHECK: r17:16 = memubh(r21++m1) +# CHECK: r17:16=memubh(r21++m1) 0x11 0xe0 0x35 0x9e -# CHECK: r17 = membh(r21 ++ m1:brev) +# CHECK: r17=membh(r21++m1:brev) 0x11 0xe0 0x75 0x9e -# CHECK: r17 = memubh(r21 ++ m1:brev) +# CHECK: r17=memubh(r21++m1:brev) 0x10 0xe0 0xb5 0x9e -# CHECK: r17:16 = memubh(r21 ++ m1:brev) +# CHECK: r17:16=memubh(r21++m1:brev) 0x10 0xe0 0xf5 0x9e -# CHECK: r17:16 = membh(r21 ++ m1:brev) +# CHECK: r17:16=membh(r21++m1:brev) Index: test/MC/Disassembler/Hexagon/memop.txt =================================================================== --- test/MC/Disassembler/Hexagon/memop.txt +++ test/MC/Disassembler/Hexagon/memop.txt @@ -3,54 +3,54 @@ # Operation on memory byte 0x95 0xd9 0x11 0x3e -# CHECK: memb(r17+#51) += r21 +# CHECK: memb(r17+#51)+=r21 0xb5 0xd9 0x11 0x3e -# CHECK: memb(r17+#51) -= r21 +# CHECK: memb(r17+#51)-=r21 0xd5 0xd9 0x11 0x3e -# CHECK: memb(r17+#51) &= r21 +# CHECK: memb(r17+#51)&=r21 0xf5 0xd9 0x11 0x3e -# CHECK: memb(r17+#51) |= r21 +# CHECK: memb(r17+#51)|=r21 0x95 0xd9 0x11 0x3f -# CHECK: memb(r17+#51) += #21 +# CHECK: memb(r17+#51)+=#21 0xb5 0xd9 0x11 0x3f -# CHECK: memb(r17+#51) -= #21 +# CHECK: memb(r17+#51)-=#21 0xd5 0xd9 0x11 0x3f -# CHECK: memb(r17+#51) = clrbit(#21) +# CHECK: memb(r17+#51)=clrbit(#21) 0xf5 0xd9 0x11 0x3f -# CHECK: memb(r17+#51) = setbit(#21) +# CHECK: memb(r17+#51)=setbit(#21) # Operation on memory halfword 0x95 0xd9 0x31 0x3e -# CHECK: memh(r17+#102) += r21 +# CHECK: memh(r17+#102)+=r21 0xb5 0xd9 0x31 0x3e -# CHECK: memh(r17+#102) -= r21 +# CHECK: memh(r17+#102)-=r21 0xd5 0xd9 0x31 0x3e -# CHECK: memh(r17+#102) &= r21 +# CHECK: memh(r17+#102)&=r21 0xf5 0xd9 0x31 0x3e -# CHECK: memh(r17+#102) |= r21 +# CHECK: memh(r17+#102)|=r21 0x95 0xd9 0x31 0x3f -# CHECK: memh(r17+#102) += #21 +# CHECK: memh(r17+#102)+=#21 0xb5 0xd9 0x31 0x3f -# CHECK: memh(r17+#102) -= #21 +# CHECK: memh(r17+#102)-=#21 0xd5 0xd9 0x31 0x3f -# CHECK: memh(r17+#102) = clrbit(#21) +# CHECK: memh(r17+#102)=clrbit(#21) 0xf5 0xd9 0x31 0x3f -# CHECK: memh(r17+#102) = setbit(#21) +# CHECK: memh(r17+#102)=setbit(#21) # Operation on memory word 0x95 0xd9 0x51 0x3e -# CHECK: memw(r17+#204) += r21 +# CHECK: memw(r17+#204)+=r21 0xb5 0xd9 0x51 0x3e -# CHECK: memw(r17+#204) -= r21 +# CHECK: memw(r17+#204)-=r21 0xd5 0xd9 0x51 0x3e -# CHECK: memw(r17+#204) &= r21 +# CHECK: memw(r17+#204)&=r21 0xf5 0xd9 0x51 0x3e -# CHECK: memw(r17+#204) |= r21 +# CHECK: memw(r17+#204)|=r21 0x95 0xd9 0x51 0x3f -# CHECK: memw(r17+#204) += #21 +# CHECK: memw(r17+#204)+=#21 0xb5 0xd9 0x51 0x3f -# CHECK: memw(r17+#204) -= #21 +# CHECK: memw(r17+#204)-=#21 0xd5 0xd9 0x51 0x3f -# CHECK: memw(r17+#204) = clrbit(#21) +# CHECK: memw(r17+#204)=clrbit(#21) 0xf5 0xd9 0x51 0x3f -# CHECK: memw(r17+#204) = setbit(#21) +# CHECK: memw(r17+#204)=setbit(#21) Index: test/MC/Disassembler/Hexagon/nv_j.txt =================================================================== --- test/MC/Disassembler/Hexagon/nv_j.txt +++ test/MC/Disassembler/Hexagon/nv_j.txt @@ -3,134 +3,134 @@ # Jump to address conditioned on new register value 0x11 0x40 0x71 0x70 0x92 0xd5 0x02 0x20 -# CHECK: r17 = r17 -# CHECK-NEXT: if (cmp.eq(r17.new, r21)) jump:nt +# CHECK: r17=r17 +# CHECK-NEXT: if 
(cmp.eq(r17.new,r21)) jump:nt 0x11 0x40 0x71 0x70 0x92 0xf5 0x02 0x20 -# CHECK: r17 = r17 -# CHECK-NEXT: if (cmp.eq(r17.new, r21)) jump:t +# CHECK: r17=r17 +# CHECK-NEXT: if (cmp.eq(r17.new,r21)) jump:t 0x11 0x40 0x71 0x70 0x92 0xd5 0x42 0x20 -# CHECK: r17 = r17 -# CHECK-NEXT: if (!cmp.eq(r17.new, r21)) jump:nt +# CHECK: r17=r17 +# CHECK-NEXT: if (!cmp.eq(r17.new,r21)) jump:nt 0x11 0x40 0x71 0x70 0x92 0xf5 0x42 0x20 -# CHECK: r17 = r17 -# CHECK-NEXT: if (!cmp.eq(r17.new, r21)) jump:t +# CHECK: r17=r17 +# CHECK-NEXT: if (!cmp.eq(r17.new,r21)) jump:t 0x11 0x40 0x71 0x70 0x92 0xd5 0x82 0x20 -# CHECK: r17 = r17 -# CHECK-NEXT: if (cmp.gt(r17.new, r21)) jump:nt +# CHECK: r17=r17 +# CHECK-NEXT: if (cmp.gt(r17.new,r21)) jump:nt 0x11 0x40 0x71 0x70 0x92 0xf5 0x82 0x20 -# CHECK: r17 = r17 -# CHECK-NEXT: if (cmp.gt(r17.new, r21)) jump:t +# CHECK: r17=r17 +# CHECK-NEXT: if (cmp.gt(r17.new,r21)) jump:t 0x11 0x40 0x71 0x70 0x92 0xd5 0xc2 0x20 -# CHECK: r17 = r17 -# CHECK-NEXT: if (!cmp.gt(r17.new, r21)) jump:nt +# CHECK: r17=r17 +# CHECK-NEXT: if (!cmp.gt(r17.new,r21)) jump:nt 0x11 0x40 0x71 0x70 0x92 0xf5 0xc2 0x20 -# CHECK: r17 = r17 -# CHECK-NEXT: if (!cmp.gt(r17.new, r21)) jump:t +# CHECK: r17=r17 +# CHECK-NEXT: if (!cmp.gt(r17.new,r21)) jump:t 0x11 0x40 0x71 0x70 0x92 0xd5 0x02 0x21 -# CHECK: r17 = r17 -# CHECK-NEXT: if (cmp.gtu(r17.new, r21)) jump:nt +# CHECK: r17=r17 +# CHECK-NEXT: if (cmp.gtu(r17.new,r21)) jump:nt 0x11 0x40 0x71 0x70 0x92 0xf5 0x02 0x21 -# CHECK: r17 = r17 -# CHECK-NEXT: if (cmp.gtu(r17.new, r21)) jump:t +# CHECK: r17=r17 +# CHECK-NEXT: if (cmp.gtu(r17.new,r21)) jump:t 0x11 0x40 0x71 0x70 0x92 0xd5 0x42 0x21 -# CHECK: r17 = r17 -# CHECK-NEXT: if (!cmp.gtu(r17.new, r21)) jump:nt +# CHECK: r17=r17 +# CHECK-NEXT: if (!cmp.gtu(r17.new,r21)) jump:nt 0x11 0x40 0x71 0x70 0x92 0xf5 0x42 0x21 -# CHECK: r17 = r17 -# CHECK-NEXT: if (!cmp.gtu(r17.new, r21)) jump:t +# CHECK: r17=r17 +# CHECK-NEXT: if (!cmp.gtu(r17.new,r21)) jump:t 0x11 0x40 0x71 0x70 0x92 0xd5 0x82 0x21 -# CHECK: r17 = r17 -# CHECK-NEXT: if (cmp.gt(r21, r17.new)) jump:nt +# CHECK: r17=r17 +# CHECK-NEXT: if (cmp.gt(r21,r17.new)) jump:nt 0x11 0x40 0x71 0x70 0x92 0xf5 0x82 0x21 -# CHECK: r17 = r17 -# CHECK-NEXT: if (cmp.gt(r21, r17.new)) jump:t +# CHECK: r17=r17 +# CHECK-NEXT: if (cmp.gt(r21,r17.new)) jump:t 0x11 0x40 0x71 0x70 0x92 0xd5 0xc2 0x21 -# CHECK: r17 = r17 -# CHECK-NEXT: if (!cmp.gt(r21, r17.new)) jump:nt +# CHECK: r17=r17 +# CHECK-NEXT: if (!cmp.gt(r21,r17.new)) jump:nt 0x11 0x40 0x71 0x70 0x92 0xf5 0xc2 0x21 -# CHECK: r17 = r17 -# CHECK-NEXT: if (!cmp.gt(r21, r17.new)) jump:t +# CHECK: r17=r17 +# CHECK-NEXT: if (!cmp.gt(r21,r17.new)) jump:t 0x11 0x40 0x71 0x70 0x92 0xd5 0x02 0x22 -# CHECK: r17 = r17 -# CHECK-NEXT: if (cmp.gtu(r21, r17.new)) jump:nt +# CHECK: r17=r17 +# CHECK-NEXT: if (cmp.gtu(r21,r17.new)) jump:nt 0x11 0x40 0x71 0x70 0x92 0xf5 0x02 0x22 -# CHECK: r17 = r17 -# CHECK-NEXT: if (cmp.gtu(r21, r17.new)) jump:t +# CHECK: r17=r17 +# CHECK-NEXT: if (cmp.gtu(r21,r17.new)) jump:t 0x11 0x40 0x71 0x70 0x92 0xd5 0x42 0x22 -# CHECK: r17 = r17 -# CHECK-NEXT: if (!cmp.gtu(r21, r17.new)) jump:nt +# CHECK: r17=r17 +# CHECK-NEXT: if (!cmp.gtu(r21,r17.new)) jump:nt 0x11 0x40 0x71 0x70 0x92 0xf5 0x42 0x22 -# CHECK: r17 = r17 -# CHECK-NEXT: if (!cmp.gtu(r21, r17.new)) jump:t +# CHECK: r17=r17 +# CHECK-NEXT: if (!cmp.gtu(r21,r17.new)) jump:t 0x11 0x40 0x71 0x70 0x92 0xd5 0x02 0x24 -# CHECK: r17 = r17 -# CHECK-NEXT: if (cmp.eq(r17.new, #21)) jump:nt +# CHECK: r17=r17 +# CHECK-NEXT: if (cmp.eq(r17.new,#21)) jump:nt 0x11 
0x40 0x71 0x70 0x92 0xf5 0x02 0x24
-# CHECK: r17 = r17
-# CHECK-NETX: if (cmp.eq(r17.new, #21)) jump:t
+# CHECK: r17=r17
+# CHECK-NEXT: if (cmp.eq(r17.new,#21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x42 0x24
-# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.eq(r17.new, #21)) jump:nt
+# CHECK: r17=r17
+# CHECK-NEXT: if (!cmp.eq(r17.new,#21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x42 0x24
-# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.eq(r17.new, #21)) jump:t
+# CHECK: r17=r17
+# CHECK-NEXT: if (!cmp.eq(r17.new,#21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x82 0x24
-# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gt(r17.new, #21)) jump:nt
+# CHECK: r17=r17
+# CHECK-NEXT: if (cmp.gt(r17.new,#21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x82 0x24
-# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gt(r17.new, #21)) jump:t
+# CHECK: r17=r17
+# CHECK-NEXT: if (cmp.gt(r17.new,#21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0xc2 0x24
-# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gt(r17.new, #21)) jump:nt
+# CHECK: r17=r17
+# CHECK-NEXT: if (!cmp.gt(r17.new,#21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0xc2 0x24
-# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gt(r17.new, #21)) jump:t
+# CHECK: r17=r17
+# CHECK-NEXT: if (!cmp.gt(r17.new,#21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x02 0x25
-# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gtu(r17.new, #21)) jump:nt
+# CHECK: r17=r17
+# CHECK-NEXT: if (cmp.gtu(r17.new,#21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x02 0x25
-# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gtu(r17.new, #21)) jump:t
+# CHECK: r17=r17
+# CHECK-NEXT: if (cmp.gtu(r17.new,#21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x42 0x25
-# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gtu(r17.new, #21)) jump:nt
+# CHECK: r17=r17
+# CHECK-NEXT: if (!cmp.gtu(r17.new,#21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x42 0x25
-# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gtu(r17.new, #21)) jump:t
+# CHECK: r17=r17
+# CHECK-NEXT: if (!cmp.gtu(r17.new,#21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xc0 0x82 0x25
-# CHECK: r17 = r17
-# CHECK-NEXT: if (tstbit(r17.new, #0)) jump:nt
+# CHECK: r17=r17
+# CHECK-NEXT: if (tstbit(r17.new,#0)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xe0 0x82 0x25
-# CHECK: r17 = r17
-# CHECK-NEXT: if (tstbit(r17.new, #0)) jump:t
+# CHECK: r17=r17
+# CHECK-NEXT: if (tstbit(r17.new,#0)) jump:t
0x11 0x40 0x71 0x70 0x92 0xc0 0xc2 0x25
-# CHECK: r17 = r17
-# CHECK-NEXT: if (!tstbit(r17.new, #0)) jump:nt
+# CHECK: r17=r17
+# CHECK-NEXT: if (!tstbit(r17.new,#0)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xe0 0xc2 0x25
-# CHECK: r17 = r17
-# CHECK-NEXT: if (!tstbit(r17.new, #0)) jump:t
+# CHECK: r17=r17
+# CHECK-NEXT: if (!tstbit(r17.new,#0)) jump:t
0x11 0x40 0x71 0x70 0x92 0xc0 0x02 0x26
-# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.eq(r17.new, #-1)) jump:nt
+# CHECK: r17=r17
+# CHECK-NEXT: if (cmp.eq(r17.new,#-1)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xe0 0x02 0x26
-# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.eq(r17.new, #-1)) jump:t
+# CHECK: r17=r17
+# CHECK-NEXT: if (cmp.eq(r17.new,#-1)) jump:t
0x11 0x40 0x71 0x70 0x92 0xc0 0x42 0x26
-# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.eq(r17.new, #-1)) jump:nt
+# CHECK: r17=r17
+# CHECK-NEXT: if (!cmp.eq(r17.new,#-1)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xe0 0x42 0x26
-# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.eq(r17.new, #-1)) jump:t
+# CHECK: r17=r17
+# CHECK-NEXT: if (!cmp.eq(r17.new,#-1)) jump:t
0x11 0x40 0x71 0x70 0x92 0xc0 0x82 0x26
-# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gt(r17.new, #-1)) jump:nt
+# CHECK: r17=r17
+# CHECK-NEXT: if (cmp.gt(r17.new,#-1)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xe0 0x82 0x26
-# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gt(r17.new, #-1)) jump:t +# CHECK: r17=r17 +# CHECK-NEXT: if (cmp.gt(r17.new,#-1)) jump:t 0x11 0x40 0x71 0x70 0x92 0xc0 0xc2 0x26 -# CHECK: r17 = r17 -# CHECK-NEXT: if (!cmp.gt(r17.new, #-1)) jump:nt +# CHECK: r17=r17 +# CHECK-NEXT: if (!cmp.gt(r17.new,#-1)) jump:nt 0x11 0x40 0x71 0x70 0x92 0xe0 0xc2 0x26 -# CHECK: r17 = r17 -# CHECK-NEXT: if (!cmp.gt(r17.new, #-1)) jump:t +# CHECK: r17=r17 +# CHECK-NEXT: if (!cmp.gt(r17.new,#-1)) jump:t Index: test/MC/Disassembler/Hexagon/nv_st.txt =================================================================== --- test/MC/Disassembler/Hexagon/nv_st.txt +++ test/MC/Disassembler/Hexagon/nv_st.txt @@ -3,210 +3,210 @@ # Store new-value byte 0x1f 0x40 0x7f 0x70 0x82 0xf5 0xb1 0x3b -# CHECK: r31 = r31 -# CHECK-NEXT: memb(r17 + r21<<#3) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memb(r17+r21<<#3)=r31.new 0x1f 0x40 0x7f 0x70 0x11 0xc2 0xa0 0x48 -# CHECK: r31 = r31 -# CHECK-NEXT: memb(#17) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memb(#17)=r31.new 0x1f 0x40 0x7f 0x70 0x15 0xc2 0xb1 0xa1 -# CHECK: r31 = r31 -# CHECK-NEXT: memb(r17+#21) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memb(r17+#21)=r31.new 0x1f 0x40 0x7f 0x70 0x02 0xe2 0xb1 0xa9 -# CHECK: r31 = r31 -# CHECK-NEXT: memb(r17 ++ I:circ(m1)) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memb(r17++I:circ(m1))=r31.new 0x1f 0x40 0x7f 0x70 0x28 0xe2 0xb1 0xa9 -# CHECK: r31 = r31 -# CHECK-NEXT: memb(r17 ++ #5:circ(m1)) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memb(r17++#5:circ(m1))=r31.new 0x1f 0x40 0x7f 0x70 0x28 0xc2 0xb1 0xab -# CHECK: r31 = r31 -# CHECK-NEXT: memb(r17++#5) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memb(r17++#5)=r31.new 0x1f 0x40 0x7f 0x70 0x00 0xe2 0xb1 0xad -# CHECK: r31 = r31 -# CHECK-NEXT: memb(r17++m1) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memb(r17++m1)=r31.new 0x1f 0x40 0x7f 0x70 0x00 0xe2 0xb1 0xaf -# CHECK: r31 = r31 -# CHECK-NEXT: memb(r17 ++ m1:brev) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memb(r17++m1:brev)=r31.new # Store new-value byte conditionally 0x1f 0x40 0x7f 0x70 0xe2 0xf5 0xb1 0x34 -# CHECK: r31 = r31 -# CHECK-NEXT: if (p3) memb(r17+r21<<#3) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (p3) memb(r17+r21<<#3)=r31.new 0x1f 0x40 0x7f 0x70 0xe2 0xf5 0xb1 0x35 -# CHECK: r31 = r31 -# CHECK-NEXT: if (!p3) memb(r17+r21<<#3) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (!p3) memb(r17+r21<<#3)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xe2 0xf5 0xb1 0x36 -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (p3.new) memb(r17+r21<<#3) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (p3.new) memb(r17+r21<<#3)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xe2 0xf5 0xb1 0x37 -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (!p3.new) memb(r17+r21<<#3) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (!p3.new) memb(r17+r21<<#3)=r31.new 0x1f 0x40 0x7f 0x70 0xab 0xc2 0xb1 0x40 -# CHECK: r31 = r31 -# CHECK-NEXT: if (p3) memb(r17+#21) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (p3) memb(r17+#21)=r31.new 0x1f 0x40 0x7f 0x70 0xab 0xc2 0xb1 0x44 -# CHECK: r31 = r31 -# CHECK-NEXT: if (!p3) memb(r17+#21) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (!p3) memb(r17+#21)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xab 0xc2 0xb1 0x42 -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (p3.new) memb(r17+#21) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (p3.new) memb(r17+#21)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xab 0xc2 0xb1 0x46 -# CHECK: p3 = r5 -# 
CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (!p3.new) memb(r17+#21) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (!p3.new) memb(r17+#21)=r31.new 0x1f 0x40 0x7f 0x70 0x2b 0xe2 0xb1 0xab -# CHECK: r31 = r31 -# CHECK-NEXT: if (p3) memb(r17++#5) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (p3) memb(r17++#5)=r31.new 0x1f 0x40 0x7f 0x70 0x2f 0xe2 0xb1 0xab -# CHECK: r31 = r31 -# CHECK-NEXT: if (!p3) memb(r17++#5) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (!p3) memb(r17++#5)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xab 0xe2 0xb1 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (p3.new) memb(r17++#5) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (p3.new) memb(r17++#5)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xaf 0xe2 0xb1 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (!p3.new) memb(r17++#5) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (!p3.new) memb(r17++#5)=r31.new # Store new-value halfword 0x1f 0x40 0x7f 0x70 0x8a 0xf5 0xb1 0x3b -# CHECK: r31 = r31 -# CHECK-NEXT: memh(r17 + r21<<#3) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memh(r17+r21<<#3)=r31.new 0x1f 0x40 0x7f 0x70 0x15 0xca 0xa0 0x48 -# CHECK: r31 = r31 -# CHECK-NEXT: memh(#42) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memh(#42)=r31.new 0x1f 0x40 0x7f 0x70 0x15 0xca 0xb1 0xa1 -# CHECK: r31 = r31 -# CHECK-NEXT: memh(r17+#42) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memh(r17+#42)=r31.new 0x1f 0x40 0x7f 0x70 0x02 0xea 0xb1 0xa9 -# CHECK: r31 = r31 -# CHECK-NEXT: memh(r17 ++ I:circ(m1)) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memh(r17++I:circ(m1))=r31.new 0x1f 0x40 0x7f 0x70 0x28 0xea 0xb1 0xa9 -# CHECK: r31 = r31 -# CHECK-NEXT: memh(r17 ++ #10:circ(m1)) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memh(r17++#10:circ(m1))=r31.new 0x1f 0x40 0x7f 0x70 0x28 0xca 0xb1 0xab -# CHECK: r31 = r31 -# CHECK-NEXT: memh(r17++#10) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memh(r17++#10)=r31.new 0x1f 0x40 0x7f 0x70 0x00 0xea 0xb1 0xad -# CHECK: r31 = r31 -# CHECK-NEXT: memh(r17++m1) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memh(r17++m1)=r31.new 0x1f 0x40 0x7f 0x70 0x00 0xea 0xb1 0xaf -# CHECK: r31 = r31 -# CHECK-NEXT: memh(r17 ++ m1:brev) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memh(r17++m1:brev)=r31.new # Store new-value halfword conditionally 0x1f 0x40 0x7f 0x70 0xea 0xf5 0xb1 0x34 -# CHECK: r31 = r31 -# CHECK-NEXT: if (p3) memh(r17+r21<<#3) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (p3) memh(r17+r21<<#3)=r31.new 0x1f 0x40 0x7f 0x70 0xea 0xf5 0xb1 0x35 -# CHECK: r31 = r31 -# CHECK-NEXT: if (!p3) memh(r17+r21<<#3) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (!p3) memh(r17+r21<<#3)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xea 0xf5 0xb1 0x36 -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (p3.new) memh(r17+r21<<#3) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (p3.new) memh(r17+r21<<#3)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xea 0xf5 0xb1 0x37 -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (!p3.new) memh(r17+r21<<#3) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (!p3.new) memh(r17+r21<<#3)=r31.new 0x1f 0x40 0x7f 0x70 0xab 0xca 0xb1 0x40 -# CHECK: r31 = r31 -# CHECK-NEXT: if (p3) memh(r17+#42) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (p3) memh(r17+#42)=r31.new 0x1f 0x40 0x7f 0x70 0xab 0xca 0xb1 0x44 -# CHECK: r31 = r31 -# CHECK-NEXT: if (!p3) memh(r17+#42) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (!p3) memh(r17+#42)=r31.new 0x03 
0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xab 0xca 0xb1 0x42 -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (p3.new) memh(r17+#42) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (p3.new) memh(r17+#42)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xab 0xca 0xb1 0x46 -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (!p3.new) memh(r17+#42) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (!p3.new) memh(r17+#42)=r31.new 0x1f 0x40 0x7f 0x70 0x2b 0xea 0xb1 0xab -# CHECK: r31 = r31 -# CHECK-NEXT: if (p3) memh(r17++#10) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (p3) memh(r17++#10)=r31.new 0x1f 0x40 0x7f 0x70 0x2f 0xea 0xb1 0xab -# CHECK: r31 = r31 -# CHECK-NEXT: if (!p3) memh(r17++#10) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (!p3) memh(r17++#10)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xab 0xea 0xb1 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (p3.new) memh(r17++#10) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (p3.new) memh(r17++#10)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xaf 0xea 0xb1 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (!p3.new) memh(r17++#10) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (!p3.new) memh(r17++#10)=r31.new # Store new-value word 0x1f 0x40 0x7f 0x70 0x92 0xf5 0xb1 0x3b -# CHECK: r31 = r31 -# CHECK-NEXT: memw(r17 + r21<<#3) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memw(r17+r21<<#3)=r31.new 0x1f 0x40 0x7f 0x70 0x15 0xd2 0xa0 0x48 -# CHECK: r31 = r31 -# CHECK-NEXT: memw(#84) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memw(#84)=r31.new 0x1f 0x40 0x7f 0x70 0x15 0xd2 0xb1 0xa1 -# CHECK: r31 = r31 -# CHECK-NEXT: memw(r17+#84) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memw(r17+#84)=r31.new 0x1f 0x40 0x7f 0x70 0x02 0xf2 0xb1 0xa9 -# CHECK: r31 = r31 -# CHECK-NEXT: memw(r17 ++ I:circ(m1)) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memw(r17++I:circ(m1))=r31.new 0x1f 0x40 0x7f 0x70 0x28 0xf2 0xb1 0xa9 -# CHECK: r31 = r31 -# CHECK-NEXT: memw(r17 ++ #20:circ(m1)) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memw(r17++#20:circ(m1))=r31.new 0x1f 0x40 0x7f 0x70 0x28 0xd2 0xb1 0xab -# CHECK: r31 = r31 -# CHECK-NEXT: memw(r17++#20) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memw(r17++#20)=r31.new 0x1f 0x40 0x7f 0x70 0x00 0xf2 0xb1 0xad -# CHECK: r31 = r31 -# CHECK-NEXT: memw(r17++m1) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memw(r17++m1)=r31.new 0x1f 0x40 0x7f 0x70 0x00 0xf2 0xb1 0xaf -# CHECK: r31 = r31 -# CHECK-NEXT: memw(r17 ++ m1:brev) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: memw(r17++m1:brev)=r31.new # Store new-value word conditionally 0x1f 0x40 0x7f 0x70 0xf2 0xf5 0xb1 0x34 -# CHECK: r31 = r31 -# CHECK-NEXT: if (p3) memw(r17+r21<<#3) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (p3) memw(r17+r21<<#3)=r31.new 0x1f 0x40 0x7f 0x70 0xf2 0xf5 0xb1 0x35 -# CHECK: r31 = r31 -# CHECK-NEXT: if (!p3) memw(r17+r21<<#3) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (!p3) memw(r17+r21<<#3)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xf2 0xf5 0xb1 0x36 -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (p3.new) memw(r17+r21<<#3) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (p3.new) memw(r17+r21<<#3)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xf2 0xf5 0xb1 0x37 -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (!p3.new) memw(r17+r21<<#3) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (!p3.new) memw(r17+r21<<#3)=r31.new 0x1f 0x40 0x7f 0x70 0xab 0xd2 
0xb1 0x40 -# CHECK: r31 = r31 -# CHECK-NEXT: if (p3) memw(r17+#84) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (p3) memw(r17+#84)=r31.new 0x1f 0x40 0x7f 0x70 0xab 0xd2 0xb1 0x44 -# CHECK: r31 = r31 -# CHECK-NEXT: if (!p3) memw(r17+#84) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (!p3) memw(r17+#84)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xab 0xd2 0xb1 0x42 -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (p3.new) memw(r17+#84) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (p3.new) memw(r17+#84)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xab 0xd2 0xb1 0x46 -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (!p3.new) memw(r17+#84) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (!p3.new) memw(r17+#84)=r31.new 0x1f 0x40 0x7f 0x70 0x2b 0xf2 0xb1 0xab -# CHECK: r31 = r31 -# CHECK-NEXT: if (p3) memw(r17++#20) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (p3) memw(r17++#20)=r31.new 0x1f 0x40 0x7f 0x70 0x2f 0xf2 0xb1 0xab -# CHECK: r31 = r31 -# CHECK-NEXT: if (!p3) memw(r17++#20) = r31.new +# CHECK: r31=r31 +# CHECK-NEXT: if (!p3) memw(r17++#20)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xab 0xf2 0xb1 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (p3.new) memw(r17++#20) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (p3.new) memw(r17++#20)=r31.new 0x03 0x40 0x45 0x85 0x1f 0x40 0x7f 0x70 0xaf 0xf2 0xb1 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: r31 = r31 -# CHECK-NEXT: if (!p3.new) memw(r17++#20) = r31.new +# CHECK: p3=r5 +# CHECK-NEXT: r31=r31 +# CHECK-NEXT: if (!p3.new) memw(r17++#20)=r31.new Index: test/MC/Disassembler/Hexagon/st.txt =================================================================== --- test/MC/Disassembler/Hexagon/st.txt +++ test/MC/Disassembler/Hexagon/st.txt @@ -3,357 +3,357 @@ # Store doubleword 0x9e 0xf5 0xd1 0x3b -# CHECK: memd(r17 + r21<<#3) = r31:30 +# CHECK: memd(r17+r21<<#3)=r31:30 0x28 0xd4 0xc0 0x48 -# CHECK: memd(#320) = r21:20 +# CHECK: memd(#320)=r21:20 0x02 0x40 0x00 0x00 0x28 0xd4 0xc0 0x48 -# CHECK: memd(##168) = r21:20 +# CHECK: memd(##168)=r21:20 0x15 0xd4 0xd1 0xa1 -# CHECK: memd(r17+#168) = r21:20 +# CHECK: memd(r17+#168)=r21:20 0x02 0xf4 0xd1 0xa9 -# CHECK: memd(r17 ++ I:circ(m1)) = r21:20 +# CHECK: memd(r17++I:circ(m1))=r21:20 0x28 0xf4 0xd1 0xa9 -# CHECK: memd(r17 ++ #40:circ(m1)) = r21:20 +# CHECK: memd(r17++#40:circ(m1))=r21:20 0x28 0xd4 0xd1 0xab -# CHECK: memd(r17++#40) = r21:20 +# CHECK: memd(r17++#40)=r21:20 0x00 0x40 0x00 0x00 0xd5 0xfe 0xd1 0xad -# CHECK: memd(r17<<#3 + ##21) = r31:30 +# CHECK: memd(r17<<#3+##21)=r31:30 0x00 0xf4 0xd1 0xad -# CHECK: memd(r17++m1) = r21:20 +# CHECK: memd(r17++m1)=r21:20 0x00 0xf4 0xd1 0xaf -# CHECK: memd(r17 ++ m1:brev) = r21:20 +# CHECK: memd(r17++m1:brev)=r21:20 # Store doubleword conditionally 0xfe 0xf5 0xd1 0x34 -# CHECK: if (p3) memd(r17+r21<<#3) = r31:30 +# CHECK: if (p3) memd(r17+r21<<#3)=r31:30 0xfe 0xf5 0xd1 0x35 -# CHECK: if (!p3) memd(r17+r21<<#3) = r31:30 +# CHECK: if (!p3) memd(r17+r21<<#3)=r31:30 0x03 0x40 0x45 0x85 0xfe 0xf5 0xd1 0x36 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memd(r17+r21<<#3) = r31:30 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memd(r17+r21<<#3)=r31:30 0x03 0x40 0x45 0x85 0xfe 0xf5 0xd1 0x37 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memd(r17+r21<<#3) = r31:30 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memd(r17+r21<<#3)=r31:30 0xab 0xde 0xd1 0x40 -# CHECK: if (p3) memd(r17+#168) = r31:30 +# CHECK: if (p3) memd(r17+#168)=r31:30 0xab 0xde 0xd1 0x44 -# 
CHECK: if (!p3) memd(r17+#168) = r31:30 +# CHECK: if (!p3) memd(r17+#168)=r31:30 0x03 0x40 0x45 0x85 0xab 0xde 0xd1 0x42 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memd(r17+#168) = r31:30 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memd(r17+#168)=r31:30 0x03 0x40 0x45 0x85 0xab 0xde 0xd1 0x46 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memd(r17+#168) = r31:30 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memd(r17+#168)=r31:30 0x2b 0xf4 0xd1 0xab -# CHECK: if (p3) memd(r17++#40) = r21:20 +# CHECK: if (p3) memd(r17++#40)=r21:20 0x2f 0xf4 0xd1 0xab -# CHECK: if (!p3) memd(r17++#40) = r21:20 +# CHECK: if (!p3) memd(r17++#40)=r21:20 0x03 0x40 0x45 0x85 0xab 0xf4 0xd1 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memd(r17++#40) = r21:20 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memd(r17++#40)=r21:20 0x03 0x40 0x45 0x85 0xaf 0xf4 0xd1 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memd(r17++#40) = r21:20 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memd(r17++#40)=r21:20 0x02 0x40 0x00 0x00 0xc3 0xd4 0xc2 0xaf -# CHECK: if (p3) memd(##168) = r21:20 +# CHECK: if (p3) memd(##168)=r21:20 0x02 0x40 0x00 0x00 0xc7 0xd4 0xc2 0xaf -# CHECK: if (!p3) memd(##168) = r21:20 +# CHECK: if (!p3) memd(##168)=r21:20 0x03 0x40 0x45 0x85 0x02 0x40 0x00 0x00 0xc3 0xf4 0xc2 0xaf -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memd(##168) = r21:20 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memd(##168)=r21:20 0x03 0x40 0x45 0x85 0x02 0x40 0x00 0x00 0xc7 0xf4 0xc2 0xaf -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memd(##168) = r21:20 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memd(##168)=r21:20 # Store byte 0x9f 0xf5 0x11 0x3b -# CHECK: memb(r17 + r21<<#3) = r31 +# CHECK: memb(r17+r21<<#3)=r31 0x9f 0xca 0x11 0x3c # CHECK: memb(r17+#21)=#31 0x15 0xd5 0x00 0x48 -# CHECK: memb(#21) = r21 +# CHECK: memb(#21)=r21 0x00 0x40 0x00 0x00 0x15 0xd5 0x00 0x48 -# CHECK: memb(##21) = r21 +# CHECK: memb(##21)=r21 0x15 0xd5 0x11 0xa1 -# CHECK: memb(r17+#21) = r21 +# CHECK: memb(r17+#21)=r21 0x02 0xf5 0x11 0xa9 -# CHECK: memb(r17 ++ I:circ(m1)) = r21 +# CHECK: memb(r17++I:circ(m1))=r21 0x28 0xf5 0x11 0xa9 -# CHECK: memb(r17 ++ #5:circ(m1)) = r21 +# CHECK: memb(r17++#5:circ(m1))=r21 0x28 0xd5 0x11 0xab -# CHECK: memb(r17++#5) = r21 +# CHECK: memb(r17++#5)=r21 0x00 0x40 0x00 0x00 0xd5 0xff 0x11 0xad -# CHECK: memb(r17<<#3 + ##21) = r31 +# CHECK: memb(r17<<#3+##21)=r31 0x00 0xf5 0x11 0xad -# CHECK: memb(r17++m1) = r21 +# CHECK: memb(r17++m1)=r21 0x00 0xf5 0x11 0xaf -# CHECK: memb(r17 ++ m1:brev) = r21 +# CHECK: memb(r17++m1:brev)=r21 # Store byte conditionally 0xff 0xf5 0x11 0x34 -# CHECK: if (p3) memb(r17+r21<<#3) = r31 +# CHECK: if (p3) memb(r17+r21<<#3)=r31 0xff 0xf5 0x11 0x35 -# CHECK: if (!p3) memb(r17+r21<<#3) = r31 +# CHECK: if (!p3) memb(r17+r21<<#3)=r31 0x03 0x40 0x45 0x85 0xff 0xf5 0x11 0x36 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memb(r17+r21<<#3) = r31 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memb(r17+r21<<#3)=r31 0x03 0x40 0x45 0x85 0xff 0xf5 0x11 0x37 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memb(r17+r21<<#3) = r31 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memb(r17+r21<<#3)=r31 0xff 0xca 0x11 0x38 # CHECK: if (p3) memb(r17+#21)=#31 0xff 0xca 0x91 0x38 # CHECK: if (!p3) memb(r17+#21)=#31 0x03 0x40 0x45 0x85 0xff 0xca 0x11 0x39 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (p3.new) memb(r17+#21)=#31 0x03 0x40 0x45 0x85 0xff 0xca 0x91 0x39 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (!p3.new) memb(r17+#21)=#31 0xab 0xdf 0x11 0x40 -# CHECK: if (p3) memb(r17+#21) = r31 +# CHECK: if (p3) 
memb(r17+#21)=r31 0xab 0xdf 0x11 0x44 -# CHECK: if (!p3) memb(r17+#21) = r31 +# CHECK: if (!p3) memb(r17+#21)=r31 0x03 0x40 0x45 0x85 0xab 0xdf 0x11 0x42 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memb(r17+#21) = r31 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memb(r17+#21)=r31 0x03 0x40 0x45 0x85 0xab 0xdf 0x11 0x46 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memb(r17+#21) = r31 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memb(r17+#21)=r31 0x2b 0xf5 0x11 0xab -# CHECK: if (p3) memb(r17++#5) = r21 +# CHECK: if (p3) memb(r17++#5)=r21 0x2f 0xf5 0x11 0xab -# CHECK: if (!p3) memb(r17++#5) = r21 +# CHECK: if (!p3) memb(r17++#5)=r21 0x03 0x40 0x45 0x85 0xab 0xf5 0x11 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memb(r17++#5) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memb(r17++#5)=r21 0x03 0x40 0x45 0x85 0xaf 0xf5 0x11 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memb(r17++#5) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memb(r17++#5)=r21 0x00 0x40 0x00 0x00 0xab 0xd5 0x01 0xaf -# CHECK: if (p3) memb(##21) = r21 +# CHECK: if (p3) memb(##21)=r21 0x00 0x40 0x00 0x00 0xaf 0xd5 0x01 0xaf -# CHECK: if (!p3) memb(##21) = r21 +# CHECK: if (!p3) memb(##21)=r21 0x03 0x40 0x45 0x85 0x00 0x40 0x00 0x00 0xab 0xf5 0x01 0xaf -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memb(##21) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memb(##21)=r21 0x03 0x40 0x45 0x85 0x00 0x40 0x00 0x00 0xaf 0xf5 0x01 0xaf -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memb(##21) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memb(##21)=r21 # Store halfword 0x9f 0xf5 0x51 0x3b -# CHECK: memh(r17 + r21<<#3) = r31 +# CHECK: memh(r17+r21<<#3)=r31 0x9f 0xf5 0x71 0x3b -# CHECK: memh(r17 + r21<<#3) = r31.h +# CHECK: memh(r17+r21<<#3)=r31.h 0x95 0xcf 0x31 0x3c # CHECK: memh(r17+#62)=#21 0x00 0x40 0x00 0x00 0x2a 0xd5 0x40 0x48 -# CHECK: memh(##42) = r21 +# CHECK: memh(##42)=r21 0x00 0x40 0x00 0x00 0x2a 0xd5 0x60 0x48 -# CHECK: memh(##42) = r21.h +# CHECK: memh(##42)=r21.h 0x2a 0xd5 0x40 0x48 -# CHECK: memh(#84) = r21 +# CHECK: memh(#84)=r21 0x2a 0xd5 0x60 0x48 -# CHECK: memh(#84) = r21.h +# CHECK: memh(#84)=r21.h 0x15 0xdf 0x51 0xa1 -# CHECK: memh(r17+#42) = r31 +# CHECK: memh(r17+#42)=r31 0x15 0xdf 0x71 0xa1 -# CHECK: memh(r17+#42) = r31.h +# CHECK: memh(r17+#42)=r31.h 0x02 0xf5 0x51 0xa9 -# CHECK: memh(r17 ++ I:circ(m1)) = r21 +# CHECK: memh(r17++I:circ(m1))=r21 0x28 0xf5 0x51 0xa9 -# CHECK: memh(r17 ++ #10:circ(m1)) = r21 +# CHECK: memh(r17++#10:circ(m1))=r21 0x02 0xf5 0x71 0xa9 -# CHECK: memh(r17 ++ I:circ(m1)) = r21.h +# CHECK: memh(r17++I:circ(m1))=r21.h 0x28 0xf5 0x71 0xa9 -# CHECK: memh(r17 ++ #10:circ(m1)) = r21.h +# CHECK: memh(r17++#10:circ(m1))=r21.h 0x28 0xd5 0x51 0xab -# CHECK: memh(r17++#10) = r21 +# CHECK: memh(r17++#10)=r21 0x00 0x40 0x00 0x00 0xd5 0xff 0x51 0xad -# CHECK: memh(r17<<#3 + ##21) = r31 +# CHECK: memh(r17<<#3+##21)=r31 0x28 0xd5 0x71 0xab -# CHECK: memh(r17++#10) = r21.h +# CHECK: memh(r17++#10)=r21.h 0x00 0x40 0x00 0x00 0xd5 0xff 0x71 0xad -# CHECK: memh(r17<<#3 + ##21) = r31.h +# CHECK: memh(r17<<#3+##21)=r31.h 0x00 0xf5 0x51 0xad -# CHECK: memh(r17++m1) = r21 +# CHECK: memh(r17++m1)=r21 0x00 0xf5 0x71 0xad -# CHECK: memh(r17++m1) = r21.h +# CHECK: memh(r17++m1)=r21.h 0x00 0xf5 0x51 0xaf -# CHECK: memh(r17 ++ m1:brev) = r21 +# CHECK: memh(r17++m1:brev)=r21 0x00 0xf5 0x71 0xaf -# CHECK: memh(r17 ++ m1:brev) = r21.h +# CHECK: memh(r17++m1:brev)=r21.h # Store halfword conditionally 0xff 0xf5 0x51 0x34 -# CHECK: if (p3) memh(r17+r21<<#3) = r31 +# CHECK: if (p3) memh(r17+r21<<#3)=r31 
0xff 0xf5 0x71 0x34 -# CHECK: if (p3) memh(r17+r21<<#3) = r31.h +# CHECK: if (p3) memh(r17+r21<<#3)=r31.h 0xff 0xf5 0x51 0x35 -# CHECK: if (!p3) memh(r17+r21<<#3) = r31 +# CHECK: if (!p3) memh(r17+r21<<#3)=r31 0xff 0xf5 0x71 0x35 -# CHECK: if (!p3) memh(r17+r21<<#3) = r31.h +# CHECK: if (!p3) memh(r17+r21<<#3)=r31.h 0x03 0x40 0x45 0x85 0xff 0xf5 0x51 0x36 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memh(r17+r21<<#3) = r31 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memh(r17+r21<<#3)=r31 0x03 0x40 0x45 0x85 0xff 0xf5 0x71 0x36 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memh(r17+r21<<#3) = r31.h +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memh(r17+r21<<#3)=r31.h 0x03 0x40 0x45 0x85 0xff 0xf5 0x51 0x37 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memh(r17+r21<<#3) = r31 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memh(r17+r21<<#3)=r31 0x03 0x40 0x45 0x85 0xff 0xf5 0x71 0x37 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memh(r17+r21<<#3) = r31.h +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memh(r17+r21<<#3)=r31.h 0xf5 0xcf 0x31 0x38 # CHECK: if (p3) memh(r17+#62)=#21 0xf5 0xcf 0xb1 0x38 # CHECK: if (!p3) memh(r17+#62)=#21 0x03 0x40 0x45 0x85 0xf5 0xcf 0x31 0x39 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (p3.new) memh(r17+#62)=#21 0x03 0x40 0x45 0x85 0xf5 0xcf 0xb1 0x39 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (!p3.new) memh(r17+#62)=#21 0xfb 0xd5 0x51 0x40 -# CHECK: if (p3) memh(r17+#62) = r21 +# CHECK: if (p3) memh(r17+#62)=r21 0xfb 0xd5 0x71 0x40 -# CHECK: if (p3) memh(r17+#62) = r21.h +# CHECK: if (p3) memh(r17+#62)=r21.h 0xfb 0xd5 0x51 0x44 -# CHECK: if (!p3) memh(r17+#62) = r21 +# CHECK: if (!p3) memh(r17+#62)=r21 0xfb 0xd5 0x71 0x44 -# CHECK: if (!p3) memh(r17+#62) = r21.h +# CHECK: if (!p3) memh(r17+#62)=r21.h 0x03 0x40 0x45 0x85 0xfb 0xd5 0x51 0x42 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memh(r17+#62) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memh(r17+#62)=r21 0x03 0x40 0x45 0x85 0xfb 0xd5 0x71 0x42 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memh(r17+#62) = r21.h +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memh(r17+#62)=r21.h 0x03 0x40 0x45 0x85 0xfb 0xd5 0x51 0x46 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memh(r17+#62) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memh(r17+#62)=r21 0x03 0x40 0x45 0x85 0xfb 0xd5 0x71 0x46 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memh(r17+#62) = r21.h +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memh(r17+#62)=r21.h 0x2b 0xf5 0x51 0xab -# CHECK: if (p3) memh(r17++#10) = r21 +# CHECK: if (p3) memh(r17++#10)=r21 0x2f 0xf5 0x51 0xab -# CHECK: if (!p3) memh(r17++#10) = r21 +# CHECK: if (!p3) memh(r17++#10)=r21 0x03 0x40 0x45 0x85 0xab 0xf5 0x51 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memh(r17++#10) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memh(r17++#10)=r21 0x03 0x40 0x45 0x85 0xaf 0xf5 0x51 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memh(r17++#10) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memh(r17++#10)=r21 0x2b 0xf5 0x71 0xab -# CHECK: if (p3) memh(r17++#10) = r21.h +# CHECK: if (p3) memh(r17++#10)=r21.h 0x2f 0xf5 0x71 0xab -# CHECK: if (!p3) memh(r17++#10) = r21.h +# CHECK: if (!p3) memh(r17++#10)=r21.h 0x03 0x40 0x45 0x85 0xab 0xf5 0x71 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memh(r17++#10) = r21.h +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memh(r17++#10)=r21.h 0x03 0x40 0x45 0x85 0xaf 0xf5 0x71 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memh(r17++#10) = r21.h +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memh(r17++#10)=r21.h 0x00 0x40 0x00 0x00 0xd3 0xd5 0x42 
0xaf -# CHECK: if (p3) memh(##42) = r21 +# CHECK: if (p3) memh(##42)=r21 0x00 0x40 0x00 0x00 0xd3 0xd5 0x62 0xaf -# CHECK: if (p3) memh(##42) = r21.h +# CHECK: if (p3) memh(##42)=r21.h 0x00 0x40 0x00 0x00 0xd7 0xd5 0x42 0xaf -# CHECK: if (!p3) memh(##42) = r21 +# CHECK: if (!p3) memh(##42)=r21 0x00 0x40 0x00 0x00 0xd7 0xd5 0x62 0xaf -# CHECK: if (!p3) memh(##42) = r21.h +# CHECK: if (!p3) memh(##42)=r21.h 0x03 0x40 0x45 0x85 0x00 0x40 0x00 0x00 0xd3 0xf5 0x42 0xaf -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memh(##42) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memh(##42)=r21 0x03 0x40 0x45 0x85 0x00 0x40 0x00 0x00 0xd3 0xf5 0x62 0xaf -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memh(##42) = r21.h +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memh(##42)=r21.h 0x03 0x40 0x45 0x85 0x00 0x40 0x00 0x00 0xd7 0xf5 0x42 0xaf -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memh(##42) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memh(##42)=r21 0x03 0x40 0x45 0x85 0x00 0x40 0x00 0x00 0xd7 0xf5 0x62 0xaf -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memh(##42) = r21.h +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memh(##42)=r21.h # Store word 0x9f 0xf5 0x91 0x3b -# CHECK: memw(r17 + r21<<#3) = r31 +# CHECK: memw(r17+r21<<#3)=r31 0x9f 0xca 0x51 0x3c -# CHECK: memw(r17{{ *}}+{{ *}}#84)=#31 +# CHECK: memw(r17+#84)=#31 0x15 0xdf 0x80 0x48 -# CHECK: memw(#84) = r31 +# CHECK: memw(#84)=r31 0x01 0x40 0x00 0x00 0x14 0xd5 0x80 0x48 -# CHECK: memw(##84) = r21 +# CHECK: memw(##84)=r21 0x9f 0xca 0x51 0x3c # CHECK: memw(r17+#84)=#31 0x15 0xdf 0x91 0xa1 -# CHECK: memw(r17+#84) = r31 +# CHECK: memw(r17+#84)=r31 0x02 0xf5 0x91 0xa9 -# CHECK: memw(r17 ++ I:circ(m1)) = r21 +# CHECK: memw(r17++I:circ(m1))=r21 0x28 0xf5 0x91 0xa9 -# CHECK: memw(r17 ++ #20:circ(m1)) = r21 +# CHECK: memw(r17++#20:circ(m1))=r21 0x28 0xd5 0x91 0xab -# CHECK: memw(r17++#20) = r21 +# CHECK: memw(r17++#20)=r21 0x00 0x40 0x00 0x00 0xd5 0xff 0x91 0xad -# CHECK: memw(r17<<#3 + ##21) = r31 +# CHECK: memw(r17<<#3+##21)=r31 0x00 0xf5 0x91 0xad -# CHECK: memw(r17++m1) = r21 +# CHECK: memw(r17++m1)=r21 0x00 0xf5 0x91 0xaf -# CHECK: memw(r17 ++ m1:brev) = r21 +# CHECK: memw(r17++m1:brev)=r21 # Store word conditionally 0xff 0xf5 0x91 0x34 -# CHECK: if (p3) memw(r17+r21<<#3) = r31 +# CHECK: if (p3) memw(r17+r21<<#3)=r31 0xff 0xf5 0x91 0x35 -# CHECK: if (!p3) memw(r17+r21<<#3) = r31 +# CHECK: if (!p3) memw(r17+r21<<#3)=r31 0x03 0x40 0x45 0x85 0xff 0xf5 0x91 0x36 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memw(r17+r21<<#3) = r31 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memw(r17+r21<<#3)=r31 0x03 0x40 0x45 0x85 0xff 0xf5 0x91 0x37 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memw(r17+r21<<#3) = r31 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memw(r17+r21<<#3)=r31 0xff 0xca 0x51 0x38 # CHECK: if (p3) memw(r17+#84)=#31 0xff 0xca 0xd1 0x38 # CHECK: if (!p3) memw(r17+#84)=#31 0x03 0x40 0x45 0x85 0xff 0xca 0x51 0x39 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (p3.new) memw(r17+#84)=#31 0x03 0x40 0x45 0x85 0xff 0xca 0xd1 0x39 -# CHECK: p3 = r5 +# CHECK: p3=r5 # CHECK-NEXT: if (!p3.new) memw(r17+#84)=#31 0xab 0xdf 0x91 0x40 -# CHECK: if (p3) memw(r17+#84) = r31 +# CHECK: if (p3) memw(r17+#84)=r31 0xab 0xdf 0x91 0x44 -# CHECK: if (!p3) memw(r17+#84) = r31 +# CHECK: if (!p3) memw(r17+#84)=r31 0x03 0x40 0x45 0x85 0xab 0xdf 0x91 0x42 -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memw(r17+#84) = r31 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memw(r17+#84)=r31 0x03 0x40 0x45 0x85 0xab 0xdf 0x91 0x46 -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) 
memw(r17+#84) = r31 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memw(r17+#84)=r31 0x2b 0xf5 0x91 0xab -# CHECK: if (p3) memw(r17++#20) = r21 +# CHECK: if (p3) memw(r17++#20)=r21 0x2f 0xf5 0x91 0xab -# CHECK: if (!p3) memw(r17++#20) = r21 +# CHECK: if (!p3) memw(r17++#20)=r21 0x03 0x40 0x45 0x85 0xaf 0xf5 0x91 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memw(r17++#20) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memw(r17++#20)=r21 0x03 0x40 0x45 0x85 0xab 0xf5 0x91 0xab -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memw(r17++#20) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memw(r17++#20)=r21 0x01 0x40 0x00 0x00 0xa3 0xd5 0x81 0xaf -# CHECK: if (p3) memw(##84) = r21 +# CHECK: if (p3) memw(##84)=r21 0x01 0x40 0x00 0x00 0xa7 0xd5 0x81 0xaf -# CHECK: if (!p3) memw(##84) = r21 +# CHECK: if (!p3) memw(##84)=r21 0x03 0x40 0x45 0x85 0x01 0x40 0x00 0x00 0xa3 0xf5 0x81 0xaf -# CHECK: p3 = r5 -# CHECK-NEXT: if (p3.new) memw(##84) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (p3.new) memw(##84)=r21 0x03 0x40 0x45 0x85 0x01 0x40 0x00 0x00 0xa7 0xf5 0x81 0xaf -# CHECK: p3 = r5 -# CHECK-NEXT: if (!p3.new) memw(##84) = r21 +# CHECK: p3=r5 +# CHECK-NEXT: if (!p3.new) memw(##84)=r21 # Allocate stack frame 0x1f 0xc0 0x9d 0xa0 Index: test/MC/Disassembler/Hexagon/system_user.txt =================================================================== --- test/MC/Disassembler/Hexagon/system_user.txt +++ test/MC/Disassembler/Hexagon/system_user.txt @@ -3,15 +3,15 @@ # Load locked 0x11 0xc0 0x15 0x92 -# CHECK: r17 = memw_locked(r21) +# CHECK: r17=memw_locked(r21) 0x10 0xd0 0x15 0x92 -# CHECK: r17:16 = memd_locked(r21) +# CHECK: r17:16=memd_locked(r21) # Store conditional 0x03 0xd5 0xb1 0xa0 -# CHECK: memw_locked(r17, p3) = r21 +# CHECK: memw_locked(r17,p3)=r21 0x03 0xd4 0xf1 0xa0 -# CHECK: memd_locked(r17, p3) = r21:20 +# CHECK: memd_locked(r17,p3)=r21:20 # Memory barrier 0x00 0xc0 0x00 0xa8 @@ -19,7 +19,7 @@ # Data cache prefetch 0x15 0xc0 0x11 0x94 -# CHECK: dcfetch(r17 + #168) +# CHECK: dcfetch(r17+#168) # Send value to ETM trace 0x00 0xc0 0x51 0x62 Index: test/MC/Disassembler/Hexagon/xtype_alu.txt =================================================================== --- test/MC/Disassembler/Hexagon/xtype_alu.txt +++ test/MC/Disassembler/Hexagon/xtype_alu.txt @@ -3,393 +3,393 @@ # Absolute value doubleword 0xd0 0xc0 0x94 0x80 -# CHECK: r17:16 = abs(r21:20) +# CHECK: r17:16=abs(r21:20) 0x91 0xc0 0x95 0x8c -# CHECK: r17 = abs(r21) +# CHECK: r17=abs(r21) 0xb1 0xc0 0x95 0x8c -# CHECK: r17 = abs(r21):sat +# CHECK: r17=abs(r21):sat # Add and accumulate 0xff 0xd1 0x35 0xdb -# CHECK: r17 = add(r21, add(r31, #23)) +# CHECK: r17=add(r21,add(r31,#23)) 0xff 0xd1 0xb5 0xdb -# CHECK: r17 = add(r21, sub(#23, r31)) +# CHECK: r17=add(r21,sub(#23,r31)) 0xf1 0xc2 0x15 0xe2 -# CHECK: r17 += add(r21, #23) +# CHECK: r17+=add(r21,#23) 0xf1 0xc2 0x95 0xe2 -# CHECK: r17 -= add(r21, #23) +# CHECK: r17-=add(r21,#23) 0x31 0xdf 0x15 0xef -# CHECK: r17 += add(r21, r31) +# CHECK: r17+=add(r21,r31) 0x31 0xdf 0x95 0xef -# CHECK: r17 -= add(r21, r31) +# CHECK: r17-=add(r21,r31) # Add doublewords 0xf0 0xde 0x14 0xd3 -# CHECK: r17:16 = add(r21:20, r31:30) +# CHECK: r17:16=add(r21:20,r31:30) 0xb0 0xde 0x74 0xd3 -# CHECK: r17:16 = add(r21:20, r31:30):sat +# CHECK: r17:16=add(r21:20,r31:30):sat 0xd0 0xde 0x74 0xd3 -# CHECK: r17:16 = add(r21:20, r31:30):raw:lo +# CHECK: r17:16=add(r21:20,r31:30):raw:lo 0xf0 0xde 0x74 0xd3 -# CHECK: r17:16 = add(r21:20, r31:30):raw:hi +# CHECK: r17:16=add(r21:20,r31:30):raw:hi # Add halfword 0x11 0xd5 0x1f 0xd5 -# 
CHECK: r17 = add(r21.l, r31.l) +# CHECK: r17=add(r21.l,r31.l) 0x51 0xd5 0x1f 0xd5 -# CHECK: r17 = add(r21.l, r31.h) +# CHECK: r17=add(r21.l,r31.h) 0x91 0xd5 0x1f 0xd5 -# CHECK: r17 = add(r21.l, r31.l):sat +# CHECK: r17=add(r21.l,r31.l):sat 0xd1 0xd5 0x1f 0xd5 -# CHECK: r17 = add(r21.l, r31.h):sat +# CHECK: r17=add(r21.l,r31.h):sat 0x11 0xd5 0x5f 0xd5 -# CHECK: r17 = add(r21.l, r31.l):<<16 +# CHECK: r17=add(r21.l,r31.l):<<16 0x31 0xd5 0x5f 0xd5 -# CHECK: r17 = add(r21.l, r31.h):<<16 +# CHECK: r17=add(r21.l,r31.h):<<16 0x51 0xd5 0x5f 0xd5 -# CHECK: r17 = add(r21.h, r31.l):<<16 +# CHECK: r17=add(r21.h,r31.l):<<16 0x71 0xd5 0x5f 0xd5 -# CHECK: r17 = add(r21.h, r31.h):<<16 +# CHECK: r17=add(r21.h,r31.h):<<16 0x91 0xd5 0x5f 0xd5 -# CHECK: r17 = add(r21.l, r31.l):sat:<<16 +# CHECK: r17=add(r21.l,r31.l):sat:<<16 0xb1 0xd5 0x5f 0xd5 -# CHECK: r17 = add(r21.l, r31.h):sat:<<16 +# CHECK: r17=add(r21.l,r31.h):sat:<<16 0xd1 0xd5 0x5f 0xd5 -# CHECK: r17 = add(r21.h, r31.l):sat:<<16 +# CHECK: r17=add(r21.h,r31.l):sat:<<16 0xf1 0xd5 0x5f 0xd5 -# CHECK: r17 = add(r21.h, r31.h):sat:<<16 +# CHECK: r17=add(r21.h,r31.h):sat:<<16 # Add or subtract doublewords with carry 0x70 0xde 0xd4 0xc2 -# CHECK: r17:16 = add(r21:20, r31:30, p3):carry +# CHECK: r17:16=add(r21:20,r31:30,p3):carry 0x70 0xde 0xf4 0xc2 -# CHECK: r17:16 = sub(r21:20, r31:30, p3):carry +# CHECK: r17:16=sub(r21:20,r31:30,p3):carry # Logical doublewords 0x90 0xc0 0x94 0x80 -# CHECK: r17:16 = not(r21:20) +# CHECK: r17:16=not(r21:20) 0x10 0xde 0xf4 0xd3 -# CHECK: r17:16 = and(r21:20, r31:30) +# CHECK: r17:16=and(r21:20,r31:30) 0x30 0xd4 0xfe 0xd3 -# CHECK: r17:16 = and(r21:20, ~r31:30) +# CHECK: r17:16=and(r21:20,~r31:30) 0x50 0xde 0xf4 0xd3 -# CHECK: r17:16 = or(r21:20, r31:30) +# CHECK: r17:16=or(r21:20,r31:30) 0x70 0xd4 0xfe 0xd3 -# CHECK: r17:16 = or(r21:20, ~r31:30) +# CHECK: r17:16=or(r21:20,~r31:30) 0x90 0xde 0xf4 0xd3 -# CHECK: r17:16 = xor(r21:20, r31:30) +# CHECK: r17:16=xor(r21:20,r31:30) # Logical-logical doublewords 0x10 0xde 0x94 0xca -# CHECK: r17:16 ^= xor(r21:20, r31:30) +# CHECK: r17:16^=xor(r21:20,r31:30) # Logical-logical words 0xf1 0xc3 0x15 0xda -# CHECK: r17 |= and(r21, #31) +# CHECK: r17|=and(r21,#31) 0xf5 0xc3 0x51 0xda -# CHECK: r17 = or(r21, and(r17, #31)) +# CHECK: r17=or(r21,and(r17,#31)) 0xf1 0xc3 0x95 0xda -# CHECK: r17 |= or(r21, #31) +# CHECK: r17|=or(r21,#31) 0x11 0xdf 0x35 0xef -# CHECK: r17 |= and(r21, ~r31) +# CHECK: r17|=and(r21,~r31) 0x31 0xdf 0x35 0xef -# CHECK: r17 &= and(r21, ~r31) +# CHECK: r17&=and(r21,~r31) 0x51 0xdf 0x35 0xef -# CHECK: r17 ^= and(r21, ~r31) +# CHECK: r17^=and(r21,~r31) 0x11 0xdf 0x55 0xef -# CHECK: r17 &= and(r21, r31) +# CHECK: r17&=and(r21,r31) 0x31 0xdf 0x55 0xef -# CHECK: r17 &= or(r21, r31) +# CHECK: r17&=or(r21,r31) 0x51 0xdf 0x55 0xef -# CHECK: r17 &= xor(r21, r31) +# CHECK: r17&=xor(r21,r31) 0x71 0xdf 0x55 0xef -# CHECK: r17 |= and(r21, r31) +# CHECK: r17|=and(r21,r31) 0x71 0xdf 0x95 0xef -# CHECK: r17 ^= xor(r21, r31) +# CHECK: r17^=xor(r21,r31) 0x11 0xdf 0xd5 0xef -# CHECK: r17 |= or(r21, r31) +# CHECK: r17|=or(r21,r31) 0x31 0xdf 0xd5 0xef -# CHECK: r17 |= xor(r21, r31) +# CHECK: r17|=xor(r21,r31) 0x51 0xdf 0xd5 0xef -# CHECK: r17 ^= and(r21, r31) +# CHECK: r17^=and(r21,r31) 0x71 0xdf 0xd5 0xef -# CHECK: r17 ^= or(r21, r31) +# CHECK: r17^=or(r21,r31) # Maximum words 0x11 0xdf 0xd5 0xd5 -# CHECK: r17 = max(r21, r31) +# CHECK: r17=max(r21,r31) 0x91 0xdf 0xd5 0xd5 -# CHECK: r17 = maxu(r21, r31) +# CHECK: r17=maxu(r21,r31) # Maximum doublewords 0x90 0xde 0xd4 0xd3 -# CHECK: r17:16 = 
max(r21:20, r31:30) +# CHECK: r17:16=max(r21:20,r31:30) 0xb0 0xde 0xd4 0xd3 -# CHECK: r17:16 = maxu(r21:20, r31:30) +# CHECK: r17:16=maxu(r21:20,r31:30) # Minimum words 0x11 0xd5 0xbf 0xd5 -# CHECK: r17 = min(r21, r31) +# CHECK: r17=min(r21,r31) 0x91 0xd5 0xbf 0xd5 -# CHECK: r17 = minu(r21, r31) +# CHECK: r17=minu(r21,r31) # Minimum doublewords 0xd0 0xd4 0xbe 0xd3 -# CHECK: r17:16 = min(r21:20, r31:30) +# CHECK: r17:16=min(r21:20,r31:30) 0xf0 0xd4 0xbe 0xd3 -# CHECK: r17:16 = minu(r21:20, r31:30) +# CHECK: r17:16=minu(r21:20,r31:30) # Module wrap 0xf1 0xdf 0xf5 0xd3 -# CHECK: r17 = modwrap(r21, r31) +# CHECK: r17=modwrap(r21,r31) # Negate 0xb0 0xc0 0x94 0x80 -# CHECK: r17:16 = neg(r21:20) +# CHECK: r17:16=neg(r21:20) 0xd1 0xc0 0x95 0x8c -# CHECK: r17 = neg(r21):sat +# CHECK: r17=neg(r21):sat # Round 0x31 0xc0 0xd4 0x88 -# CHECK: r17 = round(r21:20):sat +# CHECK: r17=round(r21:20):sat 0x11 0xdf 0xf5 0x8c -# CHECK: r17 = cround(r21, #31) +# CHECK: r17=cround(r21,#31) 0x91 0xdf 0xf5 0x8c -# CHECK: r17 = round(r21, #31) +# CHECK: r17=round(r21,#31) 0xd1 0xdf 0xf5 0x8c -# CHECK: r17 = round(r21, #31):sat +# CHECK: r17=round(r21,#31):sat 0x11 0xdf 0xd5 0xc6 -# CHECK: r17 = cround(r21, r31) +# CHECK: r17=cround(r21,r31) 0x91 0xdf 0xd5 0xc6 -# CHECK: r17 = round(r21, r31) +# CHECK: r17=round(r21,r31) 0xd1 0xdf 0xd5 0xc6 -# CHECK: r17 = round(r21, r31):sat +# CHECK: r17=round(r21,r31):sat # Subtract doublewords 0xf0 0xd4 0x3e 0xd3 -# CHECK: r17:16 = sub(r21:20, r31:30) +# CHECK: r17:16=sub(r21:20,r31:30) # Subtract and accumulate words 0x71 0xd5 0x1f 0xef -# CHECK: r17 += sub(r21, r31) +# CHECK: r17+=sub(r21,r31) # Subtract halfword 0x11 0xd5 0x3f 0xd5 -# CHECK: r17 = sub(r21.l, r31.l) +# CHECK: r17=sub(r21.l,r31.l) 0x51 0xd5 0x3f 0xd5 -# CHECK: r17 = sub(r21.l, r31.h) +# CHECK: r17=sub(r21.l,r31.h) 0x91 0xd5 0x3f 0xd5 -# CHECK: r17 = sub(r21.l, r31.l):sat +# CHECK: r17=sub(r21.l,r31.l):sat 0xd1 0xd5 0x3f 0xd5 -# CHECK: r17 = sub(r21.l, r31.h):sat +# CHECK: r17=sub(r21.l,r31.h):sat 0x11 0xd5 0x7f 0xd5 -# CHECK: r17 = sub(r21.l, r31.l):<<16 +# CHECK: r17=sub(r21.l,r31.l):<<16 0x31 0xd5 0x7f 0xd5 -# CHECK: r17 = sub(r21.l, r31.h):<<16 +# CHECK: r17=sub(r21.l,r31.h):<<16 0x51 0xd5 0x7f 0xd5 -# CHECK: r17 = sub(r21.h, r31.l):<<16 +# CHECK: r17=sub(r21.h,r31.l):<<16 0x71 0xd5 0x7f 0xd5 -# CHECK: r17 = sub(r21.h, r31.h):<<16 +# CHECK: r17=sub(r21.h,r31.h):<<16 0x91 0xd5 0x7f 0xd5 -# CHECK: r17 = sub(r21.l, r31.l):sat:<<16 +# CHECK: r17=sub(r21.l,r31.l):sat:<<16 0xb1 0xd5 0x7f 0xd5 -# CHECK: r17 = sub(r21.l, r31.h):sat:<<16 +# CHECK: r17=sub(r21.l,r31.h):sat:<<16 0xd1 0xd5 0x7f 0xd5 -# CHECK: r17 = sub(r21.h, r31.l):sat:<<16 +# CHECK: r17=sub(r21.h,r31.l):sat:<<16 0xf1 0xd5 0x7f 0xd5 -# CHECK: r17 = sub(r21.h, r31.h):sat:<<16 +# CHECK: r17=sub(r21.h,r31.h):sat:<<16 # Sign extend word to doubleword 0x10 0xc0 0x55 0x84 -# CHECK: r17:16 = sxtw(r21) +# CHECK: r17:16=sxtw(r21) # Vector absolute value halfwords 0x90 0xc0 0x54 0x80 -# CHECK: r17:16 = vabsh(r21:20) +# CHECK: r17:16=vabsh(r21:20) 0xb0 0xc0 0x54 0x80 -# CHECK: r17:16 = vabsh(r21:20):sat +# CHECK: r17:16=vabsh(r21:20):sat # Vector absolute value words 0xd0 0xc0 0x54 0x80 -# CHECK: r17:16 = vabsw(r21:20) +# CHECK: r17:16=vabsw(r21:20) 0xf0 0xc0 0x54 0x80 -# CHECK: r17:16 = vabsw(r21:20):sat +# CHECK: r17:16=vabsw(r21:20):sat # Vector absolute difference halfwords 0x10 0xd4 0x7e 0xe8 -# CHECK: r17:16 = vabsdiffh(r21:20, r31:30) +# CHECK: r17:16=vabsdiffh(r21:20,r31:30) # Vector absolute difference words 0x10 0xd4 0x3e 0xe8 -# CHECK: r17:16 = 
vabsdiffw(r21:20, r31:30) +# CHECK: r17:16=vabsdiffw(r21:20,r31:30) # Vector add halfwords 0x50 0xde 0x14 0xd3 -# CHECK: r17:16 = vaddh(r21:20, r31:30) +# CHECK: r17:16=vaddh(r21:20,r31:30) 0x70 0xde 0x14 0xd3 -# CHECK: r17:16 = vaddh(r21:20, r31:30):sat +# CHECK: r17:16=vaddh(r21:20,r31:30):sat 0x90 0xde 0x14 0xd3 -# CHECK: r17:16 = vadduh(r21:20, r31:30):sat +# CHECK: r17:16=vadduh(r21:20,r31:30):sat # Vector add halfwords with saturate and pack to unsigned bytes 0x31 0xde 0x54 0xc1 -# CHECK: r17 = vaddhub(r21:20, r31:30):sat +# CHECK: r17=vaddhub(r21:20,r31:30):sat # Vector reduce add unsigned bytes 0x30 0xde 0x54 0xe8 -# CHECK: r17:16 = vraddub(r21:20, r31:30) +# CHECK: r17:16=vraddub(r21:20,r31:30) 0x30 0xde 0x54 0xea -# CHECK: r17:16 += vraddub(r21:20, r31:30) +# CHECK: r17:16+=vraddub(r21:20,r31:30) # Vector reduce add halfwords 0x31 0xde 0x14 0xe9 -# CHECK: r17 = vradduh(r21:20, r31:30) +# CHECK: r17=vradduh(r21:20,r31:30) 0xf1 0xde 0x34 0xe9 -# CHECK: r17 = vraddh(r21:20, r31:30) +# CHECK: r17=vraddh(r21:20,r31:30) # Vector add bytes 0x10 0xde 0x14 0xd3 -# CHECK: r17:16 = vaddub(r21:20, r31:30) +# CHECK: r17:16=vaddub(r21:20,r31:30) 0x30 0xde 0x14 0xd3 -# CHECK: r17:16 = vaddub(r21:20, r31:30):sat +# CHECK: r17:16=vaddub(r21:20,r31:30):sat # Vector add words 0xb0 0xde 0x14 0xd3 -# CHECK: r17:16 = vaddw(r21:20, r31:30) +# CHECK: r17:16=vaddw(r21:20,r31:30) 0xd0 0xde 0x14 0xd3 -# CHECK: r17:16 = vaddw(r21:20, r31:30):sat +# CHECK: r17:16=vaddw(r21:20,r31:30):sat # Vector average halfwords 0x50 0xde 0x54 0xd3 -# CHECK: r17:16 = vavgh(r21:20, r31:30) +# CHECK: r17:16=vavgh(r21:20,r31:30) 0x70 0xde 0x54 0xd3 -# CHECK: r17:16 = vavgh(r21:20, r31:30):rnd +# CHECK: r17:16=vavgh(r21:20,r31:30):rnd 0x90 0xde 0x54 0xd3 -# CHECK: r17:16 = vavgh(r21:20, r31:30):crnd +# CHECK: r17:16=vavgh(r21:20,r31:30):crnd 0xb0 0xde 0x54 0xd3 -# CHECK: r17:16 = vavguh(r21:20, r31:30) +# CHECK: r17:16=vavguh(r21:20,r31:30) 0xd0 0xde 0x54 0xd3 -# CHECK: r17:16 = vavguh(r21:20, r31:30):rnd +# CHECK: r17:16=vavguh(r21:20,r31:30):rnd 0x10 0xd4 0x9e 0xd3 -# CHECK: r17:16 = vnavgh(r21:20, r31:30) +# CHECK: r17:16=vnavgh(r21:20,r31:30) 0x30 0xd4 0x9e 0xd3 -# CHECK: r17:16 = vnavgh(r21:20, r31:30):rnd:sat +# CHECK: r17:16=vnavgh(r21:20,r31:30):rnd:sat 0x50 0xd4 0x9e 0xd3 -# CHECK: r17:16 = vnavgh(r21:20, r31:30):crnd:sat +# CHECK: r17:16=vnavgh(r21:20,r31:30):crnd:sat # Vector average unsigned bytes 0x10 0xde 0x54 0xd3 -# CHECK: r17:16 = vavgub(r21:20, r31:30) +# CHECK: r17:16=vavgub(r21:20,r31:30) 0x30 0xde 0x54 0xd3 -# CHECK: r17:16 = vavgub(r21:20, r31:30):rnd +# CHECK: r17:16=vavgub(r21:20,r31:30):rnd # Vector average words 0x10 0xde 0x74 0xd3 -# CHECK: r17:16 = vavgw(r21:20, r31:30) +# CHECK: r17:16=vavgw(r21:20,r31:30) 0x30 0xde 0x74 0xd3 -# CHECK: r17:16 = vavgw(r21:20, r31:30):rnd +# CHECK: r17:16=vavgw(r21:20,r31:30):rnd 0x50 0xde 0x74 0xd3 -# CHECK: r17:16 = vavgw(r21:20, r31:30):crnd +# CHECK: r17:16=vavgw(r21:20,r31:30):crnd 0x70 0xde 0x74 0xd3 -# CHECK: r17:16 = vavguw(r21:20, r31:30) +# CHECK: r17:16=vavguw(r21:20,r31:30) 0x90 0xde 0x74 0xd3 -# CHECK: r17:16 = vavguw(r21:20, r31:30):rnd +# CHECK: r17:16=vavguw(r21:20,r31:30):rnd 0x70 0xd4 0x9e 0xd3 -# CHECK: r17:16 = vnavgw(r21:20, r31:30) +# CHECK: r17:16=vnavgw(r21:20,r31:30) 0x90 0xd4 0x9e 0xd3 -# CHECK: r17:16 = vnavgw(r21:20, r31:30):rnd:sat +# CHECK: r17:16=vnavgw(r21:20,r31:30):rnd:sat 0xd0 0xd4 0x9e 0xd3 -# CHECK: r17:16 = vnavgw(r21:20, r31:30):crnd:sat +# CHECK: r17:16=vnavgw(r21:20,r31:30):crnd:sat # Vector conditional negate 0x50 0xdf 0xd4 
0xc3 -# CHECK: r17:16 = vcnegh(r21:20, r31) +# CHECK: r17:16=vcnegh(r21:20,r31) 0xf0 0xff 0x34 0xcb -# CHECK: r17:16 += vrcnegh(r21:20, r31) +# CHECK: r17:16+=vrcnegh(r21:20,r31) # Vector maximum bytes 0x10 0xd4 0xde 0xd3 -# CHECK: r17:16 = vmaxub(r21:20, r31:30) +# CHECK: r17:16=vmaxub(r21:20,r31:30) 0xd0 0xd4 0xde 0xd3 -# CHECK: r17:16 = vmaxb(r21:20, r31:30) +# CHECK: r17:16=vmaxb(r21:20,r31:30) # Vector maximum halfwords 0x30 0xd4 0xde 0xd3 -# CHECK: r17:16 = vmaxh(r21:20, r31:30) +# CHECK: r17:16=vmaxh(r21:20,r31:30) 0x50 0xd4 0xde 0xd3 -# CHECK: r17:16 = vmaxuh(r21:20, r31:30) +# CHECK: r17:16=vmaxuh(r21:20,r31:30) # Vector reduce maximum halfwords 0x3f 0xd0 0x34 0xcb -# CHECK: r17:16 = vrmaxh(r21:20, r31) +# CHECK: r17:16=vrmaxh(r21:20,r31) 0x3f 0xf0 0x34 0xcb -# CHECK: r17:16 = vrmaxuh(r21:20, r31) +# CHECK: r17:16=vrmaxuh(r21:20,r31) # Vector reduce maximum words 0x5f 0xd0 0x34 0xcb -# CHECK: r17:16 = vrmaxw(r21:20, r31) +# CHECK: r17:16=vrmaxw(r21:20,r31) 0x5f 0xf0 0x34 0xcb -# CHECK: r17:16 = vrmaxuw(r21:20, r31) +# CHECK: r17:16=vrmaxuw(r21:20,r31) # Vector maximum words 0xb0 0xd4 0xbe 0xd3 -# CHECK: r17:16 = vmaxuw(r21:20, r31:30) +# CHECK: r17:16=vmaxuw(r21:20,r31:30) 0x70 0xd4 0xde 0xd3 -# CHECK: r17:16 = vmaxw(r21:20, r31:30) +# CHECK: r17:16=vmaxw(r21:20,r31:30) # Vector minimum bytes 0x10 0xd4 0xbe 0xd3 -# CHECK: r17:16 = vminub(r21:20, r31:30) +# CHECK: r17:16=vminub(r21:20,r31:30) 0xf0 0xd4 0xde 0xd3 -# CHECK: r17:16 = vminb(r21:20, r31:30) +# CHECK: r17:16=vminb(r21:20,r31:30) # Vector minimum halfwords 0x30 0xd4 0xbe 0xd3 -# CHECK: r17:16 = vminh(r21:20, r31:30) +# CHECK: r17:16=vminh(r21:20,r31:30) 0x50 0xd4 0xbe 0xd3 -# CHECK: r17:16 = vminuh(r21:20, r31:30) +# CHECK: r17:16=vminuh(r21:20,r31:30) # Vector reduce minimum halfwords 0xbf 0xd0 0x34 0xcb -# CHECK: r17:16 = vrminh(r21:20, r31) +# CHECK: r17:16=vrminh(r21:20,r31) 0xbf 0xf0 0x34 0xcb -# CHECK: r17:16 = vrminuh(r21:20, r31) +# CHECK: r17:16=vrminuh(r21:20,r31) # Vector reduce minimum words 0xdf 0xd0 0x34 0xcb -# CHECK: r17:16 = vrminw(r21:20, r31) +# CHECK: r17:16=vrminw(r21:20,r31) 0xdf 0xf0 0x34 0xcb -# CHECK: r17:16 = vrminuw(r21:20, r31) +# CHECK: r17:16=vrminuw(r21:20,r31) # Vector minimum words 0x70 0xd4 0xbe 0xd3 -# CHECK: r17:16 = vminw(r21:20, r31:30) +# CHECK: r17:16=vminw(r21:20,r31:30) 0x90 0xd4 0xbe 0xd3 -# CHECK: r17:16 = vminuw(r21:20, r31:30) +# CHECK: r17:16=vminuw(r21:20,r31:30) # Vector sum of absolute differences unsigned bytes 0x50 0xde 0x54 0xe8 -# CHECK: r17:16 = vrsadub(r21:20, r31:30) +# CHECK: r17:16=vrsadub(r21:20,r31:30) 0x50 0xde 0x54 0xea -# CHECK: r17:16 += vrsadub(r21:20, r31:30) +# CHECK: r17:16+=vrsadub(r21:20,r31:30) # Vector subtract halfwords 0x50 0xd4 0x3e 0xd3 -# CHECK: r17:16 = vsubh(r21:20, r31:30) +# CHECK: r17:16=vsubh(r21:20,r31:30) 0x70 0xd4 0x3e 0xd3 -# CHECK: r17:16 = vsubh(r21:20, r31:30):sat +# CHECK: r17:16=vsubh(r21:20,r31:30):sat 0x90 0xd4 0x3e 0xd3 -# CHECK: r17:16 = vsubuh(r21:20, r31:30):sat +# CHECK: r17:16=vsubuh(r21:20,r31:30):sat # Vector subtract bytes 0x10 0xd4 0x3e 0xd3 -# CHECK: r17:16 = vsubub(r21:20, r31:30) +# CHECK: r17:16=vsubub(r21:20,r31:30) 0x30 0xd4 0x3e 0xd3 -# CHECK: r17:16 = vsubub(r21:20, r31:30):sat +# CHECK: r17:16=vsubub(r21:20,r31:30):sat # Vector subtract words 0xb0 0xd4 0x3e 0xd3 -# CHECK: r17:16 = vsubw(r21:20, r31:30) +# CHECK: r17:16=vsubw(r21:20,r31:30) 0xd0 0xd4 0x3e 0xd3 -# CHECK: r17:16 = vsubw(r21:20, r31:30):sat +# CHECK: r17:16=vsubw(r21:20,r31:30):sat Index: test/MC/Disassembler/Hexagon/xtype_bit.txt 
=================================================================== --- test/MC/Disassembler/Hexagon/xtype_bit.txt +++ test/MC/Disassembler/Hexagon/xtype_bit.txt @@ -3,116 +3,116 @@ # Count leading 0x11 0xc0 0x54 0x88 -# CHECK: r17 = clb(r21:20) +# CHECK: r17=clb(r21:20) 0x51 0xc0 0x54 0x88 -# CHECK: r17 = cl0(r21:20) +# CHECK: r17=cl0(r21:20) 0x91 0xc0 0x54 0x88 -# CHECK: r17 = cl1(r21:20) +# CHECK: r17=cl1(r21:20) 0x11 0xc0 0x74 0x88 -# CHECK: r17 = normamt(r21:20) +# CHECK: r17=normamt(r21:20) 0x51 0xd7 0x74 0x88 -# CHECK: r17 = add(clb(r21:20), #23) +# CHECK: r17=add(clb(r21:20),#23) 0x11 0xd7 0x35 0x8c -# CHECK: r17 = add(clb(r21), #23) +# CHECK: r17=add(clb(r21),#23) 0x91 0xc0 0x15 0x8c -# CHECK: r17 = clb(r21) +# CHECK: r17=clb(r21) 0xb1 0xc0 0x15 0x8c -# CHECK: r17 = cl0(r21) +# CHECK: r17=cl0(r21) 0xd1 0xc0 0x15 0x8c -# CHECK: r17 = cl1(r21) +# CHECK: r17=cl1(r21) 0xf1 0xc0 0x15 0x8c -# CHECK: r17 = normamt(r21) +# CHECK: r17=normamt(r21) # Count population 0x71 0xc0 0x74 0x88 -# CHECK: r17 = popcount(r21:20) +# CHECK: r17=popcount(r21:20) # Count trailing 0x51 0xc0 0xf4 0x88 -# CHECK: r17 = ct0(r21:20) +# CHECK: r17=ct0(r21:20) 0x91 0xc0 0xf4 0x88 -# CHECK: r17 = ct1(r21:20) +# CHECK: r17=ct1(r21:20) 0x91 0xc0 0x55 0x8c -# CHECK: r17 = ct0(r21) +# CHECK: r17=ct0(r21) 0xb1 0xc0 0x55 0x8c -# CHECK: r17 = ct1(r21) +# CHECK: r17=ct1(r21) # Extract bitfield 0xf0 0xdf 0x54 0x81 -# CHECK: r17:16 = extractu(r21:20, #31, #23) +# CHECK: r17:16=extractu(r21:20,#31,#23) 0xf0 0xdf 0x54 0x8a -# CHECK: r17:16 = extract(r21:20, #31, #23) +# CHECK: r17:16=extract(r21:20,#31,#23) 0xf1 0xdf 0x55 0x8d -# CHECK: r17 = extractu(r21, #31, #23) +# CHECK: r17=extractu(r21,#31,#23) 0xf1 0xdf 0xd5 0x8d -# CHECK: r17 = extract(r21, #31, #23) +# CHECK: r17=extract(r21,#31,#23) 0x10 0xde 0x14 0xc1 -# CHECK: r17:16 = extractu(r21:20, r31:30) +# CHECK: r17:16=extractu(r21:20,r31:30) 0x90 0xde 0xd4 0xc1 -# CHECK: r17:16 = extract(r21:20, r31:30) +# CHECK: r17:16=extract(r21:20,r31:30) 0x11 0xde 0x15 0xc9 -# CHECK: r17 = extractu(r21, r31:30) +# CHECK: r17=extractu(r21,r31:30) 0x51 0xde 0x15 0xc9 -# CHECK: r17 = extract(r21, r31:30) +# CHECK: r17=extract(r21,r31:30) # Insert bitfield 0xf0 0xdf 0x54 0x83 -# CHECK: r17:16 = insert(r21:20, #31, #23) +# CHECK: r17:16=insert(r21:20,#31,#23) 0xf1 0xdf 0x55 0x8f -# CHECK: r17 = insert(r21, #31, #23) +# CHECK: r17=insert(r21,#31,#23) 0x11 0xde 0x15 0xc8 -# CHECK: r17 = insert(r21, r31:30) +# CHECK: r17=insert(r21,r31:30) 0x10 0xde 0x14 0xca -# CHECK: r17:16 = insert(r21:20, r31:30) +# CHECK: r17:16=insert(r21:20,r31:30) # Interleave/deinterleave 0x90 0xc0 0xd4 0x80 -# CHECK: r17:16 = deinterleave(r21:20) +# CHECK: r17:16=deinterleave(r21:20) 0xb0 0xc0 0xd4 0x80 -# CHECK: r17:16 = interleave(r21:20) +# CHECK: r17:16=interleave(r21:20) # Linear feedback-shift iteration 0xd0 0xde 0x94 0xc1 -# CHECK: r17:16 = lfs(r21:20, r31:30) +# CHECK: r17:16=lfs(r21:20,r31:30) # Masked parity 0x11 0xde 0x14 0xd0 -# CHECK: r17 = parity(r21:20, r31:30) +# CHECK: r17=parity(r21:20,r31:30) 0x11 0xdf 0xf5 0xd5 -# CHECK: r17 = parity(r21, r31) +# CHECK: r17=parity(r21,r31) # Bit reverse 0xd0 0xc0 0xd4 0x80 -# CHECK: r17:16 = brev(r21:20) +# CHECK: r17:16=brev(r21:20) 0xd1 0xc0 0x55 0x8c -# CHECK: r17 = brev(r21) +# CHECK: r17=brev(r21) # Set/clear/toggle bit 0x11 0xdf 0xd5 0x8c -# CHECK: r17 = setbit(r21, #31) +# CHECK: r17=setbit(r21,#31) 0x31 0xdf 0xd5 0x8c -# CHECK: r17 = clrbit(r21, #31) +# CHECK: r17=clrbit(r21,#31) 0x51 0xdf 0xd5 0x8c -# CHECK: r17 = togglebit(r21, #31) +# CHECK: 
r17=togglebit(r21,#31) 0x11 0xdf 0x95 0xc6 -# CHECK: r17 = setbit(r21, r31) +# CHECK: r17=setbit(r21,r31) 0x51 0xdf 0x95 0xc6 -# CHECK: r17 = clrbit(r21, r31) +# CHECK: r17=clrbit(r21,r31) 0x91 0xdf 0x95 0xc6 -# CHECK: r17 = togglebit(r21, r31) +# CHECK: r17=togglebit(r21,r31) # Split bitfield 0x90 0xdf 0xd5 0x88 -# CHECK: r17:16 = bitsplit(r21, #31) +# CHECK: r17:16=bitsplit(r21,#31) 0x10 0xdf 0x35 0xd4 -# CHECK: r17:16 = bitsplit(r21, r31) +# CHECK: r17:16=bitsplit(r21,r31) # Table index 0xf1 0xcd 0x15 0x87 -# CHECK: r17 = tableidxb(r21, #7, #13):raw +# CHECK: r17=tableidxb(r21,#7,#13):raw 0xf1 0xcd 0x55 0x87 -# CHECK: r17 = tableidxh(r21, #7, #13):raw +# CHECK: r17=tableidxh(r21,#7,#13):raw 0xf1 0xcd 0x95 0x87 -# CHECK: r17 = tableidxw(r21, #7, #13):raw +# CHECK: r17=tableidxw(r21,#7,#13):raw 0xf1 0xcd 0xd5 0x87 -# CHECK: r17 = tableidxd(r21, #7, #13):raw +# CHECK: r17=tableidxd(r21,#7,#13):raw Index: test/MC/Disassembler/Hexagon/xtype_complex.txt =================================================================== --- test/MC/Disassembler/Hexagon/xtype_complex.txt +++ test/MC/Disassembler/Hexagon/xtype_complex.txt @@ -3,126 +3,126 @@ # Complex add/sub halfwords 0x90 0xde 0x54 0xc1 -# CHECK: r17:16 = vxaddsubh(r21:20, r31:30):sat +# CHECK: r17:16=vxaddsubh(r21:20,r31:30):sat 0xd0 0xde 0x54 0xc1 -# CHECK: r17:16 = vxsubaddh(r21:20, r31:30):sat +# CHECK: r17:16=vxsubaddh(r21:20,r31:30):sat 0x10 0xde 0xd4 0xc1 -# CHECK: r17:16 = vxaddsubh(r21:20, r31:30):rnd:>>1:sat +# CHECK: r17:16=vxaddsubh(r21:20,r31:30):rnd:>>1:sat 0x50 0xde 0xd4 0xc1 -# CHECK: r17:16 = vxsubaddh(r21:20, r31:30):rnd:>>1:sat +# CHECK: r17:16=vxsubaddh(r21:20,r31:30):rnd:>>1:sat # Complex add/sub words 0x10 0xde 0x54 0xc1 -# CHECK: r17:16 = vxaddsubw(r21:20, r31:30):sat +# CHECK: r17:16=vxaddsubw(r21:20,r31:30):sat 0x50 0xde 0x54 0xc1 -# CHECK: r17:16 = vxsubaddw(r21:20, r31:30):sat +# CHECK: r17:16=vxsubaddw(r21:20,r31:30):sat # Complex multiply 0xd0 0xdf 0x15 0xe5 -# CHECK: r17:16 = cmpy(r21, r31):sat +# CHECK: r17:16=cmpy(r21,r31):sat 0xd0 0xdf 0x95 0xe5 -# CHECK: r17:16 = cmpy(r21, r31):<<1:sat +# CHECK: r17:16=cmpy(r21,r31):<<1:sat 0xd0 0xdf 0x55 0xe5 -# CHECK: r17:16 = cmpy(r21, r31*):sat +# CHECK: r17:16=cmpy(r21,r31*):sat 0xd0 0xdf 0xd5 0xe5 -# CHECK: r17:16 = cmpy(r21, r31*):<<1:sat +# CHECK: r17:16=cmpy(r21,r31*):<<1:sat 0xd0 0xdf 0x15 0xe7 -# CHECK: r17:16 += cmpy(r21, r31):sat +# CHECK: r17:16+=cmpy(r21,r31):sat 0xd0 0xdf 0x95 0xe7 -# CHECK: r17:16 += cmpy(r21, r31):<<1:sat +# CHECK: r17:16+=cmpy(r21,r31):<<1:sat 0xf0 0xdf 0x15 0xe7 -# CHECK: r17:16 -= cmpy(r21, r31):sat +# CHECK: r17:16-=cmpy(r21,r31):sat 0xf0 0xdf 0x95 0xe7 -# CHECK: r17:16 -= cmpy(r21, r31):<<1:sat +# CHECK: r17:16-=cmpy(r21,r31):<<1:sat 0xd0 0xdf 0x55 0xe7 -# CHECK: r17:16 += cmpy(r21, r31*):sat +# CHECK: r17:16+=cmpy(r21,r31*):sat 0xd0 0xdf 0xd5 0xe7 -# CHECK: r17:16 += cmpy(r21, r31*):<<1:sat +# CHECK: r17:16+=cmpy(r21,r31*):<<1:sat 0xf0 0xdf 0x55 0xe7 -# CHECK: r17:16 -= cmpy(r21, r31*):sat +# CHECK: r17:16-=cmpy(r21,r31*):sat 0xf0 0xdf 0xd5 0xe7 -# CHECK: r17:16 -= cmpy(r21, r31*):<<1:sat +# CHECK: r17:16-=cmpy(r21,r31*):<<1:sat # Complex multiply real or imaginary 0x30 0xdf 0x15 0xe5 -# CHECK: r17:16 = cmpyi(r21, r31) +# CHECK: r17:16=cmpyi(r21,r31) 0x50 0xdf 0x15 0xe5 -# CHECK: r17:16 = cmpyr(r21, r31) +# CHECK: r17:16=cmpyr(r21,r31) 0x30 0xdf 0x15 0xe7 -# CHECK: r17:16 += cmpyi(r21, r31) +# CHECK: r17:16+=cmpyi(r21,r31) 0x50 0xdf 0x15 0xe7 -# CHECK: r17:16 += cmpyr(r21, r31) +# CHECK: r17:16+=cmpyr(r21,r31) # Complex multiply with 
round and pack 0xd1 0xdf 0x35 0xed -# CHECK: r17 = cmpy(r21, r31):rnd:sat +# CHECK: r17=cmpy(r21,r31):rnd:sat 0xd1 0xdf 0xb5 0xed -# CHECK: r17 = cmpy(r21, r31):<<1:rnd:sat +# CHECK: r17=cmpy(r21,r31):<<1:rnd:sat 0xd1 0xdf 0x75 0xed -# CHECK: r17 = cmpy(r21, r31*):rnd:sat +# CHECK: r17=cmpy(r21,r31*):rnd:sat 0xd1 0xdf 0xf5 0xed -# CHECK: r17 = cmpy(r21, r31*):<<1:rnd:sat +# CHECK: r17=cmpy(r21,r31*):<<1:rnd:sat # Complex multiply 32x16 0x91 0xdf 0x14 0xc5 -# CHECK: r17 = cmpyiwh(r21:20, r31):<<1:rnd:sat +# CHECK: r17=cmpyiwh(r21:20,r31):<<1:rnd:sat 0xb1 0xdf 0x14 0xc5 -# CHECK: r17 = cmpyiwh(r21:20, r31*):<<1:rnd:sat +# CHECK: r17=cmpyiwh(r21:20,r31*):<<1:rnd:sat 0xd1 0xdf 0x14 0xc5 -# CHECK: r17 = cmpyrwh(r21:20, r31):<<1:rnd:sat +# CHECK: r17=cmpyrwh(r21:20,r31):<<1:rnd:sat 0xf1 0xdf 0x14 0xc5 -# CHECK: r17 = cmpyrwh(r21:20, r31*):<<1:rnd:sat +# CHECK: r17=cmpyrwh(r21:20,r31*):<<1:rnd:sat # Vector complex multiply real or imaginary 0xd0 0xde 0x34 0xe8 -# CHECK: r17:16 = vcmpyr(r21:20, r31:30):sat +# CHECK: r17:16=vcmpyr(r21:20,r31:30):sat 0xd0 0xde 0xb4 0xe8 -# CHECK: r17:16 = vcmpyr(r21:20, r31:30):<<1:sat +# CHECK: r17:16=vcmpyr(r21:20,r31:30):<<1:sat 0xd0 0xde 0x54 0xe8 -# CHECK: r17:16 = vcmpyi(r21:20, r31:30):sat +# CHECK: r17:16=vcmpyi(r21:20,r31:30):sat 0xd0 0xde 0xd4 0xe8 -# CHECK: r17:16 = vcmpyi(r21:20, r31:30):<<1:sat +# CHECK: r17:16=vcmpyi(r21:20,r31:30):<<1:sat 0x90 0xde 0x34 0xea -# CHECK: r17:16 += vcmpyr(r21:20, r31:30):sat +# CHECK: r17:16+=vcmpyr(r21:20,r31:30):sat 0x90 0xde 0x54 0xea -# CHECK: r17:16 += vcmpyi(r21:20, r31:30):sat +# CHECK: r17:16+=vcmpyi(r21:20,r31:30):sat # Vector complex conjugate 0xf0 0xc0 0x94 0x80 -# CHECK: r17:16 = vconj(r21:20):sat +# CHECK: r17:16=vconj(r21:20):sat # Vector complex rotate 0x10 0xdf 0xd4 0xc3 -# CHECK: r17:16 = vcrotate(r21:20, r31) +# CHECK: r17:16=vcrotate(r21:20,r31) # Vector reduce complex multiply real or imaginary 0x10 0xde 0x14 0xe8 -# CHECK: r17:16 = vrcmpyi(r21:20, r31:30) +# CHECK: r17:16=vrcmpyi(r21:20,r31:30) 0x30 0xde 0x14 0xe8 -# CHECK: r17:16 = vrcmpyr(r21:20, r31:30) +# CHECK: r17:16=vrcmpyr(r21:20,r31:30) 0x10 0xde 0x54 0xe8 -# CHECK: r17:16 = vrcmpyi(r21:20, r31:30*) +# CHECK: r17:16=vrcmpyi(r21:20,r31:30*) 0x30 0xde 0x74 0xe8 -# CHECK: r17:16 = vrcmpyr(r21:20, r31:30*) +# CHECK: r17:16=vrcmpyr(r21:20,r31:30*) # Vector reduce complex multiply by scalar 0x90 0xde 0xb4 0xe8 -# CHECK: r17:16 = vrcmpys(r21:20, r31:30):<<1:sat:raw:hi +# CHECK: r17:16=vrcmpys(r21:20,r31:30):<<1:sat:raw:hi 0x90 0xde 0xf4 0xe8 -# CHECK: r17:16 = vrcmpys(r21:20, r31:30):<<1:sat:raw:lo +# CHECK: r17:16=vrcmpys(r21:20,r31:30):<<1:sat:raw:lo 0x90 0xde 0xb4 0xea -# CHECK: r17:16 += vrcmpys(r21:20, r31:30):<<1:sat:raw:hi +# CHECK: r17:16+=vrcmpys(r21:20,r31:30):<<1:sat:raw:hi 0x90 0xde 0xf4 0xea -# CHECK: r17:16 += vrcmpys(r21:20, r31:30):<<1:sat:raw:lo +# CHECK: r17:16+=vrcmpys(r21:20,r31:30):<<1:sat:raw:lo # Vector reduce complex multiply by scalar with round and pack 0xd1 0xde 0xb4 0xe9 -# CHECK: r17 = vrcmpys(r21:20, r31:30):<<1:rnd:sat:raw:hi +# CHECK: r17=vrcmpys(r21:20,r31:30):<<1:rnd:sat:raw:hi 0xf1 0xde 0xb4 0xe9 -# CHECK: r17 = vrcmpys(r21:20, r31:30):<<1:rnd:sat:raw:lo +# CHECK: r17=vrcmpys(r21:20,r31:30):<<1:rnd:sat:raw:lo # Vector reduce complex rotate 0xf0 0xff 0xd4 0xc3 -# CHECK: r17:16 = vrcrotate(r21:20, r31, #3) +# CHECK: r17:16=vrcrotate(r21:20,r31,#3) 0x30 0xff 0xb4 0xcb -# CHECK: r17:16 += vrcrotate(r21:20, r31, #3) +# CHECK: r17:16+=vrcrotate(r21:20,r31,#3) Index: test/MC/Disassembler/Hexagon/xtype_fp.txt 
=================================================================== --- test/MC/Disassembler/Hexagon/xtype_fp.txt +++ test/MC/Disassembler/Hexagon/xtype_fp.txt @@ -3,144 +3,144 @@ # Floating point addition 0x11 0xdf 0x15 0xeb -# CHECK: r17 = sfadd(r21, r31) +# CHECK: r17=sfadd(r21,r31) # Classify floating-point value 0x03 0xd5 0xf1 0x85 -# CHECK: p3 = sfclass(r17, #21) +# CHECK: p3=sfclass(r17,#21) 0xb3 0xc2 0x90 0xdc -# CHECK: p3 = dfclass(r17:16, #21) +# CHECK: p3=dfclass(r17:16,#21) # Compare floating-point value 0x03 0xd5 0xf1 0xc7 -# CHECK: p3 = sfcmp.ge(r17, r21) +# CHECK: p3=sfcmp.ge(r17,r21) 0x23 0xd5 0xf1 0xc7 -# CHECK: p3 = sfcmp.uo(r17, r21) +# CHECK: p3=sfcmp.uo(r17,r21) 0x63 0xd5 0xf1 0xc7 -# CHECK: p3 = sfcmp.eq(r17, r21) +# CHECK: p3=sfcmp.eq(r17,r21) 0x83 0xd5 0xf1 0xc7 -# CHECK: p3 = sfcmp.gt(r17, r21) +# CHECK: p3=sfcmp.gt(r17,r21) 0x03 0xd4 0xf0 0xd2 -# CHECK: p3 = dfcmp.eq(r17:16, r21:20) +# CHECK: p3=dfcmp.eq(r17:16,r21:20) 0x23 0xd4 0xf0 0xd2 -# CHECK: p3 = dfcmp.gt(r17:16, r21:20) +# CHECK: p3=dfcmp.gt(r17:16,r21:20) 0x43 0xd4 0xf0 0xd2 -# CHECK: p3 = dfcmp.ge(r17:16, r21:20) +# CHECK: p3=dfcmp.ge(r17:16,r21:20) 0x63 0xd4 0xf0 0xd2 -# CHECK: p3 = dfcmp.uo(r17:16, r21:20) +# CHECK: p3=dfcmp.uo(r17:16,r21:20) # Convert floating-point value to other format 0x10 0xc0 0x95 0x84 -# CHECK: r17:16 = convert_sf2df(r21) +# CHECK: r17:16=convert_sf2df(r21) 0x31 0xc0 0x14 0x88 -# CHECK: r17 = convert_df2sf(r21:20) +# CHECK: r17=convert_df2sf(r21:20) # Convert integer to floating-point value 0x50 0xc0 0xf4 0x80 -# CHECK: r17:16 = convert_ud2df(r21:20) +# CHECK: r17:16=convert_ud2df(r21:20) 0x70 0xc0 0xf4 0x80 -# CHECK: r17:16 = convert_d2df(r21:20) +# CHECK: r17:16=convert_d2df(r21:20) 0x30 0xc0 0x95 0x84 -# CHECK: r17:16 = convert_uw2df(r21) +# CHECK: r17:16=convert_uw2df(r21) 0x50 0xc0 0x95 0x84 -# CHECK: r17:16 = convert_w2df(r21) +# CHECK: r17:16=convert_w2df(r21) 0x31 0xc0 0x34 0x88 -# CHECK: r17 = convert_ud2sf(r21:20) +# CHECK: r17=convert_ud2sf(r21:20) 0x31 0xc0 0x54 0x88 -# CHECK: r17 = convert_d2sf(r21:20) +# CHECK: r17=convert_d2sf(r21:20) 0x11 0xc0 0x35 0x8b -# CHECK: r17 = convert_uw2sf(r21) +# CHECK: r17=convert_uw2sf(r21) 0x11 0xc0 0x55 0x8b -# CHECK: r17 = convert_w2sf(r21) +# CHECK: r17=convert_w2sf(r21) # Convert floating-point value to integer 0x10 0xc0 0xf4 0x80 -# CHECK: r17:16 = convert_df2d(r21:20) +# CHECK: r17:16=convert_df2d(r21:20) 0x30 0xc0 0xf4 0x80 -# CHECK: r17:16 = convert_df2ud(r21:20) +# CHECK: r17:16=convert_df2ud(r21:20) 0xd0 0xc0 0xf4 0x80 -# CHECK: r17:16 = convert_df2d(r21:20):chop +# CHECK: r17:16=convert_df2d(r21:20):chop 0xf0 0xc0 0xf4 0x80 -# CHECK: r17:16 = convert_df2ud(r21:20):chop +# CHECK: r17:16=convert_df2ud(r21:20):chop 0x70 0xc0 0x95 0x84 -# CHECK: r17:16 = convert_sf2ud(r21) +# CHECK: r17:16=convert_sf2ud(r21) 0x90 0xc0 0x95 0x84 -# CHECK: r17:16 = convert_sf2d(r21) +# CHECK: r17:16=convert_sf2d(r21) 0xb0 0xc0 0x95 0x84 -# CHECK: r17:16 = convert_sf2ud(r21):chop +# CHECK: r17:16=convert_sf2ud(r21):chop 0xd0 0xc0 0x95 0x84 -# CHECK: r17:16 = convert_sf2d(r21):chop +# CHECK: r17:16=convert_sf2d(r21):chop 0x31 0xc0 0x74 0x88 -# CHECK: r17 = convert_df2uw(r21:20) +# CHECK: r17=convert_df2uw(r21:20) 0x31 0xc0 0x94 0x88 -# CHECK: r17 = convert_df2w(r21:20) +# CHECK: r17=convert_df2w(r21:20) 0x31 0xc0 0xb4 0x88 -# CHECK: r17 = convert_df2uw(r21:20):chop +# CHECK: r17=convert_df2uw(r21:20):chop 0x31 0xc0 0xf4 0x88 -# CHECK: r17 = convert_df2w(r21:20):chop +# CHECK: r17=convert_df2w(r21:20):chop 0x11 0xc0 0x75 0x8b -# CHECK: r17 = 
convert_sf2uw(r21) +# CHECK: r17=convert_sf2uw(r21) 0x31 0xc0 0x75 0x8b -# CHECK: r17 = convert_sf2uw(r21):chop +# CHECK: r17=convert_sf2uw(r21):chop 0x11 0xc0 0x95 0x8b -# CHECK: r17 = convert_sf2w(r21) +# CHECK: r17=convert_sf2w(r21) 0x31 0xc0 0x95 0x8b -# CHECK: r17 = convert_sf2w(r21):chop +# CHECK: r17=convert_sf2w(r21):chop # Floating point extreme value assistance 0x11 0xc0 0xb5 0x8b -# CHECK: r17 = sffixupr(r21) +# CHECK: r17=sffixupr(r21) 0x11 0xdf 0xd5 0xeb -# CHECK: r17 = sffixupn(r21, r31) +# CHECK: r17=sffixupn(r21,r31) 0x31 0xdf 0xd5 0xeb -# CHECK: r17 = sffixupd(r21, r31) +# CHECK: r17=sffixupd(r21,r31) # Floating point fused multiply-add 0x91 0xdf 0x15 0xef -# CHECK: r17 += sfmpy(r21, r31) +# CHECK: r17+=sfmpy(r21,r31) 0xb1 0xdf 0x15 0xef -# CHECK: r17 -= sfmpy(r21, r31) +# CHECK: r17-=sfmpy(r21,r31) # Floating point fused multiply-add with scaling 0xf1 0xdf 0x75 0xef -# CHECK: r17 += sfmpy(r21, r31, p3):scale +# CHECK: r17+=sfmpy(r21,r31,p3):scale # Floating point reciprocal square root approximation 0x71 0xc0 0xf5 0x8b -# CHECK: r17, p3 = sfinvsqrta(r21) +# CHECK: r17,p3=sfinvsqrta(r21) # Floating point fused multiply-add for library routines 0xd1 0xdf 0x15 0xef -# CHECK: r17 += sfmpy(r21, r31):lib +# CHECK: r17+=sfmpy(r21,r31):lib 0xf1 0xdf 0x15 0xef -# CHECK: r17 -= sfmpy(r21, r31):lib +# CHECK: r17-=sfmpy(r21,r31):lib # Create floating-point constant 0xb1 0xc2 0x00 0xd6 -# CHECK: r17 = sfmake(#21):pos +# CHECK: r17=sfmake(#21):pos 0xb1 0xc2 0x40 0xd6 -# CHECK: r17 = sfmake(#21):neg +# CHECK: r17=sfmake(#21):neg 0xb0 0xc2 0x00 0xd9 -# CHECK: r17:16 = dfmake(#21):pos +# CHECK: r17:16=dfmake(#21):pos 0xb0 0xc2 0x40 0xd9 -# CHECK: r17:16 = dfmake(#21):neg +# CHECK: r17:16=dfmake(#21):neg # Floating point maximum 0x11 0xdf 0x95 0xeb -# CHECK: r17 = sfmax(r21, r31) +# CHECK: r17=sfmax(r21,r31) # Floating point minimum 0x31 0xdf 0x95 0xeb -# CHECK: r17 = sfmin(r21, r31) +# CHECK: r17=sfmin(r21,r31) # Floating point multiply 0x11 0xdf 0x55 0xeb -# CHECK: r17 = sfmpy(r21, r31) +# CHECK: r17=sfmpy(r21,r31) # Floating point reciprocal approximation 0xf1 0xdf 0xf5 0xeb -# CHECK: r17, p3 = sfrecipa(r21, r31) +# CHECK: r17,p3=sfrecipa(r21,r31) # Floating point subtraction 0x31 0xdf 0x15 0xeb -# CHECK: r17 = sfsub(r21, r31) +# CHECK: r17=sfsub(r21,r31) Index: test/MC/Disassembler/Hexagon/xtype_mpy.txt =================================================================== --- test/MC/Disassembler/Hexagon/xtype_mpy.txt +++ test/MC/Disassembler/Hexagon/xtype_mpy.txt @@ -3,398 +3,398 @@ # Multiply and use lower result 0xb1 0xdf 0x35 0xd7 -# CHECK: r17 = add(#21, mpyi(r21, r31)) +# CHECK: r17=add(#21,mpyi(r21,r31)) 0xbf 0xd1 0x35 0xd8 -# CHECK: r17 = add(#21, mpyi(r21, #31)) +# CHECK: r17=add(#21,mpyi(r21,#31)) 0xb5 0xd1 0x3f 0xdf -# CHECK: r17 = add(r21, mpyi(#84, r31)) +# CHECK: r17=add(r21,mpyi(#84,r31)) 0xf5 0xf1 0xb5 0xdf -# CHECK: r17 = add(r21, mpyi(r21, #31)) +# CHECK: r17=add(r21,mpyi(r21,#31)) 0x15 0xd1 0x1f 0xe3 -# CHECK: r17 = add(r21, mpyi(r17, r31)) +# CHECK: r17=add(r21,mpyi(r17,r31)) 0xf1 0xc3 0x15 0xe0 -# CHECK: r17 =+ mpyi(r21, #31) +# CHECK: r17=+mpyi(r21,#31) 0xf1 0xc3 0x95 0xe0 -# CHECK: r17 =- mpyi(r21, #31) +# CHECK: r17=-mpyi(r21,#31) 0xf1 0xc3 0x15 0xe1 -# CHECK: r17 += mpyi(r21, #31) +# CHECK: r17+=mpyi(r21,#31) 0xf1 0xc3 0x95 0xe1 -# CHECK: r17 -= mpyi(r21, #31) +# CHECK: r17-=mpyi(r21,#31) 0x11 0xdf 0x15 0xed -# CHECK: r17 = mpyi(r21, r31) +# CHECK: r17=mpyi(r21,r31) 0x11 0xdf 0x15 0xef -# CHECK: r17 += mpyi(r21, r31) +# CHECK: r17+=mpyi(r21,r31) # Vector multiply 
word by signed half (32x16) 0xb0 0xde 0x14 0xe8 -# CHECK: r17:16 = vmpyweh(r21:20, r31:30):sat +# CHECK: r17:16=vmpyweh(r21:20,r31:30):sat 0xb0 0xde 0x94 0xe8 -# CHECK: r17:16 = vmpyweh(r21:20, r31:30):<<1:sat +# CHECK: r17:16=vmpyweh(r21:20,r31:30):<<1:sat 0xf0 0xde 0x14 0xe8 -# CHECK: r17:16 = vmpywoh(r21:20, r31:30):sat +# CHECK: r17:16=vmpywoh(r21:20,r31:30):sat 0xf0 0xde 0x94 0xe8 -# CHECK: r17:16 = vmpywoh(r21:20, r31:30):<<1:sat +# CHECK: r17:16=vmpywoh(r21:20,r31:30):<<1:sat 0xb0 0xde 0x34 0xe8 -# CHECK: r17:16 = vmpyweh(r21:20, r31:30):rnd:sat +# CHECK: r17:16=vmpyweh(r21:20,r31:30):rnd:sat 0xb0 0xde 0xb4 0xe8 -# CHECK: r17:16 = vmpyweh(r21:20, r31:30):<<1:rnd:sat +# CHECK: r17:16=vmpyweh(r21:20,r31:30):<<1:rnd:sat 0xf0 0xde 0x34 0xe8 -# CHECK: r17:16 = vmpywoh(r21:20, r31:30):rnd:sat +# CHECK: r17:16=vmpywoh(r21:20,r31:30):rnd:sat 0xf0 0xde 0xb4 0xe8 -# CHECK: r17:16 = vmpywoh(r21:20, r31:30):<<1:rnd:sat +# CHECK: r17:16=vmpywoh(r21:20,r31:30):<<1:rnd:sat 0xb0 0xde 0x14 0xea -# CHECK: r17:16 += vmpyweh(r21:20, r31:30):sat +# CHECK: r17:16+=vmpyweh(r21:20,r31:30):sat 0xb0 0xde 0x94 0xea -# CHECK: r17:16 += vmpyweh(r21:20, r31:30):<<1:sat +# CHECK: r17:16+=vmpyweh(r21:20,r31:30):<<1:sat 0xf0 0xde 0x14 0xea -# CHECK: r17:16 += vmpywoh(r21:20, r31:30):sat +# CHECK: r17:16+=vmpywoh(r21:20,r31:30):sat 0xf0 0xde 0x94 0xea -# CHECK: r17:16 += vmpywoh(r21:20, r31:30):<<1:sat +# CHECK: r17:16+=vmpywoh(r21:20,r31:30):<<1:sat 0xb0 0xde 0x34 0xea -# CHECK: r17:16 += vmpyweh(r21:20, r31:30):rnd:sat +# CHECK: r17:16+=vmpyweh(r21:20,r31:30):rnd:sat 0xb0 0xde 0xb4 0xea -# CHECK: r17:16 += vmpyweh(r21:20, r31:30):<<1:rnd:sat +# CHECK: r17:16+=vmpyweh(r21:20,r31:30):<<1:rnd:sat 0xf0 0xde 0x34 0xea -# CHECK: r17:16 += vmpywoh(r21:20, r31:30):rnd:sat +# CHECK: r17:16+=vmpywoh(r21:20,r31:30):rnd:sat 0xf0 0xde 0xb4 0xea -# CHECK: r17:16 += vmpywoh(r21:20, r31:30):<<1:rnd:sat +# CHECK: r17:16+=vmpywoh(r21:20,r31:30):<<1:rnd:sat # Vector multiply word by unsigned half (32x16) 0xb0 0xde 0x54 0xe8 -# CHECK: r17:16 = vmpyweuh(r21:20, r31:30):sat +# CHECK: r17:16=vmpyweuh(r21:20,r31:30):sat 0xb0 0xde 0xd4 0xe8 -# CHECK: r17:16 = vmpyweuh(r21:20, r31:30):<<1:sat +# CHECK: r17:16=vmpyweuh(r21:20,r31:30):<<1:sat 0xf0 0xde 0x54 0xe8 -# CHECK: r17:16 = vmpywouh(r21:20, r31:30):sat +# CHECK: r17:16=vmpywouh(r21:20,r31:30):sat 0xf0 0xde 0xd4 0xe8 -# CHECK: r17:16 = vmpywouh(r21:20, r31:30):<<1:sat +# CHECK: r17:16=vmpywouh(r21:20,r31:30):<<1:sat 0xb0 0xde 0x74 0xe8 -# CHECK: r17:16 = vmpyweuh(r21:20, r31:30):rnd:sat +# CHECK: r17:16=vmpyweuh(r21:20,r31:30):rnd:sat 0xb0 0xde 0xf4 0xe8 -# CHECK: r17:16 = vmpyweuh(r21:20, r31:30):<<1:rnd:sat +# CHECK: r17:16=vmpyweuh(r21:20,r31:30):<<1:rnd:sat 0xf0 0xde 0x74 0xe8 -# CHECK: r17:16 = vmpywouh(r21:20, r31:30):rnd:sat +# CHECK: r17:16=vmpywouh(r21:20,r31:30):rnd:sat 0xf0 0xde 0xf4 0xe8 -# CHECK: r17:16 = vmpywouh(r21:20, r31:30):<<1:rnd:sat +# CHECK: r17:16=vmpywouh(r21:20,r31:30):<<1:rnd:sat 0xb0 0xde 0x54 0xea -# CHECK: r17:16 += vmpyweuh(r21:20, r31:30):sat +# CHECK: r17:16+=vmpyweuh(r21:20,r31:30):sat 0xb0 0xde 0xd4 0xea -# CHECK: r17:16 += vmpyweuh(r21:20, r31:30):<<1:sat +# CHECK: r17:16+=vmpyweuh(r21:20,r31:30):<<1:sat 0xf0 0xde 0x54 0xea -# CHECK: r17:16 += vmpywouh(r21:20, r31:30):sat +# CHECK: r17:16+=vmpywouh(r21:20,r31:30):sat 0xf0 0xde 0xd4 0xea -# CHECK: r17:16 += vmpywouh(r21:20, r31:30):<<1:sat +# CHECK: r17:16+=vmpywouh(r21:20,r31:30):<<1:sat 0xb0 0xde 0x74 0xea -# CHECK: r17:16 += vmpyweuh(r21:20, r31:30):rnd:sat +# CHECK: 
r17:16+=vmpyweuh(r21:20,r31:30):rnd:sat 0xb0 0xde 0xf4 0xea -# CHECK: r17:16 += vmpyweuh(r21:20, r31:30):<<1:rnd:sat +# CHECK: r17:16+=vmpyweuh(r21:20,r31:30):<<1:rnd:sat 0xf0 0xde 0x74 0xea -# CHECK: r17:16 += vmpywouh(r21:20, r31:30):rnd:sat +# CHECK: r17:16+=vmpywouh(r21:20,r31:30):rnd:sat 0xf0 0xde 0xf4 0xea -# CHECK: r17:16 += vmpywouh(r21:20, r31:30):<<1:rnd:sat +# CHECK: r17:16+=vmpywouh(r21:20,r31:30):<<1:rnd:sat # Multiply signed halfwords 0x10 0xdf 0x95 0xe4 -# CHECK: r17:16 = mpy(r21.l, r31.l):<<1 +# CHECK: r17:16=mpy(r21.l,r31.l):<<1 0x30 0xdf 0x95 0xe4 -# CHECK: r17:16 = mpy(r21.l, r31.h):<<1 +# CHECK: r17:16=mpy(r21.l,r31.h):<<1 0x50 0xdf 0x95 0xe4 -# CHECK: r17:16 = mpy(r21.h, r31.l):<<1 +# CHECK: r17:16=mpy(r21.h,r31.l):<<1 0x70 0xdf 0x95 0xe4 -# CHECK: r17:16 = mpy(r21.h, r31.h):<<1 +# CHECK: r17:16=mpy(r21.h,r31.h):<<1 0x10 0xdf 0xb5 0xe4 -# CHECK: r17:16 = mpy(r21.l, r31.l):<<1:rnd +# CHECK: r17:16=mpy(r21.l,r31.l):<<1:rnd 0x30 0xdf 0xb5 0xe4 -# CHECK: r17:16 = mpy(r21.l, r31.h):<<1:rnd +# CHECK: r17:16=mpy(r21.l,r31.h):<<1:rnd 0x50 0xdf 0xb5 0xe4 -# CHECK: r17:16 = mpy(r21.h, r31.l):<<1:rnd +# CHECK: r17:16=mpy(r21.h,r31.l):<<1:rnd 0x70 0xdf 0xb5 0xe4 -# CHECK: r17:16 = mpy(r21.h, r31.h):<<1:rnd +# CHECK: r17:16=mpy(r21.h,r31.h):<<1:rnd 0x10 0xdf 0x95 0xe6 -# CHECK: r17:16 += mpy(r21.l, r31.l):<<1 +# CHECK: r17:16+=mpy(r21.l,r31.l):<<1 0x30 0xdf 0x95 0xe6 -# CHECK: r17:16 += mpy(r21.l, r31.h):<<1 +# CHECK: r17:16+=mpy(r21.l,r31.h):<<1 0x50 0xdf 0x95 0xe6 -# CHECK: r17:16 += mpy(r21.h, r31.l):<<1 +# CHECK: r17:16+=mpy(r21.h,r31.l):<<1 0x70 0xdf 0x95 0xe6 -# CHECK: r17:16 += mpy(r21.h, r31.h):<<1 +# CHECK: r17:16+=mpy(r21.h,r31.h):<<1 0x10 0xdf 0xb5 0xe6 -# CHECK: r17:16 -= mpy(r21.l, r31.l):<<1 +# CHECK: r17:16-=mpy(r21.l,r31.l):<<1 0x30 0xdf 0xb5 0xe6 -# CHECK: r17:16 -= mpy(r21.l, r31.h):<<1 +# CHECK: r17:16-=mpy(r21.l,r31.h):<<1 0x50 0xdf 0xb5 0xe6 -# CHECK: r17:16 -= mpy(r21.h, r31.l):<<1 +# CHECK: r17:16-=mpy(r21.h,r31.l):<<1 0x70 0xdf 0xb5 0xe6 -# CHECK: r17:16 -= mpy(r21.h, r31.h):<<1 +# CHECK: r17:16-=mpy(r21.h,r31.h):<<1 0x11 0xdf 0x95 0xec -# CHECK: r17 = mpy(r21.l, r31.l):<<1 +# CHECK: r17=mpy(r21.l,r31.l):<<1 0x31 0xdf 0x95 0xec -# CHECK: r17 = mpy(r21.l, r31.h):<<1 +# CHECK: r17=mpy(r21.l,r31.h):<<1 0x51 0xdf 0x95 0xec -# CHECK: r17 = mpy(r21.h, r31.l):<<1 +# CHECK: r17=mpy(r21.h,r31.l):<<1 0x71 0xdf 0x95 0xec -# CHECK: r17 = mpy(r21.h, r31.h):<<1 +# CHECK: r17=mpy(r21.h,r31.h):<<1 0x91 0xdf 0x95 0xec -# CHECK: r17 = mpy(r21.l, r31.l):<<1:sat +# CHECK: r17=mpy(r21.l,r31.l):<<1:sat 0xb1 0xdf 0x95 0xec -# CHECK: r17 = mpy(r21.l, r31.h):<<1:sat +# CHECK: r17=mpy(r21.l,r31.h):<<1:sat 0xd1 0xdf 0x95 0xec -# CHECK: r17 = mpy(r21.h, r31.l):<<1:sat +# CHECK: r17=mpy(r21.h,r31.l):<<1:sat 0xf1 0xdf 0x95 0xec -# CHECK: r17 = mpy(r21.h, r31.h):<<1:sat +# CHECK: r17=mpy(r21.h,r31.h):<<1:sat 0x11 0xdf 0xb5 0xec -# CHECK: r17 = mpy(r21.l, r31.l):<<1:rnd +# CHECK: r17=mpy(r21.l,r31.l):<<1:rnd 0x31 0xdf 0xb5 0xec -# CHECK: r17 = mpy(r21.l, r31.h):<<1:rnd +# CHECK: r17=mpy(r21.l,r31.h):<<1:rnd 0x51 0xdf 0xb5 0xec -# CHECK: r17 = mpy(r21.h, r31.l):<<1:rnd +# CHECK: r17=mpy(r21.h,r31.l):<<1:rnd 0x71 0xdf 0xb5 0xec -# CHECK: r17 = mpy(r21.h, r31.h):<<1:rnd +# CHECK: r17=mpy(r21.h,r31.h):<<1:rnd 0x91 0xdf 0xb5 0xec -# CHECK: r17 = mpy(r21.l, r31.l):<<1:rnd:sat +# CHECK: r17=mpy(r21.l,r31.l):<<1:rnd:sat 0xb1 0xdf 0xb5 0xec -# CHECK: r17 = mpy(r21.l, r31.h):<<1:rnd:sat +# CHECK: r17=mpy(r21.l,r31.h):<<1:rnd:sat 0xd1 0xdf 0xb5 0xec -# CHECK: r17 = mpy(r21.h, r31.l):<<1:rnd:sat +# 
CHECK: r17=mpy(r21.h,r31.l):<<1:rnd:sat 0xf1 0xdf 0xb5 0xec -# CHECK: r17 = mpy(r21.h, r31.h):<<1:rnd:sat +# CHECK: r17=mpy(r21.h,r31.h):<<1:rnd:sat 0x11 0xdf 0x95 0xee -# CHECK: r17 += mpy(r21.l, r31.l):<<1 +# CHECK: r17+=mpy(r21.l,r31.l):<<1 0x31 0xdf 0x95 0xee -# CHECK: r17 += mpy(r21.l, r31.h):<<1 +# CHECK: r17+=mpy(r21.l,r31.h):<<1 0x51 0xdf 0x95 0xee -# CHECK: r17 += mpy(r21.h, r31.l):<<1 +# CHECK: r17+=mpy(r21.h,r31.l):<<1 0x71 0xdf 0x95 0xee -# CHECK: r17 += mpy(r21.h, r31.h):<<1 +# CHECK: r17+=mpy(r21.h,r31.h):<<1 0x91 0xdf 0x95 0xee -# CHECK: r17 += mpy(r21.l, r31.l):<<1:sat +# CHECK: r17+=mpy(r21.l,r31.l):<<1:sat 0xb1 0xdf 0x95 0xee -# CHECK: r17 += mpy(r21.l, r31.h):<<1:sat +# CHECK: r17+=mpy(r21.l,r31.h):<<1:sat 0xd1 0xdf 0x95 0xee -# CHECK: r17 += mpy(r21.h, r31.l):<<1:sat +# CHECK: r17+=mpy(r21.h,r31.l):<<1:sat 0xf1 0xdf 0x95 0xee -# CHECK: r17 += mpy(r21.h, r31.h):<<1:sat +# CHECK: r17+=mpy(r21.h,r31.h):<<1:sat 0x11 0xdf 0xb5 0xee -# CHECK: r17 -= mpy(r21.l, r31.l):<<1 +# CHECK: r17-=mpy(r21.l,r31.l):<<1 0x31 0xdf 0xb5 0xee -# CHECK: r17 -= mpy(r21.l, r31.h):<<1 +# CHECK: r17-=mpy(r21.l,r31.h):<<1 0x51 0xdf 0xb5 0xee -# CHECK: r17 -= mpy(r21.h, r31.l):<<1 +# CHECK: r17-=mpy(r21.h,r31.l):<<1 0x71 0xdf 0xb5 0xee -# CHECK: r17 -= mpy(r21.h, r31.h):<<1 +# CHECK: r17-=mpy(r21.h,r31.h):<<1 0x91 0xdf 0xb5 0xee -# CHECK: r17 -= mpy(r21.l, r31.l):<<1:sat +# CHECK: r17-=mpy(r21.l,r31.l):<<1:sat 0xb1 0xdf 0xb5 0xee -# CHECK: r17 -= mpy(r21.l, r31.h):<<1:sat +# CHECK: r17-=mpy(r21.l,r31.h):<<1:sat 0xd1 0xdf 0xb5 0xee -# CHECK: r17 -= mpy(r21.h, r31.l):<<1:sat +# CHECK: r17-=mpy(r21.h,r31.l):<<1:sat 0xf1 0xdf 0xb5 0xee -# CHECK: r17 -= mpy(r21.h, r31.h):<<1:sat +# CHECK: r17-=mpy(r21.h,r31.h):<<1:sat # Multiply unsigned halfwords 0x10 0xdf 0xd5 0xe4 -# CHECK: r17:16 = mpyu(r21.l, r31.l):<<1 +# CHECK: r17:16=mpyu(r21.l,r31.l):<<1 0x30 0xdf 0xd5 0xe4 -# CHECK: r17:16 = mpyu(r21.l, r31.h):<<1 +# CHECK: r17:16=mpyu(r21.l,r31.h):<<1 0x50 0xdf 0xd5 0xe4 -# CHECK: r17:16 = mpyu(r21.h, r31.l):<<1 +# CHECK: r17:16=mpyu(r21.h,r31.l):<<1 0x70 0xdf 0xd5 0xe4 -# CHECK: r17:16 = mpyu(r21.h, r31.h):<<1 +# CHECK: r17:16=mpyu(r21.h,r31.h):<<1 0x10 0xdf 0xd5 0xe6 -# CHECK: r17:16 += mpyu(r21.l, r31.l):<<1 +# CHECK: r17:16+=mpyu(r21.l,r31.l):<<1 0x30 0xdf 0xd5 0xe6 -# CHECK: r17:16 += mpyu(r21.l, r31.h):<<1 +# CHECK: r17:16+=mpyu(r21.l,r31.h):<<1 0x50 0xdf 0xd5 0xe6 -# CHECK: r17:16 += mpyu(r21.h, r31.l):<<1 +# CHECK: r17:16+=mpyu(r21.h,r31.l):<<1 0x70 0xdf 0xd5 0xe6 -# CHECK: r17:16 += mpyu(r21.h, r31.h):<<1 +# CHECK: r17:16+=mpyu(r21.h,r31.h):<<1 0x10 0xdf 0xf5 0xe6 -# CHECK: r17:16 -= mpyu(r21.l, r31.l):<<1 +# CHECK: r17:16-=mpyu(r21.l,r31.l):<<1 0x30 0xdf 0xf5 0xe6 -# CHECK: r17:16 -= mpyu(r21.l, r31.h):<<1 +# CHECK: r17:16-=mpyu(r21.l,r31.h):<<1 0x50 0xdf 0xf5 0xe6 -# CHECK: r17:16 -= mpyu(r21.h, r31.l):<<1 +# CHECK: r17:16-=mpyu(r21.h,r31.l):<<1 0x70 0xdf 0xf5 0xe6 -# CHECK: r17:16 -= mpyu(r21.h, r31.h):<<1 +# CHECK: r17:16-=mpyu(r21.h,r31.h):<<1 0x11 0xdf 0xd5 0xec -# CHECK: r17 = mpyu(r21.l, r31.l):<<1 +# CHECK: r17=mpyu(r21.l,r31.l):<<1 0x31 0xdf 0xd5 0xec -# CHECK: r17 = mpyu(r21.l, r31.h):<<1 +# CHECK: r17=mpyu(r21.l,r31.h):<<1 0x51 0xdf 0xd5 0xec -# CHECK: r17 = mpyu(r21.h, r31.l):<<1 +# CHECK: r17=mpyu(r21.h,r31.l):<<1 0x71 0xdf 0xd5 0xec -# CHECK: r17 = mpyu(r21.h, r31.h):<<1 +# CHECK: r17=mpyu(r21.h,r31.h):<<1 0x11 0xdf 0xd5 0xee -# CHECK: r17 += mpyu(r21.l, r31.l):<<1 +# CHECK: r17+=mpyu(r21.l,r31.l):<<1 0x31 0xdf 0xd5 0xee -# CHECK: r17 += mpyu(r21.l, r31.h):<<1 +# CHECK: 
r17+=mpyu(r21.l,r31.h):<<1 0x51 0xdf 0xd5 0xee -# CHECK: r17 += mpyu(r21.h, r31.l):<<1 +# CHECK: r17+=mpyu(r21.h,r31.l):<<1 0x71 0xdf 0xd5 0xee -# CHECK: r17 += mpyu(r21.h, r31.h):<<1 +# CHECK: r17+=mpyu(r21.h,r31.h):<<1 0x11 0xdf 0xf5 0xee -# CHECK: r17 -= mpyu(r21.l, r31.l):<<1 +# CHECK: r17-=mpyu(r21.l,r31.l):<<1 0x31 0xdf 0xf5 0xee -# CHECK: r17 -= mpyu(r21.l, r31.h):<<1 +# CHECK: r17-=mpyu(r21.l,r31.h):<<1 0x51 0xdf 0xf5 0xee -# CHECK: r17 -= mpyu(r21.h, r31.l):<<1 +# CHECK: r17-=mpyu(r21.h,r31.l):<<1 0x71 0xdf 0xf5 0xee -# CHECK: r17 -= mpyu(r21.h, r31.h):<<1 +# CHECK: r17-=mpyu(r21.h,r31.h):<<1 # Polynomial multiply words 0xf0 0xdf 0x55 0xe5 -# CHECK: r17:16 = pmpyw(r21, r31) +# CHECK: r17:16=pmpyw(r21,r31) 0xf0 0xdf 0x35 0xe7 -# CHECK: r17:16 ^= pmpyw(r21, r31) +# CHECK: r17:16^=pmpyw(r21,r31) # Vector reduce multiply word by signed half (32x16) 0x50 0xde 0x34 0xe8 -# CHECK: r17:16 = vrmpywoh(r21:20, r31:30) +# CHECK: r17:16=vrmpywoh(r21:20,r31:30) 0x50 0xde 0xb4 0xe8 -# CHECK: r17:16 = vrmpywoh(r21:20, r31:30):<<1 +# CHECK: r17:16=vrmpywoh(r21:20,r31:30):<<1 0x90 0xde 0x54 0xe8 -# CHECK: r17:16 = vrmpyweh(r21:20, r31:30) +# CHECK: r17:16=vrmpyweh(r21:20,r31:30) 0x90 0xde 0xd4 0xe8 -# CHECK: r17:16 = vrmpyweh(r21:20, r31:30):<<1 +# CHECK: r17:16=vrmpyweh(r21:20,r31:30):<<1 0xd0 0xde 0x74 0xea -# CHECK: r17:16 += vrmpywoh(r21:20, r31:30) +# CHECK: r17:16+=vrmpywoh(r21:20,r31:30) 0xd0 0xde 0xf4 0xea -# CHECK: r17:16 += vrmpywoh(r21:20, r31:30):<<1 +# CHECK: r17:16+=vrmpywoh(r21:20,r31:30):<<1 0xd0 0xde 0x34 0xea -# CHECK: r17:16 += vrmpyweh(r21:20, r31:30) +# CHECK: r17:16+=vrmpyweh(r21:20,r31:30) 0xd0 0xde 0xb4 0xea -# CHECK: r17:16 += vrmpyweh(r21:20, r31:30):<<1 +# CHECK: r17:16+=vrmpyweh(r21:20,r31:30):<<1 # Multiply and use upper result 0x31 0xdf 0x15 0xed -# CHECK: r17 = mpy(r21, r31) +# CHECK: r17=mpy(r21,r31) 0x31 0xdf 0x35 0xed -# CHECK: r17 = mpy(r21, r31):rnd +# CHECK: r17=mpy(r21,r31):rnd 0x31 0xdf 0x55 0xed -# CHECK: r17 = mpyu(r21, r31) +# CHECK: r17=mpyu(r21,r31) 0x31 0xdf 0x75 0xed -# CHECK: r17 = mpysu(r21, r31) +# CHECK: r17=mpysu(r21,r31) 0x11 0xdf 0xb5 0xed -# CHECK: r17 = mpy(r21, r31.h):<<1:sat +# CHECK: r17=mpy(r21,r31.h):<<1:sat 0x31 0xdf 0xb5 0xed -# CHECK: r17 = mpy(r21, r31.l):<<1:sat +# CHECK: r17=mpy(r21,r31.l):<<1:sat 0x91 0xdf 0xb5 0xed -# CHECK: r17 = mpy(r21, r31.h):<<1:rnd:sat +# CHECK: r17=mpy(r21,r31.h):<<1:rnd:sat 0x11 0xdf 0xf5 0xed -# CHECK: r17 = mpy(r21, r31):<<1:sat +# CHECK: r17=mpy(r21,r31):<<1:sat 0x91 0xdf 0xf5 0xed -# CHECK: r17 = mpy(r21, r31.l):<<1:rnd:sat +# CHECK: r17=mpy(r21,r31.l):<<1:rnd:sat 0x51 0xdf 0xb5 0xed -# CHECK: r17 = mpy(r21, r31):<<1 +# CHECK: r17=mpy(r21,r31):<<1 0x11 0xdf 0x75 0xef -# CHECK: r17 += mpy(r21, r31):<<1:sat +# CHECK: r17+=mpy(r21,r31):<<1:sat 0x31 0xdf 0x75 0xef -# CHECK: r17 -= mpy(r21, r31):<<1:sat +# CHECK: r17-=mpy(r21,r31):<<1:sat # Multiply and use full result 0x10 0xdf 0x15 0xe5 -# CHECK: r17:16 = mpy(r21, r31) +# CHECK: r17:16=mpy(r21,r31) 0x10 0xdf 0x55 0xe5 -# CHECK: r17:16 = mpyu(r21, r31) +# CHECK: r17:16=mpyu(r21,r31) 0x10 0xdf 0x15 0xe7 -# CHECK: r17:16 += mpy(r21, r31) +# CHECK: r17:16+=mpy(r21,r31) 0x10 0xdf 0x35 0xe7 -# CHECK: r17:16 -= mpy(r21, r31) +# CHECK: r17:16-=mpy(r21,r31) 0x10 0xdf 0x55 0xe7 -# CHECK: r17:16 += mpyu(r21, r31) +# CHECK: r17:16+=mpyu(r21,r31) 0x10 0xdf 0x75 0xe7 -# CHECK: r17:16 -= mpyu(r21, r31) +# CHECK: r17:16-=mpyu(r21,r31) # Vector dual multiply 0x90 0xde 0x14 0xe8 -# CHECK: r17:16 = vdmpy(r21:20, r31:30):sat +# CHECK: r17:16=vdmpy(r21:20,r31:30):sat 0x90 0xde 
0x94 0xe8 -# CHECK: r17:16 = vdmpy(r21:20, r31:30):<<1:sat +# CHECK: r17:16=vdmpy(r21:20,r31:30):<<1:sat 0x90 0xde 0x14 0xea -# CHECK: r17:16 += vdmpy(r21:20, r31:30):sat +# CHECK: r17:16+=vdmpy(r21:20,r31:30):sat 0x90 0xde 0x94 0xea -# CHECK: r17:16 += vdmpy(r21:20, r31:30):<<1:sat +# CHECK: r17:16+=vdmpy(r21:20,r31:30):<<1:sat # Vector dual multiply with round and pack 0x11 0xde 0x14 0xe9 -# CHECK: r17 = vdmpy(r21:20, r31:30):rnd:sat +# CHECK: r17=vdmpy(r21:20,r31:30):rnd:sat 0x11 0xde 0x94 0xe9 -# CHECK: r17 = vdmpy(r21:20, r31:30):<<1:rnd:sat +# CHECK: r17=vdmpy(r21:20,r31:30):<<1:rnd:sat # Vector reduce multiply bytes 0x30 0xde 0x94 0xe8 -# CHECK: r17:16 = vrmpybu(r21:20, r31:30) +# CHECK: r17:16=vrmpybu(r21:20,r31:30) 0x30 0xde 0xd4 0xe8 -# CHECK: r17:16 = vrmpybsu(r21:20, r31:30) +# CHECK: r17:16=vrmpybsu(r21:20,r31:30) 0x30 0xde 0x94 0xea -# CHECK: r17:16 += vrmpybu(r21:20, r31:30) +# CHECK: r17:16+=vrmpybu(r21:20,r31:30) 0x30 0xde 0xd4 0xea -# CHECK: r17:16 += vrmpybsu(r21:20, r31:30) +# CHECK: r17:16+=vrmpybsu(r21:20,r31:30) # Vector dual multiply signed by unsigned bytes 0x30 0xde 0xb4 0xe8 -# CHECK: r17:16 = vdmpybsu(r21:20, r31:30):sat +# CHECK: r17:16=vdmpybsu(r21:20,r31:30):sat 0x30 0xde 0x34 0xea -# CHECK: r17:16 += vdmpybsu(r21:20, r31:30):sat +# CHECK: r17:16+=vdmpybsu(r21:20,r31:30):sat # Vector multiply even halfwords 0xd0 0xde 0x14 0xe8 -# CHECK: r17:16 = vmpyeh(r21:20, r31:30):sat +# CHECK: r17:16=vmpyeh(r21:20,r31:30):sat 0xd0 0xde 0x94 0xe8 -# CHECK: r17:16 = vmpyeh(r21:20, r31:30):<<1:sat +# CHECK: r17:16=vmpyeh(r21:20,r31:30):<<1:sat 0x50 0xde 0x34 0xea -# CHECK: r17:16 += vmpyeh(r21:20, r31:30) +# CHECK: r17:16+=vmpyeh(r21:20,r31:30) 0xd0 0xde 0x14 0xea -# CHECK: r17:16 += vmpyeh(r21:20, r31:30):sat +# CHECK: r17:16+=vmpyeh(r21:20,r31:30):sat 0xd0 0xde 0x94 0xea -# CHECK: r17:16 += vmpyeh(r21:20, r31:30):<<1:sat +# CHECK: r17:16+=vmpyeh(r21:20,r31:30):<<1:sat # Vector multiply halfwords 0xb0 0xdf 0x15 0xe5 -# CHECK: r17:16 = vmpyh(r21, r31):sat +# CHECK: r17:16=vmpyh(r21,r31):sat 0xb0 0xdf 0x95 0xe5 -# CHECK: r17:16 = vmpyh(r21, r31):<<1:sat +# CHECK: r17:16=vmpyh(r21,r31):<<1:sat 0x30 0xdf 0x35 0xe7 -# CHECK: r17:16 += vmpyh(r21, r31) +# CHECK: r17:16+=vmpyh(r21,r31) 0xb0 0xdf 0x15 0xe7 -# CHECK: r17:16 += vmpyh(r21, r31):sat +# CHECK: r17:16+=vmpyh(r21,r31):sat 0xb0 0xdf 0x95 0xe7 -# CHECK: r17:16 += vmpyh(r21, r31):<<1:sat +# CHECK: r17:16+=vmpyh(r21,r31):<<1:sat # Vector multiply halfwords with round and pack 0xf1 0xdf 0x35 0xed -# CHECK: r17 = vmpyh(r21, r31):rnd:sat +# CHECK: r17=vmpyh(r21,r31):rnd:sat 0xf1 0xdf 0xb5 0xed -# CHECK: r17 = vmpyh(r21, r31):<<1:rnd:sat +# CHECK: r17=vmpyh(r21,r31):<<1:rnd:sat # Vector multiply halfwords signed by unsigned 0xf0 0xdf 0x15 0xe5 -# CHECK: r17:16 = vmpyhsu(r21, r31):sat +# CHECK: r17:16=vmpyhsu(r21,r31):sat 0xf0 0xdf 0x95 0xe5 -# CHECK: r17:16 = vmpyhsu(r21, r31):<<1:sat +# CHECK: r17:16=vmpyhsu(r21,r31):<<1:sat 0xb0 0xdf 0x75 0xe7 -# CHECK: r17:16 += vmpyhsu(r21, r31):sat +# CHECK: r17:16+=vmpyhsu(r21,r31):sat 0xb0 0xdf 0xf5 0xe7 -# CHECK: r17:16 += vmpyhsu(r21, r31):<<1:sat +# CHECK: r17:16+=vmpyhsu(r21,r31):<<1:sat # Vector reduce multiply halfwords 0x50 0xde 0x14 0xe8 -# CHECK: r17:16 = vrmpyh(r21:20, r31:30) +# CHECK: r17:16=vrmpyh(r21:20,r31:30) 0x50 0xde 0x14 0xea -# CHECK: r17:16 += vrmpyh(r21:20, r31:30) +# CHECK: r17:16+=vrmpyh(r21:20,r31:30) # Vector multiply bytes 0x30 0xdf 0x55 0xe5 -# CHECK: r17:16 = vmpybsu(r21, r31) +# CHECK: r17:16=vmpybsu(r21,r31) 0x30 0xdf 0x95 0xe5 -# CHECK: r17:16 = vmpybu(r21, 
r31) +# CHECK: r17:16=vmpybu(r21,r31) 0x30 0xdf 0x95 0xe7 -# CHECK: r17:16 += vmpybu(r21, r31) +# CHECK: r17:16+=vmpybu(r21,r31) 0x30 0xdf 0xd5 0xe7 -# CHECK: r17:16 += vmpybsu(r21, r31) +# CHECK: r17:16+=vmpybsu(r21,r31) # Vector polynomial multiply halfwords 0xf0 0xdf 0xd5 0xe5 -# CHECK: r17:16 = vpmpyh(r21, r31) +# CHECK: r17:16=vpmpyh(r21,r31) 0xf0 0xdf 0xb5 0xe7 -# CHECK: r17:16 ^= vpmpyh(r21, r31) +# CHECK: r17:16^=vpmpyh(r21,r31) Index: test/MC/Disassembler/Hexagon/xtype_perm.txt =================================================================== --- test/MC/Disassembler/Hexagon/xtype_perm.txt +++ test/MC/Disassembler/Hexagon/xtype_perm.txt @@ -3,102 +3,102 @@ # CABAC decode bin 0xd0 0xde 0xd4 0xc1 -# CHECK: r17:16 = decbin(r21:20, r31:30) +# CHECK: r17:16=decbin(r21:20,r31:30) # Saturate 0x11 0xc0 0xd4 0x88 -# CHECK: r17 = sat(r21:20) +# CHECK: r17=sat(r21:20) 0x91 0xc0 0xd5 0x8c -# CHECK: r17 = sath(r21) +# CHECK: r17=sath(r21) 0xb1 0xc0 0xd5 0x8c -# CHECK: r17 = satuh(r21) +# CHECK: r17=satuh(r21) 0xd1 0xc0 0xd5 0x8c -# CHECK: r17 = satub(r21) +# CHECK: r17=satub(r21) 0xf1 0xc0 0xd5 0x8c -# CHECK: r17 = satb(r21) +# CHECK: r17=satb(r21) # Swizzle bytes 0xf1 0xc0 0x95 0x8c -# CHECK: r17 = swiz(r21) +# CHECK: r17=swiz(r21) # Vector align 0x70 0xd4 0x1e 0xc2 -# CHECK: r17:16 = valignb(r21:20, r31:30, p3) +# CHECK: r17:16=valignb(r21:20,r31:30,p3) 0x70 0xde 0x94 0xc2 -# CHECK: r17:16 = vspliceb(r21:20, r31:30, p3) +# CHECK: r17:16=vspliceb(r21:20,r31:30,p3) # Vector round and pack 0x91 0xc0 0x94 0x88 -# CHECK: r17 = vrndwh(r21:20) +# CHECK: r17=vrndwh(r21:20) 0xd1 0xc0 0x94 0x88 -# CHECK: r17 = vrndwh(r21:20):sat +# CHECK: r17=vrndwh(r21:20):sat # Vector saturate and pack 0x11 0xc0 0x14 0x88 -# CHECK: r17 = vsathub(r21:20) +# CHECK: r17=vsathub(r21:20) 0x51 0xc0 0x14 0x88 -# CHECK: r17 = vsatwh(r21:20) +# CHECK: r17=vsatwh(r21:20) 0x91 0xc0 0x14 0x88 -# CHECK: r17 = vsatwuh(r21:20) +# CHECK: r17=vsatwuh(r21:20) 0xd1 0xc0 0x14 0x88 -# CHECK: r17 = vsathb(r21:20) +# CHECK: r17=vsathb(r21:20) 0x11 0xc0 0x95 0x8c -# CHECK: r17 = vsathb(r21) +# CHECK: r17=vsathb(r21) 0x51 0xc0 0x95 0x8c -# CHECK: r17 = vsathub(r21) +# CHECK: r17=vsathub(r21) # Vector saturate without pack 0x90 0xc0 0x14 0x80 -# CHECK: r17:16 = vsathub(r21:20) +# CHECK: r17:16=vsathub(r21:20) 0xb0 0xc0 0x14 0x80 -# CHECK: r17:16 = vsatwuh(r21:20) +# CHECK: r17:16=vsatwuh(r21:20) 0xd0 0xc0 0x14 0x80 -# CHECK: r17:16 = vsatwh(r21:20) +# CHECK: r17:16=vsatwh(r21:20) 0xf0 0xc0 0x14 0x80 -# CHECK: r17:16 = vsathb(r21:20) +# CHECK: r17:16=vsathb(r21:20) # Vector shuffle 0x50 0xde 0x14 0xc1 -# CHECK: r17:16 = shuffeb(r21:20, r31:30) +# CHECK: r17:16=shuffeb(r21:20,r31:30) 0x90 0xd4 0x1e 0xc1 -# CHECK: r17:16 = shuffob(r21:20, r31:30) +# CHECK: r17:16=shuffob(r21:20,r31:30) 0xd0 0xde 0x14 0xc1 -# CHECK: r17:16 = shuffeh(r21:20, r31:30) +# CHECK: r17:16=shuffeh(r21:20,r31:30) 0x10 0xd4 0x9e 0xc1 -# CHECK: r17:16 = shuffoh(r21:20, r31:30) +# CHECK: r17:16=shuffoh(r21:20,r31:30) # Vector splat bytes 0xf1 0xc0 0x55 0x8c -# CHECK: r17 = vsplatb(r21) +# CHECK: r17=vsplatb(r21) # Vector splat halfwords 0x50 0xc0 0x55 0x84 -# CHECK: r17:16 = vsplath(r21) +# CHECK: r17:16=vsplath(r21) # Vector splice 0x70 0xde 0x94 0xc0 -# CHECK: r17:16 = vspliceb(r21:20, r31:30, #3) +# CHECK: r17:16=vspliceb(r21:20,r31:30,#3) 0x70 0xde 0x94 0xc2 -# CHECK: r17:16 = vspliceb(r21:20, r31:30, p3) +# CHECK: r17:16=vspliceb(r21:20,r31:30,p3) # Vector sign extend 0x10 0xc0 0x15 0x84 -# CHECK: r17:16 = vsxtbh(r21) +# CHECK: r17:16=vsxtbh(r21) 0x90 0xc0 0x15 0x84 
-# CHECK: r17:16 = vsxthw(r21) +# CHECK: r17:16=vsxthw(r21) # Vector truncate 0x11 0xc0 0x94 0x88 -# CHECK: r17 = vtrunohb(r21:20) +# CHECK: r17=vtrunohb(r21:20) 0x51 0xc0 0x94 0x88 -# CHECK: r17 = vtrunehb(r21:20) +# CHECK: r17=vtrunehb(r21:20) 0x50 0xde 0x94 0xc1 -# CHECK: r17:16 = vtrunewh(r21:20, r31:30) +# CHECK: r17:16=vtrunewh(r21:20,r31:30) 0x90 0xde 0x94 0xc1 -# CHECK: r17:16 = vtrunowh(r21:20, r31:30) +# CHECK: r17:16=vtrunowh(r21:20,r31:30) # Vector zero extend 0x50 0xc0 0x15 0x84 -# CHECK: r17:16 = vzxtbh(r21) +# CHECK: r17:16=vzxtbh(r21) 0xd0 0xc0 0x15 0x84 -# CHECK: r17:16 = vzxthw(r21) +# CHECK: r17:16=vzxthw(r21) Index: test/MC/Disassembler/Hexagon/xtype_pred.txt =================================================================== --- test/MC/Disassembler/Hexagon/xtype_pred.txt +++ test/MC/Disassembler/Hexagon/xtype_pred.txt @@ -3,134 +3,134 @@ # Bounds check 0x83 0xf4 0x10 0xd2 -# CHECK: p3 = boundscheck(r17:16, r21:20):raw:lo +# CHECK: p3=boundscheck(r17:16,r21:20):raw:lo 0xa3 0xf4 0x10 0xd2 -# CHECK: p3 = boundscheck(r17:16, r21:20):raw:hi +# CHECK: p3=boundscheck(r17:16,r21:20):raw:hi # Compare byte 0x43 0xd5 0xd1 0xc7 -# CHECK: p3 = cmpb.gt(r17, r21) +# CHECK: p3=cmpb.gt(r17,r21) 0xc3 0xd5 0xd1 0xc7 -# CHECK: p3 = cmpb.eq(r17, r21) +# CHECK: p3=cmpb.eq(r17,r21) 0xe3 0xd5 0xd1 0xc7 -# CHECK: p3 = cmpb.gtu(r17, r21) +# CHECK: p3=cmpb.gtu(r17,r21) 0xa3 0xc2 0x11 0xdd -# CHECK: p3 = cmpb.eq(r17, #21) +# CHECK: p3=cmpb.eq(r17,#21) 0xa3 0xc2 0x31 0xdd -# CHECK: p3 = cmpb.gt(r17, #21) +# CHECK: p3=cmpb.gt(r17,#21) 0xa3 0xc2 0x51 0xdd -# CHECK: p3 = cmpb.gtu(r17, #21) +# CHECK: p3=cmpb.gtu(r17,#21) # Compare half 0x63 0xd5 0xd1 0xc7 -# CHECK: p3 = cmph.eq(r17, r21) +# CHECK: p3=cmph.eq(r17,r21) 0x83 0xd5 0xd1 0xc7 -# CHECK: p3 = cmph.gt(r17, r21) +# CHECK: p3=cmph.gt(r17,r21) 0xa3 0xd5 0xd1 0xc7 -# CHECK: p3 = cmph.gtu(r17, r21) +# CHECK: p3=cmph.gtu(r17,r21) 0xab 0xc2 0x11 0xdd -# CHECK: p3 = cmph.eq(r17, #21) +# CHECK: p3=cmph.eq(r17,#21) 0xab 0xc2 0x31 0xdd -# CHECK: p3 = cmph.gt(r17, #21) +# CHECK: p3=cmph.gt(r17,#21) 0xab 0xc2 0x51 0xdd -# CHECK: p3 = cmph.gtu(r17, #21) +# CHECK: p3=cmph.gtu(r17,#21) # Compare doublewords 0x03 0xde 0x94 0xd2 -# CHECK: p3 = cmp.eq(r21:20, r31:30) +# CHECK: p3=cmp.eq(r21:20,r31:30) 0x43 0xde 0x94 0xd2 -# CHECK: p3 = cmp.gt(r21:20, r31:30) +# CHECK: p3=cmp.gt(r21:20,r31:30) 0x83 0xde 0x94 0xd2 -# CHECK: p3 = cmp.gtu(r21:20, r31:30) +# CHECK: p3=cmp.gtu(r21:20,r31:30) # Compare bitmask 0x03 0xd5 0x91 0x85 -# CHECK: p3 = bitsclr(r17, #21) +# CHECK: p3=bitsclr(r17,#21) 0x03 0xd5 0xb1 0x85 -# CHECK: p3 = !bitsclr(r17, #21) +# CHECK: p3=!bitsclr(r17,#21) 0x03 0xd5 0x51 0xc7 -# CHECK: p3 = bitsset(r17, r21) +# CHECK: p3=bitsset(r17,r21) 0x03 0xd5 0x71 0xc7 -# CHECK: p3 = !bitsset(r17, r21) +# CHECK: p3=!bitsset(r17,r21) 0x03 0xd5 0x91 0xc7 -# CHECK: p3 = bitsclr(r17, r21) +# CHECK: p3=bitsclr(r17,r21) 0x03 0xd5 0xb1 0xc7 -# CHECK: p3 = !bitsclr(r17, r21) +# CHECK: p3=!bitsclr(r17,r21) # mask generate from predicate 0x10 0xc3 0x00 0x86 -# CHECK: r17:16 = mask(p3) +# CHECK: r17:16=mask(p3) # Check for TLB match 0x63 0xf5 0x10 0xd2 -# CHECK: p3 = tlbmatch(r17:16, r21) +# CHECK: p3=tlbmatch(r17:16,r21) # Predicate Transfer 0x03 0xc0 0x45 0x85 -# CHECK: p3 = r5 +# CHECK: p3=r5 0x05 0xc0 0x43 0x89 -# CHECK: r5 = p3 +# CHECK: r5=p3 # Test bit 0x03 0xd5 0x11 0x85 -# CHECK: p3 = tstbit(r17, #21) +# CHECK: p3=tstbit(r17,#21) 0x03 0xd5 0x31 0x85 -# CHECK: p3 = !tstbit(r17, #21) +# CHECK: p3=!tstbit(r17,#21) 0x03 0xd5 0x11 0xc7 -# CHECK: p3 = tstbit(r17, r21) 
+# CHECK: p3=tstbit(r17,r21) 0x03 0xd5 0x31 0xc7 -# CHECK: p3 = !tstbit(r17, r21) +# CHECK: p3=!tstbit(r17,r21) # Vector compare halfwords 0x63 0xde 0x14 0xd2 -# CHECK: p3 = vcmph.eq(r21:20, r31:30) +# CHECK: p3=vcmph.eq(r21:20,r31:30) 0x83 0xde 0x14 0xd2 -# CHECK: p3 = vcmph.gt(r21:20, r31:30) +# CHECK: p3=vcmph.gt(r21:20,r31:30) 0xa3 0xde 0x14 0xd2 -# CHECK: p3 = vcmph.gtu(r21:20, r31:30) +# CHECK: p3=vcmph.gtu(r21:20,r31:30) 0xeb 0xc3 0x14 0xdc -# CHECK: p3 = vcmph.eq(r21:20, #31) +# CHECK: p3=vcmph.eq(r21:20,#31) 0xeb 0xc3 0x34 0xdc -# CHECK: p3 = vcmph.gt(r21:20, #31) +# CHECK: p3=vcmph.gt(r21:20,#31) 0xeb 0xc3 0x54 0xdc -# CHECK: p3 = vcmph.gtu(r21:20, #31) +# CHECK: p3=vcmph.gtu(r21:20,#31) # Vector compare bytes for any match 0x03 0xfe 0x14 0xd2 -# CHECK: p3 = any8(vcmpb.eq(r21:20, r31:30)) +# CHECK: p3=any8(vcmpb.eq(r21:20,r31:30)) # Vector compare bytes 0x63 0xde 0x14 0xd2 -# CHECK: p3 = vcmph.eq(r21:20, r31:30) +# CHECK: p3=vcmph.eq(r21:20,r31:30) 0x83 0xde 0x14 0xd2 -# CHECK: p3 = vcmph.gt(r21:20, r31:30) +# CHECK: p3=vcmph.gt(r21:20,r31:30) 0xa3 0xde 0x14 0xd2 -# CHECK: p3 = vcmph.gtu(r21:20, r31:30) +# CHECK: p3=vcmph.gtu(r21:20,r31:30) 0xeb 0xc3 0x14 0xdc -# CHECK: p3 = vcmph.eq(r21:20, #31) +# CHECK: p3=vcmph.eq(r21:20,#31) 0xeb 0xc3 0x34 0xdc -# CHECK: p3 = vcmph.gt(r21:20, #31) +# CHECK: p3=vcmph.gt(r21:20,#31) 0xeb 0xc3 0x54 0xdc -# CHECK: p3 = vcmph.gtu(r21:20, #31) +# CHECK: p3=vcmph.gtu(r21:20,#31) # Vector compare words 0x03 0xde 0x14 0xd2 -# CHECK: p3 = vcmpw.eq(r21:20, r31:30) +# CHECK: p3=vcmpw.eq(r21:20,r31:30) 0x23 0xde 0x14 0xd2 -# CHECK: p3 = vcmpw.gt(r21:20, r31:30) +# CHECK: p3=vcmpw.gt(r21:20,r31:30) 0x43 0xde 0x14 0xd2 -# CHECK: p3 = vcmpw.gtu(r21:20, r31:30) +# CHECK: p3=vcmpw.gtu(r21:20,r31:30) 0xf3 0xc3 0x14 0xdc -# CHECK: p3 = vcmpw.eq(r21:20, #31) +# CHECK: p3=vcmpw.eq(r21:20,#31) 0xf3 0xc3 0x34 0xdc -# CHECK: p3 = vcmpw.gt(r21:20, #31) +# CHECK: p3=vcmpw.gt(r21:20,#31) 0xf3 0xc3 0x54 0xdc -# CHECK: p3 = vcmpw.gtu(r21:20, #31) +# CHECK: p3=vcmpw.gtu(r21:20,#31) # Viterbi pack even and odd predicate bits 0x11 0xc2 0x03 0x89 -# CHECK: r17 = vitpack(p3, p2) +# CHECK: r17=vitpack(p3,p2) # Vector mux 0x70 0xde 0x14 0xd1 -# CHECK: r17:16 = vmux(p3, r21:20, r31:30) +# CHECK: r17:16=vmux(p3,r21:20,r31:30) Index: test/MC/Disassembler/Hexagon/xtype_shift.txt =================================================================== --- test/MC/Disassembler/Hexagon/xtype_shift.txt +++ test/MC/Disassembler/Hexagon/xtype_shift.txt @@ -3,258 +3,258 @@ # Shift by immediate 0x10 0xdf 0x14 0x80 -# CHECK: r17:16 = asr(r21:20, #31) +# CHECK: r17:16=asr(r21:20,#31) 0x30 0xdf 0x14 0x80 -# CHECK: r17:16 = lsr(r21:20, #31) +# CHECK: r17:16=lsr(r21:20,#31) 0x50 0xdf 0x14 0x80 -# CHECK: r17:16 = asl(r21:20, #31) +# CHECK: r17:16=asl(r21:20,#31) 0x11 0xdf 0x15 0x8c -# CHECK: r17 = asr(r21, #31) +# CHECK: r17=asr(r21,#31) 0x31 0xdf 0x15 0x8c -# CHECK: r17 = lsr(r21, #31) +# CHECK: r17=lsr(r21,#31) 0x51 0xdf 0x15 0x8c -# CHECK: r17 = asl(r21, #31) +# CHECK: r17=asl(r21,#31) # Shift by immediate and accumulate 0x10 0xdf 0x14 0x82 -# CHECK: r17:16 -= asr(r21:20, #31) +# CHECK: r17:16-=asr(r21:20,#31) 0x30 0xdf 0x14 0x82 -# CHECK: r17:16 -= lsr(r21:20, #31) +# CHECK: r17:16-=lsr(r21:20,#31) 0x50 0xdf 0x14 0x82 -# CHECK: r17:16 -= asl(r21:20, #31) +# CHECK: r17:16-=asl(r21:20,#31) 0x90 0xdf 0x14 0x82 -# CHECK: r17:16 += asr(r21:20, #31) +# CHECK: r17:16+=asr(r21:20,#31) 0xb0 0xdf 0x14 0x82 -# CHECK: r17:16 += lsr(r21:20, #31) +# CHECK: r17:16+=lsr(r21:20,#31) 0xd0 0xdf 0x14 0x82 -# CHECK: 
r17:16 += asl(r21:20, #31) +# CHECK: r17:16+=asl(r21:20,#31) 0x11 0xdf 0x15 0x8e -# CHECK: r17 -= asr(r21, #31) +# CHECK: r17-=asr(r21,#31) 0x31 0xdf 0x15 0x8e -# CHECK: r17 -= lsr(r21, #31) +# CHECK: r17-=lsr(r21,#31) 0x51 0xdf 0x15 0x8e -# CHECK: r17 -= asl(r21, #31) +# CHECK: r17-=asl(r21,#31) 0x91 0xdf 0x15 0x8e -# CHECK: r17 += asr(r21, #31) +# CHECK: r17+=asr(r21,#31) 0xb1 0xdf 0x15 0x8e -# CHECK: r17 += lsr(r21, #31) +# CHECK: r17+=lsr(r21,#31) 0xd1 0xdf 0x15 0x8e -# CHECK: r17 += asl(r21, #31) +# CHECK: r17+=asl(r21,#31) 0x4c 0xf7 0x11 0xde -# CHECK: r17 = add(#21, asl(r17, #23)) +# CHECK: r17=add(#21,asl(r17,#23)) 0x4e 0xf7 0x11 0xde -# CHECK: r17 = sub(#21, asl(r17, #23)) +# CHECK: r17=sub(#21,asl(r17,#23)) 0x5c 0xf7 0x11 0xde -# CHECK: r17 = add(#21, lsr(r17, #23)) +# CHECK: r17=add(#21,lsr(r17,#23)) 0x5e 0xf7 0x11 0xde -# CHECK: r17 = sub(#21, lsr(r17, #23)) +# CHECK: r17=sub(#21,lsr(r17,#23)) # Shift by immediate and add 0xf1 0xd5 0x1f 0xc4 -# CHECK: r17 = addasl(r21, r31, #7) +# CHECK: r17=addasl(r21,r31,#7) # Shift by immediate and logical 0x10 0xdf 0x54 0x82 -# CHECK: r17:16 &= asr(r21:20, #31) +# CHECK: r17:16&=asr(r21:20,#31) 0x30 0xdf 0x54 0x82 -# CHECK: r17:16 &= lsr(r21:20, #31) +# CHECK: r17:16&=lsr(r21:20,#31) 0x50 0xdf 0x54 0x82 -# CHECK: r17:16 &= asl(r21:20, #31) +# CHECK: r17:16&=asl(r21:20,#31) 0x90 0xdf 0x54 0x82 -# CHECK: r17:16 |= asr(r21:20, #31) +# CHECK: r17:16|=asr(r21:20,#31) 0xb0 0xdf 0x54 0x82 -# CHECK: r17:16 |= lsr(r21:20, #31) +# CHECK: r17:16|=lsr(r21:20,#31) 0xd0 0xdf 0x54 0x82 -# CHECK: r17:16 |= asl(r21:20, #31) +# CHECK: r17:16|=asl(r21:20,#31) 0x30 0xdf 0x94 0x82 -# CHECK: r17:16 ^= lsr(r21:20, #31) +# CHECK: r17:16^=lsr(r21:20,#31) 0x50 0xdf 0x94 0x82 -# CHECK: r17:16 ^= asl(r21:20, #31) +# CHECK: r17:16^=asl(r21:20,#31) 0x11 0xdf 0x55 0x8e -# CHECK: r17 &= asr(r21, #31) +# CHECK: r17&=asr(r21,#31) 0x31 0xdf 0x55 0x8e -# CHECK: r17 &= lsr(r21, #31) +# CHECK: r17&=lsr(r21,#31) 0x51 0xdf 0x55 0x8e -# CHECK: r17 &= asl(r21, #31) +# CHECK: r17&=asl(r21,#31) 0x91 0xdf 0x55 0x8e -# CHECK: r17 |= asr(r21, #31) +# CHECK: r17|=asr(r21,#31) 0xb1 0xdf 0x55 0x8e -# CHECK: r17 |= lsr(r21, #31) +# CHECK: r17|=lsr(r21,#31) 0xd1 0xdf 0x55 0x8e -# CHECK: r17 |= asl(r21, #31) +# CHECK: r17|=asl(r21,#31) 0x31 0xdf 0x95 0x8e -# CHECK: r17 ^= lsr(r21, #31) +# CHECK: r17^=lsr(r21,#31) 0x51 0xdf 0x95 0x8e -# CHECK: r17 ^= asl(r21, #31) +# CHECK: r17^=asl(r21,#31) 0x48 0xff 0x11 0xde -# CHECK: r17 = and(#21, asl(r17, #31)) +# CHECK: r17=and(#21,asl(r17,#31)) 0x4a 0xff 0x11 0xde -# CHECK: r17 = or(#21, asl(r17, #31)) +# CHECK: r17=or(#21,asl(r17,#31)) 0x58 0xff 0x11 0xde -# CHECK: r17 = and(#21, lsr(r17, #31)) +# CHECK: r17=and(#21,lsr(r17,#31)) 0x5a 0xff 0x11 0xde -# CHECK: r17 = or(#21, lsr(r17, #31)) +# CHECK: r17=or(#21,lsr(r17,#31)) # Shift right by immediate with rounding 0xf0 0xdf 0xd4 0x80 -# CHECK: r17:16 = asr(r21:20, #31):rnd +# CHECK: r17:16=asr(r21:20,#31):rnd 0x11 0xdf 0x55 0x8c -# CHECK: r17 = asr(r21, #31):rnd +# CHECK: r17=asr(r21,#31):rnd # Shift left by immediate with saturation 0x51 0xdf 0x55 0x8c -# CHECK: r17 = asl(r21, #31):sat +# CHECK: r17=asl(r21,#31):sat # Shift by register 0x10 0xdf 0x94 0xc3 -# CHECK: r17:16 = asr(r21:20, r31) +# CHECK: r17:16=asr(r21:20,r31) 0x50 0xdf 0x94 0xc3 -# CHECK: r17:16 = lsr(r21:20, r31) +# CHECK: r17:16=lsr(r21:20,r31) 0x90 0xdf 0x94 0xc3 -# CHECK: r17:16 = asl(r21:20, r31) +# CHECK: r17:16=asl(r21:20,r31) 0xd0 0xdf 0x94 0xc3 -# CHECK: r17:16 = lsl(r21:20, r31) +# CHECK: r17:16=lsl(r21:20,r31) 0x11 0xdf 0x55 
0xc6 -# CHECK: r17 = asr(r21, r31) +# CHECK: r17=asr(r21,r31) 0x51 0xdf 0x55 0xc6 -# CHECK: r17 = lsr(r21, r31) +# CHECK: r17=lsr(r21,r31) 0x91 0xdf 0x55 0xc6 -# CHECK: r17 = asl(r21, r31) +# CHECK: r17=asl(r21,r31) 0xd1 0xdf 0x55 0xc6 -# CHECK: r17 = lsl(r21, r31) +# CHECK: r17=lsl(r21,r31) 0xf1 0xdf 0x8a 0xc6 -# CHECK: r17 = lsl(#21, r31) +# CHECK: r17=lsl(#21,r31) # Shift by register and accumulate 0x10 0xdf 0x94 0xcb -# CHECK: r17:16 -= asr(r21:20, r31) +# CHECK: r17:16-=asr(r21:20,r31) 0x50 0xdf 0x94 0xcb -# CHECK: r17:16 -= lsr(r21:20, r31) +# CHECK: r17:16-=lsr(r21:20,r31) 0x90 0xdf 0x94 0xcb -# CHECK: r17:16 -= asl(r21:20, r31) +# CHECK: r17:16-=asl(r21:20,r31) 0xd0 0xdf 0x94 0xcb -# CHECK: r17:16 -= lsl(r21:20, r31) +# CHECK: r17:16-=lsl(r21:20,r31) 0x10 0xdf 0xd4 0xcb -# CHECK: r17:16 += asr(r21:20, r31) +# CHECK: r17:16+=asr(r21:20,r31) 0x50 0xdf 0xd4 0xcb -# CHECK: r17:16 += lsr(r21:20, r31) +# CHECK: r17:16+=lsr(r21:20,r31) 0x90 0xdf 0xd4 0xcb -# CHECK: r17:16 += asl(r21:20, r31) +# CHECK: r17:16+=asl(r21:20,r31) 0xd0 0xdf 0xd4 0xcb -# CHECK: r17:16 += lsl(r21:20, r31) +# CHECK: r17:16+=lsl(r21:20,r31) 0x11 0xdf 0x95 0xcc -# CHECK: r17 -= asr(r21, r31) +# CHECK: r17-=asr(r21,r31) 0x51 0xdf 0x95 0xcc -# CHECK: r17 -= lsr(r21, r31) +# CHECK: r17-=lsr(r21,r31) 0x91 0xdf 0x95 0xcc -# CHECK: r17 -= asl(r21, r31) +# CHECK: r17-=asl(r21,r31) 0xd1 0xdf 0x95 0xcc -# CHECK: r17 -= lsl(r21, r31) +# CHECK: r17-=lsl(r21,r31) 0x11 0xdf 0xd5 0xcc -# CHECK: r17 += asr(r21, r31) +# CHECK: r17+=asr(r21,r31) 0x51 0xdf 0xd5 0xcc -# CHECK: r17 += lsr(r21, r31) +# CHECK: r17+=lsr(r21,r31) 0x91 0xdf 0xd5 0xcc -# CHECK: r17 += asl(r21, r31) +# CHECK: r17+=asl(r21,r31) 0xd1 0xdf 0xd5 0xcc -# CHECK: r17 += lsl(r21, r31) +# CHECK: r17+=lsl(r21,r31) # Shift by register and logical 0x10 0xdf 0x14 0xcb -# CHECK: r17:16 |= asr(r21:20, r31) +# CHECK: r17:16|=asr(r21:20,r31) 0x50 0xdf 0x14 0xcb -# CHECK: r17:16 |= lsr(r21:20, r31) +# CHECK: r17:16|=lsr(r21:20,r31) 0x90 0xdf 0x14 0xcb -# CHECK: r17:16 |= asl(r21:20, r31) +# CHECK: r17:16|=asl(r21:20,r31) 0xd0 0xdf 0x14 0xcb -# CHECK: r17:16 |= lsl(r21:20, r31) +# CHECK: r17:16|=lsl(r21:20,r31) 0x10 0xdf 0x54 0xcb -# CHECK: r17:16 &= asr(r21:20, r31) +# CHECK: r17:16&=asr(r21:20,r31) 0x50 0xdf 0x54 0xcb -# CHECK: r17:16 &= lsr(r21:20, r31) +# CHECK: r17:16&=lsr(r21:20,r31) 0x90 0xdf 0x54 0xcb -# CHECK: r17:16 &= asl(r21:20, r31) +# CHECK: r17:16&=asl(r21:20,r31) 0xd0 0xdf 0x54 0xcb -# CHECK: r17:16 &= lsl(r21:20, r31) +# CHECK: r17:16&=lsl(r21:20,r31) 0x10 0xdf 0x74 0xcb -# CHECK: r17:16 ^= asr(r21:20, r31) +# CHECK: r17:16^=asr(r21:20,r31) 0x50 0xdf 0x74 0xcb -# CHECK: r17:16 ^= lsr(r21:20, r31) +# CHECK: r17:16^=lsr(r21:20,r31) 0x90 0xdf 0x74 0xcb -# CHECK: r17:16 ^= asl(r21:20, r31) +# CHECK: r17:16^=asl(r21:20,r31) 0xd0 0xdf 0x74 0xcb -# CHECK: r17:16 ^= lsl(r21:20, r31) +# CHECK: r17:16^=lsl(r21:20,r31) 0x11 0xdf 0x15 0xcc -# CHECK: r17 |= asr(r21, r31) +# CHECK: r17|=asr(r21,r31) 0x51 0xdf 0x15 0xcc -# CHECK: r17 |= lsr(r21, r31) +# CHECK: r17|=lsr(r21,r31) 0x91 0xdf 0x15 0xcc -# CHECK: r17 |= asl(r21, r31) +# CHECK: r17|=asl(r21,r31) 0xd1 0xdf 0x15 0xcc -# CHECK: r17 |= lsl(r21, r31) +# CHECK: r17|=lsl(r21,r31) 0x11 0xdf 0x55 0xcc -# CHECK: r17 &= asr(r21, r31) +# CHECK: r17&=asr(r21,r31) 0x51 0xdf 0x55 0xcc -# CHECK: r17 &= lsr(r21, r31) +# CHECK: r17&=lsr(r21,r31) 0x91 0xdf 0x55 0xcc -# CHECK: r17 &= asl(r21, r31) +# CHECK: r17&=asl(r21,r31) 0xd1 0xdf 0x55 0xcc -# CHECK: r17 &= lsl(r21, r31) +# CHECK: r17&=lsl(r21,r31) # Shift by register with 
saturation 0x11 0xdf 0x15 0xc6 -# CHECK: r17 = asr(r21, r31):sat +# CHECK: r17=asr(r21,r31):sat 0x91 0xdf 0x15 0xc6 -# CHECK: r17 = asl(r21, r31):sat +# CHECK: r17=asl(r21,r31):sat # Vector shift halfwords by immediate 0x10 0xc5 0x94 0x80 -# CHECK: r17:16 = vasrh(r21:20, #5) +# CHECK: r17:16=vasrh(r21:20,#5) 0x30 0xc5 0x94 0x80 -# CHECK: r17:16 = vlsrh(r21:20, #5) +# CHECK: r17:16=vlsrh(r21:20,#5) 0x50 0xc5 0x94 0x80 -# CHECK: r17:16 = vaslh(r21:20, #5) +# CHECK: r17:16=vaslh(r21:20,#5) # Vector arithmetic shift halfwords with round 0x10 0xc5 0x34 0x80 -# CHECK: r17:16 = vasrh(r21:20, #5):raw +# CHECK: r17:16=vasrh(r21:20,#5):raw # Vector arithmetic shift halfwords with saturate and pack 0x91 0xc5 0x74 0x88 -# CHECK: r17 = vasrhub(r21:20, #5):raw +# CHECK: r17=vasrhub(r21:20,#5):raw 0xb1 0xc5 0x74 0x88 -# CHECK: r17 = vasrhub(r21:20, #5):sat +# CHECK: r17=vasrhub(r21:20,#5):sat # Vector shift halfwords by register 0x10 0xdf 0x54 0xc3 -# CHECK: r17:16 = vasrh(r21:20, r31) +# CHECK: r17:16=vasrh(r21:20,r31) 0x50 0xdf 0x54 0xc3 -# CHECK: r17:16 = vlsrh(r21:20, r31) +# CHECK: r17:16=vlsrh(r21:20,r31) 0x90 0xdf 0x54 0xc3 -# CHECK: r17:16 = vaslh(r21:20, r31) +# CHECK: r17:16=vaslh(r21:20,r31) 0xd0 0xdf 0x54 0xc3 -# CHECK: r17:16 = vlslh(r21:20, r31) +# CHECK: r17:16=vlslh(r21:20,r31) # Vector shift words by immediate 0x10 0xdf 0x54 0x80 -# CHECK: r17:16 = vasrw(r21:20, #31) +# CHECK: r17:16=vasrw(r21:20,#31) 0x30 0xdf 0x54 0x80 -# CHECK: r17:16 = vlsrw(r21:20, #31) +# CHECK: r17:16=vlsrw(r21:20,#31) 0x50 0xdf 0x54 0x80 -# CHECK: r17:16 = vaslw(r21:20, #31) +# CHECK: r17:16=vaslw(r21:20,#31) # Vector shift words by register 0x10 0xdf 0x14 0xc3 -# CHECK: r17:16 = vasrw(r21:20, r31) +# CHECK: r17:16=vasrw(r21:20,r31) 0x50 0xdf 0x14 0xc3 -# CHECK: r17:16 = vlsrw(r21:20, r31) +# CHECK: r17:16=vlsrw(r21:20,r31) 0x90 0xdf 0x14 0xc3 -# CHECK: r17:16 = vaslw(r21:20, r31) +# CHECK: r17:16=vaslw(r21:20,r31) 0xd0 0xdf 0x14 0xc3 -# CHECK: r17:16 = vlslw(r21:20, r31) +# CHECK: r17:16=vlslw(r21:20,r31) # Vector shift words with truncate and pack 0x51 0xdf 0xd4 0x88 -# CHECK: r17 = vasrw(r21:20, #31) +# CHECK: r17=vasrw(r21:20,#31) 0x51 0xdf 0x14 0xc5 -# CHECK: r17 = vasrw(r21:20, r31) +# CHECK: r17=vasrw(r21:20,r31) Index: test/MC/Hexagon/align.s =================================================================== --- test/MC/Hexagon/align.s +++ test/MC/Hexagon/align.s @@ -3,7 +3,7 @@ # Verify that the .align directive emits the proper insn packets. { r1 = sub(#1, r1) } -# CHECK: 76414021 { r1 = sub(#1, r1) +# CHECK: 76414021 { r1=sub(#1,r1) # CHECK-NEXT: 7f004000 nop # CHECK-NEXT: 7f004000 nop # CHECK-NEXT: 7f00c000 nop } @@ -11,8 +11,8 @@ .align 16 { r1 = sub(#1, r1) r2 = sub(#1, r2) } -# CHECK: 76414021 { r1 = sub(#1, r1) -# CHECK-NEXT: 76424022 r2 = sub(#1, r2) +# CHECK: 76414021 { r1=sub(#1,r1) +# CHECK-NEXT: 76424022 r2=sub(#1,r2) # CHECK-NEXT: 7f004000 nop # CHECK-NEXT: 7f00c000 nop } @@ -20,7 +20,7 @@ { r1 = sub(#1, r1) r2 = sub(#1, r2) r3 = sub(#1, r3) } -# CHECK: 76434023 r3 = sub(#1, r3) +# CHECK: 76434023 r3=sub(#1,r3) # CHECK-NEXT: 7f00c000 nop } .align 16 @@ -30,16 +30,16 @@ r4 = sub(#1, r4) } # Don't pad packets that can't be padded e.g. 
solo insts -# CHECK: 9200c020 { r0 = vextract(v0,r0) } +# CHECK: 9200c020 { r0=vextract(v0,r0) } r0 = vextract(v0, r0) .align 128 -# CHECK: 76414021 { r1 = sub(#1, r1) +# CHECK: 76414021 { r1=sub(#1,r1) # CHECK-NEXT: 7f00c000 nop } { r1 = sub(#1, r1) } -#CHECK: { r1 = sub(#1, r1) -#CHECK: r2 = sub(#1, r2) -#CHECK: r3 = sub(#1, r3) } +#CHECK: { r1=sub(#1,r1) +#CHECK: r2=sub(#1,r2) +#CHECK: r3=sub(#1,r3) } .falign .align 8 { r1 = sub(#1, r1) @@ -47,13 +47,13 @@ r3 = sub(#1, r3) } # CHECK: { immext(#0) -# CHECK: r0 = sub(##1, r0) +# CHECK: r0=sub(##1,r0) # CHECK: immext(#0) -# CHECK: r1 = sub(##1, r1) } +# CHECK: r1=sub(##1,r1) } # CHECK: { nop # CHECK: nop # CHECK: nop } -# CHECK: { r0 = sub(#1, r0) } +# CHECK: { r0=sub(#1,r0) } { r0 = sub (##1, r0) r1 = sub (##1, r1) } .align 16 Index: test/MC/Hexagon/asmMap.s =================================================================== --- test/MC/Hexagon/asmMap.s +++ test/MC/Hexagon/asmMap.s @@ -11,82 +11,82 @@ #CHECK: 3c07c012 { memb(r7{{ *}}+{{ *}}#0)=#18 memb(r7)=#18 -#CHECK: 4101c008 { if (p0) r8 = memb(r1{{ *}}+{{ *}}#0) +#CHECK: 4101c008 { if (p0) r8=memb(r1{{ *}}+{{ *}}#0) if (p0) r8=memb(r1) -#CHECK: 4519d817 { if (!p3) r23 = memb(r25{{ *}}+{{ *}}#0) +#CHECK: 4519d817 { if (!p3) r23=memb(r25{{ *}}+{{ *}}#0) if (!p3) r23=memb(r25) -#CHECK: 412dc002 { if (p0) r2 = memub(r13{{ *}}+{{ *}}#0) +#CHECK: 412dc002 { if (p0) r2=memub(r13{{ *}}+{{ *}}#0) if (p0) r2=memub(r13) -#CHECK: 453cc01a { if (!p0) r26 = memub(r28{{ *}}+{{ *}}#0) +#CHECK: 453cc01a { if (!p0) r26=memub(r28{{ *}}+{{ *}}#0) if (!p0) r26=memub(r28) -#CHECK: 416bc818 { if (p1) r24 = memuh(r11{{ *}}+{{ *}}#0) +#CHECK: 416bc818 { if (p1) r24=memuh(r11{{ *}}+{{ *}}#0) if (p1) r24=memuh(r11) -#CHECK: 457fc012 { if (!p0) r18 = memuh(r31{{ *}}+{{ *}}#0) +#CHECK: 457fc012 { if (!p0) r18=memuh(r31{{ *}}+{{ *}}#0) if (!p0) r18=memuh(r31) -#CHECK: 455dc014 { if (!p0) r20 = memh(r29{{ *}}+{{ *}}#0) +#CHECK: 455dc014 { if (!p0) r20=memh(r29{{ *}}+{{ *}}#0) if (!p0) r20=memh(r29) -#CHECK: 415dc01d { if (p0) r29 = memh(r29{{ *}}+{{ *}}#0) +#CHECK: 415dc01d { if (p0) r29=memh(r29{{ *}}+{{ *}}#0) if (p0) r29=memh(r29) -#CHECK: 4583c01d { if (!p0) r29 = memw(r3{{ *}}+{{ *}}#0) +#CHECK: 4583c01d { if (!p0) r29=memw(r3{{ *}}+{{ *}}#0) if (!p0) r29=memw(r3) -#CHECK: 419bd01e { if (p2) r30 = memw(r27{{ *}}+{{ *}}#0) +#CHECK: 419bd01e { if (p2) r30=memw(r27{{ *}}+{{ *}}#0) if (p2) r30=memw(r27) -#CHECK: 90e2c018 { r25:24 = membh(r2{{ *}}+{{ *}}#0) +#CHECK: 90e2c018 { r25:24=membh(r2{{ *}}+{{ *}}#0) r25:24=membh(r2) -#CHECK: 902bc006 { r6 = membh(r11{{ *}}+{{ *}}#0) +#CHECK: 902bc006 { r6=membh(r11{{ *}}+{{ *}}#0) r6=membh(r11) -#CHECK: 90a2c01c { r29:28 = memubh(r2{{ *}}+{{ *}}#0) +#CHECK: 90a2c01c { r29:28=memubh(r2{{ *}}+{{ *}}#0) r29:28=memubh(r2) -#CHECK: 906ec00d { r13 = memubh(r14{{ *}}+{{ *}}#0) +#CHECK: 906ec00d { r13=memubh(r14{{ *}}+{{ *}}#0) r13=memubh(r14) -#CHECK: 91dac00c { r13:12 = memd(r26{{ *}}+{{ *}}#0) +#CHECK: 91dac00c { r13:12=memd(r26{{ *}}+{{ *}}#0) r13:12=memd(r26) -#CHECK: 919bc004 { r4 = memw(r27{{ *}}+{{ *}}#0) +#CHECK: 919bc004 { r4=memw(r27{{ *}}+{{ *}}#0) r4=memw(r27) -#CHECK: 914cc005 { r5 = memh(r12{{ *}}+{{ *}}#0) +#CHECK: 914cc005 { r5=memh(r12{{ *}}+{{ *}}#0) r5=memh(r12) -#CHECK: 9176c010 { r16 = memuh(r22{{ *}}+{{ *}}#0) +#CHECK: 9176c010 { r16=memuh(r22{{ *}}+{{ *}}#0) r16=memuh(r22) -#CHECK: 910bc017 { r23 = memb(r11{{ *}}+{{ *}}#0) +#CHECK: 910bc017 { r23=memb(r11{{ *}}+{{ *}}#0) r23=memb(r11) -#CHECK: 912bc01b { r27 = memub(r11{{ *}}+{{ *}}#0) +#CHECK: 912bc01b { 
r27=memub(r11{{ *}}+{{ *}}#0) r27=memub(r11) -#CHECK: 404ede01 { if (p1) memh(r14{{ *}}+{{ *}}#0) = r30 +#CHECK: 404ede01 { if (p1) memh(r14{{ *}}+{{ *}}#0)=r30 if (p1) memh(r14)=r30 -#CHECK: 4449d900 { if (!p0) memh(r9{{ *}}+{{ *}}#0) = r25 +#CHECK: 4449d900 { if (!p0) memh(r9{{ *}}+{{ *}}#0)=r25 if (!p0) memh(r9)=r25 -#CHECK: 400ecd00 { if (p0) memb(r14{{ *}}+{{ *}}#0) = r13 +#CHECK: 400ecd00 { if (p0) memb(r14{{ *}}+{{ *}}#0)=r13 if (p0) memb(r14)=r13 -#CHECK: 440bcc01 { if (!p1) memb(r11{{ *}}+{{ *}}#0) = r12 +#CHECK: 440bcc01 { if (!p1) memb(r11{{ *}}+{{ *}}#0)=r12 if (!p1) memb(r11)=r12 -#CHECK: 41d0d804 { if (p3) r5:4 = memd(r16{{ *}}+{{ *}}#0) +#CHECK: 41d0d804 { if (p3) r5:4=memd(r16{{ *}}+{{ *}}#0) if (p3) r5:4=memd(r16) -#CHECK: 45d9c00c { if (!p0) r13:12 = memd(r25{{ *}}+{{ *}}#0) +#CHECK: 45d9c00c { if (!p0) r13:12=memd(r25{{ *}}+{{ *}}#0) if (!p0) r13:12=memd(r25) #CHECK: 385ee06d { if (p3) memw(r30{{ *}}+{{ *}}#0)=#-19 @@ -107,34 +107,34 @@ #CHECK: 38b7c013 { if (!p0) memh(r23{{ *}}+{{ *}}#0)=#19 if (!p0) memh(r23)=#19 -#CHECK: 4488d401 { if (!p1) memw(r8{{ *}}+{{ *}}#0) = r20 +#CHECK: 4488d401 { if (!p1) memw(r8{{ *}}+{{ *}}#0)=r20 if (!p1) memw(r8)=r20 -#CHECK: 409ddc02 { if (p2) memw(r29{{ *}}+{{ *}}#0) = r28 +#CHECK: 409ddc02 { if (p2) memw(r29{{ *}}+{{ *}}#0)=r28 if (p2) memw(r29)=r28 -#CHECK: 446fc301 { if (!p1) memh(r15{{ *}}+{{ *}}#0) = r3.h +#CHECK: 446fc301 { if (!p1) memh(r15{{ *}}+{{ *}}#0)=r3.h if (!p1) memh(r15)=r3.h -#CHECK: 406dc201 { if (p1) memh(r13{{ *}}+{{ *}}#0) = r2.h +#CHECK: 406dc201 { if (p1) memh(r13{{ *}}+{{ *}}#0)=r2.h if (p1) memh(r13)=r2.h -#CHECK: 40d9c601 { if (p1) memd(r25{{ *}}+{{ *}}#0) = r7:6 +#CHECK: 40d9c601 { if (p1) memd(r25{{ *}}+{{ *}}#0)=r7:6 if (p1) memd(r25)=r7:6 -#CHECK: 44dad803 { if (!p3) memd(r26{{ *}}+{{ *}}#0) = r25:24 +#CHECK: 44dad803 { if (!p3) memd(r26{{ *}}+{{ *}}#0)=r25:24 if (!p3) memd(r26)=r25:24 -#CHECK: 3e21c011 { memh(r1{{ *}}+{{ *}}#0) {{ *}}+={{ *}} r17 +#CHECK: 3e21c011 { memh(r1{{ *}}+{{ *}}#0){{ *}}+={{ *}}r17 memh(r1)+=r17 -#CHECK: 3e4fc019 { memw(r15{{ *}}+{{ *}}#0) {{ *}}+={{ *}} r25 +#CHECK: 3e4fc019 { memw(r15{{ *}}+{{ *}}#0){{ *}}+={{ *}}r25 memw(r15)+=r25 -#CHECK: 3e5dc022 { memw(r29{{ *}}+{{ *}}#0) {{ *}}-={{ *}} r2 +#CHECK: 3e5dc022 { memw(r29{{ *}}+{{ *}}#0){{ *}}-={{ *}}r2 memw(r29)-=r2 -#CHECK: 3e04c004 { memb(r4{{ *}}+{{ *}}#0) {{ *}}+={{ *}} r4 +#CHECK: 3e04c004 { memb(r4{{ *}}+{{ *}}#0){{ *}}+={{ *}}r4 memb(r4)+=r4 #CHECK: 3f53c016 { memw(r19{{ *}}+{{ *}}#0){{ *}}{{ *}}+={{ *}}{{ *}}#22 @@ -143,55 +143,55 @@ #CHECK: 3f24c01e { memh(r4{{ *}}+{{ *}}#0){{ *}}{{ *}}+={{ *}}{{ *}}#30 memh(r4)+=#30 -#CHECK: 3e27c02d { memh(r7{{ *}}+{{ *}}#0) {{ *}}-={{ *}} r13 +#CHECK: 3e27c02d { memh(r7{{ *}}+{{ *}}#0){{ *}}-={{ *}}r13 memh(r7)-=r13 -#CHECK: 3e1ec032 { memb(r30{{ *}}+{{ *}}#0) {{ *}}-={{ *}} r18 +#CHECK: 3e1ec032 { memb(r30{{ *}}+{{ *}}#0){{ *}}-={{ *}}r18 memb(r30)-=r18 -#CHECK: 3e49c05b { memw(r9{{ *}}+{{ *}}#0) &= r27 +#CHECK: 3e49c05b { memw(r9{{ *}}+{{ *}}#0)&=r27 memw(r9)&=r27 -#CHECK: 3e2dc040 { memh(r13{{ *}}+{{ *}}#0) &= r0 +#CHECK: 3e2dc040 { memh(r13{{ *}}+{{ *}}#0)&=r0 memh(r13)&=r0 -#CHECK: 3e05c046 { memb(r5{{ *}}+{{ *}}#0) &= r6 +#CHECK: 3e05c046 { memb(r5{{ *}}+{{ *}}#0)&=r6 memb(r5)&=r6 -#CHECK: 3e45c06a { memw(r5{{ *}}+{{ *}}#0) |= r10 +#CHECK: 3e45c06a { memw(r5{{ *}}+{{ *}}#0)|=r10 memw(r5)|=r10 -#CHECK: 3e21c07e { memh(r1{{ *}}+{{ *}}#0) |= r30 +#CHECK: 3e21c07e { memh(r1{{ *}}+{{ *}}#0)|=r30 memh(r1)|=r30 -#CHECK: 3e09c06f { memb(r9{{ *}}+{{ *}}#0) |= r15 +#CHECK: 3e09c06f { 
memb(r9{{ *}}+{{ *}}#0)|=r15 memb(r9)|=r15 -#CHECK: a157d100 { memh(r23{{ *}}+{{ *}}#0) = r17 +#CHECK: a157d100 { memh(r23{{ *}}+{{ *}}#0)=r17 memh(r23)=r17 -#CHECK: a10fd400 { memb(r15{{ *}}+{{ *}}#0) = r20 +#CHECK: a10fd400 { memb(r15{{ *}}+{{ *}}#0)=r20 memb(r15)=r20 -#CHECK: 9082c014 { r21:20 = memb_fifo(r2{{ *}}+{{ *}}#0) +#CHECK: 9082c014 { r21:20=memb_fifo(r2{{ *}}+{{ *}}#0) r21:20=memb_fifo(r2) -#CHECK: 9056c01c { r29:28 = memh_fifo(r22{{ *}}+{{ *}}#0) +#CHECK: 9056c01c { r29:28=memh_fifo(r22{{ *}}+{{ *}}#0) r29:28=memh_fifo(r22) -#CHECK: a1d8ca00 { memd(r24{{ *}}+{{ *}}#0) = r11:10 +#CHECK: a1d8ca00 { memd(r24{{ *}}+{{ *}}#0)=r11:10 memd(r24)=r11:10 -#CHECK: a19ed900 { memw(r30{{ *}}+{{ *}}#0) = r25 +#CHECK: a19ed900 { memw(r30{{ *}}+{{ *}}#0)=r25 memw(r30)=r25 -#CHECK: a169ce00 { memh(r9{{ *}}+{{ *}}#0) = r14.h +#CHECK: a169ce00 { memh(r9{{ *}}+{{ *}}#0)=r14.h memh(r9)=r14.h -#CHECK: 3f07c06b { memb(r7{{ *}}+{{ *}}#0) = setbit(#11) +#CHECK: 3f07c06b { memb(r7{{ *}}+{{ *}}#0)=setbit(#11) memb(r7)=setbit(#11) -#CHECK: 3f34c07b { memh(r20{{ *}}+{{ *}}#0) = setbit(#27) +#CHECK: 3f34c07b { memh(r20{{ *}}+{{ *}}#0)=setbit(#27) memh(r20)=setbit(#27) #CHECK: 3f1cc032 { memb(r28{{ *}}+{{ *}}#0){{ *}}-={{ *}}#18 @@ -206,16 +206,16 @@ #CHECK: 3f00c00c { memb(r0{{ *}}+{{ *}}#0){{ *}}+={{ *}}#12 memb(r0)+=#12 -#CHECK: 3f50c07a { memw(r16{{ *}}+{{ *}}#0) = setbit(#26) +#CHECK: 3f50c07a { memw(r16{{ *}}+{{ *}}#0)=setbit(#26) memw(r16)=setbit(#26) -#CHECK: 3f1fc05d { memb(r31{{ *}}+{{ *}}#0) = clrbit(#29) +#CHECK: 3f1fc05d { memb(r31{{ *}}+{{ *}}#0)=clrbit(#29) memb(r31)=clrbit(#29) -#CHECK: 3f20c05e { memh(r0{{ *}}+{{ *}}#0) = clrbit(#30) +#CHECK: 3f20c05e { memh(r0{{ *}}+{{ *}}#0)=clrbit(#30) memh(r0)=clrbit(#30) -#CHECK: 3f42c059 { memw(r2{{ *}}+{{ *}}#0) = clrbit(#25) +#CHECK: 3f42c059 { memw(r2{{ *}}+{{ *}}#0)=clrbit(#25) memw(r2)=clrbit(#25) #CHECK: 39cfe072 if (!p3.new) memw(r15{{ *}}+{{ *}}#0)=#-14 @@ -230,85 +230,85 @@ if (p3.new) memw(r25)=#-21 } -#CHECK: 4312c801 if (p1.new) r1 = memb(r18{{ *}}+{{ *}}#0) +#CHECK: 4312c801 if (p1.new) r1=memb(r18{{ *}}+{{ *}}#0) { if (p1.new) r1=memb(r18) p1=cmp.eq(r23,##-1105571618) } -#CHECK: 4718d803 if (!p3.new) r3 = memb(r24{{ *}}+{{ *}}#0) +#CHECK: 4718d803 if (!p3.new) r3=memb(r24{{ *}}+{{ *}}#0) { if (!p3.new) r3=memb(r24) p3=cmp.eq(r3,##-210870878) } -#CHECK: 4326c81b if (p1.new) r27 = memub(r6{{ *}}+{{ *}}#0) +#CHECK: 4326c81b if (p1.new) r27=memub(r6{{ *}}+{{ *}}#0) { if (p1.new) r27=memub(r6) p1=cmp.eq(r29,##-188410493) } -#CHECK: 473ad00d if (!p2.new) r13 = memub(r26{{ *}}+{{ *}}#0) +#CHECK: 473ad00d if (!p2.new) r13=memub(r26{{ *}}+{{ *}}#0) { p2=cmp.eq(r30,##-1823852150) if (!p2.new) r13=memub(r26) } -#CHECK: 4785d80e if (!p3.new) r14 = memw(r5{{ *}}+{{ *}}#0) +#CHECK: 4785d80e if (!p3.new) r14=memw(r5{{ *}}+{{ *}}#0) { if (!p3.new) r14=memw(r5) p3=cmp.eq(r31,##-228524711) } -#CHECK: 438cc81a if (p1.new) r26 = memw(r12{{ *}}+{{ *}}#0) +#CHECK: 438cc81a if (p1.new) r26=memw(r12{{ *}}+{{ *}}#0) { if (p1.new) r26=memw(r12) p1=cmp.eq(r11,##-485232313) } -#CHECK: 477dc019 if (!p0.new) r25 = memuh(r29{{ *}}+{{ *}}#0) +#CHECK: 477dc019 if (!p0.new) r25=memuh(r29{{ *}}+{{ *}}#0) { p0=cmp.eq(r23,##127565957) if (!p0.new) r25=memuh(r29) } -#CHECK: 4377c807 if (p1.new) r7 = memuh(r23{{ *}}+{{ *}}#0) +#CHECK: 4377c807 if (p1.new) r7=memuh(r23{{ *}}+{{ *}}#0) { p1=cmp.eq(r30,##-222020054) if (p1.new) r7=memuh(r23) } -#CHECK: 4754c81c if (!p1.new) r28 = memh(r20{{ *}}+{{ *}}#0) +#CHECK: 4754c81c if (!p1.new) r28=memh(r20{{ *}}+{{ *}}#0) { 
p1=cmp.eq(r18,##1159699785) if (!p1.new) r28=memh(r20) } -#CHECK: 435ec01b if (p0.new) r27 = memh(r30{{ *}}+{{ *}}#0) +#CHECK: 435ec01b if (p0.new) r27=memh(r30{{ *}}+{{ *}}#0) { p0=cmp.eq(r7,##-1114567705) if (p0.new) r27=memh(r30) } -#CHECK: 420dd100 if (p0.new) memb(r13{{ *}}+{{ *}}#0) = r17 +#CHECK: 420dd100 if (p0.new) memb(r13{{ *}}+{{ *}}#0)=r17 { p0=cmp.eq(r21,##-1458796638) if (p0.new) memb(r13)=r17 } -#CHECK: 4601d602 if (!p2.new) memb(r1{{ *}}+{{ *}}#0) = r22 +#CHECK: 4601d602 if (!p2.new) memb(r1{{ *}}+{{ *}}#0)=r22 { p2=cmp.eq(r20,##-824022439) if (!p2.new) memb(r1)=r22 } -#CHECK: 43dcd808 if (p3.new) r9:8 = memd(r28{{ *}}+{{ *}}#0) +#CHECK: 43dcd808 if (p3.new) r9:8=memd(r28{{ *}}+{{ *}}#0) { p3=cmp.eq(r13,##56660744) if (p3.new) r9:8=memd(r28) } -#CHECK: 47d8c80e if (!p1.new) r15:14 = memd(r24{{ *}}+{{ *}}#0) +#CHECK: 47d8c80e if (!p1.new) r15:14=memd(r24{{ *}}+{{ *}}#0) { if (!p1.new) r15:14=memd(r24) p1=cmp.eq(r15,##1536716489) @@ -338,55 +338,55 @@ if (!p2.new) memh(r14)=#-10 } -#CHECK: 4692df01 if (!p1.new) memw(r18{{ *}}+{{ *}}#0) = r31 +#CHECK: 4692df01 if (!p1.new) memw(r18{{ *}}+{{ *}}#0)=r31 { if (!p1.new) memw(r18)=r31 p1=cmp.eq(r11,##-319375732) } -#CHECK: 428dc402 if (p2.new) memw(r13{{ *}}+{{ *}}#0) = r4 +#CHECK: 428dc402 if (p2.new) memw(r13{{ *}}+{{ *}}#0)=r4 { if (p2.new) memw(r13)=r4 p2=cmp.eq(r18,##1895120239) } -#CHECK: 4670c300 if (!p0.new) memh(r16{{ *}}+{{ *}}#0) = r3.h +#CHECK: 4670c300 if (!p0.new) memh(r16{{ *}}+{{ *}}#0)=r3.h { p0=cmp.eq(r25,##1348715015) if (!p0.new) memh(r16)=r3.h } -#CHECK: 426ddf02 if (p2.new) memh(r13{{ *}}+{{ *}}#0) = r31.h +#CHECK: 426ddf02 if (p2.new) memh(r13{{ *}}+{{ *}}#0)=r31.h { p2=cmp.eq(r25,##1085560657) if (p2.new) memh(r13)=r31.h } -#CHECK: 464bcb01 if (!p1.new) memh(r11{{ *}}+{{ *}}#0) = r11 +#CHECK: 464bcb01 if (!p1.new) memh(r11{{ *}}+{{ *}}#0)=r11 { p1=cmp.eq(r10,##1491455911) if (!p1.new) memh(r11)=r11 } -#CHECK: 4248d200 if (p0.new) memh(r8{{ *}}+{{ *}}#0) = r18 +#CHECK: 4248d200 if (p0.new) memh(r8{{ *}}+{{ *}}#0)=r18 { p0=cmp.eq(r3,##687581160) if (p0.new) memh(r8)=r18 } -#CHECK: 42deca00 if (p0.new) memd(r30{{ *}}+{{ *}}#0) = r11:10 +#CHECK: 42deca00 if (p0.new) memd(r30{{ *}}+{{ *}}#0)=r11:10 { if (p0.new) memd(r30)=r11:10 p0=cmp.eq(r28,##562796189) } -#CHECK: 46d5cc03 if (!p3.new) memd(r21{{ *}}+{{ *}}#0) = r13:12 +#CHECK: 46d5cc03 if (!p3.new) memd(r21{{ *}}+{{ *}}#0)=r13:12 { if (!p3.new) memd(r21)=r13:12 p3=cmp.eq(r6,##-969273288) } -#CHECK: 42bad201 if (p1.new) memw(r26{{ *}}+{{ *}}#0) = r22.new +#CHECK: 42bad201 if (p1.new) memw(r26{{ *}}+{{ *}}#0)=r22.new { if (p1.new) memw(r26)=r22.new p1=cmp.eq(r0,##-1110065473) @@ -393,7 +393,7 @@ r22=add(r28,r9) } -#CHECK: 46b9d201 if (!p1.new) memw(r25{{ *}}+{{ *}}#0) = r26.new +#CHECK: 46b9d201 if (!p1.new) memw(r25{{ *}}+{{ *}}#0)=r26.new { p1=cmp.eq(r11,##-753121346) r26=add(r19,r7) @@ -400,31 +400,31 @@ if (!p1.new) memw(r25)=r26.new } -#CHECK: 40aad200 if (p0) memw(r10{{ *}}+{{ *}}#0) = r6.new +#CHECK: 40aad200 if (p0) memw(r10{{ *}}+{{ *}}#0)=r6.new { r6=add(r30,r0) if (p0) memw(r10)=r6.new } -#CHECK: 44a6d202 if (!p2) memw(r6{{ *}}+{{ *}}#0) = r4.new +#CHECK: 44a6d202 if (!p2) memw(r6{{ *}}+{{ *}}#0)=r4.new { if (!p2) memw(r6)=r4.new r4=add(r0,r3) } -#CHECK: 40b9c200 if (p0) memb(r25{{ *}}+{{ *}}#0) = r29.new +#CHECK: 40b9c200 if (p0) memb(r25{{ *}}+{{ *}}#0)=r29.new { if (p0) memb(r25)=r29.new r29=add(r27,r30) } -#CHECK: 44bec203 if (!p3) memb(r30{{ *}}+{{ *}}#0) = r8.new +#CHECK: 44bec203 if (!p3) memb(r30{{ *}}+{{ *}}#0)=r8.new { if (!p3) 
memb(r30)=r8.new r8=add(r24,r4) } -#CHECK: 46aecc01 if (!p1.new) memh(r14{{ *}}+{{ *}}#0) = r13.new +#CHECK: 46aecc01 if (!p1.new) memh(r14{{ *}}+{{ *}}#0)=r13.new { if (!p1.new) memh(r14)=r13.new r13=add(r21,r2) @@ -431,7 +431,7 @@ p1=cmp.eq(r3,##-1529345886) } -#CHECK: 42bcca02 if (p2.new) memh(r28{{ *}}+{{ *}}#0) = r18.new +#CHECK: 42bcca02 if (p2.new) memh(r28{{ *}}+{{ *}}#0)=r18.new { p2=cmp.eq(r15,##2048545649) if (p2.new) memh(r28)=r18.new @@ -438,7 +438,7 @@ r18=add(r9,r3) } -#CHECK: 46aac200 if (!p0.new) memb(r10{{ *}}+{{ *}}#0) = r30.new +#CHECK: 46aac200 if (!p0.new) memb(r10{{ *}}+{{ *}}#0)=r30.new { p0=cmp.eq(r21,##-1160401822) r30=add(r9,r22) @@ -445,7 +445,7 @@ if (!p0.new) memb(r10)=r30.new } -#CHECK: 42b8c202 if (p2.new) memb(r24{{ *}}+{{ *}}#0) = r11.new +#CHECK: 42b8c202 if (p2.new) memb(r24{{ *}}+{{ *}}#0)=r11.new { if (p2.new) memb(r24)=r11.new p2=cmp.eq(r30,##1267977346) @@ -452,89 +452,89 @@ r11=add(r8,r18) } -#CHECK: 44a3ca00 if (!p0) memh(r3{{ *}}+{{ *}}#0) = r28.new +#CHECK: 44a3ca00 if (!p0) memh(r3{{ *}}+{{ *}}#0)=r28.new { r28=add(r16,r11) if (!p0) memh(r3)=r28.new } -#CHECK: 40abca03 if (p3) memh(r11{{ *}}+{{ *}}#0) = r24.new +#CHECK: 40abca03 if (p3) memh(r11{{ *}}+{{ *}}#0)=r24.new { if (p3) memh(r11)=r24.new r24=add(r18,r19) } -#CHECK: a1abd200 memw(r11{{ *}}+{{ *}}#0) = r5.new +#CHECK: a1abd200 memw(r11{{ *}}+{{ *}}#0)=r5.new { memw(r11)=r5.new r5=add(r0,r10) } -#CHECK: a1a2ca00 memh(r2{{ *}}+{{ *}}#0) = r18.new +#CHECK: a1a2ca00 memh(r2{{ *}}+{{ *}}#0)=r18.new { r18=add(r27,r18) memh(r2)=r18.new } -#CHECK: a1bac200 memb(r26{{ *}}+{{ *}}#0) = r15.new +#CHECK: a1bac200 memb(r26{{ *}}+{{ *}}#0)=r15.new { r15=add(r22,r17) memb(r26)=r15.new } -#CHECK: d328ce1c { r29:28{{ *}}={{ *}}vsubub(r15:14, r9:8) +#CHECK: d328ce1c { r29:28{{ *}}={{ *}}vsubub(r15:14,r9:8) r29:28=vsubb(r15:14,r9:8) -#CHECK: 8c5ed60c { r12{{ *}}={{ *}}asr(r30, #22):rnd +#CHECK: 8c5ed60c { r12{{ *}}={{ *}}asr(r30,#22):rnd r12=asrrnd(r30,#23) -#CHECK: ed1ec109 { r9{{ *}}={{ *}}mpyi(r30, r1) +#CHECK: ed1ec109 { r9{{ *}}={{ *}}mpyi(r30,r1) r9=mpyui(r30,r1) -#CHECK: e010d787 { r7{{ *}}={{ *}}+{{ *}}mpyi(r16, #188) +#CHECK: e010d787 { r7{{ *}}={{ *}}+{{ *}}mpyi(r16,#188) r7=mpyi(r16,#188) -#CHECK: d206eea2 { p2{{ *}}={{ *}}boundscheck(r7:6, r15:14):raw:hi +#CHECK: d206eea2 { p2{{ *}}={{ *}}boundscheck(r7:6,r15:14):raw:hi p2=boundscheck(r7,r15:14) -#CHECK: f27ac102 { p2{{ *}}={{ *}}cmp.gtu(r26, r1) +#CHECK: f27ac102 { p2{{ *}}={{ *}}cmp.gtu(r26,r1) p2=cmp.ltu(r1,r26) -#CHECK: f240df00 { p0{{ *}}={{ *}}cmp.gt(r0, r31) +#CHECK: f240df00 { p0{{ *}}={{ *}}cmp.gt(r0,r31) p0=cmp.lt(r31,r0) -#CHECK: 7586cc01 { p1{{ *}}={{ *}}cmp.gtu(r6, #96) +#CHECK: 7586cc01 { p1{{ *}}={{ *}}cmp.gtu(r6,#96) p1=cmp.geu(r6,#97) -#CHECK: 755dc9a2 { p2{{ *}}={{ *}}cmp.gt(r29, #77) +#CHECK: 755dc9a2 { p2{{ *}}={{ *}}cmp.gt(r29,#77) p2=cmp.ge(r29,#78) -#CHECK: d310d60a { r11:10{{ *}}={{ *}}vaddub(r17:16, r23:22) +#CHECK: d310d60a { r11:10{{ *}}={{ *}}vaddub(r17:16,r23:22) r11:10=vaddb(r17:16,r23:22) -#CHECK: 8753d1e6 { r6{{ *}}={{ *}}tableidxh(r19, #7, #17):raw +#CHECK: 8753d1e6 { r6{{ *}}={{ *}}tableidxh(r19,#7,#17):raw r6=tableidxh(r19,#7,#18) -#CHECK: 8786d277 { r23{{ *}}={{ *}}tableidxw(r6, #3, #18):raw +#CHECK: 8786d277 { r23{{ *}}={{ *}}tableidxw(r6,#3,#18):raw r23=tableidxw(r6,#3,#20) -#CHECK: 7c4dfff8 { r25:24{{ *}}={{ *}}combine(#-1, #-101) +#CHECK: 7c4dfff8 { r25:24{{ *}}={{ *}}combine(#-1,#-101) r25:24=#-101 -#CHECK: 8866c09a { r26{{ *}}={{ *}}vasrhub(r7:6, #0):raw +#CHECK: 8866c09a { r26{{ *}}={{ 
*}}vasrhub(r7:6,#0):raw r26=vasrhub(r7:6,#1):rnd:sat -#CHECK: 7654c016 { r22{{ *}}={{ *}}sub(#0, r20) +#CHECK: 7654c016 { r22{{ *}}={{ *}}sub(#0,r20) r22=neg(r20) -#CHECK: 802cc808 { r9:8{{ *}}={{ *}}vasrh(r13:12, #8):raw +#CHECK: 802cc808 { r9:8{{ *}}={{ *}}vasrh(r13:12,#8):raw r9:8=vasrh(r13:12,#9):rnd -#CHECK: 7614dfe5 { r5{{ *}}={{ *}}{{zxtb\(r20\)|and\(r20, *#255\)}} +#CHECK: 7614dfe5 { r5{{ *}}={{ *}}{{zxtb\(r20\)|and\(r20,*#255\)}} r5=zxtb(r20) #CHECK: 00ab68e2 immext(#179976320) -#CHECK: 7500c500 p0{{ *}}={{ *}}cmp.eq(r0, ##179976360) +#CHECK: 7500c500 p0{{ *}}={{ *}}cmp.eq(r0,##179976360) { if (p0.new) r11=r26 p0=cmp.eq(r0,##179976360) @@ -546,19 +546,19 @@ #CHECK: 7425c005 { if (p1) r5{{ *}}={{ *}}r5 if (p1) r5=r5 -#CHECK: e9badae2 { r2{{ *}}={{ *}}vrcmpys(r27:26, r27:26):<<1:rnd:sat:raw:lo +#CHECK: e9badae2 { r2{{ *}}={{ *}}vrcmpys(r27:26,r27:26):<<1:rnd:sat:raw:lo r2=vrcmpys(r27:26,r26):<<1:rnd:sat -#CHECK: fd13f20e if (p0.new) r15:14{{ *}}={{ *}}{{r19:18|combine\(r19, *r18\)}} +#CHECK: fd13f20e if (p0.new) r15:14{{ *}}={{ *}}{{r19:18|combine\(r19,*r18\)}} { p0=cmp.eq(r26,##1766934387) if (p0.new) r15:14=r19:18 } -#CHECK: fd07c6c2 { if (!p2) r3:2{{ *}}={{ *}}{{r7:6|combine\(r7, *r6\)}} +#CHECK: fd07c6c2 { if (!p2) r3:2{{ *}}={{ *}}{{r7:6|combine\(r7,*r6\)}} if (!p2) r3:2=r7:6 -#CHECK: fd0dcc7e { if (p3) r31:30{{ *}}={{ *}}{{r13:12|combine\(r13, *r12\)}} +#CHECK: fd0dcc7e { if (p3) r31:30{{ *}}={{ *}}{{r13:12|combine\(r13,*r12\)}} if (p3) r31:30=r13:12 #CHECK: 748ae015 if (!p0.new) r21{{ *}}={{ *}}r10 @@ -567,38 +567,38 @@ if (!p0.new) r21=r10 } -#CHECK: d36ec6c8 { r9:8{{ *}}={{ *}}add(r15:14, r7:6):raw:lo +#CHECK: d36ec6c8 { r9:8{{ *}}={{ *}}add(r15:14,r7:6):raw:lo r9:8=add(r14,r7:6) #CHECK: 01e65477 immext(#509943232) -#CHECK: 7516c3a3 p3{{ *}}={{ *}}cmp.eq(r22, ##509943261) +#CHECK: 7516c3a3 p3{{ *}}={{ *}}cmp.eq(r22,##509943261) { if (!p3.new) r9:8=r25:24 p3=cmp.eq(r22,##509943261) } -#CHECK: 87e0d5e5 { r5{{ *}}={{ *}}tableidxd(r0, #15, #21):raw +#CHECK: 87e0d5e5 { r5{{ *}}={{ *}}tableidxd(r0,#15,#21):raw r5=tableidxd(r0,#15,#24) -#CHECK: 8701db65 { r5{{ *}}={{ *}}tableidxb(r1, #3, #27):raw +#CHECK: 8701db65 { r5{{ *}}={{ *}}tableidxb(r1,#3,#27):raw r5=tableidxb(r1,#3,#27) -#CHECK: 767affe3 { r3{{ *}}={{ *}}sub(#-1, r26) +#CHECK: 767affe3 { r3{{ *}}={{ *}}sub(#-1,r26) r3=not(r26) -#CHECK: f51ddc06 { r7:6{{ *}}={{ *}}{{r29:28|combine\(r29, *r28\)}} +#CHECK: f51ddc06 { r7:6{{ *}}={{ *}}{{r29:28|combine\(r29,*r28\)}} r7:6=r29:28 -#CHECK: 9406c000 { dcfetch(r6 + #0) +#CHECK: 9406c000 { dcfetch(r6+#0) dcfetch(r6) -#CHECK: 6b20c001 { p1{{ *}}={{ *}}or(p0, p0) +#CHECK: 6b20c001 { p1{{ *}}={{ *}}or(p0,p0) p1=p0 -#CHECK: eafcdc82 { r3:2 += vrcmpys(r29:28, r29:28):<<1:sat:raw:lo +#CHECK: eafcdc82 { r3:2+=vrcmpys(r29:28,r29:28):<<1:sat:raw:lo r3:2+=vrcmpys(r29:28,r28):<<1:sat -#CHECK: e8ead092 { r19:18{{ *}}={{ *}}vrcmpys(r11:10, r17:16):<<1:sat:raw:lo +#CHECK: e8ead092 { r19:18{{ *}}={{ *}}vrcmpys(r11:10,r17:16):<<1:sat:raw:lo r19:18=vrcmpys(r11:10,r16):<<1:sat #CHECK: 9082c014 { r21:20{{ *}}={{ *}}memb_fifo(r2{{ *}}+{{ *}}#0) Index: test/MC/Hexagon/capitalizedEndloop.s =================================================================== --- test/MC/Hexagon/capitalizedEndloop.s +++ test/MC/Hexagon/capitalizedEndloop.s @@ -3,19 +3,19 @@ # Verify that capitaizled endloops work - { R0 = mpyi(R0,R0) } : endloop0 - { R0 = mpyi(R0,R0) } : ENDLOOP0 - { R0 = mpyi(R0,R0) }:endloop0 + { R0=mpyi(R0,R0) } : endloop0 + { R0=mpyi(R0,R0) } : ENDLOOP0 + { R0=mpyi(R0,R0) }:endloop0 - { R0 = mpyi(R0,R0) 
} : endloop1 - { R0 = mpyi(R0,R0) } : ENDLOOP1 - { R0 = mpyi(R0,R0) }:endloop1 + { R0=mpyi(R0,R0) } : endloop1 + { R0=mpyi(R0,R0) } : ENDLOOP1 + { R0=mpyi(R0,R0) }:endloop1 - { R0 = mpyi(R0,R0) } : endloop0 : endloop1 - { R0 = mpyi(R0,R0) } : ENDLOOP0 : ENDLOOP1 - { R0 = mpyi(R0,R0) }:endloop0:endloop1 + { R0=mpyi(R0,R0) } : endloop0 : endloop1 + { R0=mpyi(R0,R0) } : ENDLOOP0 : ENDLOOP1 + { R0=mpyi(R0,R0) }:endloop0:endloop1 -# CHECK: r0 = mpyi(r0, r0) +# CHECK: r0=mpyi(r0,r0) # CHECK: :endloop0 # CHECK: :endloop0 # CHECK: :endloop0 Index: test/MC/Hexagon/dis-duplex-p0.s =================================================================== --- test/MC/Hexagon/dis-duplex-p0.s +++ test/MC/Hexagon/dis-duplex-p0.s @@ -1,7 +1,7 @@ // RUN: llvm-mc -arch=hexagon -filetype=obj -o - %s | llvm-objdump -d - | FileCheck %s // REQUIRES: asserts .text -// CHECK: { r7 = #-1; r7 = #-1 } +// CHECK: { r7=#-1; r7=#-1 } .long 0x3a373a27 -// CHECK: { if (!p0.new) r7 = #0; if (p0.new) r7 = #0 } +// CHECK: { if (!p0.new) r7=#0; if (p0.new) r7=#0 } .long 0x3a573a47 Index: test/MC/Hexagon/duplex-registers.s =================================================================== --- test/MC/Hexagon/duplex-registers.s +++ test/MC/Hexagon/duplex-registers.s @@ -7,4 +7,4 @@ } # CHECK: 289808ba -# CHECK: r16 = memuh(r17 + #0);{{ *}}r18 = memuh(r19 + #0) +# CHECK: r16=memuh(r17+#0);{{ *}}r18=memuh(r19+#0) Index: test/MC/Hexagon/fixups.s =================================================================== --- test/MC/Hexagon/fixups.s +++ test/MC/Hexagon/fixups.s @@ -3,7 +3,7 @@ .text # CHECK-LABEL: 0: # CHECK: 2442e106 -# CHECK: if (!cmp.eq(r1.new, #1)) jump:t 0xc +# CHECK: if (!cmp.eq(r1.new,#1)) jump:t 0xc { r1 = zxth(r2) if (!cmp.eq(r1.new, #1)) jump:t .L1 @@ -15,7 +15,7 @@ # CHECK: 00004020 # CHECK: immext(#2048) # CHECK: 2442e118 -# CHECK: if (!cmp.eq(r1.new, #1)) jump:t 0x81c +# CHECK: if (!cmp.eq(r1.new,#1)) jump:t 0x81c { r1 = zxth(r2) if (!cmp.eq(r1.new, #1)) jump:t .L2 Index: test/MC/Hexagon/iconst.s =================================================================== --- test/MC/Hexagon/iconst.s +++ test/MC/Hexagon/iconst.s @@ -1,6 +1,6 @@ # RUN: llvm-mc -triple=hexagon -filetype=obj %s | llvm-objdump -d -r - | FileCheck %s a: -# CHECK: r0 = add(r0, #0) +# CHECK: r0=add(r0,#0) # CHECK: R_HEX_23_REG r0 = iconst(#a) \ No newline at end of file Index: test/MC/Hexagon/inst_cmp_eq.ll =================================================================== --- test/MC/Hexagon/inst_cmp_eq.ll +++ test/MC/Hexagon/inst_cmp_eq.ll @@ -3,10 +3,10 @@ define i1 @foo (i32 %a, i32 %b) { - %1 = icmp eq i32 %a, %b + %1=icmp eq i32 %a, %b ret i1 %1 } -; CHECK: p0 = cmp.eq(r0, r1) -; CHECK: r0 = p0 +; CHECK: p0=cmp.eq(r0,r1) +; CHECK: r0=p0 ; CHECK: jumpr r31 Index: test/MC/Hexagon/inst_cmp_eqi.ll =================================================================== --- test/MC/Hexagon/inst_cmp_eqi.ll +++ test/MC/Hexagon/inst_cmp_eqi.ll @@ -3,10 +3,10 @@ define i1 @foo (i32 %a) { - %1 = icmp eq i32 %a, 42 + %1=icmp eq i32 %a, 42 ret i1 %1 } -; CHECK: p0 = cmp.eq(r0, #42) -; CHECK: r0 = p0 +; CHECK: p0=cmp.eq(r0,#42) +; CHECK: r0=p0 ; CHECK: jumpr r31 Index: test/MC/Hexagon/inst_cmp_gt.ll =================================================================== --- test/MC/Hexagon/inst_cmp_gt.ll +++ test/MC/Hexagon/inst_cmp_gt.ll @@ -3,10 +3,10 @@ define i1 @foo (i32 %a, i32 %b) { - %1 = icmp sgt i32 %a, %b + %1=icmp sgt i32 %a, %b ret i1 %1 } -; CHECK: p0 = cmp.gt(r0, r1) -; CHECK: r0 = p0 -; CHECK: jumpr r31 } \ No newline at end of file +; CHECK: 
p0=cmp.gt(r0,r1) +; CHECK: r0=p0 +; CHECK: jumpr r31 } Index: test/MC/Hexagon/inst_cmp_gti.ll =================================================================== --- test/MC/Hexagon/inst_cmp_gti.ll +++ test/MC/Hexagon/inst_cmp_gti.ll @@ -3,10 +3,10 @@ define i1 @foo (i32 %a) { - %1 = icmp sgt i32 %a, 42 + %1=icmp sgt i32 %a, 42 ret i1 %1 } -; CHECK: p0 = cmp.gt(r0, #42) -; CHECK: r0 = p0 +; CHECK: p0=cmp.gt(r0,#42) +; CHECK: r0=p0 ; CHECK: jumpr r31 Index: test/MC/Hexagon/inst_cmp_lt.ll =================================================================== --- test/MC/Hexagon/inst_cmp_lt.ll +++ test/MC/Hexagon/inst_cmp_lt.ll @@ -3,10 +3,10 @@ define i1 @foo (i32 %a, i32 %b) { - %1 = icmp slt i32 %a, %b + %1=icmp slt i32 %a, %b ret i1 %1 } -; CHECK: p0 = cmp.gt(r1, r0) -; CHECK: r0 = p0 +; CHECK: p0=cmp.gt(r1,r0) +; CHECK: r0=p0 ; CHECK: jumpr r31 Index: test/MC/Hexagon/inst_cmp_ugt.ll =================================================================== --- test/MC/Hexagon/inst_cmp_ugt.ll +++ test/MC/Hexagon/inst_cmp_ugt.ll @@ -3,10 +3,10 @@ define i1 @foo (i32 %a, i32 %b) { - %1 = icmp ugt i32 %a, %b + %1=icmp ugt i32 %a, %b ret i1 %1 } -; CHECK: p0 = cmp.gtu(r0, r1) -; CHECK: r0 = p0 +; CHECK: p0=cmp.gtu(r0,r1) +; CHECK: r0=p0 ; CHECK: jumpr r31 Index: test/MC/Hexagon/inst_cmp_ugti.ll =================================================================== --- test/MC/Hexagon/inst_cmp_ugti.ll +++ test/MC/Hexagon/inst_cmp_ugti.ll @@ -3,10 +3,10 @@ define i1 @foo (i32 %a) { - %1 = icmp ugt i32 %a, 42 + %1=icmp ugt i32 %a, 42 ret i1 %1 } -; CHECK: p0 = cmp.gtu(r0, #42) -; CHECK: r0 = p0 +; CHECK: p0=cmp.gtu(r0,#42) +; CHECK: r0=p0 ; CHECK: jumpr r31 Index: test/MC/Hexagon/inst_cmp_ult.ll =================================================================== --- test/MC/Hexagon/inst_cmp_ult.ll +++ test/MC/Hexagon/inst_cmp_ult.ll @@ -3,10 +3,10 @@ define i1 @foo (i32 %a, i32 %b) { - %1 = icmp ult i32 %a, %b + %1=icmp ult i32 %a, %b ret i1 %1 } -; CHECK: p0 = cmp.gtu(r1, r0) -; CHECK: r0 = p0 -; CHECK: jumpr r31 \ No newline at end of file +; CHECK: p0=cmp.gtu(r1,r0) +; CHECK: r0=p0 +; CHECK: jumpr r31 Index: test/MC/Hexagon/jumpdoublepound.s =================================================================== --- test/MC/Hexagon/jumpdoublepound.s +++ test/MC/Hexagon/jumpdoublepound.s @@ -7,7 +7,7 @@ # CHECK: if (p0) jump if (p0) jump ##mylabel -# CHECK: if (cmp.gtu(r5.new, r4)) jump:t +# CHECK: if (cmp.gtu(r5.new,r4)) jump:t { r5 = r4 if (cmp.gtu(r5.new, r4)) jump:t ##mylabel } Index: test/MC/Hexagon/labels.s =================================================================== --- test/MC/Hexagon/labels.s +++ test/MC/Hexagon/labels.s @@ -10,17 +10,17 @@ # CHECK: nop r3:nop -# CHECK: r5:4 = combine(r5, r4) -r5:4 = r5:4 +# CHECK: r5:4=combine(r5,r4) +r5:4=r5:4 -# CHECK: r0 = r1 -# CHECK: p0 = tstbit(r0, #10) +# CHECK: r0=r1 +# CHECK: p0=tstbit(r0,#10) # CHECK: if (!p0) jump -1:r0=r1; p0=tstbit(r0, #10); if !p0 jump 1b; +1:r0=r1; p0=tstbit(r0,#10); if !p0 jump 1b; # CHECK: nop -# CHECK: r1 = add(r1, #4) -# CHECK: r5 = memw(r1 + #0) +# CHECK: r1=add(r1,#4) +# CHECK: r5=memw(r1+#0) # CHECK: endloop0 -b: { r5 = memw(r1) - r1 = add(r1, #4) } : endloop0 \ No newline at end of file +b: { r5=memw(r1) + r1=add(r1,#4) } : endloop0 \ No newline at end of file Index: test/MC/Hexagon/register-alt-names.s =================================================================== --- test/MC/Hexagon/register-alt-names.s +++ test/MC/Hexagon/register-alt-names.s @@ -1,14 +1,14 @@ # RUN: llvm-mc -arch=hexagon < %s | FileCheck %s -# 
CHECK: r0 = r31 -r0 = lr +# CHECK: r0=r31 +r0=lr -# CHECK: r1 = r30 -r1 = fp +# CHECK: r1=r30 +r1=fp -# CHECK: r2 = r29 -r2 = sp +# CHECK: r2=r29 +r2=sp -# CHECK: r1:0 = combine(r31, r30) -r1:0 = lr:fp +# CHECK: r1:0=combine(r31,r30) +r1:0=lr:fp Index: test/MC/Hexagon/relaxed_newvalue.s =================================================================== --- test/MC/Hexagon/relaxed_newvalue.s +++ test/MC/Hexagon/relaxed_newvalue.s @@ -1,10 +1,10 @@ # RUN: llvm-mc -triple=hexagon -filetype=obj %s | llvm-objdump -d - | FileCheck %s # Make sure relaxation doesn't hinder newvalue calculation -#CHECK: r18 = add(r2, #-6) +#CHECK: r18=add(r2,#-6) #CHECK-NEXT: immext(#0) -#CHECK-NEXT: if (!cmp.gt(r18.new, #1)) jump:t +#CHECK-NEXT: if (!cmp.gt(r18.new,#1)) jump:t { - r18 = add(r2, #-6) - if (!cmp.gt(r18.new, #1)) jump:t .unknown + r18=add(r2,#-6) + if (!cmp.gt(r18.new,#1)) jump:t .unknown } Index: test/MC/Hexagon/relocations.s =================================================================== --- test/MC/Hexagon/relocations.s +++ test/MC/Hexagon/relocations.s @@ -30,19 +30,19 @@ # CHECK: R_HEX_GPREL16_0 r_hex_gprel16_0: -{ r0 = memb (#undefined@gotrel) } +{ r0 = memb (#undefined) } # CHECK: R_HEX_GPREL16_1 r_hex_gprel16_1: -{ r0 = memh (#undefined@gotrel) } +{ r0 = memh (#undefined) } # CHECK: R_HEX_GPREL16_2 r_hex_gprel16_2: -{ r0 = memw (#undefined@gotrel) } +{ r0 = memw (#undefined) } # CHECK: R_HEX_GPREL16_3 r_hex_gprel16_3: -{ r1:0 = memd (#undefined@gotrel) } +{ r1:0 = memd (#undefined) } # CHECK: R_HEX_B13_PCREL r_hex_b13_pcrel: Index: test/MC/Hexagon/v60-alu.s =================================================================== --- test/MC/Hexagon/v60-alu.s +++ test/MC/Hexagon/v60-alu.s @@ -2,311 +2,311 @@ #RUN: llvm-objdump -triple=hexagon -mcpu=hexagonv60 -d - | \ #RUN: FileCheck %s -#CHECK: 1ce2cbd7 { v23.w = vavg(v11.w,{{ *}}v2.w):rnd } +#CHECK: 1ce2cbd7 { v23.w=vavg(v11.w,v2.w):rnd } v23.w=vavg(v11.w,v2.w):rnd -#CHECK: 1cf4d323 { v3.h = vnavg(v19.h,{{ *}}v20.h) } +#CHECK: 1cf4d323 { v3.h=vnavg(v19.h,v20.h) } v3.h=vnavg(v19.h,v20.h) -#CHECK: 1cffce9a { v26.uh = vavg(v14.uh,{{ *}}v31.uh):rnd } +#CHECK: 1cffce9a { v26.uh=vavg(v14.uh,v31.uh):rnd } v26.uh=vavg(v14.uh,v31.uh):rnd -#CHECK: 1ce5cba1 { v1.h = vavg(v11.h,{{ *}}v5.h):rnd } +#CHECK: 1ce5cba1 { v1.h=vavg(v11.h,v5.h):rnd } v1.h=vavg(v11.h,v5.h):rnd -#CHECK: 1cc0d012 { v18.ub = vabsdiff(v16.ub,{{ *}}v0.ub) } +#CHECK: 1cc0d012 { v18.ub=vabsdiff(v16.ub,v0.ub) } v18.ub=vabsdiff(v16.ub,v0.ub) -#CHECK: 1cc2de29 { v9.uh = vabsdiff(v30.h,{{ *}}v2.h) } +#CHECK: 1cc2de29 { v9.uh=vabsdiff(v30.h,v2.h) } v9.uh=vabsdiff(v30.h,v2.h) -#CHECK: 1ce9ca06 { v6.b = vnavg(v10.ub,{{ *}}v9.ub) } +#CHECK: 1ce9ca06 { v6.b=vnavg(v10.ub,v9.ub) } v6.b=vnavg(v10.ub,v9.ub) -#CHECK: 1caacf90 { v17:16.w = vadd(v15.h,{{ *}}v10.h) } +#CHECK: 1caacf90 { v17:16.w=vadd(v15.h,v10.h) } v17:16.w=vadd(v15.h,v10.h) -#CHECK: 1cb4cabe { v31:30.h = vsub(v10.ub,{{ *}}v20.ub) } +#CHECK: 1cb4cabe { v31:30.h=vsub(v10.ub,v20.ub) } v31:30.h=vsub(v10.ub,v20.ub) -#CHECK: 1cb8cada { v27:26.w = vsub(v10.uh,{{ *}}v24.uh) } +#CHECK: 1cb8cada { v27:26.w=vsub(v10.uh,v24.uh) } v27:26.w=vsub(v10.uh,v24.uh) -#CHECK: 1cbcdbe8 { v9:8.w = vsub(v27.h,{{ *}}v28.h) } +#CHECK: 1cbcdbe8 { v9:8.w=vsub(v27.h,v28.h) } v9:8.w=vsub(v27.h,v28.h) -#CHECK: 1caeca00 { v1:0.h = vsub(v11:10.h,{{ *}}v15:14.h):sat } +#CHECK: 1caeca00 { v1:0.h=vsub(v11:10.h,v15:14.h):sat } v1:0.h=vsub(v11:10.h,v15:14.h):sat -#CHECK: 1ca8c43e { v31:30.w = vsub(v5:4.w,{{ *}}v9:8.w):sat } +#CHECK: 1ca8c43e { 
v31:30.w=vsub(v5:4.w,v9:8.w):sat } v31:30.w=vsub(v5:4.w,v9:8.w):sat -#CHECK: 1cbad95c { v29:28.h = vadd(v25.ub,{{ *}}v26.ub) } +#CHECK: 1cbad95c { v29:28.h=vadd(v25.ub,v26.ub) } v29:28.h=vadd(v25.ub,v26.ub) -#CHECK: 1ca1dc64 { v5:4.w = vadd(v28.uh,{{ *}}v1.uh) } +#CHECK: 1ca1dc64 { v5:4.w=vadd(v28.uh,v1.uh) } v5:4.w=vadd(v28.uh,v1.uh) -#CHECK: 1c79c350 { v16.h = vsub(v3.h,{{ *}}v25.h):sat } +#CHECK: 1c79c350 { v16.h=vsub(v3.h,v25.h):sat } v16.h=vsub(v3.h,v25.h):sat -#CHECK: 1c7fd364 { v4.w = vsub(v19.w,{{ *}}v31.w):sat } +#CHECK: 1c7fd364 { v4.w=vsub(v19.w,v31.w):sat } v4.w=vsub(v19.w,v31.w):sat -#CHECK: 1c67d816 { v22.ub = vsub(v24.ub,{{ *}}v7.ub):sat } +#CHECK: 1c67d816 { v22.ub=vsub(v24.ub,v7.ub):sat } v22.ub=vsub(v24.ub,v7.ub):sat -#CHECK: 1c7ddc2f { v15.uh = vsub(v28.uh,{{ *}}v29.uh):sat } +#CHECK: 1c7ddc2f { v15.uh=vsub(v28.uh,v29.uh):sat } v15.uh=vsub(v28.uh,v29.uh):sat -#CHECK: 1c5cc6d7 { v23.h = vsub(v6.h,{{ *}}v28.h) } +#CHECK: 1c5cc6d7 { v23.h=vsub(v6.h,v28.h) } v23.h=vsub(v6.h,v28.h) -#CHECK: 1c54cae4 { v4.w = vsub(v10.w,{{ *}}v20.w) } +#CHECK: 1c54cae4 { v4.w=vsub(v10.w,v20.w) } v4.w=vsub(v10.w,v20.w) -#CHECK: 1c4dc78b { v11.w = vadd(v7.w,{{ *}}v13.w):sat } +#CHECK: 1c4dc78b { v11.w=vadd(v7.w,v13.w):sat } v11.w=vadd(v7.w,v13.w):sat -#CHECK: 1c48c7a4 { v4.b = vsub(v7.b,{{ *}}v8.b) } +#CHECK: 1c48c7a4 { v4.b=vsub(v7.b,v8.b) } v4.b=vsub(v7.b,v8.b) -#CHECK: 1cdec3b0 { v16.uh = vavg(v3.uh,{{ *}}v30.uh) } +#CHECK: 1cdec3b0 { v16.uh=vavg(v3.uh,v30.uh) } v16.uh=vavg(v3.uh,v30.uh) -#CHECK: 1c76dc98 { v25:24.b = vadd(v29:28.b,{{ *}}v23:22.b) } +#CHECK: 1c76dc98 { v25:24.b=vadd(v29:28.b,v23:22.b) } v25:24.b=vadd(v29:28.b,v23:22.b) -#CHECK: 1c7ad4a6 { v7:6.h = vadd(v21:20.h,{{ *}}v27:26.h) } +#CHECK: 1c7ad4a6 { v7:6.h=vadd(v21:20.h,v27:26.h) } v7:6.h=vadd(v21:20.h,v27:26.h) -#CHECK: 1cc7c564 { v4.uw = vabsdiff(v5.w,{{ *}}v7.w) } +#CHECK: 1cc7c564 { v4.uw=vabsdiff(v5.w,v7.w) } v4.uw=vabsdiff(v5.w,v7.w) -#CHECK: 1cd2cdc1 { v1.h = vavg(v13.h,{{ *}}v18.h) } +#CHECK: 1cd2cdc1 { v1.h=vavg(v13.h,v18.h) } v1.h=vavg(v13.h,v18.h) -#CHECK: 1cd5d246 { v6.uh = vabsdiff(v18.uh,{{ *}}v21.uh) } +#CHECK: 1cd5d246 { v6.uh=vabsdiff(v18.uh,v21.uh) } v6.uh=vabsdiff(v18.uh,v21.uh) -#CHECK: 1cdcd987 { v7.ub = vavg(v25.ub,{{ *}}v28.ub) } +#CHECK: 1cdcd987 { v7.ub=vavg(v25.ub,v28.ub) } v7.ub=vavg(v25.ub,v28.ub) -#CHECK: 1c92c6e4 { v5:4.uh = vsub(v7:6.uh,{{ *}}v19:18.uh):sat } +#CHECK: 1c92c6e4 { v5:4.uh=vsub(v7:6.uh,v19:18.uh):sat } v5:4.uh=vsub(v7:6.uh,v19:18.uh):sat -#CHECK: 1c86dace { v15:14.ub = vsub(v27:26.ub,{{ *}}v7:6.ub):sat } +#CHECK: 1c86dace { v15:14.ub=vsub(v27:26.ub,v7:6.ub):sat } v15:14.ub=vsub(v27:26.ub,v7:6.ub):sat -#CHECK: 1cffc07c { v28.ub = vavg(v0.ub,{{ *}}v31.ub):rnd } +#CHECK: 1cffc07c { v28.ub=vavg(v0.ub,v31.ub):rnd } v28.ub=vavg(v0.ub,v31.ub):rnd -#CHECK: 1cf8d851 { v17.w = vnavg(v24.w,{{ *}}v24.w) } +#CHECK: 1cf8d851 { v17.w=vnavg(v24.w,v24.w) } v17.w=vnavg(v24.w,v24.w) -#CHECK: 1c70d2e6 { v7:6.ub = vadd(v19:18.ub,{{ *}}v17:16.ub):sat } +#CHECK: 1c70d2e6 { v7:6.ub=vadd(v19:18.ub,v17:16.ub):sat } v7:6.ub=vadd(v19:18.ub,v17:16.ub):sat -#CHECK: 1c72dec6 { v7:6.w = vadd(v31:30.w,{{ *}}v19:18.w) } +#CHECK: 1c72dec6 { v7:6.w=vadd(v31:30.w,v19:18.w) } v7:6.w=vadd(v31:30.w,v19:18.w) -#CHECK: 1c92d23e { v31:30.h = vadd(v19:18.h,{{ *}}v19:18.h):sat } +#CHECK: 1c92d23e { v31:30.h=vadd(v19:18.h,v19:18.h):sat } v31:30.h=vadd(v19:18.h,v19:18.h):sat -#CHECK: 1c94de1e { v31:30.uh = vadd(v31:30.uh,{{ *}}v21:20.uh):sat } +#CHECK: 1c94de1e { v31:30.uh=vadd(v31:30.uh,v21:20.uh):sat } 
v31:30.uh=vadd(v31:30.uh,v21:20.uh):sat -#CHECK: 1c9ec07c { v29:28.b = vsub(v1:0.b,{{ *}}v31:30.b) } +#CHECK: 1c9ec07c { v29:28.b=vsub(v1:0.b,v31:30.b) } v29:28.b=vsub(v1:0.b,v31:30.b) -#CHECK: 1c88da56 { v23:22.w = vadd(v27:26.w,{{ *}}v9:8.w):sat } +#CHECK: 1c88da56 { v23:22.w=vadd(v27:26.w,v9:8.w):sat } v23:22.w=vadd(v27:26.w,v9:8.w):sat -#CHECK: 1c9acab8 { v25:24.w = vsub(v11:10.w,{{ *}}v27:26.w) } +#CHECK: 1c9acab8 { v25:24.w=vsub(v11:10.w,v27:26.w) } v25:24.w=vsub(v11:10.w,v27:26.w) -#CHECK: 1c82d282 { v3:2.h = vsub(v19:18.h,{{ *}}v3:2.h) } +#CHECK: 1c82d282 { v3:2.h=vsub(v19:18.h,v3:2.h) } v3:2.h=vsub(v19:18.h,v3:2.h) -#CHECK: 1c2bd9a6 { v6 = vand(v25,{{ *}}v11) } +#CHECK: 1c2bd9a6 { v6=vand(v25,v11) } v6=vand(v25,v11) -#CHECK: 1c43c22d { v13.ub = vadd(v2.ub,{{ *}}v3.ub):sat } +#CHECK: 1c43c22d { v13.ub=vadd(v2.ub,v3.ub):sat } v13.ub=vadd(v2.ub,v3.ub):sat -#CHECK: 1c59d707 { v7.w = vadd(v23.w,{{ *}}v25.w) } +#CHECK: 1c59d707 { v7.w=vadd(v23.w,v25.w) } v7.w=vadd(v23.w,v25.w) -#CHECK: 1c3fc9e1 { v1 = vxor(v9,{{ *}}v31) } +#CHECK: 1c3fc9e1 { v1=vxor(v9,v31) } v1=vxor(v9,v31) -#CHECK: 1c2acbdf { v31 = vor(v11,{{ *}}v10) } +#CHECK: 1c2acbdf { v31=vor(v11,v10) } v31=vor(v11,v10) -#CHECK: 1cdaccf6 { v22.w = vavg(v12.w,{{ *}}v26.w) } +#CHECK: 1cdaccf6 { v22.w=vavg(v12.w,v26.w) } v22.w=vavg(v12.w,v26.w) -#CHECK: 1c5ac767 { v7.h = vadd(v7.h,{{ *}}v26.h):sat } +#CHECK: 1c5ac767 { v7.h=vadd(v7.h,v26.h):sat } v7.h=vadd(v7.h,v26.h):sat -#CHECK: 1c40d956 { v22.uh = vadd(v25.uh,{{ *}}v0.uh):sat } +#CHECK: 1c40d956 { v22.uh=vadd(v25.uh,v0.uh):sat } v22.uh=vadd(v25.uh,v0.uh):sat -#CHECK: 1fbbd611 { v17.w = vasr(v22.w{{ *}},{{ *}}v27.w) } +#CHECK: 1fbbd611 { v17.w=vasr(v22.w,v27.w) } v17.w=vasr(v22.w,v27.w) -#CHECK: 1fbad835 { v21.w = vlsr(v24.w{{ *}},{{ *}}v26.w) } +#CHECK: 1fbad835 { v21.w=vlsr(v24.w,v26.w) } v21.w=vlsr(v24.w,v26.w) -#CHECK: 1f79cedc { v28.b = vround(v14.h{{ *}},{{ *}}v25.h):sat } +#CHECK: 1f79cedc { v28.b=vround(v14.h,v25.h):sat } v28.b=vround(v14.h,v25.h):sat -#CHECK: 1f69c4e0 { v0.ub = vround(v4.h{{ *}},{{ *}}v9.h):sat } +#CHECK: 1f69c4e0 { v0.ub=vround(v4.h,v9.h):sat } v0.ub=vround(v4.h,v9.h):sat -#CHECK: 1f72c485 { v5.h = vround(v4.w{{ *}},{{ *}}v18.w):sat } +#CHECK: 1f72c485 { v5.h=vround(v4.w,v18.w):sat } v5.h=vround(v4.w,v18.w):sat -#CHECK: 1f6bc8b1 { v17.uh = vround(v8.w{{ *}},{{ *}}v11.w):sat } +#CHECK: 1f6bc8b1 { v17.uh=vround(v8.w,v11.w):sat } v17.uh=vround(v8.w,v11.w):sat -#CHECK: 1f71c25b { v27.ub = vsat(v2.h{{ *}},{{ *}}v17.h) } +#CHECK: 1f71c25b { v27.ub=vsat(v2.h,v17.h) } v27.ub=vsat(v2.h,v17.h) -#CHECK: 1f66c560 { v0.h = vsat(v5.w{{ *}},{{ *}}v6.w) } +#CHECK: 1f66c560 { v0.h=vsat(v5.w,v6.w) } v0.h=vsat(v5.w,v6.w) -#CHECK: 1fb3d148 { v8.h = vlsr(v17.h{{ *}},{{ *}}v19.h) } +#CHECK: 1fb3d148 { v8.h=vlsr(v17.h,v19.h) } v8.h=vlsr(v17.h,v19.h) -#CHECK: 1fbec56e { v14.h = vasr(v5.h{{ *}},{{ *}}v30.h) } +#CHECK: 1fbec56e { v14.h=vasr(v5.h,v30.h) } v14.h=vasr(v5.h,v30.h) -#CHECK: 1fb2d2a2 { v2.h = vasl(v18.h{{ *}},{{ *}}v18.h) } +#CHECK: 1fb2d2a2 { v2.h=vasl(v18.h,v18.h) } v2.h=vasl(v18.h,v18.h) -#CHECK: 1faccc95 { v21.w = vasl(v12.w{{ *}},{{ *}}v12.w) } +#CHECK: 1faccc95 { v21.w=vasl(v12.w,v12.w) } v21.w=vasl(v12.w,v12.w) -#CHECK: 1fb9c1e2 { v2.h = vadd(v1.h{{ *}},{{ *}}v25.h) } +#CHECK: 1fb9c1e2 { v2.h=vadd(v1.h,v25.h) } v2.h=vadd(v1.h,v25.h) -#CHECK: 1fbbd5df { v31.b = vadd(v21.b{{ *}},{{ *}}v27.b) } +#CHECK: 1fbbd5df { v31.b=vadd(v21.b,v27.b) } v31.b=vadd(v21.b,v27.b) -#CHECK: 1f25c578 { v24 = vrdelta(v5{{ *}},{{ *}}v5) } +#CHECK: 1f25c578 { v24=vrdelta(v5,v5) } 
v24=vrdelta(v5,v5) -#CHECK: 1f22c62a { v10 = vdelta(v6{{ *}},{{ *}}v2) } +#CHECK: 1f22c62a { v10=vdelta(v6,v2) } v10=vdelta(v6,v2) -#CHECK: 1f20d102 { v2.w = vmax(v17.w{{ *}},{{ *}}v0.w) } +#CHECK: 1f20d102 { v2.w=vmax(v17.w,v0.w) } v2.w=vmax(v17.w,v0.w) -#CHECK: 1f1ed6fc { v28.h = vmax(v22.h{{ *}},{{ *}}v30.h) } +#CHECK: 1f1ed6fc { v28.h=vmax(v22.h,v30.h) } v28.h=vmax(v22.h,v30.h) -#CHECK: 1f0cc8d8 { v24.uh = vmax(v8.uh{{ *}},{{ *}}v12.uh) } +#CHECK: 1f0cc8d8 { v24.uh=vmax(v8.uh,v12.uh) } v24.uh=vmax(v8.uh,v12.uh) -#CHECK: 1f00c1b0 { v16.ub = vmax(v1.ub{{ *}},{{ *}}v0.ub) } +#CHECK: 1f00c1b0 { v16.ub=vmax(v1.ub,v0.ub) } v16.ub=vmax(v1.ub,v0.ub) -#CHECK: 1f12d08e { v14.w = vmin(v16.w{{ *}},{{ *}}v18.w) } +#CHECK: 1f12d08e { v14.w=vmin(v16.w,v18.w) } v14.w=vmin(v16.w,v18.w) -#CHECK: 1f1ad466 { v6.h = vmin(v20.h{{ *}},{{ *}}v26.h) } +#CHECK: 1f1ad466 { v6.h=vmin(v20.h,v26.h) } v6.h=vmin(v20.h,v26.h) -#CHECK: 1f13df5d { v29.uh = vmin(v31.uh{{ *}},{{ *}}v19.uh) } +#CHECK: 1f13df5d { v29.uh=vmin(v31.uh,v19.uh) } v29.uh=vmin(v31.uh,v19.uh) -#CHECK: 1f09c226 { v6.ub = vmin(v2.ub{{ *}},{{ *}}v9.ub) } +#CHECK: 1f09c226 { v6.ub=vmin(v2.ub,v9.ub) } v6.ub=vmin(v2.ub,v9.ub) -#CHECK: 1f41d34f { v15.b = vshuffo(v19.b{{ *}},{{ *}}v1.b) } +#CHECK: 1f41d34f { v15.b=vshuffo(v19.b,v1.b) } v15.b=vshuffo(v19.b,v1.b) -#CHECK: 1f5fc72e { v14.b = vshuffe(v7.b{{ *}},{{ *}}v31.b) } +#CHECK: 1f5fc72e { v14.b=vshuffe(v7.b,v31.b) } v14.b=vshuffe(v7.b,v31.b) -#CHECK: 1f34d0f7 { v23.b = vdeale(v16.b{{ *}},{{ *}}v20.b) } +#CHECK: 1f34d0f7 { v23.b=vdeale(v16.b,v20.b) } v23.b=vdeale(v16.b,v20.b) -#CHECK: 1f4bd6c4 { v5:4.b = vshuffoe(v22.b{{ *}},{{ *}}v11.b) } +#CHECK: 1f4bd6c4 { v5:4.b=vshuffoe(v22.b,v11.b) } v5:4.b=vshuffoe(v22.b,v11.b) -#CHECK: 1f5dcea2 { v3:2.h = vshuffoe(v14.h{{ *}},{{ *}}v29.h) } +#CHECK: 1f5dcea2 { v3:2.h=vshuffoe(v14.h,v29.h) } v3:2.h=vshuffoe(v14.h,v29.h) -#CHECK: 1f4fd186 { v6.h = vshuffo(v17.h{{ *}},{{ *}}v15.h) } +#CHECK: 1f4fd186 { v6.h=vshuffo(v17.h,v15.h) } v6.h=vshuffo(v17.h,v15.h) -#CHECK: 1f5bda79 { v25.h = vshuffe(v26.h{{ *}},{{ *}}v27.h) } +#CHECK: 1f5bda79 { v25.h=vshuffe(v26.h,v27.h) } v25.h=vshuffe(v26.h,v27.h) -#CHECK: 1f41d1f2 { v19:18 = vcombine(v17{{ *}},{{ *}}v1) } +#CHECK: 1f41d1f2 { v19:18=vcombine(v17,v1) } v19:18=vcombine(v17,v1) -#CHECK: 1e82f432 { if (!q2) v18.b -= v20.b } +#CHECK: 1e82f432 { if (!q2) v18.b-=v20.b } if (!q2) v18.b-=v20.b -#CHECK: 1ec2fd13 { if (q3) v19.w -= v29.w } +#CHECK: 1ec2fd13 { if (q3) v19.w-=v29.w } if (q3) v19.w-=v29.w -#CHECK: 1e81fef9 { if (q2) v25.h -= v30.h } +#CHECK: 1e81fef9 { if (q2) v25.h-=v30.h } if (q2) v25.h-=v30.h -#CHECK: 1e81e2d3 { if (q2) v19.b -= v2.b } +#CHECK: 1e81e2d3 { if (q2) v19.b-=v2.b } if (q2) v19.b-=v2.b -#CHECK: 1e41ecad { if (!q1) v13.w += v12.w } +#CHECK: 1e41ecad { if (!q1) v13.w+=v12.w } if (!q1) v13.w+=v12.w -#CHECK: 1e41e789 { if (!q1) v9.h += v7.h } +#CHECK: 1e41e789 { if (!q1) v9.h+=v7.h } if (!q1) v9.h+=v7.h -#CHECK: 1e81e967 { if (!q2) v7.b += v9.b } +#CHECK: 1e81e967 { if (!q2) v7.b+=v9.b } if (!q2) v7.b+=v9.b -#CHECK: 1e41f04f { if (q1) v15.w += v16.w } +#CHECK: 1e41f04f { if (q1) v15.w+=v16.w } if (q1) v15.w+=v16.w -#CHECK: 1e01e838 { if (q0) v24.h += v8.h } +#CHECK: 1e01e838 { if (q0) v24.h+=v8.h } if (q0) v24.h+=v8.h -#CHECK: 1ec1f112 { if (q3) v18.b += v17.b } +#CHECK: 1ec1f112 { if (q3) v18.b+=v17.b } if (q3) v18.b+=v17.b -#CHECK: 1e42f67b { if (!q1) v27.w -= v22.w } +#CHECK: 1e42f67b { if (!q1) v27.w-=v22.w } if (!q1) v27.w-=v22.w -#CHECK: 1e82ea5b { if (!q2) v27.h -= v10.h } +#CHECK: 1e82ea5b { if (!q2) 
v27.h-=v10.h } if (!q2) v27.h-=v10.h -#CHECK: 1e00c586 { v6 = vnot(v5) } +#CHECK: 1e00c586 { v6=vnot(v5) } v6=vnot(v5) -#CHECK: 1e00df70 { v16.w = vabs(v31.w):sat } +#CHECK: 1e00df70 { v16.w=vabs(v31.w):sat } v16.w=vabs(v31.w):sat -#CHECK: 1e00d45f { v31.w = vabs(v20.w) } +#CHECK: 1e00d45f { v31.w=vabs(v20.w) } v31.w=vabs(v20.w) -#CHECK: 1e00db2f { v15.h = vabs(v27.h):sat } +#CHECK: 1e00db2f { v15.h=vabs(v27.h):sat } v15.h=vabs(v27.h):sat -#CHECK: 1e00d001 { v1.h = vabs(v16.h) } +#CHECK: 1e00d001 { v1.h=vabs(v16.h) } v1.h=vabs(v16.h) -#CHECK: 1e02c832 { v19:18.uh = vzxt(v8.ub) } +#CHECK: 1e02c832 { v19:18.uh=vzxt(v8.ub) } v19:18.uh=vzxt(v8.ub) -#CHECK: 1e02c98a { v11:10.w = vsxt(v9.h) } +#CHECK: 1e02c98a { v11:10.w=vsxt(v9.h) } v11:10.w=vsxt(v9.h) -#CHECK: 1e02cf76 { v23:22.h = vsxt(v15.b) } +#CHECK: 1e02cf76 { v23:22.h=vsxt(v15.b) } v23:22.h=vsxt(v15.b) -#CHECK: 1e02c258 { v25:24.uw = vzxt(v2.uh) } +#CHECK: 1e02c258 { v25:24.uw=vzxt(v2.uh) } v25:24.uw=vzxt(v2.uh) Index: test/MC/Hexagon/v60-misc.s =================================================================== --- test/MC/Hexagon/v60-misc.s +++ test/MC/Hexagon/v60-misc.s @@ -14,108 +14,108 @@ # CHECK: 5361c300 { if (!p3) jumpr:nt if (!p3) jumpr r1 -# CHECK: 1c2eceee { v14 = vxor(v14,{{ *}}v14) } +# CHECK: 1c2eceee { v14=vxor(v14,{{ *}}v14) } v14 = #0 -# CHECK: 1c80c0a0 { v1:0.w = vsub(v1:0.w,v1:0.w) } +# CHECK: 1c80c0a0 { v1:0.w=vsub(v1:0.w,v1:0.w) } v1:0 = #0 -# CHECK: 1f42c3e0 { v1:0 = vcombine(v3,v2) } -v1:0 = v3:2 +# CHECK: 1f42c3e0 { v1:0=vcombine(v3,v2) } +v1:0=v3:2 -# CHECK: 1f90cf00 { q0 = vcmp.eq(v15.b,v16.b) } -q0 = vcmp.eq(v15.ub, v16.ub) +# CHECK: 1f90cf00 { q0=vcmp.eq(v15.b,v16.b) } +q0=vcmp.eq(v15.ub, v16.ub) -# CHECK: 1c92f101 { q1 &= vcmp.eq(v17.b,v18.b) } -q1 &= vcmp.eq(v17.ub, v18.ub) +# CHECK: 1c92f101 { q1&=vcmp.eq(v17.b,v18.b) } +q1&=vcmp.eq(v17.ub, v18.ub) -# CHECK: 1c94f342 { q2 |= vcmp.eq(v19.b,v20.b) } -q2 |= vcmp.eq(v19.ub, v20.ub) +# CHECK: 1c94f342 { q2|=vcmp.eq(v19.b,v20.b) } +q2|=vcmp.eq(v19.ub, v20.ub) -# CHECK: 1c96f583 { q3 ^= vcmp.eq(v21.b,v22.b) } -q3 ^= vcmp.eq(v21.ub, v22.ub) +# CHECK: 1c96f583 { q3^=vcmp.eq(v21.b,v22.b) } +q3^=vcmp.eq(v21.ub, v22.ub) -# CHECK: 1f81c004 { q0 = vcmp.eq(v0.h,v1.h) } -q0 = vcmp.eq(v0.uh, v1.uh) +# CHECK: 1f81c004 { q0=vcmp.eq(v0.h,v1.h) } +q0=vcmp.eq(v0.uh, v1.uh) -# CHECK: 1c83e205 { q1 &= vcmp.eq(v2.h,v3.h) } -q1 &= vcmp.eq(v2.uh, v3.uh) +# CHECK: 1c83e205 { q1&=vcmp.eq(v2.h,v3.h) } +q1&=vcmp.eq(v2.uh, v3.uh) -# CHECK: 1c85e446 { q2 |= vcmp.eq(v4.h,v5.h) } -q2 |= vcmp.eq(v4.uh, v5.uh) +# CHECK: 1c85e446 { q2|=vcmp.eq(v4.h,v5.h) } +q2|=vcmp.eq(v4.uh, v5.uh) -# CHECK: 1c87e687 { q3 ^= vcmp.eq(v6.h,v7.h) } -q3 ^= vcmp.eq(v6.uh, v7.uh) +# CHECK: 1c87e687 { q3^=vcmp.eq(v6.h,v7.h) } +q3^=vcmp.eq(v6.uh, v7.uh) -# CHECK: 1f89c808 { q0 = vcmp.eq(v8.w,v9.w) } -q0 = vcmp.eq(v8.uw, v9.uw) +# CHECK: 1f89c808 { q0=vcmp.eq(v8.w,v9.w) } +q0=vcmp.eq(v8.uw, v9.uw) -# CHECK: 1c8aea09 { q1 &= vcmp.eq(v10.w,v10.w) } -q1 &= vcmp.eq(v10.uw, v10.uw) +# CHECK: 1c8aea09 { q1&=vcmp.eq(v10.w,v10.w) } +q1&=vcmp.eq(v10.uw, v10.uw) -# CHECK: 1c8ceb46 { q2 |= vcmp.eq(v11.h,v12.h) } -q2 |= vcmp.eq(v11.uw, v12.uw) +# CHECK: 1c8ceb46 { q2|=vcmp.eq(v11.h,v12.h) } +q2|=vcmp.eq(v11.uh, v12.uh) -# CHECK: 1c8eed8b { q3 ^= vcmp.eq(v13.w,v14.w) } +# CHECK: 1c8eed8b { q3^=vcmp.eq(v13.w,v14.w) } q3 ^= vcmp.eq(v13.uw, v14.uw) -# CHECK: 2800c00f { v15 = vmem(r0+#0) } +# CHECK: 2800c00f { v15=vmem(r0+#0) } v15 = vmem(r0) -# CHECK: 2841c010 { v16 = vmem(r1+#0):nt } +# CHECK: 2841c010 { v16=vmem(r1+#0):nt } v16 = 
vmem(r1):nt -# CHECK: 2822c011 { vmem(r2+#0) = v17 } +# CHECK: 2822c011 { vmem(r2+#0)=v17 } vmem(r2) = v17 -# CHECK: 2863c012 { vmem(r3+#0):nt = v18 } +# CHECK: 2863c012 { vmem(r3+#0):nt=v18 } vmem(r3):nt = v18 -# CHECK: 2884c013 { if (q0) vmem(r4+#0) = v19 } +# CHECK: 2884c013 { if (q0) vmem(r4+#0)=v19 } if (q0) vmem(r4) = v19 -# CHECK: 2885c834 { if (!q1) vmem(r5+#0) = v20 } +# CHECK: 2885c834 { if (!q1) vmem(r5+#0)=v20 } if (!q1) vmem(r5) = v20 -# CHECK: 28c6d015 { if (q2) vmem(r6+#0):nt = v21 } +# CHECK: 28c6d015 { if (q2) vmem(r6+#0):nt=v21 } if (q2) vmem(r6):nt = v21 -# CHECK: 28c7d836 { if (!q3) vmem(r7+#0):nt = v22 } +# CHECK: 28c7d836 { if (!q3) vmem(r7+#0):nt=v22 } if (!q3) vmem(r7):nt = v22 -# CHECK: 28a8c017 { if (p0) vmem(r8+#0) = v23 } +# CHECK: 28a8c017 { if (p0) vmem(r8+#0)=v23 } if (p0) vmem(r8) = v23 -# CHECK: 28a9c838 { if (!p1) vmem(r9+#0) = v24 } +# CHECK: 28a9c838 { if (!p1) vmem(r9+#0)=v24 } if (!p1) vmem(r9) = v24 -# CHECK: 28ead019 { if (p2) vmem(r10+#0):nt = v25 } +# CHECK: 28ead019 { if (p2) vmem(r10+#0):nt=v25 } if (p2) vmem(r10):nt = v25 -# CHECK: 28ebd83a { if (!p3) vmem(r11+#0):nt = v26 } +# CHECK: 28ebd83a { if (!p3) vmem(r11+#0):nt=v26 } if (!p3) vmem(r11):nt = v26 -# CHECK: 282cc022 vmem(r12+#0) = v27.new +# CHECK: 282cc022 vmem(r12+#0)=v27.new { v27 = vxor(v28, v29) vmem(r12) = v27.new } -# CHECK: 286dc022 vmem(r13+#0):nt = v30.new +# CHECK: 286dc022 vmem(r13+#0):nt=v30.new { v30 = vxor(v31, v0) vmem(r13):nt = v30.new } -# CHECK: 280ec0e1 { v1 = vmemu(r14+#0) } +# CHECK: 280ec0e1 { v1=vmemu(r14+#0) } v1 = vmemu(r14) -# CHECK: 282fc0e2 { vmemu(r15+#0) = v2 } +# CHECK: 282fc0e2 { vmemu(r15+#0)=v2 } vmemu(r15) = v2 -# CHECK: 28b0c0c3 { if (p0) vmemu(r16+#0) = v3 } +# CHECK: 28b0c0c3 { if (p0) vmemu(r16+#0)=v3 } if (p0) vmemu(r16) = v3 -# CHECK: 28b1c8e4 { if (!p1) vmemu(r17+#0) = v4 } +# CHECK: 28b1c8e4 { if (!p1) vmemu(r17+#0)=v4 } if (!p1) vmemu(r17) = v4 Index: test/MC/Hexagon/v60-permute.s =================================================================== --- test/MC/Hexagon/v60-permute.s +++ test/MC/Hexagon/v60-permute.s @@ -2,50 +2,50 @@ #RUN: llvm-objdump -triple=hexagon -mcpu=hexagonv60 -d - | \ #RUN: FileCheck %s -#CHECK: 1fd2d5cf { v15.b = vpack(v21.h{{ *}},{{ *}}v18.h):sat } +#CHECK: 1fd2d5cf { v15.b=vpack(v21.h,v18.h):sat } v15.b=vpack(v21.h,v18.h):sat -#CHECK: 1fd7d7a2 { v2.ub = vpack(v23.h{{ *}},{{ *}}v23.h):sat } +#CHECK: 1fd7d7a2 { v2.ub=vpack(v23.h,v23.h):sat } v2.ub=vpack(v23.h,v23.h):sat -#CHECK: 1fc7d464 { v4.h = vpacke(v20.w{{ *}},{{ *}}v7.w) } +#CHECK: 1fc7d464 { v4.h=vpacke(v20.w,v7.w) } v4.h=vpacke(v20.w,v7.w) -#CHECK: 1fc2c75b { v27.b = vpacke(v7.h{{ *}},{{ *}}v2.h) } +#CHECK: 1fc2c75b { v27.b=vpacke(v7.h,v2.h) } v27.b=vpacke(v7.h,v2.h) -#CHECK: 1fc9c5ed { v13.uh = vpack(v5.w{{ *}},{{ *}}v9.w):sat } +#CHECK: 1fc9c5ed { v13.uh=vpack(v5.w,v9.w):sat } v13.uh=vpack(v5.w,v9.w):sat -#CHECK: 1ff1d81f { v31.h = vpack(v24.w{{ *}},{{ *}}v17.w):sat } +#CHECK: 1ff1d81f { v31.h=vpack(v24.w,v17.w):sat } v31.h=vpack(v24.w,v17.w):sat -#CHECK: 1fe6c435 { v21.b = vpacko(v4.h{{ *}},{{ *}}v6.h) } +#CHECK: 1fe6c435 { v21.b=vpacko(v4.h,v6.h) } v21.b=vpacko(v4.h,v6.h) -#CHECK: 1febc140 { v0.h = vpacko(v1.w{{ *}},{{ *}}v11.w) } +#CHECK: 1febc140 { v0.h=vpacko(v1.w,v11.w) } v0.h=vpacko(v1.w,v11.w) -#CHECK: 1e01d256 { v23:22.h = vunpack(v18.b) } +#CHECK: 1e01d256 { v23:22.h=vunpack(v18.b) } v23:22.h=vunpack(v18.b) -#CHECK: 1e01cc38 { v25:24.uw = vunpack(v12.uh) } +#CHECK: 1e01cc38 { v25:24.uw=vunpack(v12.uh) } v25:24.uw=vunpack(v12.uh) -#CHECK: 1e01c61e { 
v31:30.uh = vunpack(v6.ub) } +#CHECK: 1e01c61e { v31:30.uh=vunpack(v6.ub) } v31:30.uh=vunpack(v6.ub) -#CHECK: 1e01d778 { v25:24.w = vunpack(v23.h) } +#CHECK: 1e01d778 { v25:24.w=vunpack(v23.h) } v25:24.w=vunpack(v23.h) -#CHECK: 1e00c0e0 { v0.b = vdeal(v0.b) } +#CHECK: 1e00c0e0 { v0.b=vdeal(v0.b) } v0.b=vdeal(v0.b) -#CHECK: 1e00d5c9 { v9.h = vdeal(v21.h) } +#CHECK: 1e00d5c9 { v9.h=vdeal(v21.h) } v9.h=vdeal(v21.h) -#CHECK: 1e02cb1c { v28.b = vshuff(v11.b) } +#CHECK: 1e02cb1c { v28.b=vshuff(v11.b) } v28.b=vshuff(v11.b) -#CHECK: 1e01d8fe { v30.h = vshuff(v24.h) } +#CHECK: 1e01d8fe { v30.h=vshuff(v24.h) } v30.h=vshuff(v24.h) Index: test/MC/Hexagon/v60-shift.s =================================================================== --- test/MC/Hexagon/v60-shift.s +++ test/MC/Hexagon/v60-shift.s @@ -2,38 +2,38 @@ #RUN: llvm-objdump -triple=hexagon -mcpu=hexagonv60 -d - | \ #RUN: FileCheck %s -#CHECK: 198fd829 { v9.uw = vlsr(v24.uw,{{ *}}r15) } +#CHECK: 198fd829 { v9.uw=vlsr(v24.uw,r15) } v9.uw=vlsr(v24.uw,r15) -#CHECK: 1999d645 { v5.uh = vlsr(v22.uh,{{ *}}r25) } +#CHECK: 1999d645 { v5.uh=vlsr(v22.uh,r25) } v5.uh=vlsr(v22.uh,r25) -#CHECK: 198cc303 { v3.h = vasl(v3.h,{{ *}}r12) } +#CHECK: 198cc303 { v3.h=vasl(v3.h,r12) } v3.h=vasl(v3.h,r12) -#CHECK: 1965d7ac { v12.w = vasr(v23.w,{{ *}}r5) } +#CHECK: 1965d7ac { v12.w=vasr(v23.w,r5) } v12.w=vasr(v23.w,r5) -#CHECK: 197dddc3 { v3.h = vasr(v29.h,{{ *}}r29) } +#CHECK: 197dddc3 { v3.h=vasr(v29.h,r29) } v3.h=vasr(v29.h,r29) -#CHECK: 197adde8 { v8.w = vasl(v29.w,{{ *}}r26) } +#CHECK: 197adde8 { v8.w=vasl(v29.w,r26) } v8.w=vasl(v29.w,r26) -#CHECK: 1977cc26 { v6 = vror(v12,{{ *}}r23) } +#CHECK: 1977cc26 { v6=vror(v12,r23) } v6=vror(v12,r23) -#CHECK: 1e02cfad { v13.uw = vcl0(v15.uw) } +#CHECK: 1e02cfad { v13.uw=vcl0(v15.uw) } v13.uw=vcl0(v15.uw) -#CHECK: 1e02defb { v27.uh = vcl0(v30.uh) } +#CHECK: 1e02defb { v27.uh=vcl0(v30.uh) } v27.uh=vcl0(v30.uh) -#CHECK: 1e03de90 { v16.w = vnormamt(v30.w) } +#CHECK: 1e03de90 { v16.w=vnormamt(v30.w) } v16.w=vnormamt(v30.w) -#CHECK: 1e03d4a3 { v3.h = vnormamt(v20.h) } +#CHECK: 1e03d4a3 { v3.h=vnormamt(v20.h) } v3.h=vnormamt(v20.h) -#CHECK: 1e02c2d8 { v24.h = vpopcount(v2.h) } +#CHECK: 1e02c2d8 { v24.h=vpopcount(v2.h) } v24.h=vpopcount(v2.h) Index: test/MC/Hexagon/v60-vcmp.s =================================================================== --- test/MC/Hexagon/v60-vcmp.s +++ test/MC/Hexagon/v60-vcmp.s @@ -2,83 +2,83 @@ #RUN: llvm-objdump -triple=hexagon -mcpu=hexagonv60 -d - | \ #RUN: FileCheck %s -#CHECK: 1c81f142 { q2 |= vcmp.eq(v17.b{{ *}},{{ *}}v1.b) } +#CHECK: 1c81f142 { q2|=vcmp.eq(v17.b,v1.b) } q2|=vcmp.eq(v17.b,v1.b) -#CHECK: 1c84fb2a { q2 &= vcmp.gt(v27.uw{{ *}},{{ *}}v4.uw) } +#CHECK: 1c84fb2a { q2&=vcmp.gt(v27.uw,v4.uw) } q2&=vcmp.gt(v27.uw,v4.uw) -#CHECK: 1c8cf826 { q2 &= vcmp.gt(v24.uh{{ *}},{{ *}}v12.uh) } +#CHECK: 1c8cf826 { q2&=vcmp.gt(v24.uh,v12.uh) } q2&=vcmp.gt(v24.uh,v12.uh) -#CHECK: 1c80e720 { q0 &= vcmp.gt(v7.ub{{ *}},{{ *}}v0.ub) } +#CHECK: 1c80e720 { q0&=vcmp.gt(v7.ub,v0.ub) } q0&=vcmp.gt(v7.ub,v0.ub) -#CHECK: 1c9aed1a { q2 &= vcmp.gt(v13.w{{ *}},{{ *}}v26.w) } +#CHECK: 1c9aed1a { q2&=vcmp.gt(v13.w,v26.w) } q2&=vcmp.gt(v13.w,v26.w) -#CHECK: 1c8de516 { q2 &= vcmp.gt(v5.h{{ *}},{{ *}}v13.h) } +#CHECK: 1c8de516 { q2&=vcmp.gt(v5.h,v13.h) } q2&=vcmp.gt(v5.h,v13.h) -#CHECK: 1c8dfc11 { q1 &= vcmp.gt(v28.b{{ *}},{{ *}}v13.b) } +#CHECK: 1c8dfc11 { q1&=vcmp.gt(v28.b,v13.b) } q1&=vcmp.gt(v28.b,v13.b) -#CHECK: 1c94fa0b { q3 &= vcmp.eq(v26.w{{ *}},{{ *}}v20.w) } +#CHECK: 1c94fa0b { q3&=vcmp.eq(v26.w,v20.w) } 
q3&=vcmp.eq(v26.w,v20.w)
-#CHECK: 1c83e206 { q2 &= vcmp.eq(v2.h{{ *}},{{ *}}v3.h) }
+#CHECK: 1c83e206 { q2&=vcmp.eq(v2.h,v3.h) }
q2&=vcmp.eq(v2.h,v3.h)
-#CHECK: 1c85e900 { q0 &= vcmp.eq(v9.b{{ *}},{{ *}}v5.b) }
+#CHECK: 1c85e900 { q0&=vcmp.eq(v9.b,v5.b) }
q0&=vcmp.eq(v9.b,v5.b)
-#CHECK: 1c9cfca8 { q0 ^= vcmp.gt(v28.uw{{ *}},{{ *}}v28.uw) }
+#CHECK: 1c9cfca8 { q0^=vcmp.gt(v28.uw,v28.uw) }
q0^=vcmp.gt(v28.uw,v28.uw)
-#CHECK: 1c81faa0 { q0 ^= vcmp.gt(v26.ub{{ *}},{{ *}}v1.ub) }
+#CHECK: 1c81faa0 { q0^=vcmp.gt(v26.ub,v1.ub) }
q0^=vcmp.gt(v26.ub,v1.ub)
-#CHECK: 1c96f0a4 { q0 ^= vcmp.gt(v16.uh{{ *}},{{ *}}v22.uh) }
+#CHECK: 1c96f0a4 { q0^=vcmp.gt(v16.uh,v22.uh) }
q0^=vcmp.gt(v16.uh,v22.uh)
-#CHECK: 1c9bf795 { q1 ^= vcmp.gt(v23.h{{ *}},{{ *}}v27.h) }
+#CHECK: 1c9bf795 { q1^=vcmp.gt(v23.h,v27.h) }
q1^=vcmp.gt(v23.h,v27.h)
-#CHECK: 1c9de698 { q0 ^= vcmp.gt(v6.w{{ *}},{{ *}}v29.w) }
+#CHECK: 1c9de698 { q0^=vcmp.gt(v6.w,v29.w) }
q0^=vcmp.gt(v6.w,v29.w)
-#CHECK: 1c82ef8a { q2 ^= vcmp.eq(v15.w{{ *}},{{ *}}v2.w) }
+#CHECK: 1c82ef8a { q2^=vcmp.eq(v15.w,v2.w) }
q2^=vcmp.eq(v15.w,v2.w)
-#CHECK: 1c99e891 { q1 ^= vcmp.gt(v8.b{{ *}},{{ *}}v25.b) }
+#CHECK: 1c99e891 { q1^=vcmp.gt(v8.b,v25.b) }
q1^=vcmp.gt(v8.b,v25.b)
-#CHECK: 1c8afe55 { q1 |= vcmp.gt(v30.h{{ *}},{{ *}}v10.h) }
+#CHECK: 1c8afe55 { q1|=vcmp.gt(v30.h,v10.h) }
q1|=vcmp.gt(v30.h,v10.h)
-#CHECK: 1c92ef50 { q0 |= vcmp.gt(v15.b{{ *}},{{ *}}v18.b) }
+#CHECK: 1c92ef50 { q0|=vcmp.gt(v15.b,v18.b) }
q0|=vcmp.gt(v15.b,v18.b)
-#CHECK: 1c9ffb4b { q3 |= vcmp.eq(v27.w{{ *}},{{ *}}v31.w) }
+#CHECK: 1c9ffb4b { q3|=vcmp.eq(v27.w,v31.w) }
q3|=vcmp.eq(v27.w,v31.w)
-#CHECK: 1c87e944 { q0 |= vcmp.eq(v9.h{{ *}},{{ *}}v7.h) }
+#CHECK: 1c87e944 { q0|=vcmp.eq(v9.h,v7.h) }
q0|=vcmp.eq(v9.h,v7.h)
-#CHECK: 1c8ee768 { q0 |= vcmp.gt(v7.uw{{ *}},{{ *}}v14.uw) }
+#CHECK: 1c8ee768 { q0|=vcmp.gt(v7.uw,v14.uw) }
q0|=vcmp.gt(v7.uw,v14.uw)
-#CHECK: 1c92e265 { q1 |= vcmp.gt(v2.uh{{ *}},{{ *}}v18.uh) }
+#CHECK: 1c92e265 { q1|=vcmp.gt(v2.uh,v18.uh) }
q1|=vcmp.gt(v2.uh,v18.uh)
-#CHECK: 1c80f062 { q2 |= vcmp.gt(v16.ub{{ *}},{{ *}}v0.ub) }
+#CHECK: 1c80f062 { q2|=vcmp.gt(v16.ub,v0.ub) }
q2|=vcmp.gt(v16.ub,v0.ub)
-#CHECK: 1c91f75a { q2 |= vcmp.gt(v23.w{{ *}},{{ *}}v17.w) }
+#CHECK: 1c91f75a { q2|=vcmp.gt(v23.w,v17.w) }
q2|=vcmp.gt(v23.w,v17.w)
-#CHECK: 1c86fe84 { q0 ^= vcmp.eq(v30.h{{ *}},{{ *}}v6.h) }
+#CHECK: 1c86fe84 { q0^=vcmp.eq(v30.h,v6.h) }
q0^=vcmp.eq(v30.h,v6.h)
-#CHECK: 1c86ec82 { q2 ^= vcmp.eq(v12.b{{ *}},{{ *}}v6.b) }
+#CHECK: 1c86ec82 { q2^=vcmp.eq(v12.b,v6.b) }
q2^=vcmp.eq(v12.b,v6.b)
Index: test/MC/Hexagon/v60-vmem.s
===================================================================
--- test/MC/Hexagon/v60-vmem.s
+++ test/MC/Hexagon/v60-vmem.s
@@ -2,289 +2,289 @@
#RUN: llvm-objdump -triple=hexagon -mcpu=hexagonv60 -d - | \
#RUN: FileCheck %s
-#CHECK: 292cc11b { vmem(r12++#1) = v27 }
+#CHECK: 292cc11b { vmem(r12++#1)=v27 }
{ vmem(r12++#1)=v27 }
-#CHECK: 294dc319 { v25 = vmem(r13++#3):nt }
+#CHECK: 294dc319 { v25=vmem(r13++#3):nt }
{ v25=vmem(r13++#3):nt }
-#CHECK: 2904c1fb { v27 = vmemu(r4++#1) }
+#CHECK: 2904c1fb { v27=vmemu(r4++#1) }
{ v27=vmemu(r4++#1) }
-#CHECK: 291dc01f { v31 = vmem(r29++#0) }
+#CHECK: 291dc01f { v31=vmem(r29++#0) }
{ v31=vmem(r29++#0) }
-#CHECK: 293ec0ff { vmemu(r30++#0) = v31 }
+#CHECK: 293ec0ff { vmemu(r30++#0)=v31 }
{ vmemu(r30++#0)=v31 }
-#CHECK: 296ec411 { vmem(r14++#-4):nt = v17 }
+#CHECK: 296ec411 { vmem(r14++#-4):nt=v17 }
{ vmem(r14++#-4):nt=v17 }
-#CHECK: 29fec62f { if (!p0) vmem(r30++#-2):nt = v15 }
+#CHECK: 29fec62f { if (!p0) vmem(r30++#-2):nt=v15 }
{ if (!p0) vmem(r30++#-2):nt=v15 }
-#CHECK: 29f9c914 { if (p1) vmem(r25++#1):nt = v20 }
+#CHECK: 29f9c914 { if (p1) vmem(r25++#1):nt=v20 }
{ if (p1) vmem(r25++#1):nt=v20 }
-#CHECK: 2984de30 { if (!q3) vmem(r4++#-2) = v16 }
+#CHECK: 2984de30 { if (!q3) vmem(r4++#-2)=v16 }
{ if (!q3) vmem(r4++#-2)=v16 }
-#CHECK: 2992dd1f { if (q3) vmem(r18++#-3) = v31 }
+#CHECK: 2992dd1f { if (q3) vmem(r18++#-3)=v31 }
{ if (q3) vmem(r18++#-3)=v31 }
-#CHECK: 29c9c425 { if (!q0) vmem(r9++#-4):nt = v5 }
+#CHECK: 29c9c425 { if (!q0) vmem(r9++#-4):nt=v5 }
{ if (!q0) vmem(r9++#-4):nt=v5 }
-#CHECK: 29d1cf11 { if (q1) vmem(r17++#-1):nt = v17 }
+#CHECK: 29d1cf11 { if (q1) vmem(r17++#-1):nt=v17 }
{ if (q1) vmem(r17++#-1):nt=v17 }
-#CHECK: 29a7c328 { if (!p0) vmem(r7++#3) = v8 }
+#CHECK: 29a7c328 { if (!p0) vmem(r7++#3)=v8 }
{ if (!p0) vmem(r7++#3)=v8 }
-#CHECK: 29b6cc1d { if (p1) vmem(r22++#-4) = v29 }
+#CHECK: 29b6cc1d { if (p1) vmem(r22++#-4)=v29 }
{ if (p1) vmem(r22++#-4)=v29 }
-#CHECK: 29abc5fe { if (!p0) vmemu(r11++#-3) = v30 }
+#CHECK: 29abc5fe { if (!p0) vmemu(r11++#-3)=v30 }
{ if (!p0) vmemu(r11++#-3)=v30 }
-#CHECK: 29b8d5c4 { if (p2) vmemu(r24++#-3) = v4 }
+#CHECK: 29b8d5c4 { if (p2) vmemu(r24++#-3)=v4 }
{ if (p2) vmemu(r24++#-3)=v4 }
-#CHECK: 2860e407 { vmem(r0+#-4):nt = v7 }
+#CHECK: 2860e407 { vmem(r0+#-4):nt=v7 }
{ vmem(r0+#-4):nt=v7 }
-#CHECK: 2830e2e7 { vmemu(r16+#-6) = v7 }
+#CHECK: 2830e2e7 { vmemu(r16+#-6)=v7 }
{ vmemu(r16+#-6)=v7 }
-#CHECK: 2839c316 { vmem(r25+#3) = v22 }
+#CHECK: 2839c316 { vmem(r25+#3)=v22 }
{ vmem(r25+#3)=v22 }
-#CHECK: 284be316 { v22 = vmem(r11+#-5):nt }
+#CHECK: 284be316 { v22=vmem(r11+#-5):nt }
{ v22=vmem(r11+#-5):nt }
-#CHECK: 280ec1e6 { v6 = vmemu(r14+#1) }
+#CHECK: 280ec1e6 { v6=vmemu(r14+#1) }
{ v6=vmemu(r14+#1) }
-#CHECK: 280ae50c { v12 = vmem(r10+#-3) }
+#CHECK: 280ae50c { v12=vmem(r10+#-3) }
{ v12=vmem(r10+#-3) }
-#CHECK: 2b62e005 { vmem(r2++m1):nt = v5 }
+#CHECK: 2b62e005 { vmem(r2++m1):nt=v5 }
{ vmem(r2++m1):nt=v5 }
-#CHECK: 2b28e0f2 { vmemu(r8++m1) = v18 }
+#CHECK: 2b28e0f2 { vmemu(r8++m1)=v18 }
{ vmemu(r8++m1)=v18 }
-#CHECK: 2b42e019 { v25 = vmem(r2++m1):nt }
+#CHECK: 2b42e019 { v25=vmem(r2++m1):nt }
{ v25=vmem(r2++m1):nt }
-#CHECK: 2b2ce009 { vmem(r12++m1) = v9 }
+#CHECK: 2b2ce009 { vmem(r12++m1)=v9 }
{ vmem(r12++m1)=v9 }
-#CHECK: 2b03c005 { v5 = vmem(r3++m0) }
+#CHECK: 2b03c005 { v5=vmem(r3++m0) }
{ v5=vmem(r3++m0) }
-#CHECK: 2b0ec0f5 { v21 = vmemu(r14++m0) }
+#CHECK: 2b0ec0f5 { v21=vmemu(r14++m0) }
{ v21=vmemu(r14++m0) }
-#CHECK: 2be8c022 { if (!p0) vmem(r8++m0):nt = v2 }
+#CHECK: 2be8c022 { if (!p0) vmem(r8++m0):nt=v2 }
{ if (!p0) vmem(r8++m0):nt=v2 }
-#CHECK: 2bebd813 { if (p3) vmem(r11++m0):nt = v19 }
+#CHECK: 2bebd813 { if (p3) vmem(r11++m0):nt=v19 }
{ if (p3) vmem(r11++m0):nt=v19 }
-#CHECK: 2ba5e0e7 { if (!p0) vmemu(r5++m1) = v7 }
+#CHECK: 2ba5e0e7 { if (!p0) vmemu(r5++m1)=v7 }
{ if (!p0) vmemu(r5++m1)=v7 }
-#CHECK: 2ba4f0dd { if (p2) vmemu(r4++m1) = v29 }
+#CHECK: 2ba4f0dd { if (p2) vmemu(r4++m1)=v29 }
{ if (p2) vmemu(r4++m1)=v29 }
-#CHECK: 2ba4e828 { if (!p1) vmem(r4++m1) = v8 }
+#CHECK: 2ba4e828 { if (!p1) vmem(r4++m1)=v8 }
{ if (!p1) vmem(r4++m1)=v8 }
-#CHECK: 2bbae803 { if (p1) vmem(r26++m1) = v3 }
+#CHECK: 2bbae803 { if (p1) vmem(r26++m1)=v3 }
{ if (p1) vmem(r26++m1)=v3 }
-#CHECK: 2bc9c027 { if (!q0) vmem(r9++m0):nt = v7 }
+#CHECK: 2bc9c027 { if (!q0) vmem(r9++m0):nt=v7 }
{ if (!q0) vmem(r9++m0):nt=v7 }
-#CHECK: 2bcfc001 { if (q0) vmem(r15++m0):nt = v1 }
+#CHECK: 2bcfc001 { if (q0) vmem(r15++m0):nt=v1 }
{ if (q0) vmem(r15++m0):nt=v1 }
-#CHECK: 2b97f031 { if (!q2) vmem(r23++m1) = v17 }
+#CHECK: 2b97f031 { if (!q2) vmem(r23++m1)=v17 }
{ if (!q2) vmem(r23++m1)=v17 }
-#CHECK: 2b8ad809 { if (q3) vmem(r10++m0) = v9 }
+#CHECK: 2b8ad809 { if (q3) vmem(r10++m0)=v9 }
{ if (q3) vmem(r10++m0)=v9 }
-#CHECK: 28c7f438 { if (!q2) vmem(r7+#-4):nt = v24 }
+#CHECK: 28c7f438 { if (!q2) vmem(r7+#-4):nt=v24 }
{ if (!q2) vmem(r7+#-4):nt=v24 }
-#CHECK: 28d1eb15 { if (q1) vmem(r17+#-5):nt = v21 }
+#CHECK: 28d1eb15 { if (q1) vmem(r17+#-5):nt=v21 }
{ if (q1) vmem(r17+#-5):nt=v21 }
-#CHECK: 289cfe2b { if (!q3) vmem(r28+#-2) = v11 }
+#CHECK: 289cfe2b { if (!q3) vmem(r28+#-2)=v11 }
{ if (!q3) vmem(r28+#-2)=v11 }
-#CHECK: 288eef0f { if (q1) vmem(r14+#-1) = v15 }
+#CHECK: 288eef0f { if (q1) vmem(r14+#-1)=v15 }
{ if (q1) vmem(r14+#-1)=v15 }
-#CHECK: 28a2d1e1 { if (!p2) vmemu(r2+#1) = v1 }
+#CHECK: 28a2d1e1 { if (!p2) vmemu(r2+#1)=v1 }
{ if (!p2) vmemu(r2+#1)=v1 }
-#CHECK: 28bcf4db { if (p2) vmemu(r28+#-4) = v27 }
+#CHECK: 28bcf4db { if (p2) vmemu(r28+#-4)=v27 }
{ if (p2) vmemu(r28+#-4)=v27 }
-#CHECK: 28b2c925 { if (!p1) vmem(r18+#1) = v5 }
+#CHECK: 28b2c925 { if (!p1) vmem(r18+#1)=v5 }
{ if (!p1) vmem(r18+#1)=v5 }
-#CHECK: 28afe41a { if (p0) vmem(r15+#-4) = v26 }
+#CHECK: 28afe41a { if (p0) vmem(r15+#-4)=v26 }
{ if (p0) vmem(r15+#-4)=v26 }
-#CHECK: 28f7fd3a { if (!p3) vmem(r23+#-3):nt = v26 }
+#CHECK: 28f7fd3a { if (!p3) vmem(r23+#-3):nt=v26 }
{ if (!p3) vmem(r23+#-3):nt=v26 }
-#CHECK: 28f5fd10 { if (p3) vmem(r21+#-3):nt = v16 }
+#CHECK: 28f5fd10 { if (p3) vmem(r21+#-3):nt=v16 }
{ if (p3) vmem(r21+#-3):nt=v16 }
-#CHECK: 2945c440 v0.tmp = vmem(r5++#-4):nt }
+#CHECK: 2945c440 v0.tmp=vmem(r5++#-4):nt }
{ v0.tmp=vmem(r5++#-4):nt
v26=v0
}
-#CHECK: 2942c338 v24.cur = vmem(r2++#3):nt }
+#CHECK: 2942c338 v24.cur=vmem(r2++#3):nt }
{ v24.cur=vmem(r2++#3):nt
v6=v24
}
-#CHECK: 2908c157 v23.tmp = vmem(r8++#1) }
+#CHECK: 2908c157 v23.tmp=vmem(r8++#1) }
{ v25=v23
v23.tmp=vmem(r8++#1)
}
-#CHECK: 2903c72d v13.cur = vmem(r3++#-1) }
+#CHECK: 2903c72d v13.cur=vmem(r3++#-1) }
{ v13.cur=vmem(r3++#-1)
v21=v13
}
-#CHECK: 2855c743 v3.tmp = vmem(r21+#7):nt }
+#CHECK: 2855c743 v3.tmp=vmem(r21+#7):nt }
{ v3.tmp=vmem(r21+#7):nt
v21=v3
}
-#CHECK: 2856e025 v5.cur = vmem(r22+#-8):nt }
+#CHECK: 2856e025 v5.cur=vmem(r22+#-8):nt }
{ v5.cur=vmem(r22+#-8):nt
v29=v5
}
-#CHECK: 2802c555 v21.tmp = vmem(r2+#5) }
+#CHECK: 2802c555 v21.tmp=vmem(r2+#5) }
{ v31=v21
v21.tmp=vmem(r2+#5)
}
-#CHECK: 2814e12a v10.cur = vmem(r20+#-7) }
+#CHECK: 2814e12a v10.cur=vmem(r20+#-7) }
{ v9=v10
v10.cur=vmem(r20+#-7)
@@ -291,134 +291,134 @@
}
-#CHECK: 2b52c02c v12.cur = vmem(r18++m0):nt }
+#CHECK: 2b52c02c v12.cur=vmem(r18++m0):nt }
{ v12.cur=vmem(r18++m0):nt
v25=v12
}
-#CHECK: 2b4ae043 v3.tmp = vmem(r10++m1):nt }
+#CHECK: 2b4ae043 v3.tmp=vmem(r10++m1):nt }
{ v25=v3
v3.tmp=vmem(r10++m1):nt
}
-#CHECK: 2b06c025 v5.cur = vmem(r6++m0) }
+#CHECK: 2b06c025 v5.cur=vmem(r6++m0) }
{ v5.cur=vmem(r6++m0)
v10=v5
}
-#CHECK: 2b17e048 v8.tmp = vmem(r23++m1) }
+#CHECK: 2b17e048 v8.tmp=vmem(r23++m1) }
{ v8.tmp=vmem(r23++m1)
v28=v8
}
-#CHECK: 282ee422 vmem(r14+#-4) = v14.new }
+#CHECK: 282ee422 vmem(r14+#-4)=v14.new }
{
- v14 = v14
+ v14=v14
vmem(r14+#-4)=v14.new
}
-#CHECK: 2866e222 vmem(r6+#-6):nt = v16.new }
+#CHECK: 2866e222 vmem(r6+#-6):nt=v16.new }
{
- v16 = v8
+ v16=v8
vmem(r6+#-6):nt=v16.new
}
-#CHECK: 28b1cd42 if(p1) vmem(r17+#5) = v17.new }
+#CHECK: 28b1cd42 if (p1) vmem(r17+#5)=v17.new }
{
- v17 = v25
+ v17=v25
if(p1)vmem(r17+#5)=v17.new
}
-#CHECK: 28bbeb6a if(!p1) vmem(r27+#-5) = v17.new }
+#CHECK: 28bbeb6a if (!p1) vmem(r27+#-5)=v17.new }
{
- v17 = v15
+ v17=v15
if(!p1)vmem(r27+#-5)=v17.new
}
-#CHECK: 28e4d252 if(p2) vmem(r4+#2):nt = v24.new }
+#CHECK: 28e4d252 if (p2) vmem(r4+#2):nt=v24.new }
{
- v24 = v10
+ v24=v10
if(p2)vmem(r4+#2):nt=v24.new
}
-#CHECK: 28f8d17a if(!p2) vmem(r24+#1):nt = v4.new }
+#CHECK: 28f8d17a if (!p2) vmem(r24+#1):nt=v4.new }
{
- v4 = v8
+ v4=v8
if(!p2)vmem(r24+#1):nt=v4.new
}
-#CHECK: 2924c322 vmem(r4++#3) = v4.new }
+#CHECK: 2924c322 vmem(r4++#3)=v4.new }
{
- v4 = v3
+ v4=v3
vmem(r4++#3)=v4.new
}
-#CHECK: 2961c122 vmem(r1++#1):nt = v7.new }
+#CHECK: 2961c122 vmem(r1++#1):nt=v7.new }
{
- v7 = v8
+ v7=v8
vmem(r1++#1):nt=v7.new
}
-#CHECK: 29a6d042 if(p2) vmem(r6++#0) = v11.new }
+#CHECK: 29a6d042 if (p2) vmem(r6++#0)=v11.new }
{
- v11 = v13
+ v11=v13
if(p2)vmem(r6++#0)=v11.new
}
-#CHECK: 29a2cb6a if(!p1) vmem(r2++#3) = v25.new }
+#CHECK: 29a2cb6a if (!p1) vmem(r2++#3)=v25.new }
{
- v25 = v17
+ v25=v17
if(!p1)vmem(r2++#3)=v25.new
}
-#CHECK: 29f5c952 if(p1) vmem(r21++#1):nt = v14.new }
+#CHECK: 29f5c952 if (p1) vmem(r21++#1):nt=v14.new }
{
- v14 = v13
+ v14=v13
if(p1)vmem(r21++#1):nt=v14.new
}
-#CHECK: 29f7cd7a if(!p1) vmem(r23++#-3):nt = v1.new }
+#CHECK: 29f7cd7a if (!p1) vmem(r23++#-3):nt=v1.new }
{
- v1 = v0
+ v1=v0
if(!p1)vmem(r23++#-3):nt=v1.new
}
-#CHECK: 2b3ec022 vmem(r30++m0) = v10.new }
+#CHECK: 2b3ec022 vmem(r30++m0)=v10.new }
{
- v10 = v23
+ v10=v23
vmem(r30++m0)=v10.new
}
-#CHECK: 2b6fc022 vmem(r15++m0):nt = v19.new }
+#CHECK: 2b6fc022 vmem(r15++m0):nt=v19.new }
{
- v19 = v20
+ v19=v20
vmem(r15++m0):nt=v19.new
}
-#CHECK: 2bb7f042 if(p2) vmem(r23++m1) = v6.new }
+#CHECK: 2bb7f042 if (p2) vmem(r23++m1)=v6.new }
{
- v6 = v30
+ v6=v30
if(p2)vmem(r23++m1)=v6.new
}
-#CHECK: 2ba2f06a if(!p2) vmem(r2++m1) = v12.new }
+#CHECK: 2ba2f06a if (!p2) vmem(r2++m1)=v12.new }
{
- v12 = v9
+ v12=v9
if(!p2)vmem(r2++m1)=v12.new
}
-#CHECK: 2be7e852 if(p1) vmem(r7++m1):nt = v3.new }
+#CHECK: 2be7e852 if (p1) vmem(r7++m1):nt=v3.new }
{
- v3 = v13
+ v3=v13
if(p1)vmem(r7++m1):nt=v3.new
}
-#CHECK: 2bfdd07a if(!p2) vmem(r29++m0):nt = v29.new }
+#CHECK: 2bfdd07a if (!p2) vmem(r29++m0):nt=v29.new }
{
- v29 = v9
+ v29=v9
if(!p2)vmem(r29++m0):nt=v29.new
}
Index: test/MC/Hexagon/v60-vmpy-acc.s
===================================================================
--- test/MC/Hexagon/v60-vmpy-acc.s
+++ test/MC/Hexagon/v60-vmpy-acc.s
@@ -2,122 +2,122 @@
#RUN: llvm-objdump -triple=hexagon -mcpu=hexagonv60 -d - | \
#RUN: FileCheck %s
-#CHECK: 1936ee37 { v23.w += vdmpy(v15:14.h,r22.uh,#1):sat }
-v23.w += vdmpy(v15:14.h,r22.uh,#1):sat
+#CHECK: 1936ee37 { v23.w+=vdmpy(v15:14.h,r22.uh,#1):sat }
+v23.w+=vdmpy(v15:14.h,r22.uh,#1):sat
-#CHECK: 193bf90f { v15.w += vdmpy(v25.h,r27.uh):sat }
-v15.w += vdmpy(v25.h,r27.uh):sat
+#CHECK: 193bf90f { v15.w+=vdmpy(v25.h,r27.uh):sat }
+v15.w+=vdmpy(v25.h,r27.uh):sat
-#CHECK: 1902fcf0 { v17:16.h += vdmpy(v29:28.ub,r2.b) }
-v17:16.h += vdmpy(v29:28.ub,r2.b)
+#CHECK: 1902fcf0 { v17:16.h+=vdmpy(v29:28.ub,r2.b) }
+v17:16.h+=vdmpy(v29:28.ub,r2.b)
-#CHECK: 190cffd1 { v17.h += vdmpy(v31.ub,r12.b) }
-v17.h += vdmpy(v31.ub,r12.b)
+#CHECK: 190cffd1 { v17.h+=vdmpy(v31.ub,r12.b) }
+v17.h+=vdmpy(v31.ub,r12.b)
-#CHECK: 1900f5ac { v12.w += vrmpy(v21.ub,r0.b) }
-v12.w += vrmpy(v21.ub,r0.b)
+#CHECK: 1900f5ac { v12.w+=vrmpy(v21.ub,r0.b) }
+v12.w+=vrmpy(v21.ub,r0.b)
-#CHECK: 1905fb86 { v6.uw += vrmpy(v27.ub,r5.ub) }
-v6.uw += vrmpy(v27.ub,r5.ub)
+#CHECK: 1905fb86 { v6.uw+=vrmpy(v27.ub,r5.ub) }
+v6.uw+=vrmpy(v27.ub,r5.ub)
-#CHECK: 191de570 { v16.w += vdmpy(v5.h,r29.b) }
-v16.w += vdmpy(v5.h,r29.b)
+#CHECK: 191de570 { v16.w+=vdmpy(v5.h,r29.b) }
+v16.w+=vdmpy(v5.h,r29.b)
-#CHECK: 191de846 { v7:6.w += vtmpy(v9:8.h,r29.b) }
-v7:6.w += vtmpy(v9:8.h,r29.b)
+#CHECK: 191de846 { v7:6.w+=vtmpy(v9:8.h,r29.b) }
+v7:6.w+=vtmpy(v9:8.h,r29.b)
-#CHECK: 190bfa22 { v3:2.h += vtmpy(v27:26.ub,r11.b) }
-v3:2.h += vtmpy(v27:26.ub,r11.b)
+#CHECK: 190bfa22 { v3:2.h+=vtmpy(v27:26.ub,r11.b) }
+v3:2.h+=vtmpy(v27:26.ub,r11.b)
-#CHECK: 1915e408 { v9:8.h += vtmpy(v5:4.b,r21.b) }
-v9:8.h += vtmpy(v5:4.b,r21.b)
+#CHECK: 1915e408 { v9:8.h+=vtmpy(v5:4.b,r21.b) }
+v9:8.h+=vtmpy(v5:4.b,r21.b)
-#CHECK: 1987f71e { v31:30.uh += vmpy(v23.ub,r7.ub) }
-v31:30.uh += vmpy(v23.ub,r7.ub)
+#CHECK: 1987f71e { v31:30.uh+=vmpy(v23.ub,r7.ub) }
+v31:30.uh+=vmpy(v23.ub,r7.ub)
-#CHECK: 1969ff47 { v7.w += vasl(v31.w,r9) }
-v7.w += vasl(v31.w,r9)
+#CHECK: 1969ff47 { v7.w+=vasl(v31.w,r9) }
+v7.w+=vasl(v31.w,r9)
-#CHECK: 196de3b0 { v16.w += vasr(v3.w,r13) }
-v16.w += vasr(v3.w,r13)
+#CHECK: 196de3b0 { v16.w+=vasr(v3.w,r13) }
+v16.w+=vasr(v3.w,r13)
-#CHECK: 1977fe0a { v11:10.uw += vdsad(v31:30.uh,r23.uh) }
-v11:10.uw += vdsad(v31:30.uh,r23.uh)
+#CHECK: 1977fe0a { v11:10.uw+=vdsad(v31:30.uh,r23.uh) }
+v11:10.uw+=vdsad(v31:30.uh,r23.uh)
-#CHECK: 196eee36 { v22.h += vmpyi(v14.h,r14.b) }
-v22.h += vmpyi(v14.h,r14.b)
+#CHECK: 196eee36 { v22.h+=vmpyi(v14.h,r14.b) }
+v22.h+=vmpyi(v14.h,r14.b)
-#CHECK: 1931faac { v13:12.h += vmpy(v26.ub,r17.b) }
-v13:12.h += vmpy(v26.ub,r17.b)
+#CHECK: 1931faac { v13:12.h+=vmpy(v26.ub,r17.b) }
+v13:12.h+=vmpy(v26.ub,r17.b)
-#CHECK: 193cfc94 { v21:20.w += vdmpy(v29:28.h,r28.b) }
-v21:20.w += vdmpy(v29:28.h,r28.b)
+#CHECK: 193cfc94 { v21:20.w+=vdmpy(v29:28.h,r28.b) }
+v21:20.w+=vdmpy(v29:28.h,r28.b)
-#CHECK: 1934fc62 { v2.w += vdmpy(v28.h,r20.h):sat }
-v2.w += vdmpy(v28.h,r20.h):sat
+#CHECK: 1934fc62 { v2.w+=vdmpy(v28.h,r20.h):sat }
+v2.w+=vdmpy(v28.h,r20.h):sat
-#CHECK: 1925fe5f { v31.w += vdmpy(v31:30.h,r5.h):sat }
-v31.w += vdmpy(v31:30.h,r5.h):sat
+#CHECK: 1925fe5f { v31.w+=vdmpy(v31:30.h,r5.h):sat }
+v31.w+=vdmpy(v31:30.h,r5.h):sat
-#CHECK: 194efe36 { v23:22.uw += vmpy(v30.uh,r14.uh) }
-v23:22.uw += vmpy(v30.uh,r14.uh)
+#CHECK: 194efe36 { v23:22.uw+=vmpy(v30.uh,r14.uh) }
+v23:22.uw+=vmpy(v30.uh,r14.uh)
-#CHECK: 1948e306 { v7:6.w += vmpy(v3.h,r8.h):sat }
-v7:6.w += vmpy(v3.h,r8.h):sat
+#CHECK: 1948e306 { v7:6.w+=vmpy(v3.h,r8.h):sat }
+v7:6.w+=vmpy(v3.h,r8.h):sat
-#CHECK: 192af2f8 { v25:24.w += vmpa(v19:18.h,r10.b) }
-v25:24.w += vmpa(v19:18.h,r10.b)
+#CHECK: 192af2f8 { v25:24.w+=vmpa(v19:18.h,r10.b) }
+v25:24.w+=vmpa(v19:18.h,r10.b)
-#CHECK: 1926e4da { v27:26.h += vmpa(v5:4.ub,r6.b) }
-v27:26.h += vmpa(v5:4.ub,r6.b)
+#CHECK: 1926e4da { v27:26.h+=vmpa(v5:4.ub,r6.b) }
+v27:26.h+=vmpa(v5:4.ub,r6.b)
-#CHECK: 194ff078 { v24.w += vmpyi(v16.w,r15.h) }
-v24.w += vmpyi(v16.w,r15.h)
+#CHECK: 194ff078 { v24.w+=vmpyi(v16.w,r15.h) }
+v24.w+=vmpyi(v16.w,r15.h)
-#CHECK: 1946e247 { v7.w += vmpyi(v2.w,r6.b) }
-v7.w += vmpyi(v2.w,r6.b)
+#CHECK: 1946e247 { v7.w+=vmpyi(v2.w,r6.b) }
+v7.w+=vmpyi(v2.w,r6.b)
-#CHECK: 1c3fead5 { v21.w += vmpyo(v10.w,v31.h):<<1:sat:shift }
-v21.w += vmpyo(v10.w,v31.h):<<1:sat:shift
+#CHECK: 1c3fead5 { v21.w+=vmpyo(v10.w,v31.h):<<1:sat:shift }
+v21.w+=vmpyo(v10.w,v31.h):<<1:sat:shift
-#CHECK: 1c30e1fa { v26.w += vmpyo(v1.w,v16.h):<<1:rnd:sat:shift }
-v26.w += vmpyo(v1.w,v16.h):<<1:rnd:sat:shift
+#CHECK: 1c30e1fa { v26.w+=vmpyo(v1.w,v16.h):<<1:rnd:sat:shift }
+v26.w+=vmpyo(v1.w,v16.h):<<1:rnd:sat:shift
-#CHECK: 1c34f690 { v16.h += vmpyi(v22.h,v20.h) }
-v16.h += vmpyi(v22.h,v20.h)
+#CHECK: 1c34f690 { v16.h+=vmpyi(v22.h,v20.h) }
+v16.h+=vmpyi(v22.h,v20.h)
-#CHECK: 1c34f4b5 { v21.w += vmpyie(v20.w,v20.uh) }
-v21.w += vmpyie(v20.w,v20.uh)
+#CHECK: 1c34f4b5 { v21.w+=vmpyie(v20.w,v20.uh) }
+v21.w+=vmpyie(v20.w,v20.uh)
-#CHECK: 1c54f804 { v4.w += vmpyie(v24.w,v20.h) }
-v4.w += vmpyie(v24.w,v20.h)
+#CHECK: 1c54f804 { v4.w+=vmpyie(v24.w,v20.h) }
+v4.w+=vmpyie(v24.w,v20.h)
-#CHECK: 1c1ff6f4 { v21:20.w += vmpy(v22.h,v31.h) }
-v21:20.w += vmpy(v22.h,v31.h)
+#CHECK: 1c1ff6f4 { v21:20.w+=vmpy(v22.h,v31.h) }
+v21:20.w+=vmpy(v22.h,v31.h)
-#CHECK: 1c31f026 { v7:6.w += vmpy(v16.h,v17.uh) }
-v7:6.w += vmpy(v16.h,v17.uh)
+#CHECK: 1c31f026 { v7:6.w+=vmpy(v16.h,v17.uh) }
+v7:6.w+=vmpy(v16.h,v17.uh)
-#CHECK: 1c12fb98 { v25:24.h += vmpy(v27.b,v18.b) }
-v25:24.h += vmpy(v27.b,v18.b)
+#CHECK: 1c12fb98 { v25:24.h+=vmpy(v27.b,v18.b) }
+v25:24.h+=vmpy(v27.b,v18.b)
-#CHECK: 1c17fcc0 { v1:0.h += vmpy(v28.ub,v23.b) }
-v1:0.h += vmpy(v28.ub,v23.b)
+#CHECK: 1c17fcc0 { v1:0.h+=vmpy(v28.ub,v23.b) }
+v1:0.h+=vmpy(v28.ub,v23.b)
-#CHECK: 1c16f26f { v15.w += vdmpy(v18.h,v22.h):sat }
-v15.w += vdmpy(v18.h,v22.h):sat
+#CHECK: 1c16f26f { v15.w+=vdmpy(v18.h,v22.h):sat }
+v15.w+=vdmpy(v18.h,v22.h):sat
-#CHECK: 1c0bea3a { v26.w += vrmpy(v10.b,v11.b) }
-v26.w += vrmpy(v10.b,v11.b)
+#CHECK: 1c0bea3a { v26.w+=vrmpy(v10.b,v11.b) }
+v26.w+=vrmpy(v10.b,v11.b)
-#CHECK: 1c15eb47 { v7.w += vrmpy(v11.ub,v21.b) }
-v7.w += vrmpy(v11.ub,v21.b)
+#CHECK: 1c15eb47 { v7.w+=vrmpy(v11.ub,v21.b) }
+v7.w+=vrmpy(v11.ub,v21.b)
-#CHECK: 1c26e40e { v15:14.uw += vmpy(v4.uh,v6.uh) }
-v15:14.uw += vmpy(v4.uh,v6.uh)
+#CHECK: 1c26e40e { v15:14.uw+=vmpy(v4.uh,v6.uh) }
+v15:14.uw+=vmpy(v4.uh,v6.uh)
-#CHECK: 1c0df9a8 { v9:8.uh += vmpy(v25.ub,v13.ub) }
-v9:8.uh += vmpy(v25.ub,v13.ub)
+#CHECK: 1c0df9a8 { v9:8.uh+=vmpy(v25.ub,v13.ub) }
+v9:8.uh+=vmpy(v25.ub,v13.ub)
-#CHECK: 1c0afc15 { v21.uw += vrmpy(v28.ub,v10.ub) }
+#CHECK: 1c0afc15 { v21.uw+=vrmpy(v28.ub,v10.ub) }
v21.uw += vrmpy(v28.ub,v10.ub)
Index: test/MC/Hexagon/v60-vmpy1.s
===================================================================
--- test/MC/Hexagon/v60-vmpy1.s
+++ test/MC/Hexagon/v60-vmpy1.s
@@ -2,137 +2,137 @@
#RUN: llvm-objdump -triple=hexagon -mcpu=hexagonv60 -d - | \
#RUN: FileCheck %s
-#CHECK: 1939c223 { v3.w = vdmpy(v3:2.h,{{ *}}r25.uh,{{ *}}#1):sat }
+#CHECK: 1939c223 { v3.w=vdmpy(v3:2.h,r25.uh,#1):sat }
v3.w=vdmpy(v3:2.h,r25.uh,#1):sat
-#CHECK: 1936de0d { v13.w = vdmpy(v30.h,{{ *}}r22.uh):sat }
+#CHECK: 1936de0d { v13.w=vdmpy(v30.h,r22.uh):sat }
v13.w=vdmpy(v30.h,r22.uh):sat
-#CHECK: 1919ccea { v11:10.h = vdmpy(v13:12.ub,{{ *}}r25.b) }
+#CHECK: 1919ccea { v11:10.h=vdmpy(v13:12.ub,r25.b) }
v11:10.h=vdmpy(v13:12.ub,r25.b)
-#CHECK: 1918ced6 { v22.h = vdmpy(v14.ub,{{ *}}r24.b) }
+#CHECK: 1918ced6 { v22.h=vdmpy(v14.ub,r24.b) }
v22.h=vdmpy(v14.ub,r24.b)
-#CHECK: 1911deba { v27:26.uw = vdsad(v31:30.uh,{{ *}}r17.uh) }
+#CHECK: 1911deba { v27:26.uw=vdsad(v31:30.uh,r17.uh) }
v27:26.uw=vdsad(v31:30.uh,r17.uh)
-#CHECK: 1908da97 { v23.w = vrmpy(v26.ub,{{ *}}r8.b) }
+#CHECK: 1908da97 { v23.w=vrmpy(v26.ub,r8.b) }
v23.w=vrmpy(v26.ub,r8.b)
-#CHECK: 1915c974 { v20.uw = vrmpy(v9.ub,{{ *}}r21.ub) }
+#CHECK: 1915c974 { v20.uw=vrmpy(v9.ub,r21.ub) }
v20.uw=vrmpy(v9.ub,r21.ub)
-#CHECK: 190dd446 { v6.w = vdmpy(v20.h,{{ *}}r13.b) }
+#CHECK: 190dd446 { v6.w=vdmpy(v20.h,r13.b) }
v6.w=vdmpy(v20.h,r13.b)
-#CHECK: 190ec030 { v17:16.h = vtmpy(v1:0.ub,{{ *}}r14.b) }
+#CHECK: 190ec030 { v17:16.h=vtmpy(v1:0.ub,r14.b) }
v17:16.h=vtmpy(v1:0.ub,r14.b)
-#CHECK: 1918de1c { v29:28.h = vtmpy(v31:30.b,{{ *}}r24.b) }
+#CHECK: 1918de1c { v29:28.h=vtmpy(v31:30.b,r24.b) }
v29:28.h=vtmpy(v31:30.b,r24.b)
-#CHECK: 198dddf1 { v17.w = vmpyi(v29.w,{{ *}}r13.h) }
+#CHECK: 198dddf1 { v17.w=vmpyi(v29.w,r13.h) }
v17.w=vmpyi(v29.w,r13.h)
-#CHECK: 19bccb13 { v19.w = vmpyi(v11.w,{{ *}}r28.b) }
+#CHECK: 19bccb13 { v19.w=vmpyi(v11.w,r28.b) }
v19.w=vmpyi(v11.w,r28.b)
-#CHECK: 19c8cb0a { v11:10.uh = vmpy(v11.ub,{{ *}}r8.ub) }
+#CHECK: 19c8cb0a { v11:10.uh=vmpy(v11.ub,r8.ub) }
v11:10.uh=vmpy(v11.ub,r8.ub)
-#CHECK: 1973d012 { v18.h = vmpyi(v16.h,{{ *}}r19.b) }
+#CHECK: 1973d012 { v18.h=vmpyi(v16.h,r19.b) }
v18.h=vmpyi(v16.h,r19.b)
-#CHECK: 1922d1aa { v11:10.h = vmpy(v17.ub,{{ *}}r2.b) }
+#CHECK: 1922d1aa { v11:10.h=vmpy(v17.ub,r2.b) }
v11:10.h=vmpy(v17.ub,r2.b)
-#CHECK: 1936ce9c { v29:28.w = vdmpy(v15:14.h,{{ *}}r22.b) }
+#CHECK: 1936ce9c { v29:28.w=vdmpy(v15:14.h,r22.b) }
v29:28.w=vdmpy(v15:14.h,r22.b)
-#CHECK: 1925d86b { v11.w = vdmpy(v25:24.h,{{ *}}r5.h):sat }
+#CHECK: 1925d86b { v11.w=vdmpy(v25:24.h,r5.h):sat }
v11.w=vdmpy(v25:24.h,r5.h):sat
-#CHECK: 1925c255 { v21.w = vdmpy(v2.h,{{ *}}r5.h):sat }
+#CHECK: 1925c255 { v21.w=vdmpy(v2.h,r5.h):sat }
v21.w=vdmpy(v2.h,r5.h):sat
-#CHECK: 1941d424 { v4.h = vmpy(v20.h,{{ *}}r1.h):<<1:sat }
+#CHECK: 1941d424 { v4.h=vmpy(v20.h,r1.h):<<1:sat }
v4.h=vmpy(v20.h,r1.h):<<1:sat
-#CHECK: 1943cf0a { v11:10.w = vmpy(v15.h,{{ *}}r3.h) }
+#CHECK: 1943cf0a { v11:10.w=vmpy(v15.h,r3.h) }
v11:10.w=vmpy(v15.h,r3.h)
-#CHECK: 193ec2f0 { v17:16.w = vmpa(v3:2.h,{{ *}}r30.b) }
+#CHECK: 193ec2f0 { v17:16.w=vmpa(v3:2.h,r30.b) }
v17:16.w=vmpa(v3:2.h,r30.b)
-#CHECK: 193ddcde { v31:30.h = vmpa(v29:28.ub,{{ *}}r29.b) }
+#CHECK: 193ddcde { v31:30.h=vmpa(v29:28.ub,r29.b) }
v31:30.h=vmpa(v29:28.ub,r29.b)
-#CHECK: 1946de76 { v23:22.uw = vmpy(v30.uh,{{ *}}r6.uh) }
+#CHECK: 1946de76 { v23:22.uw=vmpy(v30.uh,r6.uh) }
v23:22.uw=vmpy(v30.uh,r6.uh)
-#CHECK: 1945c945 { v5.h = vmpy(v9.h,{{ *}}r5.h):<<1:rnd:sat }
+#CHECK: 1945c945 { v5.h=vmpy(v9.h,r5.h):<<1:rnd:sat }
v5.h=vmpy(v9.h,r5.h):<<1:rnd:sat
-#CHECK: 19b0c280 { v1:0.w = vtmpy(v3:2.h,{{ *}}r16.b) }
+#CHECK: 19b0c280 { v1:0.w=vtmpy(v3:2.h,r16.b) }
v1:0.w=vtmpy(v3:2.h,r16.b)
-#CHECK: 1c34d937 { v23.h = vmpy(v25.h,{{ *}}v20.h):<<1:rnd:sat }
+#CHECK: 1c34d937 { v23.h=vmpy(v25.h,v20.h):<<1:rnd:sat }
v23.h=vmpy(v25.h,v20.h):<<1:rnd:sat
-#CHECK: 1c36c90a { v11:10.uw = vmpy(v9.uh,{{ *}}v22.uh) }
+#CHECK: 1c36c90a { v11:10.uw=vmpy(v9.uh,v22.uh) }
v11:10.uw=vmpy(v9.uh,v22.uh)
-#CHECK: 1c09c3ec { v13:12.w = vmpy(v3.h,{{ *}}v9.h) }
+#CHECK: 1c09c3ec { v13:12.w=vmpy(v3.h,v9.h) }
v13:12.w=vmpy(v3.h,v9.h)
-#CHECK: 1c0dd1d8 { v25:24.h = vmpy(v17.ub,{{ *}}v13.b) }
+#CHECK: 1c0dd1d8 { v25:24.h=vmpy(v17.ub,v13.b) }
v25:24.h=vmpy(v17.ub,v13.b)
-#CHECK: 1c0dc0a4 { v5:4.uh = vmpy(v0.ub,{{ *}}v13.ub) }
+#CHECK: 1c0dc0a4 { v5:4.uh=vmpy(v0.ub,v13.ub) }
v5:4.uh=vmpy(v0.ub,v13.ub)
-#CHECK: 1c14df84 { v5:4.h = vmpy(v31.b,{{ *}}v20.b) }
+#CHECK: 1c14df84 { v5:4.h=vmpy(v31.b,v20.b) }
v5:4.h=vmpy(v31.b,v20.b)
-#CHECK: 1c16d77c { v28.w = vdmpy(v23.h,{{ *}}v22.h):sat }
+#CHECK: 1c16d77c { v28.w=vdmpy(v23.h,v22.h):sat }
v28.w=vdmpy(v23.h,v22.h):sat
-#CHECK: 1c08d84f { v15.w = vrmpy(v24.ub,{{ *}}v8.b) }
+#CHECK: 1c08d84f { v15.w=vrmpy(v24.ub,v8.b) }
v15.w=vrmpy(v24.ub,v8.b)
-#CHECK: 1c06da29 { v9.w = vrmpy(v26.b,{{ *}}v6.b) }
+#CHECK: 1c06da29 { v9.w=vrmpy(v26.b,v6.b) }
v9.w=vrmpy(v26.b,v6.b)
-#CHECK: 1c1ac805 { v5.uw = vrmpy(v8.ub,{{ *}}v26.ub) }
+#CHECK: 1c1ac805 { v5.uw=vrmpy(v8.ub,v26.ub) }
v5.uw=vrmpy(v8.ub,v26.ub)
-#CHECK: 1c39d089 { v9.h = vmpyi(v16.h,{{ *}}v25.h) }
+#CHECK: 1c39d089 { v9.h=vmpyi(v16.h,v25.h) }
v9.h=vmpyi(v16.h,v25.h)
-#CHECK: 1c3ecc64 { v5:4.h = vmpa(v13:12.ub,{{ *}}v31:30.b) }
+#CHECK: 1c3ecc64 { v5:4.h=vmpa(v13:12.ub,v31:30.b) }
v5:4.h=vmpa(v13:12.ub,v31:30.b)
-#CHECK: 1c21ce54 { v21:20.w = vmpy(v14.h,{{ *}}v1.uh) }
+#CHECK: 1c21ce54 { v21:20.w=vmpy(v14.h,v1.uh) }
v21:20.w=vmpy(v14.h,v1.uh)
-#CHECK: 1cf2c6f0 { v17:16.h = vmpa(v7:6.ub,{{ *}}v19:18.ub) }
+#CHECK: 1cf2c6f0 { v17:16.h=vmpa(v7:6.ub,v19:18.ub) }
v17:16.h=vmpa(v7:6.ub,v19:18.ub)
-#CHECK: 1fcdc82b { v11.w = vmpyio(v8.w{{ *}},{{ *}}v13.h) }
+#CHECK: 1fcdc82b { v11.w=vmpyio(v8.w,v13.h) }
v11.w=vmpyio(v8.w,v13.h)
-#CHECK: 1fdeda10 { v16.w = vmpyie(v26.w{{ *}},{{ *}}v30.uh) }
+#CHECK: 1fdeda10 { v16.w=vmpyie(v26.w,v30.uh) }
v16.w=vmpyie(v26.w,v30.uh)
-#CHECK: 1ff2c2a6 { v6.w = vmpye(v2.w{{ *}},{{ *}}v18.uh) }
+#CHECK: 1ff2c2a6 { v6.w=vmpye(v2.w,v18.uh) }
v6.w=vmpye(v2.w,v18.uh)
-#CHECK: 1ff7cbfa { v26.w = vmpyo(v11.w{{ *}},{{ *}}v23.h):<<1:sat }
+#CHECK: 1ff7cbfa { v26.w=vmpyo(v11.w,v23.h):<<1:sat }
v26.w=vmpyo(v11.w,v23.h):<<1:sat
-#CHECK: 1f5cd411 { v17.w = vmpyo(v20.w{{ *}},{{ *}}v28.h):<<1:rnd:sat }
+#CHECK: 1f5cd411 { v17.w=vmpyo(v20.w,v28.h):<<1:rnd:sat }
v17.w=vmpyo(v20.w,v28.h):<<1:rnd:sat
-#CHECK: 1f71cf1d { v29.w = vmpyieo(v15.h{{ *}},{{ *}}v17.h) }
+#CHECK: 1f71cf1d { v29.w=vmpyieo(v15.h,v17.h) }
v29.w=vmpyieo(v15.h,v17.h)
Index: test/MC/Hexagon/v60lookup.s
===================================================================
--- test/MC/Hexagon/v60lookup.s
+++ test/MC/Hexagon/v60lookup.s
@@ -2,13 +2,13 @@
#RUN: llvm-objdump -triple=hexagon -mcpu=hexagonv60 -d - | \
#RUN: FileCheck %s
- V31.b = vlut32(V29.b, V15.b, R1)
-# CHECK: 1b79fd3f { v31.b = vlut32(v29.b,v15.b,r1) }
- V31.b |= vlut32(V29.b, V15.b, R2)
-# CHECK: 1b7afdbf { v31.b |= vlut32(v29.b,v15.b,r2) }
- V31:30.h = vlut16(V29.b, V15.h, R3)
-# CHECK: 1b7bfdde { v31:30.h = vlut16(v29.b,v15.h,r3) }
- v31:30.h |= vlut16(v2.b, v9.h, r4)
-# CHECK: 1b4ce2fe { v31:30.h |= vlut16(v2.b,v9.h,r4) }
- v31.w = vinsert(r4)
-# CHECK: 19a4e03f { v31.w = vinsert(r4) }
+ V31.b=vlut32(V29.b, V15.b, R1)
+# CHECK: 1b79fd3f { v31.b=vlut32(v29.b,v15.b,r1) }
+ V31.b|=vlut32(V29.b, V15.b, R2)
+# CHECK: 1b7afdbf { v31.b|=vlut32(v29.b,v15.b,r2) }
+ V31:30.h=vlut16(V29.b, V15.h, R3)
+# CHECK: 1b7bfdde { v31:30.h=vlut16(v29.b,v15.h,r3) }
+ v31:30.h|=vlut16(v2.b, v9.h, r4)
+# CHECK: 1b4ce2fe { v31:30.h|=vlut16(v2.b,v9.h,r4) }
+ v31.w=vinsert(r4)
+# CHECK: 19a4e03f { v31.w=vinsert(r4) }