Index: llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -103,20 +103,6 @@
   list fpinfo = [SCALAR_F16, SCALAR_F32, SCALAR_F64];
 }
-class getScalarSuffix {
-  string suffix = !cond(!eq(type, XLenVT): "VX",
-                        !eq(type, f16): "VF_F16",
-                        !eq(type, f32): "VF_F32",
-                        !eq(type, f64): "VF_F64");
-}
-
-class getWScalarSuffix {
-  string suffix = !cond(!eq(type, XLenVT): "WX",
-                        !eq(type, f16): "WF_F16",
-                        !eq(type, f32): "WF_F32",
-                        !eq(type, f64): "WF_F64");
-}
-
 class MxSet {
   list m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                  !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
@@ -186,6 +172,11 @@
 // The pattern fragment which produces the AVL operand, representing the
 // "natural" vector length for this type. For scalable vectors this is VLMax.
 OutPatFrag AVL = VLMax;
+
+  string ScalarSuffix = !cond(!eq(Scal, XLenVT) : "X",
+                              !eq(Scal, f16) : "F16",
+                              !eq(Scal, f32) : "F32",
+                              !eq(Scal, f64) : "F64");
 }
 class GroupVTypeInfo {
   foreach m = MxList.m in
     foreach f = FPList.fpinfo in
-      defm "_VF_" # f.FX : VPseudoBinary;
+      defm "_V" # f.FX : VPseudoBinary;
 }
 multiclass VPseudoBinaryV_VI {
@@ -1445,9 +1436,9 @@
 multiclass VPseudoBinaryW_VF {
   foreach m = MxList.m[0-5] in
     foreach f = FPList.fpinfo[0-1] in
-      defm "_VF_" # f.FX : VPseudoBinary;
+      defm "_V" # f.FX : VPseudoBinary;
 }
 multiclass VPseudoBinaryW_WV {
@@ -1465,9 +1456,9 @@
 multiclass VPseudoBinaryW_WF {
   foreach m = MxList.m[0-5] in
     foreach f = FPList.fpinfo[0-1] in
-      defm "_WF_" # f.FX : VPseudoBinary;
+      defm "_W" # f.FX : VPseudoBinary;
 }
 multiclass VPseudoBinaryV_WV {
@@ -1514,7 +1505,7 @@
 multiclass VPseudoBinaryV_FM {
   foreach m = MxList.m in
     foreach f = FPList.fpinfo in
-      def "_VFM_" # f.FX # "_" # m.MX :
+      def "_V" # f.FX # "M_" # m.MX :
        VPseudoBinaryCarryIn.R,
        m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">;
 }
@@ -1543,7 +1534,7 @@
   foreach m = MxList.m in {
     foreach f = FPList.fpinfo in {
       let VLMul = m.value in {
-        def "_F_" # f.FX # "_" # m.MX : VPseudoUnaryNoDummyMask;
+        def "_" # f.FX # "_" # m.MX : VPseudoUnaryNoDummyMask;
       }
     }
   }
@@ -1612,7 +1603,7 @@
 multiclass VPseudoBinaryM_VF {
   foreach m = MxList.m in
     foreach f = FPList.fpinfo in
-      defm "_VF_" # f.FX :
+      defm "_V" # f.FX :
        VPseudoBinary;
 }
@@ -1730,8 +1721,8 @@
 multiclass VPseudoTernaryV_VF_AAXA {
   foreach m = MxList.m in
     foreach f = FPList.fpinfo in
-      defm "_VF_" # f.FX : VPseudoTernary;
+      defm "_V" # f.FX : VPseudoTernary;
 }
 multiclass VPseudoTernaryW_VV {
@@ -1750,8 +1741,8 @@
   defvar constraint = "@earlyclobber $rd";
   foreach m = MxList.m[0-5] in
     foreach f = FPList.fpinfo[0-1] in
-      defm "_VF_" # f.FX : VPseudoTernary;
+      defm "_V" # f.FX : VPseudoTernary;
 }
 multiclass VPseudoTernaryV_VI {
@@ -2506,7 +2497,7 @@
 multiclass VPatBinaryV_VX vtilist> {
   foreach vti = vtilist in {
-    defvar kind = getScalarSuffix.suffix;
+    defvar kind = "V"#vti.ScalarSuffix;
     defm : VPatBinary.suffix;
+    defvar kind = "V"#Vti.ScalarSuffix;
     defm : VPatBinary.suffix;
+    defvar kind = "W"#Vti.ScalarSuffix;
     defm : VPatBinary.suffix;
+    defvar kind = "W"#Vti.ScalarSuffix;
     defm : VPatBinary vtilist = AllIntegerVectors> {
   foreach vti = vtilist in
     defm : VPatBinaryCarryIn vtilist> {
   foreach vti = vtilist in {
-    defvar kind = getScalarSuffix.suffix;
+    defvar kind = "V"#vti.ScalarSuffix;
     defm : VPatBinary vtilist> {
   foreach vti = vtilist in
     defm : VPatTernary.suffix,
+                      "V"#vti.ScalarSuffix,
                       vti.Vector, vti.Scalar, vti.Vector,
                       vti.Mask, vti.SEW, vti.LMul,
                       vti.RegClass, vti.ScalarRegClass, vti.RegClass>;
@@ -2873,7 +2861,7 @@
     defvar vti = vtiToWti.Vti;
     defvar wti = vtiToWti.Wti;
     defm : VPatTernary.suffix,
+                      "V"#vti.ScalarSuffix,
                       wti.Vector, vti.Scalar, vti.Vector,
                       vti.Mask, vti.SEW, vti.LMul,
                       wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
@@ -3654,14 +3642,14 @@
     foreach f = FPList.fpinfo in {
       let VLMul = m.value in {
         let HasSEWOp = 1, BaseInstr = VFMV_F_S in
-        def PseudoVFMV_F_S # "_" # f.FX # "_" # m.MX :
+        def "PseudoVFMV_" # f.FX # "_S_" # m.MX :
          Pseudo<(outs f.fprclass:$rd),
                 (ins m.vrclass:$rs2, ixlenimm:$sew),
                 []>, RISCVVPseudo;
        let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, WritesElement0 = 1,
            Constraints = "$rd = $rs1" in
-        def PseudoVFMV_S_F # "_" # f.FX #"_" # m.MX :
+        def "PseudoVFMV_S_" # f.FX # "_" # m.MX :
          Pseudo<(outs m.vrclass:$rd),
                 (ins m.vrclass:$rs1, f.fprclass:$rs2,
                      GPR:$vl, ixlenimm:$sew),
@@ -4177,10 +4165,7 @@
 def : Pat<(fvti.Vector (int_riscv_vfmv_v_f
                        (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl)),
-          (!cast("PseudoVFMV_V_F_" #
-                 !cond(!eq(fvti.Scalar, f16): "F16_",
-                       !eq(fvti.Scalar, f32): "F32_",
-                       !eq(fvti.Scalar, f64): "F64_") #
+          (!cast("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
                  fvti.LMul.MX)
            (fvti.Scalar fvti.ScalarRegClass:$rs2),
            (NoX0 GPR:$vl), fvti.SEW)>;
@@ -4340,20 +4325,14 @@
 let Predicates = [HasStdExtV, HasStdExtF] in {
 foreach fvti = AllFloatVectors in {
-  defvar instr = !cast("PseudoVFMV_F_S_" #
-                       !cond(!eq(fvti.Scalar, f16): "F16_",
-                             !eq(fvti.Scalar, f32): "F32_",
-                             !eq(fvti.Scalar, f64): "F64_") #
+  defvar instr = !cast("PseudoVFMV_"#fvti.ScalarSuffix#"_S_" #
                        fvti.LMul.MX);
   def : Pat<(fvti.Scalar (int_riscv_vfmv_f_s (fvti.Vector fvti.RegClass:$rs2))),
             (instr $rs2, fvti.SEW)>;
   def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
                          (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl)),
-            (!cast("PseudoVFMV_S_F_" #
-                   !cond(!eq(fvti.Scalar, f16): "F16_",
-                         !eq(fvti.Scalar, f32): "F32_",
-                         !eq(fvti.Scalar, f64): "F64_") #
+            (!cast("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" #
                    fvti.LMul.MX)
             (fvti.Vector $rs1),
             (fvti.Scalar fvti.ScalarRegClass:$rs2),
Index: llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -32,15 +32,6 @@
                            SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>>;
-class FromFPR32 {
-  dag ret = !cond(!eq(!cast(operand), !cast(FPR64)):
-                      (INSERT_SUBREG (IMPLICIT_DEF), input_dag, sub_32),
-                  !eq(!cast(operand), !cast(FPR16)):
-                      (EXTRACT_SUBREG input_dag, sub_16),
-                  !eq(1, 1):
-                      input_dag);
-}
-
 // Penalize the generic form with Complexity=1 to give the simm5/uimm5 variants
 // precedence
 def SplatPat : ComplexPattern;
@@ -48,18 +39,6 @@
 def SplatPat_simm5 : ComplexPattern;
 def SplatPat_uimm5 : ComplexPattern;
-// FIXME: We only model FPR32 for V instructions in RISCVInstrInfoV.td.
-// FP16/FP32/FP64 registers are alias each other. Convert FPR16 and FPR64
-// to FPR32 for V instructions is enough.
-class ToFPR32 {
-  dag ret = !cond(!eq(!cast(operand), !cast(FPR64)):
-                      (EXTRACT_SUBREG !dag(type, [FPR64], [name]), sub_32),
-                  !eq(!cast(operand), !cast(FPR16)):
-                      (SUBREG_TO_REG (i16 -1), !dag(type, [FPR16], [name]), sub_16),
-                  !eq(1, 1):
-                      !dag(type, [operand], [name]));
-}
-
 class SwapHelper {
   dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix);
 }
@@ -174,9 +153,9 @@
                       DAGOperand xop_kind> :
     Pat<(result_type (vop
                      (vop_type vop_reg_class:$rs1),
                      (vop_type (splat_vector xop_kind:$rs2)))),
-        (!cast(instruction_name#"_VF_F32_"#vlmul.MX)
+        (!cast(instruction_name#"_"#vlmul.MX)
                      vop_reg_class:$rs1,
-                     ToFPR32.ret,
+                     (xop_type xop_kind:$rs2),
                      avl, sew)>;
 multiclass VPatBinaryFPSDNode_VV_VF {
@@ -184,7 +163,7 @@
     def : VPatBinarySDNode_VV;
-    def : VPatBinarySDNode_VF;
@@ -195,9 +174,9 @@
   foreach fvti = AllFloatVectors in
     def : Pat<(fvti.Vector (vop (fvti.Vector (splat_vector fvti.Scalar:$rs2)),
                                 (fvti.Vector fvti.RegClass:$rs1))),
-              (!cast(instruction_name#"_VF_F32_"#fvti.LMul.MX)
+              (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                                 fvti.RegClass:$rs1,
-                                ToFPR32.ret,
+                                (fvti.Scalar fvti.ScalarRegClass:$rs2),
                                 fvti.AVL, fvti.SEW)>;
 }
@@ -275,9 +254,9 @@
     def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                 (fvti.Vector (splat_vector fvti.ScalarRegClass:$rs2)),
                                 cc)),
-              (!cast(instruction_name#"_VF_F32_"#fvti.LMul.MX)
+              (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                                 fvti.RegClass:$rs1,
-                                ToFPR32.ret,
+                                (fvti.Scalar fvti.ScalarRegClass:$rs2),
                                 fvti.AVL, fvti.SEW)>;
 }
@@ -286,9 +265,9 @@
     def : Pat<(fvti.Mask (setcc (fvti.Vector (splat_vector fvti.ScalarRegClass:$rs2)),
                                 (fvti.Vector fvti.RegClass:$rs1), cc)),
-              (!cast(swapped_op_instruction_name#"_VF_F32_"#fvti.LMul.MX)
+              (!cast(swapped_op_instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                                 fvti.RegClass:$rs1,
-                                ToFPR32.ret,
+                                (fvti.Scalar fvti.ScalarRegClass:$rs2),
                                 fvti.AVL, fvti.SEW)>;
 }
@@ -499,9 +478,9 @@
     def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
                                     (splat_vector fvti.ScalarRegClass:$rs1),
                                     fvti.RegClass:$rs2)),
-              (!cast("PseudoVFMERGE_VFM_F32_"#fvti.LMul.MX)
+              (!cast("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                                     fvti.RegClass:$rs2,
-                                    ToFPR32.ret,
+                                    (fvti.Scalar fvti.ScalarRegClass:$rs1),
                                     VMV0:$vm, fvti.AVL, fvti.SEW)>;
     def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
@@ -550,8 +529,8 @@
 let Predicates = [HasStdExtV, HasStdExtF] in {
   foreach fvti = AllFloatVectors in {
     def : Pat<(fvti.Vector (splat_vector fvti.ScalarRegClass:$rs1)),
-              (!cast("PseudoVFMV_V_F_F32_"#fvti.LMul.MX)
-                  ToFPR32.ret,
+              (!cast("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
+                  (fvti.Scalar fvti.ScalarRegClass:$rs1),
                   fvti.AVL, fvti.SEW)>;
     def : Pat<(fvti.Vector (splat_vector (fvti.Scalar fpimm0))),
@@ -582,22 +561,27 @@
   defvar extractelt_node = !if(IsFloat, extractelt, riscv_extract_vector_elt);
   foreach vti = vtilist in {
     defvar MX = vti.LMul.MX;
-    defvar vmv_xf_s_inst = !cast(!if(IsFloat, "PseudoVFMV_F_S_",
-                                     "PseudoVMV_X_S_")#MX);
-    defvar vmv_s_xf_inst = !cast(!if(IsFloat, "PseudoVFMV_S_F_",
-                                     "PseudoVMV_S_X_")#MX);
+    defvar vmv_xf_s_inst = !cast(!strconcat("PseudoV",
+                                            !if(IsFloat, "F", ""),
+                                            "MV_",
+                                            vti.ScalarSuffix,
+                                            "_S_", MX));
+    defvar vmv_s_xf_inst = !cast(!strconcat("PseudoV",
+                                            !if(IsFloat, "F", ""),
+                                            "MV_S_",
+                                            vti.ScalarSuffix,
+                                            "_", MX));
    // Only pattern-match insert/extract-element operations where the index is
    // 0. Any other index will have been custom-lowered to slide the vector
    // correctly into place (and, in the case of insert, slide it back again
    // afterwards).
def : Pat<(vti.Scalar (extractelt_node (vti.Vector vti.RegClass:$rs2), 0)), - FromFPR32.ret>; + (vmv_xf_s_inst vti.RegClass:$rs2, vti.SEW)>; def : Pat<(vti.Vector (insertelt_node (vti.Vector vti.RegClass:$merge), vti.ScalarRegClass:$rs1, 0)), (vmv_s_xf_inst vti.RegClass:$merge, - ToFPR32.ret, + (vti.Scalar vti.ScalarRegClass:$rs1), vti.AVL, vti.SEW)>; } } Index: llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll +++ llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll @@ -7,7 +7,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -19,7 +18,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu ; CHECK-NEXT: vslidedown.vi v25, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v25 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -31,7 +29,6 @@ ; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu ; CHECK-NEXT: vslidedown.vx v25, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v25 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -42,7 +39,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -54,7 +50,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu ; CHECK-NEXT: vslidedown.vi v25, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v25 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -66,7 +61,6 @@ ; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu ; CHECK-NEXT: vslidedown.vx v25, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v25 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -77,7 +71,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -89,7 +82,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vslidedown.vi v25, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v25 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -101,7 +93,6 @@ ; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu ; CHECK-NEXT: vslidedown.vx v25, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v25 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -112,7 +103,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -124,7 +114,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vslidedown.vi v26, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v26 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -136,7 +125,6 @@ ; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu ; CHECK-NEXT: vslidedown.vx v26, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v26 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; 
CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -147,7 +135,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -159,7 +146,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vslidedown.vi v28, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v28 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -171,7 +157,6 @@ ; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu ; CHECK-NEXT: vslidedown.vx v28, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v28 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -182,7 +167,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -194,7 +178,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -206,7 +189,6 @@ ; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r Index: llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll +++ llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll @@ -7,7 +7,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -19,7 +18,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu ; CHECK-NEXT: vslidedown.vi v25, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v25 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -31,7 +29,6 @@ ; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu ; CHECK-NEXT: vslidedown.vx v25, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v25 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -42,7 +39,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -54,7 +50,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu ; CHECK-NEXT: vslidedown.vi v25, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v25 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -66,7 +61,6 @@ ; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu ; CHECK-NEXT: vslidedown.vx v25, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v25 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -77,7 +71,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ 
-89,7 +82,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vslidedown.vi v25, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v25 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -101,7 +93,6 @@ ; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu ; CHECK-NEXT: vslidedown.vx v25, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v25 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -112,7 +103,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -124,7 +114,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vslidedown.vi v26, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v26 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -136,7 +125,6 @@ ; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu ; CHECK-NEXT: vslidedown.vx v26, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v26 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -147,7 +135,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -159,7 +146,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vslidedown.vi v28, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v28 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -171,7 +157,6 @@ ; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu ; CHECK-NEXT: vslidedown.vx v28, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v28 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -182,7 +167,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -194,7 +178,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -206,7 +189,6 @@ ; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: # kill: def $f10_h killed $f10_h killed $f10_f ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r Index: llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll +++ llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll @@ -5,7 +5,6 @@ define @insertelt_nxv1f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv1f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret @@ -16,7 +15,6 @@ define @insertelt_nxv1f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv1f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu ; CHECK-NEXT: vslidedown.vi v25, v8, 3 ; 
CHECK-NEXT: vfmv.s.f v25, fa0 @@ -30,7 +28,6 @@ define @insertelt_nxv1f16_idx( %v, half %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv1f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu ; CHECK-NEXT: vslidedown.vx v25, v8, a0 ; CHECK-NEXT: vfmv.s.f v25, fa0 @@ -44,7 +41,6 @@ define @insertelt_nxv2f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv2f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret @@ -55,7 +51,6 @@ define @insertelt_nxv2f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv2f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu ; CHECK-NEXT: vslidedown.vi v25, v8, 3 ; CHECK-NEXT: vfmv.s.f v25, fa0 @@ -69,7 +64,6 @@ define @insertelt_nxv2f16_idx( %v, half %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv2f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu ; CHECK-NEXT: vslidedown.vx v25, v8, a0 ; CHECK-NEXT: vfmv.s.f v25, fa0 @@ -83,7 +77,6 @@ define @insertelt_nxv4f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv4f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret @@ -94,7 +87,6 @@ define @insertelt_nxv4f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv4f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vslidedown.vi v25, v8, 3 ; CHECK-NEXT: vfmv.s.f v25, fa0 @@ -108,7 +100,6 @@ define @insertelt_nxv4f16_idx( %v, half %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv4f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu ; CHECK-NEXT: vslidedown.vx v25, v8, a0 ; CHECK-NEXT: vfmv.s.f v25, fa0 @@ -122,7 +113,6 @@ define @insertelt_nxv8f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv8f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret @@ -133,7 +123,6 @@ define @insertelt_nxv8f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv8f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vslidedown.vi v26, v8, 3 ; CHECK-NEXT: vfmv.s.f v26, fa0 @@ -147,7 +136,6 @@ define @insertelt_nxv8f16_idx( %v, half %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv8f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu ; CHECK-NEXT: vslidedown.vx v26, v8, a0 ; CHECK-NEXT: vfmv.s.f v26, fa0 @@ -161,7 +149,6 @@ define @insertelt_nxv16f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv16f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret @@ -172,7 +159,6 @@ define @insertelt_nxv16f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv16f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vslidedown.vi v28, v8, 3 ; CHECK-NEXT: vfmv.s.f v28, fa0 
@@ -186,7 +172,6 @@ define @insertelt_nxv16f16_idx( %v, half %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv16f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu ; CHECK-NEXT: vslidedown.vx v28, v8, a0 ; CHECK-NEXT: vfmv.s.f v28, fa0 @@ -200,7 +185,6 @@ define @insertelt_nxv32f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv32f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret @@ -211,7 +195,6 @@ define @insertelt_nxv32f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv32f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu ; CHECK-NEXT: vslidedown.vi v16, v8, 3 ; CHECK-NEXT: vfmv.s.f v16, fa0 @@ -225,7 +208,6 @@ define @insertelt_nxv32f16_idx( %v, half %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv32f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu ; CHECK-NEXT: vslidedown.vx v16, v8, a0 ; CHECK-NEXT: vfmv.s.f v16, fa0 Index: llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll +++ llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll @@ -5,7 +5,6 @@ define @insertelt_nxv1f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv1f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret @@ -16,7 +15,6 @@ define @insertelt_nxv1f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv1f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu ; CHECK-NEXT: vslidedown.vi v25, v8, 3 ; CHECK-NEXT: vfmv.s.f v25, fa0 @@ -30,7 +28,6 @@ define @insertelt_nxv1f16_idx( %v, half %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv1f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu ; CHECK-NEXT: vslidedown.vx v25, v8, a0 ; CHECK-NEXT: vfmv.s.f v25, fa0 @@ -44,7 +41,6 @@ define @insertelt_nxv2f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv2f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret @@ -55,7 +51,6 @@ define @insertelt_nxv2f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv2f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu ; CHECK-NEXT: vslidedown.vi v25, v8, 3 ; CHECK-NEXT: vfmv.s.f v25, fa0 @@ -69,7 +64,6 @@ define @insertelt_nxv2f16_idx( %v, half %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv2f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu ; CHECK-NEXT: vslidedown.vx v25, v8, a0 ; CHECK-NEXT: vfmv.s.f v25, fa0 @@ -83,7 +77,6 @@ define @insertelt_nxv4f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv4f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret @@ -94,7 +87,6 @@ define @insertelt_nxv4f16_imm( %v, half %elt) { ; CHECK-LABEL: 
insertelt_nxv4f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vslidedown.vi v25, v8, 3 ; CHECK-NEXT: vfmv.s.f v25, fa0 @@ -108,7 +100,6 @@ define @insertelt_nxv4f16_idx( %v, half %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv4f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu ; CHECK-NEXT: vslidedown.vx v25, v8, a0 ; CHECK-NEXT: vfmv.s.f v25, fa0 @@ -122,7 +113,6 @@ define @insertelt_nxv8f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv8f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret @@ -133,7 +123,6 @@ define @insertelt_nxv8f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv8f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vslidedown.vi v26, v8, 3 ; CHECK-NEXT: vfmv.s.f v26, fa0 @@ -147,7 +136,6 @@ define @insertelt_nxv8f16_idx( %v, half %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv8f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu ; CHECK-NEXT: vslidedown.vx v26, v8, a0 ; CHECK-NEXT: vfmv.s.f v26, fa0 @@ -161,7 +149,6 @@ define @insertelt_nxv16f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv16f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret @@ -172,7 +159,6 @@ define @insertelt_nxv16f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv16f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vslidedown.vi v28, v8, 3 ; CHECK-NEXT: vfmv.s.f v28, fa0 @@ -186,7 +172,6 @@ define @insertelt_nxv16f16_idx( %v, half %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv16f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu ; CHECK-NEXT: vslidedown.vx v28, v8, a0 ; CHECK-NEXT: vfmv.s.f v28, fa0 @@ -200,7 +185,6 @@ define @insertelt_nxv32f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv32f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret @@ -211,7 +195,6 @@ define @insertelt_nxv32f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv32f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu ; CHECK-NEXT: vslidedown.vi v16, v8, 3 ; CHECK-NEXT: vfmv.s.f v16, fa0 @@ -225,7 +208,6 @@ define @insertelt_nxv32f16_idx( %v, half %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv32f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu ; CHECK-NEXT: vslidedown.vx v16, v8, a0 ; CHECK-NEXT: vfmv.s.f v16, fa0 Index: llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll +++ llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll @@ -18,7 +18,6 @@ define @fcmp_oeq_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_oeq_vf_nxv8f16: ; CHECK: # %bb.0: -; 
CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -31,7 +30,6 @@ define @fcmp_oeq_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_oeq_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -54,7 +52,6 @@ define @fcmp_oeq_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_oeq_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -77,7 +74,6 @@ define @fcmp_ogt_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ogt_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -90,7 +86,6 @@ define @fcmp_ogt_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ogt_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -113,7 +108,6 @@ define @fcmp_ogt_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ogt_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -136,7 +130,6 @@ define @fcmp_oge_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_oge_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -149,7 +142,6 @@ define @fcmp_oge_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_oge_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -172,7 +164,6 @@ define @fcmp_oge_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_oge_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -195,7 +186,6 @@ define @fcmp_olt_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_olt_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -208,7 +198,6 @@ define @fcmp_olt_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_olt_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -231,7 +220,6 @@ define @fcmp_olt_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_olt_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -254,7 +242,6 @@ define @fcmp_ole_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ole_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -267,7 +254,6 @@ define 
@fcmp_ole_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ole_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -290,7 +276,6 @@ define @fcmp_ole_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ole_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -316,7 +301,6 @@ define @fcmp_one_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_one_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 @@ -332,7 +316,6 @@ define @fcmp_one_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_one_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 @@ -358,7 +341,6 @@ define @fcmp_one_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_one_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -384,7 +366,6 @@ define @fcmp_ord_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ord_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v25, v26, fa0 @@ -401,7 +382,6 @@ define @fcmp_ord_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ord_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v25, v26, fa0 @@ -431,7 +411,6 @@ define @fcmp_ord_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ord_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v25, v26, fa0 @@ -461,7 +440,6 @@ define @fcmp_ueq_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ueq_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 @@ -477,7 +455,6 @@ define @fcmp_ueq_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ueq_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 @@ -503,7 +480,6 @@ define @fcmp_ueq_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ueq_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -529,7 +505,6 @@ define @fcmp_ugt_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ugt_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -545,7 +520,6 @@ define @fcmp_ugt_fv_nxv8f16( 
%va, half %b) { ; CHECK-LABEL: fcmp_ugt_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -571,7 +545,6 @@ define @fcmp_ugt_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ugt_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -597,7 +570,6 @@ define @fcmp_uge_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_uge_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -613,7 +585,6 @@ define @fcmp_uge_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_uge_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -639,7 +610,6 @@ define @fcmp_uge_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_uge_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -665,7 +635,6 @@ define @fcmp_ult_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ult_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -681,7 +650,6 @@ define @fcmp_ult_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ult_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -707,7 +675,6 @@ define @fcmp_ult_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ult_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -733,7 +700,6 @@ define @fcmp_ule_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ule_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -749,7 +715,6 @@ define @fcmp_ule_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ule_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -775,7 +740,6 @@ define @fcmp_ule_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ule_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -798,7 +762,6 @@ define @fcmp_une_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_une_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -811,7 +774,6 @@ define 
@fcmp_une_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_une_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -834,7 +796,6 @@ define @fcmp_une_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_une_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -860,7 +821,6 @@ define @fcmp_uno_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_uno_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfne.vf v25, v26, fa0 @@ -877,7 +837,6 @@ define @fcmp_uno_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_uno_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfne.vf v25, v26, fa0 @@ -907,7 +866,6 @@ define @fcmp_uno_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_uno_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfne.vf v25, v26, fa0 Index: llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll +++ llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll @@ -18,7 +18,6 @@ define @fcmp_oeq_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_oeq_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -31,7 +30,6 @@ define @fcmp_oeq_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_oeq_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -54,7 +52,6 @@ define @fcmp_oeq_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_oeq_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -77,7 +74,6 @@ define @fcmp_ogt_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ogt_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -90,7 +86,6 @@ define @fcmp_ogt_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ogt_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -113,7 +108,6 @@ define @fcmp_ogt_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ogt_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -136,7 +130,6 @@ define @fcmp_oge_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_oge_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, 
e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -149,7 +142,6 @@ define @fcmp_oge_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_oge_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -172,7 +164,6 @@ define @fcmp_oge_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_oge_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -195,7 +186,6 @@ define @fcmp_olt_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_olt_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -208,7 +198,6 @@ define @fcmp_olt_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_olt_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -231,7 +220,6 @@ define @fcmp_olt_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_olt_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -254,7 +242,6 @@ define @fcmp_ole_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ole_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -267,7 +254,6 @@ define @fcmp_ole_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ole_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -290,7 +276,6 @@ define @fcmp_ole_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ole_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -316,7 +301,6 @@ define @fcmp_one_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_one_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 @@ -332,7 +316,6 @@ define @fcmp_one_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_one_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 @@ -358,7 +341,6 @@ define @fcmp_one_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_one_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -384,7 +366,6 @@ define @fcmp_ord_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ord_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v25, v26, fa0 @@ -401,7 +382,6 @@ define @fcmp_ord_fv_nxv8f16( %va, 
half %b) { ; CHECK-LABEL: fcmp_ord_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v25, v26, fa0 @@ -431,7 +411,6 @@ define @fcmp_ord_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ord_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v25, v26, fa0 @@ -461,7 +440,6 @@ define @fcmp_ueq_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ueq_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 @@ -477,7 +455,6 @@ define @fcmp_ueq_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ueq_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 @@ -503,7 +480,6 @@ define @fcmp_ueq_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ueq_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -529,7 +505,6 @@ define @fcmp_ugt_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ugt_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -545,7 +520,6 @@ define @fcmp_ugt_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ugt_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -571,7 +545,6 @@ define @fcmp_ugt_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ugt_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -597,7 +570,6 @@ define @fcmp_uge_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_uge_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -613,7 +585,6 @@ define @fcmp_uge_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_uge_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -639,7 +610,6 @@ define @fcmp_uge_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_uge_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -665,7 +635,6 @@ define @fcmp_ult_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ult_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -681,7 +650,6 @@ 
define @fcmp_ult_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ult_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -707,7 +675,6 @@ define @fcmp_ult_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ult_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -733,7 +700,6 @@ define @fcmp_ule_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ule_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -749,7 +715,6 @@ define @fcmp_ule_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ule_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu @@ -775,7 +740,6 @@ define @fcmp_ule_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ule_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -798,7 +762,6 @@ define @fcmp_une_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_une_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -811,7 +774,6 @@ define @fcmp_une_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_une_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -834,7 +796,6 @@ define @fcmp_une_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_une_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret @@ -860,7 +821,6 @@ define @fcmp_uno_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_uno_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfne.vf v25, v26, fa0 @@ -877,7 +837,6 @@ define @fcmp_uno_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_uno_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfne.vf v25, v26, fa0 @@ -907,7 +866,6 @@ define @fcmp_uno_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_uno_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfne.vf v25, v26, fa0 Index: llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv32.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv32.ll +++ llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv32.ll @@ -15,7 +15,6 @@ define @vfadd_vf_nxv1f16( %va, half %b) { ; CHECK-LABEL: vfadd_vf_nxv1f16: ; CHECK: # %bb.0: 
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -38,7 +37,6 @@
 define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -61,7 +59,6 @@
 define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -84,7 +81,6 @@
 define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -97,7 +93,6 @@
 define <vscale x 8 x half> @vfadd_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_fv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -120,7 +115,6 @@
 define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_vf_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -143,7 +137,6 @@
 define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
Index: llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv64.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv64.ll
+++ llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv64.ll
@@ -15,7 +15,6 @@
 define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_vf_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -38,7 +37,6 @@
 define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -61,7 +59,6 @@
 define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -84,7 +81,6 @@
 define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -97,7 +93,6 @@
 define <vscale x 8 x half> @vfadd_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_fv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -120,7 +115,6 @@
 define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_vf_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -143,7 +137,6 @@
 define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
 ; CHECK-LABEL: vfadd_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfadd.vf v8, v8, fa0
 ; CHECK-NEXT: ret
Index: llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv32.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv32.ll
+++ llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv32.ll
@@ -15,7 +15,6 @@
 define <vscale x 1 x half> @vfdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_vf_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -38,7 +37,6 @@
 define <vscale x 2 x half> @vfdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -61,7 +59,6 @@
 define <vscale x 4 x half> @vfdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -84,7 +81,6 @@
 define <vscale x 8 x half> @vfdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -97,7 +93,6 @@
 define <vscale x 8 x half> @vfdiv_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_fv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -120,7 +115,6 @@
 define <vscale x 16 x half> @vfdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_vf_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -143,7 +137,6 @@
 define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
Index: llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv64.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv64.ll
+++ llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv64.ll
@@ -15,7 +15,6 @@
 define <vscale x 1 x half> @vfdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_vf_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -38,7 +37,6 @@
 define <vscale x 2 x half> @vfdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -61,7 +59,6 @@
 define <vscale x 4 x half> @vfdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -84,7 +81,6 @@
 define <vscale x 8 x half> @vfdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -97,7 +93,6 @@
 define <vscale x 8 x half> @vfdiv_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_fv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -120,7 +115,6 @@
 define <vscale x 16 x half> @vfdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_vf_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -143,7 +137,6 @@
 define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
 ; CHECK-LABEL: vfdiv_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT: ret
Index: llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv32.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv32.ll
+++ llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv32.ll
@@ -15,7 +15,6 @@
 define <vscale x 1 x half> @vfmul_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_vf_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -38,7 +37,6 @@
 define <vscale x 2 x half> @vfmul_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -61,7 +59,6 @@
 define <vscale x 4 x half> @vfmul_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -84,7 +81,6 @@
 define <vscale x 8 x half> @vfmul_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -97,7 +93,6 @@
 define <vscale x 8 x half> @vfmul_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_fv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -120,7 +115,6 @@
 define <vscale x 16 x half> @vfmul_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_vf_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -143,7 +137,6 @@
 define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
Index: llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv64.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv64.ll
+++ llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv64.ll
@@ -15,7 +15,6 @@
 define <vscale x 1 x half> @vfmul_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_vf_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -38,7 +37,6 @@
 define <vscale x 2 x half> @vfmul_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -61,7 +59,6 @@
 define <vscale x 4 x half> @vfmul_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -84,7 +81,6 @@
 define <vscale x 8 x half> @vfmul_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -97,7 +93,6 @@
 define <vscale x 8 x half> @vfmul_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_fv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -120,7 +115,6 @@
 define <vscale x 16 x half> @vfmul_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_vf_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -143,7 +137,6 @@
 define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
 ; CHECK-LABEL: vfmul_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfmul.vf v8, v8, fa0
 ; CHECK-NEXT: ret
Index: llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv32.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv32.ll
+++ llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv32.ll
@@ -15,7 +15,6 @@
 define <vscale x 1 x half> @vfsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_vf_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -38,7 +37,6 @@
 define <vscale x 2 x half> @vfsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -61,7 +59,6 @@
 define <vscale x 4 x half> @vfsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -84,7 +81,6 @@
 define <vscale x 8 x half> @vfsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -97,7 +93,6 @@
 define <vscale x 8 x half> @vfsub_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_fv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -120,7 +115,6 @@
 define <vscale x 16 x half> @vfsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_vf_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -143,7 +137,6 @@
 define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
Index: llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv64.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv64.ll
+++ llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv64.ll
@@ -15,7 +15,6 @@
 define <vscale x 1 x half> @vfsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_vf_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -38,7 +37,6 @@
 define <vscale x 2 x half> @vfsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_vf_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -61,7 +59,6 @@
 define <vscale x 4 x half> @vfsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_vf_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -84,7 +81,6 @@
 define <vscale x 8 x half> @vfsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_vf_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -97,7 +93,6 @@
 define <vscale x 8 x half> @vfsub_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_fv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfrsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -120,7 +115,6 @@
 define <vscale x 16 x half> @vfsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_vf_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
@@ -143,7 +137,6 @@
 define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
 ; CHECK-LABEL: vfsub_vf_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfsub.vf v8, v8, fa0
 ; CHECK-NEXT: ret
Index: llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
+++ llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
@@ -15,7 +15,6 @@
 define <vscale x 1 x half> @vfmerge_fv_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_fv_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
 ; CHECK-NEXT: ret
@@ -38,7 +37,6 @@
 define <vscale x 2 x half> @vfmerge_fv_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_fv_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
 ; CHECK-NEXT: ret
@@ -61,7 +59,6 @@
 define <vscale x 4 x half> @vfmerge_fv_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_fv_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
 ; CHECK-NEXT: ret
@@ -84,7 +81,6 @@
 define <vscale x 8 x half> @vfmerge_fv_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_fv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
 ; CHECK-NEXT: ret
@@ -119,7 +115,6 @@
 define <vscale x 16 x half> @vfmerge_fv_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_fv_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
 ; CHECK-NEXT: ret
@@ -142,7 +137,6 @@
 define <vscale x 32 x half> @vfmerge_fv_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_fv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
 ; CHECK-NEXT: ret
Index: llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
+++ llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
@@ -15,7 +15,6 @@
 define <vscale x 1 x half> @vfmerge_fv_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_fv_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
 ; CHECK-NEXT: ret
@@ -38,7 +37,6 @@
 define <vscale x 2 x half> @vfmerge_fv_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_fv_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
 ; CHECK-NEXT: ret
@@ -61,7 +59,6 @@
 define <vscale x 4 x half> @vfmerge_fv_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_fv_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
 ; CHECK-NEXT: ret
@@ -84,7 +81,6 @@
 define <vscale x 8 x half> @vfmerge_fv_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_fv_nxv8f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
 ; CHECK-NEXT: ret
@@ -119,7 +115,6 @@
 define <vscale x 16 x half> @vfmerge_fv_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_fv_nxv16f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
 ; CHECK-NEXT: ret
@@ -142,7 +137,6 @@
 define <vscale x 32 x half> @vfmerge_fv_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_fv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
 ; CHECK-NEXT: ret
Index: llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
+++ llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
@@ -7,14 +7,12 @@
 define <vscale x 8 x half> @vsplat_nxv8f16(half %f) {
 ; RV32V-LABEL: vsplat_nxv8f16:
 ; RV32V: # %bb.0:
-; RV32V-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; RV32V-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; RV32V-NEXT: vfmv.v.f v8, fa0
 ; RV32V-NEXT: ret
 ;
 ; RV64V-LABEL: vsplat_nxv8f16:
 ; RV64V: # %bb.0:
-; RV64V-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
 ; RV64V-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; RV64V-NEXT: vfmv.v.f v8, fa0
 ; RV64V-NEXT: ret