diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -90,6 +90,33 @@
   list<LMULInfo> m = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
 }
 
+class FPR_Info<RegisterClass regclass, string fx> {
+  RegisterClass fprclass = regclass;
+  string FX = fx;
+}
+
+def SCALAR_F16 : FPR_Info<FPR16, "F16">;
+def SCALAR_F32 : FPR_Info<FPR32, "F32">;
+def SCALAR_F64 : FPR_Info<FPR64, "F64">;
+
+def FPList {
+  list<FPR_Info> fpinfo = [SCALAR_F16, SCALAR_F32, SCALAR_F64];
+}
+
+class getScalarSuffix<ValueType type> {
+  string suffix = !cond(!eq(type, XLenVT): "VX",
+                        !eq(type, f16): "VF_F16",
+                        !eq(type, f32): "VF_F32",
+                        !eq(type, f64): "VF_F64");
+}
+
+class getWScalarSuffix<ValueType type> {
+  string suffix = !cond(!eq(type, XLenVT): "WX",
+                        !eq(type, f16): "WF_F16",
+                        !eq(type, f32): "WF_F32",
+                        !eq(type, f64): "WF_F64");
+}
+
 class MxSet<int eew> {
   list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                            !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
@@ -135,18 +162,6 @@
 // List of EEW.
 defvar EEWList = [8, 16, 32, 64];
 
-// We only model FPR32 for V instructions in RISCVInstrInfoV.td.
-// FP16/FP32/FP64 registers are alias each other. Convert FPR16 and FPR64
-// to FPR32 for V instructions is enough.
-class ToFPR32<ValueType type, DAGOperand operand, string name> {
-  dag ret = !cond(!eq(!cast<string>(operand), !cast<string>(FPR64)):
-                    (EXTRACT_SUBREG !dag(type, [FPR64], [name]), sub_32),
-                  !eq(!cast<string>(operand), !cast<string>(FPR16)):
-                    (SUBREG_TO_REG (i16 -1), !dag(type, [FPR16], [name]), sub_16),
-                  !eq(1, 1):
-                    !dag(type, [operand], [name]));
-}
-
 class SegRegClass<LMULInfo m, int nf> {
   VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
                                            !eq(m.value, V_MF4.value): V_M1.MX,
@@ -452,7 +467,10 @@
                    !subst("_B32", "",
                    !subst("_B64", "",
                    !subst("_MASK", "",
-                   !subst("Pseudo", "", PseudoInst))))))))))))))));
+                   !subst("_F16", "",
+                   !subst("_F32", "",
+                   !subst("_F64", "",
+                   !subst("Pseudo", "", PseudoInst)))))))))))))))))));
 }
 
 class ToLowerCase<string Upper> {
@@ -1382,10 +1400,16 @@
   }
 }
 
-multiclass VPseudoBinaryV_VX<bit IsFloat, string Constraint = ""> {
+multiclass VPseudoBinaryV_VX<string Constraint = ""> {
+  foreach m = MxList.m in
+    defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>;
+}
+
+multiclass VPseudoBinaryV_VF<string Constraint = ""> {
   foreach m = MxList.m in
-    defm !if(IsFloat, "_VF", "_VX") : VPseudoBinary<m.vrclass, m.vrclass,
-                                                    !if(IsFloat, FPR32, GPR),
-                                                    m, Constraint>;
+    foreach f = FPList.fpinfo in
+      defm "_VF_" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
+                                         f.fprclass, m, Constraint>;
 }
 
 multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
@@ -1413,11 +1437,18 @@
                              "@earlyclobber $rd">;
 }
 
-multiclass VPseudoBinaryW_VX<bit IsFloat = 0> {
+multiclass VPseudoBinaryW_VX {
   foreach m = MxList.m[0-5] in
-    defm !if(IsFloat, "_VF", "_VX") : VPseudoBinary<m.wvrclass, m.vrclass,
-                                                    !if(IsFloat, FPR32, GPR), m,
-                                                    "@earlyclobber $rd">;
+    defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
+                               "@earlyclobber $rd">;
+}
+
+multiclass VPseudoBinaryW_VF {
+  foreach m = MxList.m[0-5] in
+    foreach f = FPList.fpinfo[0-1] in
+      defm "_VF_" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
+                                         f.fprclass, m,
+                                         "@earlyclobber $rd">;
 }
 
 multiclass VPseudoBinaryW_WV {
@@ -1426,11 +1457,18 @@
                              "@earlyclobber $rd">;
 }
 
-multiclass VPseudoBinaryW_WX<bit IsFloat = 0> {
+multiclass VPseudoBinaryW_WX {
   foreach m = MxList.m[0-5] in
-    defm !if(IsFloat, "_WF", "_WX") : VPseudoBinary<m.wvrclass, m.wvrclass,
-                                                    !if(IsFloat, FPR32, GPR), m>;
+    defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m>;
+}
+
+multiclass VPseudoBinaryW_WF {
+  foreach m = MxList.m[0-5] in
+    foreach f = FPList.fpinfo[0-1] in
+      defm "_WF_" # f.FX : VPseudoBinary<m.wvrclass, m.wvrclass,
+                                         f.fprclass, m>;
 }
 
 multiclass VPseudoBinaryV_WV {
@@ -1465,14 +1503,21 @@
 }
 
 multiclass VPseudoBinaryV_XM<bit CarryIn, string CarryOut = "",
-                             bit IsFloat = 0, string Constraint = ""> {
+                             string Constraint = ""> {
   foreach m = MxList.m in
-    def !if(IsFloat, "_VF", "_VX") # !if(CarryIn, "M", "") # "_" # m.MX :
+    def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
       VPseudoBinaryCarryIn<!if(CarryIn, GetVRegNoV0<m.vrclass>.R, m.vrclass),
-                           m.vrclass, !if(IsFloat, FPR32, GPR),
-                           m, CarryIn, Constraint>;
+                           m.vrclass, GPR, m, CarryIn, Constraint>;
+}
+
+multiclass VPseudoBinaryV_FM {
+  foreach m = MxList.m in
+    foreach f = FPList.fpinfo in
+      def "_VFM_" # f.FX # "_" # m.MX :
+        VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
+                             m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">;
 }
 
 multiclass VPseudoBinaryV_IM<bit CarryIn, string CarryOut = "",
@@ ... @@
 multiclass VPseudoUnaryV_F_NoDummyMask {
   foreach m = MxList.m in {
-    let VLMul = m.value in {
-      def "_F_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, FPR32>;
+    foreach f = FPList.fpinfo in {
+      let VLMul = m.value in {
+        def "_F_" # f.FX # "_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass,
+                                                                f.fprclass>;
+      }
     }
   }
 }
@@ -1557,11 +1604,17 @@
     defm _VV : VPseudoBinary<VR, m.vrclass, m.vrclass, m>;
 }
 
-multiclass VPseudoBinaryM_VX<bit IsFloat = 0> {
+multiclass VPseudoBinaryM_VX {
+  foreach m = MxList.m in
+    defm "_VX" :
+      VPseudoBinary<VR, m.vrclass, GPR, m>;
+}
+
+multiclass VPseudoBinaryM_VF {
   foreach m = MxList.m in
-    defm !if(IsFloat, "_VF", "_VX") :
-      VPseudoBinary<VR, m.vrclass, !if(IsFloat, FPR32, GPR), m>;
+    foreach f = FPList.fpinfo in
+      defm "_VF_" # f.FX :
+        VPseudoBinary<VR, m.vrclass, f.fprclass, m>;
 }
 
 multiclass VPseudoBinaryM_VI {
@@ -1571,28 +1624,43 @@
 }
 
 multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5> {
   defm "" : VPseudoBinaryV_VV;
-  defm "" : VPseudoBinaryV_VX</*IsFloat=*/0>;
+  defm "" : VPseudoBinaryV_VX;
   defm "" : VPseudoBinaryV_VI<ImmType>;
 }
 
-multiclass VPseudoBinaryV_VV_VX<bit IsFloat = 0> {
+multiclass VPseudoBinaryV_VV_VX {
+  defm "" : VPseudoBinaryV_VV;
+  defm "" : VPseudoBinaryV_VX;
+}
+
+multiclass VPseudoBinaryV_VV_VF {
   defm "" : VPseudoBinaryV_VV;
-  defm "" : VPseudoBinaryV_VX<IsFloat>;
+  defm "" : VPseudoBinaryV_VF;
 }
 
 multiclass VPseudoBinaryV_VX_VI<Operand ImmType = simm5> {
-  defm "" : VPseudoBinaryV_VX</*IsFloat=*/0>;
+  defm "" : VPseudoBinaryV_VX;
   defm "" : VPseudoBinaryV_VI<ImmType>;
 }
 
-multiclass VPseudoBinaryW_VV_VX<bit IsFloat = 0> {
+multiclass VPseudoBinaryW_VV_VX {
   defm "" : VPseudoBinaryW_VV;
-  defm "" : VPseudoBinaryW_VX<IsFloat>;
+  defm "" : VPseudoBinaryW_VX;
 }
 
-multiclass VPseudoBinaryW_WV_WX<bit IsFloat = 0> {
+multiclass VPseudoBinaryW_VV_VF {
+  defm "" : VPseudoBinaryW_VV;
+  defm "" : VPseudoBinaryW_VF;
+}
+
+multiclass VPseudoBinaryW_WV_WX {
   defm "" : VPseudoBinaryW_WV;
-  defm "" : VPseudoBinaryW_WX<IsFloat>;
+  defm "" : VPseudoBinaryW_WX;
+}
+
+multiclass VPseudoBinaryW_WV_WF {
+  defm "" : VPseudoBinaryW_WV;
+  defm "" : VPseudoBinaryW_WF;
 }
 
 multiclass VPseudoBinaryV_VM_XM_IM {
@@ -1655,10 +1723,16 @@
     defm _VX : VPseudoTernary<m.vrclass, GPR, m.vrclass, m>;
 }
 
-multiclass VPseudoTernaryV_VX_AAXA<bit IsFloat = 0> {
+multiclass VPseudoTernaryV_VX_AAXA {
   foreach m = MxList.m in
-    defm !if(IsFloat, "_VF", "_VX") : VPseudoTernary<m.vrclass,
-                                                     !if(IsFloat, FPR32, GPR),
-                                                     m.vrclass, m>;
+    defm "_VX" : VPseudoTernary<m.vrclass, GPR, m.vrclass, m>;
+}
+
+multiclass VPseudoTernaryV_VF_AAXA {
+  foreach m = MxList.m in
+    foreach f = FPList.fpinfo in
+      defm "_VF_" # f.FX : VPseudoTernary<m.vrclass, f.fprclass,
+                                          m.vrclass, m>;
 }
 
 multiclass VPseudoTernaryW_VV {
@@ -1667,11 +1741,18 @@
     defm _VV : VPseudoTernary<m.wvrclass, m.vrclass, m.vrclass, m, constraint>;
 }
 
-multiclass VPseudoTernaryW_VX<bit IsFloat = 0> {
+multiclass VPseudoTernaryW_VX {
+  defvar constraint = "@earlyclobber $rd";
+  foreach m = MxList.m[0-5] in
+    defm "_VX" : VPseudoTernary<m.wvrclass, GPR, m.vrclass, m, constraint>;
+}
+
+multiclass VPseudoTernaryW_VF {
   defvar constraint = "@earlyclobber $rd";
   foreach m = MxList.m[0-5] in
-    defm !if(IsFloat, "_VF", "_VX") : VPseudoTernary<m.wvrclass,
-                                                     !if(IsFloat, FPR32, GPR),
-                                                     m.vrclass, m, constraint>;
+    foreach f = FPList.fpinfo[0-1] in
+      defm "_VF_" # f.FX : VPseudoTernary<m.wvrclass, f.fprclass,
+                                          m.vrclass, m, constraint>;
 }
 
 multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
@@ -1679,9 +1760,14 @@
     defm _VI : VPseudoTernary<m.vrclass, ImmType, m.vrclass, m, Constraint>;
 }
 
-multiclass VPseudoTernaryV_VV_VX_AAXA<bit IsFloat = 0> {
+multiclass VPseudoTernaryV_VV_VX_AAXA {
+  defm "" : VPseudoTernaryV_VV;
+  defm "" : VPseudoTernaryV_VX_AAXA;
+}
+
+multiclass VPseudoTernaryV_VV_VF_AAXA {
   defm "" : VPseudoTernaryV_VV;
-  defm "" : VPseudoTernaryV_VX_AAXA<IsFloat>;
+  defm "" : VPseudoTernaryV_VF_AAXA;
 }
 
 multiclass VPseudoTernaryV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
@@ -1689,24 +1775,34 @@
   defm "" : VPseudoTernaryV_VI<ImmType, Constraint>;
 }
 
-multiclass VPseudoTernaryW_VV_VX<bit IsFloat = 0> {
+multiclass VPseudoTernaryW_VV_VX {
   defm "" : VPseudoTernaryW_VV;
-  defm "" : VPseudoTernaryW_VX<IsFloat>;
+  defm "" : VPseudoTernaryW_VX;
+}
+
+multiclass VPseudoTernaryW_VV_VF {
+  defm "" : VPseudoTernaryW_VV;
+  defm "" : VPseudoTernaryW_VF;
 }
 
 multiclass VPseudoBinaryM_VV_VX_VI {
   defm "" : VPseudoBinaryM_VV;
-  defm "" : VPseudoBinaryM_VX</*IsFloat=*/0>;
+  defm "" : VPseudoBinaryM_VX;
   defm "" : VPseudoBinaryM_VI;
 }
 
-multiclass VPseudoBinaryM_VV_VX<bit IsFloat = 0> {
+multiclass VPseudoBinaryM_VV_VX {
+  defm "" : VPseudoBinaryM_VV;
+  defm "" : VPseudoBinaryM_VX;
+}
+
+multiclass VPseudoBinaryM_VV_VF {
   defm "" : VPseudoBinaryM_VV;
-  defm "" : VPseudoBinaryM_VX<IsFloat>;
+  defm "" : VPseudoBinaryM_VF;
 }
 
 multiclass VPseudoBinaryM_VX_VI {
-  defm "" : VPseudoBinaryM_VX</*IsFloat=*/0>;
+  defm "" : VPseudoBinaryM_VX;
   defm "" : VPseudoBinaryM_VI;
 }
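Note: the new `_VF` multiclasses expand a cross product — each FPR_Info entry in
FPList.fpinfo times each LMUL in MxList.m — so a single defm now emits one pseudo
per (element type, LMUL) pair, e.g. PseudoVFADD_VF_F16_M1. The widening forms only
slice FPList.fpinfo[0-1] (F16 and F32), since an f64 element has no wider RVV FP
type to widen into. A minimal C++ sketch of that expansion (illustrative only, not
LLVM code; the printed names follow the TableGen above):

```cpp
#include <cstdio>

int main() {
  // Mirrors FPList.fpinfo and MxList.m from the TableGen above.
  const char *Fx[] = {"F16", "F32", "F64"};
  const char *Mx[] = {"MF8", "MF4", "MF2", "M1", "M2", "M4", "M8"};
  // VPseudoBinaryV_VF: 3 FP types x 7 LMULs = 21 pseudos per defm.
  for (const char *F : Fx)
    for (const char *M : Mx)
      std::printf("PseudoVFADD_VF_%s_%s\n", F, M);
  return 0;
}
```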
@@ -1947,7 +2043,7 @@
                    (XLenVT GPR:$vl))),
                    (!cast<Instruction>(inst)
                    (op1_type op1_reg_class:$rs1),
-                   ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                   (op2_type op2_kind:$rs2),
                    (NoX0 GPR:$vl), sew)>;
 
 class VPatBinaryMask<string intrinsic_name,
@@ ... @@
                    (!cast<Instruction>(inst#"_MASK")
                    (result_type result_reg_class:$merge),
                    (op1_type op1_reg_class:$rs1),
-                   ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                   (op2_type op2_kind:$rs2),
                    (mask_type V0), (NoX0 GPR:$vl), sew)>;
 
 class VPatTernaryNoMask<string intrinsic,
@@ ... @@
-                    (!cast<Instruction>(inst#_#kind#"_"# vlmul.MX)
+                    (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                     result_reg_class:$rs3,
-                    ToFPR32<op1_type, op1_reg_class, "rs1">.ret,
+                    (op1_type op1_reg_class:$rs1),
                     op2_kind:$rs2,
                     (NoX0 GPR:$vl), sew)>;
@@ -2013,9 +2109,9 @@
                     (op2_type op2_kind:$rs2),
                     (mask_type V0),
                     (XLenVT GPR:$vl))),
-                   (!cast<Instruction>(inst#_#kind#"_"# vlmul.MX # "_MASK")
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
-                   ToFPR32<op1_type, op1_reg_class, "rs1">.ret,
+                   (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0), (NoX0 GPR:$vl), sew)>;
@@ -2330,7 +2426,7 @@
                          (XLenVT GPR:$vl))),
                    (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                          (op1_type op1_reg_class:$rs1),
-                         ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                         (op2_type op2_kind:$rs2),
                          (mask_type V0), (NoX0 GPR:$vl), sew)>;
 }
@@ -2351,7 +2447,7 @@
                    (XLenVT GPR:$vl))),
                    (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    (op1_type op1_reg_class:$rs1),
-                   ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                   (op2_type op2_kind:$rs2),
                    (NoX0 GPR:$vl), sew)>;
 }
@@ -2414,8 +2510,8 @@
 multiclass VPatBinaryV_VX<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in {
-    defvar kind = !if(!eq(vti.Scalar, XLenVT), "_VX_", "_VF_");
-    defm : VPatBinary<intrinsic, instruction # kind # vti.LMul.MX,
+    defvar kind = getScalarSuffix<vti.Scalar>.suffix;
+    defm : VPatBinary<intrinsic, instruction # "_" # kind # "_" # vti.LMul.MX,
                       vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                       vti.SEW, vti.RegClass,
                       vti.RegClass, vti.ScalarRegClass>;
@@ -2464,8 +2560,8 @@
   foreach VtiToWti = vtilist in {
     defvar Vti = VtiToWti.Vti;
     defvar Wti = VtiToWti.Wti;
-    defvar kind = !if(!eq(Vti.Scalar, XLenVT), "_VX_", "_VF_");
-    defm : VPatBinary<intrinsic, instruction # kind # Vti.LMul.MX,
+    defvar kind = getScalarSuffix<Vti.Scalar>.suffix;
+    defm : VPatBinary<intrinsic, instruction # "_" # kind # "_" # Vti.LMul.MX,
                       Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
                       Vti.SEW, Wti.RegClass,
                       Vti.RegClass, Vti.ScalarRegClass>;
@@ -2489,8 +2585,8 @@
   foreach VtiToWti = vtilist in {
     defvar Vti = VtiToWti.Vti;
     defvar Wti = VtiToWti.Wti;
-    defvar kind = !if(!eq(Vti.Scalar, XLenVT), "_WX_", "_WF_");
-    defm : VPatBinary<intrinsic, instruction # kind # Vti.LMul.MX,
+    defvar kind = getWScalarSuffix<Vti.Scalar>.suffix;
+    defm : VPatBinary<intrinsic, instruction # "_" # kind # "_" # Vti.LMul.MX,
                       Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                       Vti.SEW, Wti.RegClass,
                       Wti.RegClass, Vti.ScalarRegClass>;
@@ -2514,8 +2610,8 @@
   foreach VtiToWti = vtilist in {
     defvar Vti = VtiToWti.Vti;
     defvar Wti = VtiToWti.Wti;
-    defvar kind = !if(!eq(Vti.Scalar, XLenVT), "_WX_", "_WF_");
-    defm : VPatBinary<intrinsic, instruction # kind # Vti.LMul.MX,
+    defvar kind = getWScalarSuffix<Vti.Scalar>.suffix;
+    defm : VPatBinary<intrinsic, instruction # "_" # kind # "_" # Vti.LMul.MX,
                       Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                       Vti.SEW, Vti.RegClass,
                       Wti.RegClass, Vti.ScalarRegClass>;
@@ -2550,7 +2646,10 @@
                           list<VTypeInfo> vtilist = AllIntegerVectors> {
   foreach vti = vtilist in
     defm : VPatBinaryCarryIn<intrinsic, instruction, "VXM",
@@ ... @@
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in {
-    defvar kind = !if(!eq(vti.Scalar, XLenVT), "_VX_", "_VF_");
-    defm : VPatBinary<intrinsic, instruction # kind # vti.LMul.MX,
+    defvar kind = getScalarSuffix<vti.Scalar>.suffix;
+    defm : VPatBinary<intrinsic, instruction # "_" # kind # "_" # vti.LMul.MX,
                       vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
                       vti.SEW, VR,
                       vti.RegClass, vti.ScalarRegClass>;
@@ -2745,7 +2844,7 @@
                                 list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
     defm : VPatTernary<intrinsic, instruction,
-                       !if(!eq(vti.Scalar, XLenVT), "VX", "VF"),
+                       getScalarSuffix<vti.Scalar>.suffix,
                        vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
                        vti.SEW, vti.LMul, vti.RegClass,
                        vti.ScalarRegClass, vti.RegClass>;
@@ -2778,7 +2877,7 @@
     defvar vti = vtiToWti.Vti;
     defvar wti = vtiToWti.Wti;
     defm : VPatTernary<intrinsic, instruction,
-                       !if(!eq(vti.Scalar, XLenVT), "VX", "VF"),
+                       getScalarSuffix<vti.Scalar>.suffix,
                        wti.Vector, vti.Scalar, vti.Vector, vti.Mask,
                        vti.SEW, vti.LMul, wti.RegClass,
                        vti.ScalarRegClass, vti.RegClass>;
@@ -3237,7 +3336,7 @@
 defm PseudoVWMACCU : VPseudoTernaryW_VV_VX;
 defm PseudoVWMACC : VPseudoTernaryW_VV_VX;
 defm PseudoVWMACCSU : VPseudoTernaryW_VV_VX;
-defm PseudoVWMACCUS : VPseudoTernaryW_VX</*IsFloat=*/0>;
+defm PseudoVWMACCUS : VPseudoTernaryW_VX;
 
 //===----------------------------------------------------------------------===//
 // 12.16. Vector Integer Merge Instructions
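Note: with the element type now folded into the pseudo name, the PseudoToVInst
`!subst` chain extended earlier in this file must also strip `_F16`/`_F32`/`_F64`
to recover the underlying instruction mnemonic. A hedged C++ rendering of that
stripping (an illustration of the idea, not the actual TableGen or an LLVM API):

```cpp
#include <string>

// Strip the LMUL, mask-size, MASK and FP-type markers, plus the "Pseudo"
// prefix, mirroring the extended !subst chain above. Longer tokens come
// first so e.g. "_B16" is removed before "_B1" could match inside it.
std::string pseudoToVInst(std::string Name) {
  auto strip = [&](const std::string &S) {
    for (size_t P; (P = Name.find(S)) != std::string::npos;)
      Name.erase(P, S.size());
  };
  for (const char *S : {"_MF8", "_MF4", "_MF2", "_M8", "_M4", "_M2", "_M1",
                        "_B64", "_B32", "_B16", "_B8", "_B4", "_B2", "_B1",
                        "_MASK", "_F64", "_F32", "_F16", "Pseudo"})
    strip(S);
  return Name;  // pseudoToVInst("PseudoVFADD_VF_F16_M1") == "VFADD_VF"
}
```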
 //===----------------------------------------------------------------------===//
@@ -3298,49 +3397,49 @@
 //===----------------------------------------------------------------------===//
 // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVFADD : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
-defm PseudoVFSUB : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
-defm PseudoVFRSUB : VPseudoBinaryV_VX</*IsFloat=*/1>;
+defm PseudoVFADD : VPseudoBinaryV_VV_VF;
+defm PseudoVFSUB : VPseudoBinaryV_VV_VF;
+defm PseudoVFRSUB : VPseudoBinaryV_VF;
 
 //===----------------------------------------------------------------------===//
 // 14.3. Vector Widening Floating-Point Add/Subtract Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVFWADD : VPseudoBinaryW_VV_VX</*IsFloat=*/1>;
-defm PseudoVFWSUB : VPseudoBinaryW_VV_VX</*IsFloat=*/1>;
-defm PseudoVFWADD : VPseudoBinaryW_WV_WX</*IsFloat=*/1>;
-defm PseudoVFWSUB : VPseudoBinaryW_WV_WX</*IsFloat=*/1>;
+defm PseudoVFWADD : VPseudoBinaryW_VV_VF;
+defm PseudoVFWSUB : VPseudoBinaryW_VV_VF;
+defm PseudoVFWADD : VPseudoBinaryW_WV_WF;
+defm PseudoVFWSUB : VPseudoBinaryW_WV_WF;
 
 //===----------------------------------------------------------------------===//
 // 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVFMUL : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
-defm PseudoVFDIV : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
-defm PseudoVFRDIV : VPseudoBinaryV_VX</*IsFloat=*/1>;
+defm PseudoVFMUL : VPseudoBinaryV_VV_VF;
+defm PseudoVFDIV : VPseudoBinaryV_VV_VF;
+defm PseudoVFRDIV : VPseudoBinaryV_VF;
 
 //===----------------------------------------------------------------------===//
 // 14.5. Vector Widening Floating-Point Multiply
 //===----------------------------------------------------------------------===//
-defm PseudoVFWMUL : VPseudoBinaryW_VV_VX</*IsFloat=*/1>;
+defm PseudoVFWMUL : VPseudoBinaryW_VV_VF;
 
 //===----------------------------------------------------------------------===//
 // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVFMACC : VPseudoTernaryV_VV_VX_AAXA</*IsFloat=*/1>;
-defm PseudoVFNMACC : VPseudoTernaryV_VV_VX_AAXA</*IsFloat=*/1>;
-defm PseudoVFMSAC : VPseudoTernaryV_VV_VX_AAXA</*IsFloat=*/1>;
-defm PseudoVFNMSAC : VPseudoTernaryV_VV_VX_AAXA</*IsFloat=*/1>;
-defm PseudoVFMADD : VPseudoTernaryV_VV_VX_AAXA</*IsFloat=*/1>;
-defm PseudoVFNMADD : VPseudoTernaryV_VV_VX_AAXA</*IsFloat=*/1>;
-defm PseudoVFMSUB : VPseudoTernaryV_VV_VX_AAXA</*IsFloat=*/1>;
-defm PseudoVFNMSUB : VPseudoTernaryV_VV_VX_AAXA</*IsFloat=*/1>;
+defm PseudoVFMACC : VPseudoTernaryV_VV_VF_AAXA;
+defm PseudoVFNMACC : VPseudoTernaryV_VV_VF_AAXA;
+defm PseudoVFMSAC : VPseudoTernaryV_VV_VF_AAXA;
+defm PseudoVFNMSAC : VPseudoTernaryV_VV_VF_AAXA;
+defm PseudoVFMADD : VPseudoTernaryV_VV_VF_AAXA;
+defm PseudoVFNMADD : VPseudoTernaryV_VV_VF_AAXA;
+defm PseudoVFMSUB : VPseudoTernaryV_VV_VF_AAXA;
+defm PseudoVFNMSUB : VPseudoTernaryV_VV_VF_AAXA;
 
 //===----------------------------------------------------------------------===//
 // 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVFWMACC : VPseudoTernaryW_VV_VX</*IsFloat=*/1>;
-defm PseudoVFWNMACC : VPseudoTernaryW_VV_VX</*IsFloat=*/1>;
-defm PseudoVFWMSAC : VPseudoTernaryW_VV_VX</*IsFloat=*/1>;
-defm PseudoVFWNMSAC : VPseudoTernaryW_VV_VX</*IsFloat=*/1>;
+defm PseudoVFWMACC : VPseudoTernaryW_VV_VF;
+defm PseudoVFWNMACC : VPseudoTernaryW_VV_VF;
+defm PseudoVFWMSAC : VPseudoTernaryW_VV_VF;
+defm PseudoVFWNMSAC : VPseudoTernaryW_VV_VF;
 
 //===----------------------------------------------------------------------===//
 // 14.8. Vector Floating-Point Square-Root Instruction
@@ -3360,25 +3459,25 @@
 //===----------------------------------------------------------------------===//
 // 14.11. Vector Floating-Point Min/Max Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVFMIN : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
-defm PseudoVFMAX : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
+defm PseudoVFMIN : VPseudoBinaryV_VV_VF;
+defm PseudoVFMAX : VPseudoBinaryV_VV_VF;
 
 //===----------------------------------------------------------------------===//
 // 14.12. Vector Floating-Point Sign-Injection Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVFSGNJ : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
-defm PseudoVFSGNJN : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
-defm PseudoVFSGNJX : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
+defm PseudoVFSGNJ : VPseudoBinaryV_VV_VF;
+defm PseudoVFSGNJN : VPseudoBinaryV_VV_VF;
+defm PseudoVFSGNJX : VPseudoBinaryV_VV_VF;
 
 //===----------------------------------------------------------------------===//
 // 14.13. Vector Floating-Point Compare Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVMFEQ : VPseudoBinaryM_VV_VX</*IsFloat=*/1>;
-defm PseudoVMFNE : VPseudoBinaryM_VV_VX</*IsFloat=*/1>;
-defm PseudoVMFLT : VPseudoBinaryM_VV_VX</*IsFloat=*/1>;
-defm PseudoVMFLE : VPseudoBinaryM_VV_VX</*IsFloat=*/1>;
-defm PseudoVMFGT : VPseudoBinaryM_VX</*IsFloat=*/1>;
-defm PseudoVMFGE : VPseudoBinaryM_VX</*IsFloat=*/1>;
+defm PseudoVMFEQ : VPseudoBinaryM_VV_VF;
+defm PseudoVMFNE : VPseudoBinaryM_VV_VF;
+defm PseudoVMFLT : VPseudoBinaryM_VV_VF;
+defm PseudoVMFLE : VPseudoBinaryM_VV_VF;
+defm PseudoVMFGT : VPseudoBinaryM_VF;
+defm PseudoVMFGE : VPseudoBinaryM_VF;
 
 //===----------------------------------------------------------------------===//
 // 14.14. Vector Floating-Point Classify Instruction
@@ -3388,8 +3487,7 @@
 //===----------------------------------------------------------------------===//
 // 14.15. Vector Floating-Point Merge Instruction
 //===----------------------------------------------------------------------===//
-defm PseudoVFMERGE : VPseudoBinaryV_XM</*CarryIn=*/1, /*CarryOut=*/"",
-                                       /*IsFloat=*/1>;
+defm PseudoVFMERGE : VPseudoBinaryV_FM;
 
 //===----------------------------------------------------------------------===//
 // 14.16. Vector Floating-Point Move Instruction
@@ -3561,18 +3659,22 @@
 let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
     Uses = [VL, VTYPE] in {
   foreach m = MxList.m in {
-    let VLMul = m.value in {
-      let HasSEWOp = 1, BaseInstr = VFMV_F_S in
-      def PseudoVFMV_F_S # "_" # m.MX : Pseudo<(outs FPR32:$rd),
-                                               (ins m.vrclass:$rs2,
-                                                    ixlenimm:$sew),
-                                               []>, RISCVVPseudo;
-      let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, WritesElement0 = 1,
-          Constraints = "$rd = $rs1" in
-      def PseudoVFMV_S_F # "_" # m.MX : Pseudo<(outs m.vrclass:$rd),
-                                               (ins m.vrclass:$rs1, FPR32:$rs2,
-                                                    GPR:$vl, ixlenimm:$sew),
-                                               []>, RISCVVPseudo;
+    foreach f = FPList.fpinfo in {
+      let VLMul = m.value in {
+        let HasSEWOp = 1, BaseInstr = VFMV_F_S in
+        def PseudoVFMV_F_S # "_" # f.FX # "_" # m.MX :
+          Pseudo<(outs f.fprclass:$rd),
+                 (ins m.vrclass:$rs2,
+                      ixlenimm:$sew),
+                 []>, RISCVVPseudo;
+        let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, WritesElement0 = 1,
+            Constraints = "$rd = $rs1" in
+        def PseudoVFMV_S_F # "_" # f.FX #"_" # m.MX :
+          Pseudo<(outs m.vrclass:$rd),
+                 (ins m.vrclass:$rs1, f.fprclass:$rs2,
+                      GPR:$vl, ixlenimm:$sew),
+                 []>, RISCVVPseudo;
+      }
     }
   }
 }
@@ -3584,13 +3686,13 @@
 let Predicates = [HasStdExtV] in {
   defm PseudoVSLIDEUP : VPseudoTernaryV_VX_VI<uimm5, "@earlyclobber $rd">;
   defm PseudoVSLIDEDOWN : VPseudoTernaryV_VX_VI<uimm5>;
-  defm PseudoVSLIDE1UP : VPseudoBinaryV_VX</*IsFloat=*/0, "@earlyclobber $rd">;
-  defm PseudoVSLIDE1DOWN : VPseudoBinaryV_VX</*IsFloat=*/0>;
+  defm PseudoVSLIDE1UP : VPseudoBinaryV_VX<"@earlyclobber $rd">;
+  defm PseudoVSLIDE1DOWN : VPseudoBinaryV_VX;
 } // Predicates = [HasStdExtV]
 
 let Predicates = [HasStdExtV, HasStdExtF] in {
-  defm PseudoVFSLIDE1UP : VPseudoBinaryV_VX</*IsFloat=*/1, "@earlyclobber $rd">;
-  defm PseudoVFSLIDE1DOWN : VPseudoBinaryV_VX</*IsFloat=*/1>;
+  defm PseudoVFSLIDE1UP : VPseudoBinaryV_VF<"@earlyclobber $rd">;
+  defm PseudoVFSLIDE1DOWN : VPseudoBinaryV_VF;
 } // Predicates = [HasStdExtV, HasStdExtF]
@@ -4083,8 +4185,12 @@
   def : Pat<(fvti.Vector (int_riscv_vfmv_v_f
                           (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl)),
-            (!cast<Instruction>("PseudoVFMV_V_F_"#fvti.LMul.MX)
-             ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
+            (!cast<Instruction>("PseudoVFMV_V_F_" #
+                                !cond(!eq(fvti.Scalar, f16): "F16_",
+                                      !eq(fvti.Scalar, f32): "F32_",
+                                      !eq(fvti.Scalar, f64): "F64_") #
+                                fvti.LMul.MX)
+             (fvti.Scalar fvti.ScalarRegClass:$rs2),
              (NoX0 GPR:$vl), fvti.SEW)>;
 }
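Note: the `!cond` above splices an `F16_`/`F32_`/`F64_` infix, chosen by the scalar
element type, into the vfmv pseudo name ahead of the LMUL suffix. Roughly, as a C++
sketch (an assumed helper for illustration, not LLVM code):

```cpp
#include <string>

// e.g. vfmvName("PseudoVFMV_V_F_", 16, "M1") == "PseudoVFMV_V_F_F16_M1"
std::string vfmvName(std::string Base, unsigned EltBits, const std::string &MX) {
  Base += EltBits == 16 ? "F16_" : EltBits == 32 ? "F32_" : "F64_";
  return Base + MX;
}
```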
@@ -4242,27 +4348,23 @@
 let Predicates = [HasStdExtV, HasStdExtF] in {
 foreach fvti = AllFloatVectors in {
-  defvar instr = !cast<Instruction>("PseudoVFMV_F_S_" # fvti.LMul.MX);
+  defvar instr = !cast<Instruction>("PseudoVFMV_F_S_" #
+                                    !cond(!eq(fvti.Scalar, f16): "F16_",
+                                          !eq(fvti.Scalar, f32): "F32_",
+                                          !eq(fvti.Scalar, f64): "F64_") #
+                                    fvti.LMul.MX);
   def : Pat<(fvti.Scalar (int_riscv_vfmv_f_s (fvti.Vector fvti.RegClass:$rs2))),
-            // Floating point instructions with a scalar result will always
-            // generate the result in a register of class FPR32. When dealing
-            // with the f64 variant of a pattern we need to promote the FPR32
-            // subregister generated by the instruction to the FPR64 base
-            // register expected by the type in the pattern
-            !cond(!eq(!cast<string>(fvti.ScalarRegClass),
-                      !cast<string>(FPR64)):
-                    (SUBREG_TO_REG (i32 -1),
-                                   (instr $rs2, fvti.SEW), sub_32),
-                  !eq(!cast<string>(fvti.ScalarRegClass),
-                      !cast<string>(FPR16)):
-                    (EXTRACT_SUBREG (instr $rs2, fvti.SEW), sub_16),
-                  !eq(1, 1):
-                    (instr $rs2, fvti.SEW))>;
+            (instr $rs2, fvti.SEW)>;
   def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
                          (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl)),
-            (!cast<Instruction>("PseudoVFMV_S_F_" # fvti.LMul.MX)
-             (fvti.Vector $rs1), ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
+            (!cast<Instruction>("PseudoVFMV_S_F_" #
+                                !cond(!eq(fvti.Scalar, f16): "F16_",
+                                      !eq(fvti.Scalar, f32): "F32_",
+                                      !eq(fvti.Scalar, f64): "F64_") #
+                                fvti.LMul.MX)
+             (fvti.Vector $rs1),
+             (fvti.Scalar fvti.ScalarRegClass:$rs2),
             (NoX0 GPR:$vl), fvti.SEW)>;
 }
 } // Predicates = [HasStdExtV, HasStdExtF]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -48,6 +48,18 @@
 def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", []>;
 def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", []>;
 
+// FIXME: We only model FPR32 for V instructions in RISCVInstrInfoV.td.
+// FP16/FP32/FP64 registers are alias each other. Convert FPR16 and FPR64
+// to FPR32 for V instructions is enough.
+class ToFPR32<ValueType type, DAGOperand operand, string name> {
+  dag ret = !cond(!eq(!cast<string>(operand), !cast<string>(FPR64)):
+                    (EXTRACT_SUBREG !dag(type, [FPR64], [name]), sub_32),
+                  !eq(!cast<string>(operand), !cast<string>(FPR16)):
+                    (SUBREG_TO_REG (i16 -1), !dag(type, [FPR16], [name]), sub_16),
+                  !eq(1, 1):
+                    !dag(type, [operand], [name]));
+}
+
 class SwapHelper<dag Prefix, dag A, dag B, dag Suffix, bit swap> {
   dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix);
 }
@@ -162,7 +174,7 @@
                        DAGOperand xop_kind> :
     Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                           (vop_type (splat_vector xop_kind:$rs2)))),
-        (!cast<Instruction>(instruction_name#"_VF_"#vlmul.MX)
+        (!cast<Instruction>(instruction_name#"_VF_F32_"#vlmul.MX)
              vop_reg_class:$rs1,
              ToFPR32<xop_type, xop_kind, "rs2">.ret,
              avl, sew)>;
@@ -183,7 +195,7 @@
   foreach fvti = AllFloatVectors in
     def : Pat<(fvti.Vector (vop (fvti.Vector (splat_vector fvti.Scalar:$rs2)),
                                 (fvti.Vector fvti.RegClass:$rs1))),
-              (!cast<Instruction>(instruction_name#"_VF_"#fvti.LMul.MX)
+              (!cast<Instruction>(instruction_name#"_VF_F32_"#fvti.LMul.MX)
                    fvti.RegClass:$rs1,
                    ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
                    fvti.AVL, fvti.SEW)>;
@@ -263,7 +275,7 @@
     def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                 (fvti.Vector (splat_vector fvti.ScalarRegClass:$rs2)),
                                 cc)),
-              (!cast<Instruction>(instruction_name#"_VF_"#fvti.LMul.MX)
+              (!cast<Instruction>(instruction_name#"_VF_F32_"#fvti.LMul.MX)
                    fvti.RegClass:$rs1,
                    ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
                    fvti.AVL, fvti.SEW)>;
@@ -274,7 +286,7 @@
     def : Pat<(fvti.Mask (setcc (fvti.Vector (splat_vector fvti.ScalarRegClass:$rs2)),
                                 (fvti.Vector fvti.RegClass:$rs1),
                                 cc)),
-              (!cast<Instruction>(swapped_op_instruction_name#"_VF_"#fvti.LMul.MX)
+              (!cast<Instruction>(swapped_op_instruction_name#"_VF_F32_"#fvti.LMul.MX)
                    fvti.RegClass:$rs1,
                    ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
                    fvti.AVL, fvti.SEW)>;
@@ -487,7 +499,7 @@
   def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
                                   (splat_vector fvti.ScalarRegClass:$rs1),
                                   fvti.RegClass:$rs2)),
-            (!cast<Instruction>("PseudoVFMERGE_VFM_"#fvti.LMul.MX)
+            (!cast<Instruction>("PseudoVFMERGE_VFM_F32_"#fvti.LMul.MX)
                  fvti.RegClass:$rs2,
                  ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs1">.ret,
                  VMV0:$vm, fvti.AVL, fvti.SEW)>;
@@ -538,7 +550,7 @@
 let Predicates = [HasStdExtV, HasStdExtF] in {
 foreach fvti = AllFloatVectors in {
   def : Pat<(fvti.Vector (splat_vector fvti.ScalarRegClass:$rs1)),
-            (!cast<Instruction>("PseudoVFMV_V_F_"#fvti.LMul.MX)
+            (!cast<Instruction>("PseudoVFMV_V_F_F32_"#fvti.LMul.MX)
              ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs1">.ret,
              fvti.AVL, fvti.SEW)>;
@@ -570,9 +582,9 @@
   defvar extractelt_node = !if(IsFloat, extractelt, riscv_extract_vector_elt);
   foreach vti = vtilist in {
     defvar MX = vti.LMul.MX;
-    defvar vmv_xf_s_inst = !cast<Instruction>(!if(IsFloat, "PseudoVFMV_F_S_",
+    defvar vmv_xf_s_inst = !cast<Instruction>(!if(IsFloat, "PseudoVFMV_F_S_F32_",
                                                            "PseudoVMV_X_S_")#MX);
-    defvar vmv_s_xf_inst = !cast<Instruction>(!if(IsFloat, "PseudoVFMV_S_F_",
+    defvar vmv_s_xf_inst = !cast<Instruction>(!if(IsFloat, "PseudoVFMV_S_F_F32_",
                                                            "PseudoVMV_S_X_")#MX);
     // Only pattern-match insert/extract-element operations where the index is
     // 0. Any other index will have been custom-lowered to slide the vector
diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
--- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -178,6 +178,12 @@
              RISCV::VRM8RegClass.contains(Reg)) {
     Reg = TRI->getSubReg(Reg, RISCV::sub_vrm1_0);
     assert(Reg && "Subregister does not exist");
+  } else if (RISCV::FPR16RegClass.contains(Reg)) {
+    Reg = TRI->getMatchingSuperReg(Reg, RISCV::sub_16, &RISCV::FPR32RegClass);
+    assert(Reg && "Subregister does not exist");
+  } else if (RISCV::FPR64RegClass.contains(Reg)) {
+    Reg = TRI->getSubReg(Reg, RISCV::sub_32);
+    assert(Reg && "Superregister does not exist");
   }
 
   MCOp = MCOperand::createReg(Reg);
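Note: the RISCVMCInstLower.cpp hunk above is the printing-side counterpart of the
register-class change. The pseudos now carry FPR16/FPR64 operands, but the MC layer
still encodes the FPR32 view, so an FPR16 operand is widened to its FPR32
super-register and an FPR64 operand is narrowed to its FPR32 sub-register. A
self-contained toy model of that normalization (illustrative only; Reg and FPRClass
here are stand-ins, not LLVM types):

```cpp
#include <cassert>

// Stand-in types; in LLVM this is done with TargetRegisterInfo's
// getMatchingSuperReg / getSubReg as in the hunk above.
enum class FPRClass { FPR16, FPR32, FPR64 };

struct Reg {
  unsigned Num;   // architectural FP register number, e.g. 10 for fa0
  FPRClass Cls;
};

// All three classes are views of the same architectural register, so
// normalizing to FPR32 changes only the register class, never the number.
Reg normalizeToFPR32(Reg R) {
  R.Cls = FPRClass::FPR32;  // $f10_h or $f10_d both print as $f10_f
  return R;
}

int main() {
  Reg Half = {10, FPRClass::FPR16};  // $f10_h (half)
  Reg Dbl  = {10, FPRClass::FPR64};  // $f10_d (double)
  assert(normalizeToFPR32(Half).Cls == FPRClass::FPR32);
  assert(normalizeToFPR32(Dbl).Num == 10);
  return 0;
}
```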
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
@@ -9,7 +9,6 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
 ; CHECK-NEXT:    ret
 entry:
   %a = call half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half> %0)
@@ -23,7 +22,6 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
 ; CHECK-NEXT:    ret
 entry:
   %a = call half @llvm.riscv.vfmv.f.s.nxv2f16(<vscale x 2 x half> %0)
@@ -37,7 +35,6 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
 ; CHECK-NEXT:    ret
 entry:
   %a = call half @llvm.riscv.vfmv.f.s.nxv4f16(<vscale x 4 x half> %0)
@@ -51,7 +48,6 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
 ; CHECK-NEXT:    ret
 entry:
   %a = call half @llvm.riscv.vfmv.f.s.nxv8f16(<vscale x 8 x half> %0)
@@ -65,7 +61,6 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
 ; CHECK-NEXT:    ret
 entry:
   %a = call half @llvm.riscv.vfmv.f.s.nxv16f16(<vscale x 16 x half> %0)
@@ -79,7 +74,6 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
 ; CHECK-NEXT:    ret
 entry:
   %a = call half @llvm.riscv.vfmv.f.s.nxv32f16(<vscale x 32 x half> %0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
@@ -6,7 +6,6 @@
 define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
@@ -20,7 +19,6 @@
 define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
@@ -34,7 +32,6 @@
 define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
@@ -48,7 +45,6 @@
 define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
@@ -62,7 +58,6 @@
 define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
@@ -76,7 +71,6 @@
 define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
@@ -6,7 +6,6 @@
 define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
@@ -20,7 +19,6 @@
 define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
@@ -34,7 +32,6 @@
 define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
@@ -48,7 +45,6 @@
 define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
@@ -62,7 +58,6 @@
 define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
@@ -76,7 +71,6 @@
 define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
@@ -8,7 +8,6 @@
 define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    jalr zero, 0(ra)
@@ -27,7 +26,6 @@
 define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    jalr zero, 0(ra)
@@ -46,7 +44,6 @@
 define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    jalr zero, 0(ra)
@@ -65,7 +62,6 @@
 define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    jalr zero, 0(ra)
@@ -84,7 +80,6 @@
 define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    jalr zero, 0(ra)
@@ -103,7 +98,6 @@
 define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
@@ -8,7 +8,6 @@
 define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i64 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    jalr zero, 0(ra)
@@ -27,7 +26,6 @@
 define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i64 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    jalr zero, 0(ra)
@@ -46,7 +44,6 @@
 define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i64 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    jalr zero, 0(ra)
@@ -65,7 +62,6 @@
 define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i64 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    jalr zero, 0(ra)
@@ -84,7 +80,6 @@
 define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i64 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    jalr zero, 0(ra)
@@ -103,7 +98,6 @@
 define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i64 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
 ; CHECK-NEXT:    jalr zero, 0(ra)