diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -53,18 +53,15 @@
   // FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as
   // XLEN is the only legal integer width.
   //
-  // FMV_H_X_RV32/RV64 match the semantics of the FMV.H.X.
-  // FMV_X_ANYEXTH_RV32/RV64 are similar to FMV.X.H but has an any-extended
-  // result.
+  // FMV_H_X matches the semantics of the FMV.H.X.
+  // FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result.
   // FMV_W_X_RV64 matches the semantics of the FMV.W.X.
   // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.
   //
   // This is a more convenient semantic for producing dagcombines that remove
   // unnecessary GPR->FPR->GPR moves.
-  FMV_H_X_RV32,
-  FMV_H_X_RV64,
-  FMV_X_ANYEXTH_RV32,
-  FMV_X_ANYEXTH_RV64,
+  FMV_H_X,
+  FMV_X_ANYEXTH,
   FMV_W_X_RV64,
   FMV_X_ANYEXTW_RV64,
   // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -512,14 +512,9 @@
     if (Op.getValueType() == MVT::f16 && Subtarget.hasStdExtZfh()) {
       if (Op0.getValueType() != MVT::i16)
         return SDValue();
-      unsigned Opcode = RISCVISD::FMV_H_X_RV32;
-      EVT ExtType = MVT::i32;
-      if (Subtarget.is64Bit()) {
-        Opcode = RISCVISD::FMV_H_X_RV64;
-        ExtType = MVT::i64;
-      }
-      SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, ExtType, Op0);
-      SDValue FPConv = DAG.getNode(Opcode, DL, MVT::f16, NewOp0);
+      SDValue NewOp0 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Op0);
+      SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
       return FPConv;
     } else if (Op.getValueType() == MVT::f32 && Subtarget.is64Bit() &&
                Subtarget.hasStdExtF()) {
@@ -1119,13 +1114,8 @@
     if (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh()) {
      if (Op0.getValueType() != MVT::f16)
         return;
-      unsigned Opcode = RISCVISD::FMV_X_ANYEXTH_RV32;
-      EVT ExtType = MVT::i32;
-      if (Subtarget.is64Bit()) {
-        Opcode = RISCVISD::FMV_X_ANYEXTH_RV64;
-        ExtType = MVT::i64;
-      }
-      SDValue FPConv = DAG.getNode(Opcode, DL, ExtType, Op0);
+      SDValue FPConv =
+          DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, Subtarget.getXLenVT(), Op0);
       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
     } else if (N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
                Subtarget.hasStdExtF()) {
@@ -2204,10 +2194,8 @@
   case CCValAssign::Full:
     break;
   case CCValAssign::BCvt:
-    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f16)
-      Val = DAG.getNode(RISCVISD::FMV_H_X_RV32, DL, MVT::f16, Val);
-    else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f16)
-      Val = DAG.getNode(RISCVISD::FMV_H_X_RV64, DL, MVT::f16, Val);
+    if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
+      Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
     else
@@ -2265,10 +2253,8 @@
   case CCValAssign::Full:
     break;
   case CCValAssign::BCvt:
-    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f16)
-      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH_RV32, DL, MVT::i32, Val);
-    else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f16)
-      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH_RV64, DL, MVT::i64, Val);
+    if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
+      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
     else
@@ -3123,10 +3109,8 @@
   NODE_NAME_CASE(RORW)
   NODE_NAME_CASE(FSLW)
   NODE_NAME_CASE(FSRW)
-  NODE_NAME_CASE(FMV_H_X_RV32)
-  NODE_NAME_CASE(FMV_H_X_RV64)
-  NODE_NAME_CASE(FMV_X_ANYEXTH_RV32)
-  NODE_NAME_CASE(FMV_X_ANYEXTH_RV64)
+  NODE_NAME_CASE(FMV_H_X)
+  NODE_NAME_CASE(FMV_X_ANYEXTH)
   NODE_NAME_CASE(FMV_W_X_RV64)
   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
   NODE_NAME_CASE(READ_CYCLE_WIDE)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -361,6 +361,7 @@
 /// Float constants
 def : Pat<(f64 (fpimm0)), (FMV_D_X X0)>;
 
+// Moves (no conversion)
 def : Pat<(bitconvert GPR:$rs1), (FMV_D_X GPR:$rs1)>;
 def : Pat<(bitconvert FPR64:$rs1), (FMV_X_D FPR64:$rs1)>;
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -385,6 +385,7 @@
 } // Predicates = [HasStdExtF]
 
 let Predicates = [HasStdExtF, IsRV32] in {
+// Moves (no conversion)
 def : Pat<(bitconvert GPR:$rs1), (FMV_W_X GPR:$rs1)>;
 def : Pat<(bitconvert FPR32:$rs1), (FMV_X_W FPR32:$rs1)>;
 
@@ -398,6 +399,7 @@
 } // Predicates = [HasStdExtF, IsRV32]
 
 let Predicates = [HasStdExtF, IsRV64] in {
+// Moves (no conversion)
 def : Pat<(riscv_fmv_w_x_rv64 GPR:$src), (FMV_W_X GPR:$src)>;
 def : Pat<(riscv_fmv_x_anyextw_rv64 FPR32:$src), (FMV_X_W FPR32:$src)>;
 def : Pat<(sext_inreg (riscv_fmv_x_anyextw_rv64 FPR32:$src), i32),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -17,23 +17,15 @@
 // RISC-V specific DAG Nodes.
 //===----------------------------------------------------------------------===//
 
-def SDT_RISCVFMV_H_X_RV32
-    : SDTypeProfile<1, 1, [SDTCisVT<0, f16>, SDTCisVT<1, i32>]>;
-def SDT_RISCVFMV_H_X_RV64
-    : SDTypeProfile<1, 1, [SDTCisVT<0, f16>, SDTCisVT<1, i64>]>;
-def SDT_RISCVFMV_X_ANYEXTH_RV64
-    : SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisVT<1, f16>]>;
-def SDT_RISCVFMV_X_ANYEXTH_RV32
-    : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, f16>]>;
-
-def riscv_fmv_h_x_rv32
-    : SDNode<"RISCVISD::FMV_H_X_RV32", SDT_RISCVFMV_H_X_RV32>;
-def riscv_fmv_h_x_rv64
-    : SDNode<"RISCVISD::FMV_H_X_RV64", SDT_RISCVFMV_H_X_RV64>;
-def riscv_fmv_x_anyexth_rv64
-    : SDNode<"RISCVISD::FMV_X_ANYEXTH_RV64", SDT_RISCVFMV_X_ANYEXTH_RV64>;
-def riscv_fmv_x_anyexth_rv32
-    : SDNode<"RISCVISD::FMV_X_ANYEXTH_RV32", SDT_RISCVFMV_X_ANYEXTH_RV32>;
+def SDT_RISCVFMV_H_X
+    : SDTypeProfile<1, 1, [SDTCisVT<0, f16>, SDTCisVT<1, XLenVT>]>;
+def SDT_RISCVFMV_X_ANYEXTH
+    : SDTypeProfile<1, 1, [SDTCisVT<0, XLenVT>, SDTCisVT<1, f16>]>;
+
+def riscv_fmv_h_x
+    : SDNode<"RISCVISD::FMV_H_X", SDT_RISCVFMV_H_X>;
+def riscv_fmv_x_anyexth
+    : SDNode<"RISCVISD::FMV_X_ANYEXTH", SDT_RISCVFMV_X_ANYEXTH>;
 
 //===----------------------------------------------------------------------===//
 // Instruction class templates
@@ -349,16 +341,17 @@
 defm : StPat<store, FSH, FPR16>;
 
 /// Float conversion operations
+
 // f32 -> f16, f16 -> f32
 def : Pat<(fpround FPR32:$rs1), (FCVT_H_S FPR32:$rs1, 0b111)>;
 def : Pat<(fpextend FPR16:$rs1), (FCVT_S_H FPR16:$rs1)>;
 
+// Moves (no conversion)
+def : Pat<(riscv_fmv_h_x GPR:$src), (FMV_H_X GPR:$src)>;
+def : Pat<(riscv_fmv_x_anyexth FPR16:$src), (FMV_X_H FPR16:$src)>;
 } // Predicates = [HasStdExtZfh]
 
 let Predicates = [HasStdExtZfh, IsRV32] in {
-def : Pat<(riscv_fmv_h_x_rv32 GPR:$src), (FMV_H_X GPR:$src)>;
-def : Pat<(riscv_fmv_x_anyexth_rv32 FPR16:$src), (FMV_X_H FPR16:$src)>;
-
 // float->[u]int. Round-to-zero must be used.
 def : Pat<(fp_to_sint FPR16:$rs1), (FCVT_W_H $rs1, 0b001)>;
 def : Pat<(fp_to_uint FPR16:$rs1), (FCVT_WU_H $rs1, 0b001)>;
@@ -369,9 +362,6 @@
 } // Predicates = [HasStdExtZfh, IsRV32]
 
 let Predicates = [HasStdExtZfh, IsRV64] in {
-def : Pat<(riscv_fmv_h_x_rv64 GPR:$src), (FMV_H_X GPR:$src)>;
-def : Pat<(riscv_fmv_x_anyexth_rv64 FPR16:$src), (FMV_X_H FPR16:$src)>;
-
 // FP->[u]int32 is mostly handled by the FP->[u]int64 patterns. This is safe
 // because fpto[u|s]i produces poison if the value can't fit into the target.
 // We match the single case below because fcvt.wu.s sign-extends its result so