Index: lib/Target/RISCV/RISCVISelDAGToDAG.cpp
===================================================================
--- lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -163,12 +163,19 @@
       break;
     SDValue Op0 = Node->getOperand(0);
     EVT Op1VT = cast<VTSDNode>(Node->getOperand(1))->getVT();
-    if (Op1VT == MVT::i32 && Op0.getOpcode() == ISD::FP_TO_SINT &&
-        Op0.getOperand(0).getValueType() == MVT::f32) {
+    if (Op1VT == MVT::i32 && Op0.getOpcode() == ISD::FP_TO_SINT) {
+      EVT FPVT = Op0.getOperand(0).getValueType();
+      unsigned Opcode;
+      if (FPVT == MVT::f32)
+        Opcode = RISCV::FCVT_W_S;
+      else if (FPVT == MVT::f64)
+        Opcode = RISCV::FCVT_W_D;
+      else
+        llvm_unreachable("Unexpected FP type");
+      // Round-to-zero must be used.
       SDValue RndMode = CurDAG->getTargetConstant(1, SDLoc(Node), MVT::i64);
-      CurDAG->SelectNodeTo(Node, RISCV::FCVT_W_S, MVT::i64, Op0.getOperand(0),
-                           RndMode);
+      CurDAG->SelectNodeTo(Node, Opcode, MVT::i64, Op0.getOperand(0), RndMode);
       return;
     }
     break;
@@ -178,12 +185,19 @@
       break;
     SDValue Op0 = Node->getOperand(0);
     EVT Op1VT = cast<VTSDNode>(Node->getOperand(1))->getVT();
-    if (Op1VT == MVT::i32 && Op0.getOpcode() == ISD::FP_TO_UINT &&
-        Op0.getOperand(0).getValueType() == MVT::f32) {
+    if (Op1VT == MVT::i32 && Op0.getOpcode() == ISD::FP_TO_UINT) {
+      EVT FPVT = Op0.getOperand(0).getValueType();
+      unsigned Opcode;
+      if (FPVT == MVT::f32)
+        Opcode = RISCV::FCVT_WU_S;
+      else if (FPVT == MVT::f64)
+        Opcode = RISCV::FCVT_WU_D;
+      else
+        llvm_unreachable("Unexpected FP type");
+      // Round-to-zero must be used.
       SDValue RndMode = CurDAG->getTargetConstant(1, SDLoc(Node), MVT::i64);
-      CurDAG->SelectNodeTo(Node, RISCV::FCVT_WU_S, MVT::i64, Op0.getOperand(0),
-                           RndMode);
+      CurDAG->SelectNodeTo(Node, Opcode, MVT::i64, Op0.getOperand(0), RndMode);
       return;
     }
     break;
Index: lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- lib/Target/RISCV/RISCVISelLowering.cpp
+++ lib/Target/RISCV/RISCVISelLowering.cpp
@@ -791,6 +791,10 @@
     LocVT = XLenVT;
     LocInfo = CCValAssign::BCvt;
   }
+  if (XLen == 64 && ValVT == MVT::f64) {
+    LocVT = MVT::i64;
+    LocInfo = CCValAssign::BCvt;
+  }
 
   // Any return value split in to more than two values can't be returned
   // directly.
@@ -898,8 +902,9 @@
     return false;
   }
 
-  if (ValVT == MVT::f32) {
-    LocVT = MVT::f32;
+  // When an f32 or f64 is passed on the stack, no bit-conversion is needed.
+  if (ValVT == MVT::f32 || ValVT == MVT::f64) {
+    LocVT = ValVT;
     LocInfo = CCValAssign::Full;
   }
   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
@@ -1034,8 +1039,6 @@
     ExtType = ISD::NON_EXTLOAD;
     break;
   }
-  if (ValVT == MVT::f32)
-    LocVT = MVT::f32;
   Val = DAG.getExtLoad(
       ExtType, DL, LocVT, Chain, FIN,
       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
Index: lib/Target/RISCV/RISCVInstrInfoD.td
===================================================================
--- lib/Target/RISCV/RISCVInstrInfoD.td
+++ lib/Target/RISCV/RISCVInstrInfoD.td
@@ -292,3 +292,22 @@
 def : Pat<(sint_to_fp GPR:$rs1), (FCVT_D_W GPR:$rs1)>;
 def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_WU GPR:$rs1)>;
 } // Predicates = [HasStdExtD, IsRV32]
+
+let Predicates = [HasStdExtD, IsRV64] in {
+def : Pat<(bitconvert GPR:$rs1), (FMV_D_X GPR:$rs1)>;
+def : Pat<(bitconvert FPR64:$rs1), (FMV_X_D FPR64:$rs1)>;
+
+// FP->[u]int32 conversions are handled with custom selection code, because
+// the root assertsext/assertzext node never reaches the tablegen-generated
+// matcher.
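+// (Illustrative note: on RV64 the i32 result of FP_TO_[U]INT is promoted to
+// i64 during type legalization, so what reaches instruction selection is
+// roughly (assertsext/assertzext (fp_to_[us]int f64:$rs1), i32).)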
+
+// [u]int32->fp
+def : Pat<(sint_to_fp (sext_inreg GPR:$rs1, i32)), (FCVT_D_W $rs1)>;
+def : Pat<(uint_to_fp (zexti32 GPR:$rs1)), (FCVT_D_WU $rs1)>;
+
+def : Pat<(fp_to_sint FPR64:$rs1), (FCVT_L_D FPR64:$rs1, 0b001)>;
+def : Pat<(fp_to_uint FPR64:$rs1), (FCVT_LU_D FPR64:$rs1, 0b001)>;
+
+// [u]int64->fp. Match GCC and default to using dynamic rounding mode.
+def : Pat<(sint_to_fp GPR:$rs1), (FCVT_D_L GPR:$rs1, 0b111)>;
+def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_LU GPR:$rs1, 0b111)>;
+} // Predicates = [HasStdExtD, IsRV64]
Index: test/CodeGen/RISCV/double-arith.ll
===================================================================
--- test/CodeGen/RISCV/double-arith.ll
+++ test/CodeGen/RISCV/double-arith.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define double @fadd_d(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fadd_d:
@@ -18,6 +20,14 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fadd_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fadd double %a, %b
   ret double %1
 }
@@ -38,6 +48,14 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsub_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fsub.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fsub double %a, %b
   ret double %1
 }
@@ -58,6 +76,14 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmul_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmul.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fmul double %a, %b
   ret double %1
 }
@@ -78,6 +104,14 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fdiv_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fdiv.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fdiv double %a, %b
   ret double %1
 }
@@ -97,6 +131,13 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsqrt_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fsqrt.d ft0, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.sqrt.f64(double %a)
   ret double %1
 }
@@ -119,11 +160,23 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsgnj_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.copysign.f64(double %a, double %b)
   ret double %1
 }
 
 define double @fneg_d(double %a) nounwind {
+; TODO: doesn't test the fneg selection pattern for RV64 because
+; DAGCombiner::visitBITCAST will generate a xor on the incoming integer
+; argument.
+;
 ; RV32IFD-LABEL: fneg_d:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
@@ -136,11 +189,21 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fneg_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi a1, zero, -1
+; RV64IFD-NEXT:    slli a1, a1, 63
+; RV64IFD-NEXT:    xor a0, a0, a1
+; RV64IFD-NEXT:    ret
   %1 = fsub double -0.0, %a
   ret double %1
 }
 
 define double @fsgnjn_d(double %a, double %b) nounwind {
+; TODO: fsgnjn.d isn't selected on RV64 because DAGCombiner::visitBITCAST will
+; convert (bitconvert (fneg x)) to a xor.
+;
 ; RV32IFD-LABEL: fsgnjn_d:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
@@ -156,6 +219,17 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsgnjn_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi a2, zero, -1
+; RV64IFD-NEXT:    slli a2, a2, 63
+; RV64IFD-NEXT:    xor a1, a1, a2
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fsub double -0.0, %b
   %2 = call double @llvm.copysign.f64(double %a, double %1)
   ret double %2
@@ -164,6 +238,10 @@
 declare double @llvm.fabs.f64(double)
 
 define double @fabs_d(double %a) nounwind {
+; TODO: doesn't test the fabs selection pattern for RV64 because
+; DAGCombiner::visitBITCAST will generate an and on the incoming integer
+; argument.
+;
 ; RV32IFD-LABEL: fabs_d:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
@@ -176,6 +254,14 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fabs_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi a1, zero, -1
+; RV64IFD-NEXT:    slli a1, a1, 63
+; RV64IFD-NEXT:    addi a1, a1, -1
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.fabs.f64(double %a)
   ret double %1
 }
@@ -198,6 +284,14 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmin_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.minnum.f64(double %a, double %b)
   ret double %1
 }
@@ -220,6 +314,14 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmax_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmax.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.maxnum.f64(double %a, double %b)
   ret double %1
 }
@@ -237,6 +339,13 @@
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: feq_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -255,6 +364,13 @@
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: flt_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp olt double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -273,6 +389,13 @@
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fle_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ole double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
Index: test/CodeGen/RISCV/double-br-fcmp.ll
===================================================================
--- test/CodeGen/RISCV/double-br-fcmp.ll
+++ test/CodeGen/RISCV/double-br-fcmp.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 declare void @abort()
 declare void @exit(i32)
@@ -18,6 +20,19 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB0_2: # %if.else
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_false:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    addi a0, zero, 1
+; RV64IFD-NEXT:    bnez a0, .LBB0_2
+; RV64IFD-NEXT:  # %bb.1: # %if.then
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB0_2: # %if.else
+; RV64IFD-NEXT:    call abort
   %1 = fcmp false double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -46,6 +61,21 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB1_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_oeq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB1_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB1_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp oeq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -78,6 +108,22 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB2_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_oeq_alt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    beqz a0, .LBB2_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB2_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp oeq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -106,6 +152,21 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB3_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ogt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB3_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB3_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ogt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -134,6 +195,21 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB4_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_oge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB4_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB4_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp oge double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -162,6 +238,21 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB5_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_olt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB5_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB5_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp olt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -190,6 +281,21 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB6_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ole:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB6_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB6_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ole double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -226,6 +332,28 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB7_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_one:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft1
+; RV64IFD-NEXT:    not a1, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    bnez a0, .LBB7_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB7_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp one double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -258,6 +386,25 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB8_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ord:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB8_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB8_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ord double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -291,6 +438,26 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB9_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ueq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    feq.d a2, ft1, ft1
+; RV64IFD-NEXT:    and a1, a2, a1
+; RV64IFD-NEXT:    seqz a1, a1
+; RV64IFD-NEXT:    or a0, a0, a1
+; RV64IFD-NEXT:    bnez a0, .LBB9_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB9_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ueq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -320,6 +487,22 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB10_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ugt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB10_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB10_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ugt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -349,6 +532,22 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB11_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_uge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB11_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB11_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp uge double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -378,6 +577,22 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB12_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ult:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB12_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB12_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ult double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -407,6 +622,22 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB13_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ule:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB13_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB13_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ule double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -436,6 +667,22 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB14_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_une:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB14_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB14_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp une double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -468,6 +715,24 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB15_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_uno:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    bnez a0, .LBB15_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB15_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp uno double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -490,6 +755,19 @@
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB16_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_true:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    addi a0, zero, 1
+; RV64IFD-NEXT:    bnez a0, .LBB16_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB16_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp true double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
Index: test/CodeGen/RISCV/double-convert.ll
===================================================================
--- test/CodeGen/RISCV/double-convert.ll
+++ test/CodeGen/RISCV/double-convert.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define float @fcvt_s_d(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_s_d:
@@ -13,6 +15,13 @@
 ; RV32IFD-NEXT:    fmv.x.w a0, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_s_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.s.d ft0, ft0
+; RV64IFD-NEXT:    fmv.x.w a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fptrunc double %a to float
   ret float %1
 }
@@ -28,6 +37,13 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_d_s:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.w.x ft0, a0
+; RV64IFD-NEXT:    fcvt.d.s ft0, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fpext float %a to double
   ret double %1
 }
@@ -42,6 +58,12 @@
 ; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_w_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV64IFD-NEXT:    ret
   %1 = fptosi double %a to i32
   ret i32 %1
 }
@@ -56,6 +78,12 @@
 ; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_wu_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV64IFD-NEXT:    ret
   %1 = fptoui double %a to i32
   ret i32 %1
 }
@@ -70,6 +98,12 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_d_w:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.d.w ft0, a0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = sitofp i32 %a to double
   ret double %1
 }
@@ -84,6 +118,148 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_d_wu:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = uitofp i32 %a to double
   ret double %1
 }
+
+define i64 @fcvt_l_d(double %a) nounwind {
+; RV32IFD-LABEL: fcvt_l_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    call __fixdfdi
+; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_l_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:    ret
+  %1 = fptosi double %a to i64
+  ret i64 %1
+}
+
+define i64 @fcvt_lu_d(double %a) nounwind {
+; RV32IFD-LABEL: fcvt_lu_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    call __fixunsdfdi
+; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_lu_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    ret
+  %1 = fptoui double %a to i64
+  ret i64 %1
+}
+
+define i64 @fmv_x_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fmv_x_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a2, 0(sp)
+; RV32IFD-NEXT:    sw a3, 4(sp)
+; RV32IFD-NEXT:    fld ft0, 0(sp)
+; RV32IFD-NEXT:    sw a0, 0(sp)
+; RV32IFD-NEXT:    sw a1, 4(sp)
+; RV32IFD-NEXT:    fld ft1, 0(sp)
+; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmv_x_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
+  %1 = fadd double %a, %b
+  %2 = bitcast double %1 to i64
+  ret i64 %2
+}
+
+define double @fcvt_d_l(i64 %a) nounwind {
+; RV32IFD-LABEL: fcvt_d_l:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    call __floatdidf
+; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_d_l:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.d.l ft0, a0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
+  %1 = sitofp i64 %a to double
+  ret double %1
+}
+
+define double @fcvt_d_lu(i64 %a) nounwind {
+; RV32IFD-LABEL: fcvt_d_lu:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    call __floatundidf
+; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_d_lu:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.d.lu ft0, a0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
+  %1 = uitofp i64 %a to double
+  ret double %1
+}
+
+define double @fmv_d_x(i64 %a, i64 %b) nounwind {
+; Ensure fmv.d.x is generated even for a soft double calling convention
+; RV32IFD-LABEL: fmv_d_x:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -32
+; RV32IFD-NEXT:    sw a3, 20(sp)
+; RV32IFD-NEXT:    sw a2, 16(sp)
+; RV32IFD-NEXT:    sw a1, 28(sp)
+; RV32IFD-NEXT:    sw a0, 24(sp)
+; RV32IFD-NEXT:    fld ft0, 16(sp)
+; RV32IFD-NEXT:    fld ft1, 24(sp)
+; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmv_d_x:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
+  %1 = bitcast i64 %a to double
+  %2 = bitcast i64 %b to double
+  %3 = fadd double %1, %2
+  ret double %3
+}
Index: test/CodeGen/RISCV/double-fcmp.ll
===================================================================
--- test/CodeGen/RISCV/double-fcmp.ll
+++ test/CodeGen/RISCV/double-fcmp.ll
@@ -1,12 +1,19 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define i32 @fcmp_false(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_false:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    mv a0, zero
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_false:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    mv a0, zero
+; RV64IFD-NEXT:    ret
   %1 = fcmp false double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -25,6 +32,13 @@
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_oeq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -43,6 +57,13 @@
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ogt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ogt double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -61,6 +82,13 @@
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_oge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oge double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -79,6 +107,13 @@
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_olt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp olt double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -97,6 +132,13 @@
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ole:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ole double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -122,6 +164,20 @@
 ; RV32IFD-NEXT:    and a0, a1, a0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_one:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft1
+; RV64IFD-NEXT:    not a1, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    ret
   %1 = fcmp one double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -144,6 +200,17 @@
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ord:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ord double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -167,6 +234,18 @@
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ueq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    feq.d a2, ft1, ft1
+; RV64IFD-NEXT:    and a1, a2, a1
+; RV64IFD-NEXT:    seqz a1, a1
+; RV64IFD-NEXT:    or a0, a0, a1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ueq double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -186,6 +265,14 @@
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ugt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ugt double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -205,6 +292,14 @@
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_uge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp uge double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -224,6 +319,14 @@
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ult:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ult double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -243,6 +346,14 @@
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ule:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ule double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -262,6 +373,14 @@
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_une:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp une double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -283,6 +402,16 @@
 ; RV32IFD-NEXT:    seqz a0, a0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_uno:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    ret
   %1 = fcmp uno double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -293,6 +422,11 @@
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi a0, zero, 1
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_true:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi a0, zero, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp true double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
Index: test/CodeGen/RISCV/double-imm.ll
===================================================================
--- test/CodeGen/RISCV/double-imm.ll
+++ test/CodeGen/RISCV/double-imm.ll
@@ -1,8 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define double @double_imm() nounwind {
+; TODO: Should probably prefer fld or ld on RV64 rather than materialising an
+; expensive constant.
+;
 ; RV32IFD-LABEL: double_imm:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
@@ -14,6 +19,18 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: double_imm:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    lui a0, 512
+; RV64IFD-NEXT:    addiw a0, a0, 1169
+; RV64IFD-NEXT:    slli a0, a0, 15
+; RV64IFD-NEXT:    addi a0, a0, -299
+; RV64IFD-NEXT:    slli a0, a0, 14
+; RV64IFD-NEXT:    addi a0, a0, 1091
+; RV64IFD-NEXT:    slli a0, a0, 12
+; RV64IFD-NEXT:    addi a0, a0, -744
+; RV64IFD-NEXT:    ret
   ret double 3.1415926535897931159979634685441851615905761718750
 }
@@ -33,6 +50,16 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: double_imm_op:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    lui a0, %hi(.LCPI1_0)
+; RV64IFD-NEXT:    addi a0, a0, %lo(.LCPI1_0)
+; RV64IFD-NEXT:    fld ft1, 0(a0)
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fadd double %a, 1.0
   ret double %1
 }
Index: test/CodeGen/RISCV/double-intrinsics.ll
===================================================================
--- test/CodeGen/RISCV/double-intrinsics.ll
+++ test/CodeGen/RISCV/double-intrinsics.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 declare double @llvm.floor.f64(double)
@@ -17,6 +19,15 @@
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: foo:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call floor
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.floor.f64(double %a)
   ret double %1
 }
Index: test/CodeGen/RISCV/double-mem.ll
===================================================================
--- test/CodeGen/RISCV/double-mem.ll
+++ test/CodeGen/RISCV/double-mem.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define double @fld(double *%a) nounwind {
 ; RV32IFD-LABEL: fld:
@@ -14,6 +16,14 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fld:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fld ft0, 24(a0)
+; RV64IFD-NEXT:    fld ft1, 0(a0)
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = load double, double* %a
   %2 = getelementptr double, double* %a, i32 3
   %3 = load double, double* %2
@@ -38,6 +48,15 @@
 ; RV32IFD-NEXT:    fsd ft0, 0(a0)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsd:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a2
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fsd ft0, 64(a0)
+; RV64IFD-NEXT:    fsd ft0, 0(a0)
+; RV64IFD-NEXT:    ret
 ; Use %b and %c in an FP op to ensure floating point registers are used, even
 ; for the soft float ABI
   %1 = fadd double %b, %c
@@ -72,6 +91,20 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fld_fsd_global:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    lui a0, %hi(G)
+; RV64IFD-NEXT:    fld ft1, %lo(G)(a0)
+; RV64IFD-NEXT:    fsd ft0, %lo(G)(a0)
+; RV64IFD-NEXT:    addi a0, a0, %lo(G)
+; RV64IFD-NEXT:    fld ft1, 72(a0)
+; RV64IFD-NEXT:    fsd ft0, 72(a0)
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
 ; Use %a and %b in an FP op to ensure floating point registers are used, even
 ; for the soft float ABI
   %1 = fadd double %a, %b
@@ -100,6 +133,18 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fld_fsd_constant:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    lui a0, 56
+; RV64IFD-NEXT:    addiw a0, a0, -1353
+; RV64IFD-NEXT:    slli a0, a0, 14
+; RV64IFD-NEXT:    fld ft1, -273(a0)
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fsd ft0, -273(a0)
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = inttoptr i32 3735928559 to double*
   %2 = load volatile double, double* %1
   %3 = fadd double %a, %2
@@ -133,6 +178,23 @@
 ; RV32IFD-NEXT:    lw ra, 28(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fld_stack:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -32
+; RV64IFD-NEXT:    sd ra, 24(sp)
+; RV64IFD-NEXT:    sd s1, 16(sp)
+; RV64IFD-NEXT:    mv s1, a0
+; RV64IFD-NEXT:    addi a0, sp, 8
+; RV64IFD-NEXT:    call notdead
+; RV64IFD-NEXT:    fmv.d.x ft0, s1
+; RV64IFD-NEXT:    fld ft1, 8(sp)
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ld s1, 16(sp)
+; RV64IFD-NEXT:    ld ra, 24(sp)
+; RV64IFD-NEXT:    addi sp, sp, 32
+; RV64IFD-NEXT:    ret
   %1 = alloca double, align 8
   %2 = bitcast double* %1 to i8*
   call void @notdead(i8* %2)
@@ -159,6 +221,20 @@
 ; RV32IFD-NEXT:    lw ra, 28(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsd_stack:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fsd ft0, 0(sp)
+; RV64IFD-NEXT:    mv a0, sp
+; RV64IFD-NEXT:    call notdead
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = fadd double %a, %b ; force store from FPR64
   %2 = alloca double, align 8
   store double %1, double* %2
@@ -179,6 +255,13 @@
 ; RV32IFD-NEXT:    fsw ft0, 0(a0)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsd_trunc:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fcvt.s.d ft0, ft0
+; RV64IFD-NEXT:    fsw ft0, 0(a0)
+; RV64IFD-NEXT:    ret
   %1 = fptrunc double %b to float
   store float %1, float* %a, align 4
   ret void
Index: test/CodeGen/RISCV/double-select-fcmp.ll
===================================================================
--- test/CodeGen/RISCV/double-select-fcmp.ll
+++ test/CodeGen/RISCV/double-select-fcmp.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define double @select_fcmp_false(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_false:
@@ -8,6 +10,11 @@
 ; RV32IFD-NEXT:    mv a1, a3
 ; RV32IFD-NEXT:    mv a0, a2
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_false:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    mv a0, a1
+; RV64IFD-NEXT:    ret
   %1 = fcmp false double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -33,6 +40,18 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_oeq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft1
+; RV64IFD-NEXT:    bnez a0, .LBB1_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB1_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -58,6 +77,18 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ogt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB2_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB2_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ogt double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -83,6 +114,18 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_oge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB3_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB3_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oge double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -108,6 +151,18 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_olt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    flt.d a0, ft0, ft1
+; RV64IFD-NEXT:    bnez a0, .LBB4_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB4_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp olt double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -133,6 +188,18 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ole:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fle.d a0, ft0, ft1
+; RV64IFD-NEXT:    bnez a0, .LBB5_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB5_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ole double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -166,6 +233,25 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_one:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft1
+; RV64IFD-NEXT:    not a1, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    bnez a0, .LBB6_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB6_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp one double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -195,6 +281,22 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ord:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB7_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB7_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ord double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -225,6 +327,23 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ueq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft1
+; RV64IFD-NEXT:    or a0, a1, a0
+; RV64IFD-NEXT:    bnez a0, .LBB8_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB8_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ueq double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -251,6 +370,19 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ugt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fle.d a0, ft0, ft1
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB9_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB9_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ugt double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -277,6 +409,19 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_uge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    flt.d a0, ft0, ft1
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB10_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB10_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp uge double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -303,6 +448,19 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ult:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB11_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB11_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ult double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -329,6 +487,19 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ule:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB12_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB12_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ule double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -355,6 +526,19 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_une:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft1
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB13_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB13_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp une double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -384,6 +568,21 @@
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_uno:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    bnez a0, .LBB14_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB14_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp uno double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -393,6 +592,10 @@
 ; RV32IFD-LABEL: select_fcmp_true:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_true:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    ret
   %1 = fcmp true double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -417,6 +620,18 @@
 ; RV32IFD-NEXT:    mv a0, a4
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: i32_select_fcmp_oeq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB16_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    mv a2, a3
+; RV64IFD-NEXT:  .LBB16_2:
+; RV64IFD-NEXT:    mv a0, a2
+; RV64IFD-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = select i1 %1, i32 %c, i32 %d
   ret i32 %2
Index: test/CodeGen/RISCV/double-stack-spill-restore.ll
===================================================================
--- test/CodeGen/RISCV/double-stack-spill-restore.ll
+++ test/CodeGen/RISCV/double-stack-spill-restore.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define double @func(double %d, i32 %n) nounwind {
 ; RV32IFD-LABEL: func:
@@ -30,6 +32,28 @@
 ; RV32IFD-NEXT:    lw ra, 28(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: func:
+; RV64IFD:       # %bb.0: # %entry
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    slli a0, a1, 32
+; RV64IFD-NEXT:    srli a0, a0, 32
+; RV64IFD-NEXT:    beqz a0, .LBB0_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    addi a1, a1, -1
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fsd ft0, 0(sp)
+; RV64IFD-NEXT:    call func
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fld ft1, 0(sp)
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:  .LBB0_2: # %return
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
 entry:
   %cmp = icmp eq i32 %n, 0
   br i1 %cmp, label %return, label %if.else
Index: test/CodeGen/RISCV/rv64d-double-convert.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/rv64d-double-convert.ll
@@ -0,0 +1,33 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ID
+
+define double @uitofp_aext_i32_to_f64(i32 %a) nounwind {
+; RV64ID-LABEL: uitofp_aext_i32_to_f64:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fcvt.d.wu ft0, a0
+; RV64ID-NEXT:    fmv.x.d a0, ft0
+; RV64ID-NEXT:    ret
+  %1 = uitofp i32 %a to double
+  ret double %1
+}
+
+define double @uitofp_sext_i32_to_f64(i32 signext %a) nounwind {
+; RV64ID-LABEL: uitofp_sext_i32_to_f64:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fcvt.d.wu ft0, a0
+; RV64ID-NEXT:    fmv.x.d a0, ft0
+; RV64ID-NEXT:    ret
+  %1 = uitofp i32 %a to double
+  ret double %1
+}
+
+define double @uitofp_zext_i32_to_f64(i32 zeroext %a) nounwind {
+; RV64ID-LABEL: uitofp_zext_i32_to_f64:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fcvt.d.wu ft0, a0
+; RV64ID-NEXT:    fmv.x.d a0, ft0
+; RV64ID-NEXT:    ret
+  %1 = uitofp i32 %a to double
+  ret double %1
+}
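+
+; Note: the three functions above differ only in the extension attribute on
+; the i32 argument; all are expected to select fcvt.d.wu via the
+; (uint_to_fp (zexti32 GPR)) pattern, whichever form the zero-extension takes.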