Index: llvm/lib/Target/PowerPC/PPCISelLowering.cpp
===================================================================
--- llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -8704,10 +8704,15 @@
   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
     return SDValue();
 
-  if (Src.getValueType() == MVT::i1)
-    return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
-                       DAG.getConstantFP(1.0, dl, Op.getValueType()),
-                       DAG.getConstantFP(0.0, dl, Op.getValueType()));
+  if (Src.getValueType() == MVT::i1) {
+    SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
+                              DAG.getConstantFP(1.0, dl, Op.getValueType()),
+                              DAG.getConstantFP(0.0, dl, Op.getValueType()));
+    if (IsStrict)
+      return DAG.getMergeValues({Sel, Chain}, dl);
+    else
+      return Sel;
+  }
 
   // If we have direct moves, we can do all the conversion, skip the store/load
   // however, without FPCVT we can't do most conversions.
Index: llvm/test/CodeGen/PowerPC/i1-to-fp-chain.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/PowerPC/i1-to-fp-chain.ll
@@ -0,0 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=ppc32 < %s | FileCheck %s
+
+@foo = dso_local global double 0.000000e+00, align 8
+
+; Verify the cases won't crash because of missing chains
+
+define double @u1tofp(i1 %i, double %d) #0 {
+; CHECK-LABEL: u1tofp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li 4, .LCPI0_0@l
+; CHECK-NEXT:    andi. 3, 3, 1
+; CHECK-NEXT:    addis 3, 4, .LCPI0_0@ha
+; CHECK-NEXT:    li 4, .LCPI0_1@l
+; CHECK-NEXT:    addis 4, 4, .LCPI0_1@ha
+; CHECK-NEXT:    bc 12, 1, .LBB0_1
+; CHECK-NEXT:    b .LBB0_2
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    addi 3, 4, 0
+; CHECK-NEXT:  .LBB0_2: # %entry
+; CHECK-NEXT:    fmr 0, 1
+; CHECK-NEXT:    lfs 1, 0(3)
+; CHECK-NEXT:    lis 3, foo@ha
+; CHECK-NEXT:    stfd 0, foo@l(3)
+; CHECK-NEXT:    blr
+entry:
+  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i1(i1 %i, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  store volatile double %d, double* @foo, align 8
+  ret double %conv
+}
+
+define double @s1tofp(i1 %i, double %d) #0 {
+; CHECK-LABEL: s1tofp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li 4, .LCPI1_0@l
+; CHECK-NEXT:    andi. 3, 3, 1
+; CHECK-NEXT:    addis 3, 4, .LCPI1_0@ha
+; CHECK-NEXT:    li 4, .LCPI1_1@l
+; CHECK-NEXT:    addis 4, 4, .LCPI1_1@ha
+; CHECK-NEXT:    bc 12, 1, .LBB1_1
+; CHECK-NEXT:    b .LBB1_2
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    addi 3, 4, 0
+; CHECK-NEXT:  .LBB1_2: # %entry
+; CHECK-NEXT:    fmr 0, 1
+; CHECK-NEXT:    lfs 1, 0(3)
+; CHECK-NEXT:    lis 3, foo@ha
+; CHECK-NEXT:    stfd 0, foo@l(3)
+; CHECK-NEXT:    blr
+entry:
+  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i1(i1 %i, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  store volatile double %d, double* @foo, align 8
+  ret double %conv
+}
+
+declare double @llvm.experimental.constrained.uitofp.f64.i1(i1, metadata, metadata)
+declare double @llvm.experimental.constrained.sitofp.f64.i1(i1, metadata, metadata)
+
+attributes #0 = { strictfp }
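
Note on the first hunk: IsStrict and Chain are used but not defined within the
changed lines; they come from earlier in PPCTargetLowering::LowerINT_TO_FP,
which this diff does not show. A minimal sketch of the setup the hunk relies
on, assuming the usual SelectionDAG conventions for strict FP nodes:

  // A strict FP node (e.g. ISD::STRICT_UINT_TO_FP) carries its chain as
  // operand 0 and produces an updated chain as a second result, so the
  // integer source sits one operand later than in the non-strict form.
  bool IsStrict = Op->isStrictFPOpcode();
  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
  SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();

With that setup, DAG.getMergeValues({Sel, Chain}, dl) returns replacements for
both results of the strict node. The old code returned only the SELECT value,
leaving the node's chain result with no replacement, which is the crash the
new test guards against.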