diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6055,11 +6055,11 @@
   }
 
   if (!VT.isFixedLengthVector())
-    return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
+    return DAG.getNode(RISCVISDOpc, DL, VT, Ops, Op->getFlags());
 
   MVT ContainerVT = getContainerForFixedLengthVector(VT);
 
-  SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
+  SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops, Op->getFlags());
 
   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
 }
diff --git a/llvm/test/CodeGen/RISCV/pass-fast-math-flags-sdnode.ll b/llvm/test/CodeGen/RISCV/pass-fast-math-flags-sdnode.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/pass-fast-math-flags-sdnode.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; REQUIRES: asserts
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v -debug-only=isel -o /dev/null 2>&1 | FileCheck %s
-declare <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
-
-define <vscale x 1 x double> @foo(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl) {
-; CHECK: t14: nxv1f64 = vp_fmul nnan ninf nsz arcp contract afn reassoc t2, t4, t8, t13
-  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
-  ret <vscale x 1 x double> %1
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel | FileCheck %s
+
+declare <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
+
+define <vscale x 1 x double> @foo(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl) {
+  ; CHECK-LABEL: name: foo
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $v8, $v9, $v0, $x10
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[SLLI:%[0-9]+]]:gpr = SLLI [[COPY]], 32
+  ; CHECK-NEXT:   [[SRLI:%[0-9]+]]:gprnox0 = SRLI killed [[SLLI]], 32
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+  ; CHECK-NEXT:   %7:vrnov0 = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFMUL_VV_M1_MASK [[DEF]], [[COPY3]], [[COPY2]], $v0, killed [[SRLI]], 6 /* e64 */, 1, implicit $frm
+  ; CHECK-NEXT:   $v8 = COPY %7
+  ; CHECK-NEXT:   PseudoRET implicit $v8
+  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
+  ret <vscale x 1 x double> %1
+}