diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -361,6 +361,36 @@
   ret <vscale x 2 x float> %b
 }
 
+; This shouldn't be folded because we need to preserve exceptions with
+; "fpexcept.strict" exception behaviour, and masking may hide them.
+define <vscale x 2 x float> @vpmerge_constrained_fadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 %vl) strictfp {
+; CHECK-LABEL: vpmerge_constrained_fadd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfadd.vv v9, v9, v10
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x float> @llvm.experimental.constrained.fadd(<vscale x 2 x float> %x, <vscale x 2 x float> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %m, i64 %vl)
+  ret <vscale x 2 x float> %b
+}
+declare <vscale x 2 x float> @llvm.experimental.constrained.fadd(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
+declare <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i64)
+
+; FIXME: This shouldn't be folded because we need to preserve exceptions with
+; "fpexcept.strict" exception behaviour, and masking may hide them.
+define <vscale x 2 x float> @vpmerge_constrained_fadd_vlmax(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m) strictfp {
+; CHECK-LABEL: vpmerge_constrained_fadd_vlmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, mu
+; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x float> @llvm.experimental.constrained.fadd(<vscale x 2 x float> %x, <vscale x 2 x float> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %m, i64 -1)
+  ret <vscale x 2 x float> %b
+}
+
 ; Test conversion by fptosi.
 define <vscale x 2 x i16> @vpmerge_fptosi(<vscale x 2 x i16> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_fptosi: