diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8558,7 +8558,7 @@
   EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2);
   if (VT.isVector())
     WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
-                              VT.getVectorNumElements());
+                              VT.getVectorElementCount());
 
   SDValue BottomHalf;
   SDValue TopHalf;
diff --git a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
@@ -0,0 +1,424 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+declare { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
+
+define <vscale x 1 x i8> @smulo_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
+; CHECK-LABEL: smulo_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 7
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y)
+  %b = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i8> zeroinitializer, <vscale x 1 x i8> %b
+  ret <vscale x 1 x i8> %d
+}
+
+declare { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+
+define <vscale x 2 x i8> @smulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y) {
+; CHECK-LABEL: smulo_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 7
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
+  %b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8> %b
+  ret <vscale x 2 x i8> %d
+}
+
+declare { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+
+define <vscale x 4 x i8> @smulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y) {
+; CHECK-LABEL: smulo_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 7
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
+  %b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer, <vscale x 4 x i8> %b
+  ret <vscale x 4 x i8> %d
+}
+
+declare { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+
+define <vscale x 8 x i8> @smulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
+; CHECK-LABEL: smulo_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 7
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
+  %b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer, <vscale x 8 x i8> %b
+  ret <vscale x 8 x i8> %d
+}
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+
+define <vscale x 16 x i8> @smulo_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y) {
+; CHECK-LABEL: smulo_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT:    vmulh.vv v26, v8, v10
+; CHECK-NEXT:    vmul.vv v28, v8, v10
+; CHECK-NEXT:    vsra.vi v30, v28, 7
+; CHECK-NEXT:    vmsne.vv v0, v26, v30
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
+  %b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %d
+}
+
+declare { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>)
+
+define <vscale x 32 x i8> @smulo_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y) {
+; CHECK-LABEL: smulo_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT:    vmulh.vv v28, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vsra.vi v12, v8, 7
+; CHECK-NEXT:    vmsne.vv v0, v28, v12
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
+  %b = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 0
+  %c = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 1
+  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i8> zeroinitializer, <vscale x 32 x i8> %b
+  ret <vscale x 32 x i8> %d
+}
+
+declare { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.smul.with.overflow.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
+
+define <vscale x 64 x i8> @smulo_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y) {
+; CHECK-LABEL: smulo_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
+; CHECK-NEXT:    vmulh.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vsra.vi v16, v8, 7
+; CHECK-NEXT:    vmsne.vv v0, v24, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.smul.with.overflow.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y)
+  %b = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 0
+  %c = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 1
+  %d = select <vscale x 64 x i1> %c, <vscale x 64 x i8> zeroinitializer, <vscale x 64 x i8> %b
+  ret <vscale x 64 x i8> %d
+}
+
+declare { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
+
+define <vscale x 1 x i16> @smulo_nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y) {
+; CHECK-LABEL: smulo_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 15
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y)
+  %b = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i16> zeroinitializer, <vscale x 1 x i16> %b
+  ret <vscale x 1 x i16> %d
+}
+
+declare { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+
+define <vscale x 2 x i16> @smulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y) {
+; CHECK-LABEL: smulo_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 15
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
+  %b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i16> zeroinitializer, <vscale x 2 x i16> %b
+  ret <vscale x 2 x i16> %d
+}
+
+declare { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+
+define <vscale x 4 x i16> @smulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
+; CHECK-LABEL: smulo_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 15
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
+  %b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer, <vscale x 4 x i16> %b
+  ret <vscale x 4 x i16> %d
+}
+
+declare { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+
+define <vscale x 8 x i16> @smulo_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y) {
+; CHECK-LABEL: smulo_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vmulh.vv v26, v8, v10
+; CHECK-NEXT:    vmul.vv v28, v8, v10
+; CHECK-NEXT:    vsra.vi v30, v28, 15
+; CHECK-NEXT:    vmsne.vv v0, v26, v30
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
+  %b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %d
+}
+
+declare { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+
+define <vscale x 16 x i16> @smulo_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y) {
+; CHECK-LABEL: smulo_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT:    vmulh.vv v28, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vsra.vi v12, v8, 15
+; CHECK-NEXT:    vmsne.vv v0, v28, v12
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
+  %b = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i16> zeroinitializer, <vscale x 16 x i16> %b
+  ret <vscale x 16 x i16> %d
+}
+
+declare { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
+
+define <vscale x 32 x i16> @smulo_nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y) {
+; CHECK-LABEL: smulo_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
+; CHECK-NEXT:    vmulh.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vsra.vi v16, v8, 15
+; CHECK-NEXT:    vmsne.vv v0, v24, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y)
+  %b = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 0
+  %c = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 1
+  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> %b
+  ret <vscale x 32 x i16> %d
+}
+
+declare { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
+
+define <vscale x 1 x i32> @smulo_nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y) {
+; CHECK-LABEL: smulo_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 31
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y)
+  %b = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i32> zeroinitializer, <vscale x 1 x i32> %b
+  ret <vscale x 1 x i32> %d
+}
+
+declare { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+
+define <vscale x 2 x i32> @smulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
+; CHECK-LABEL: smulo_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 31
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
+  %b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i32> zeroinitializer, <vscale x 2 x i32> %b
+  ret <vscale x 2 x i32> %d
+}
+
+declare { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+
+define <vscale x 4 x i32> @smulo_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y) {
+; CHECK-LABEL: smulo_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vmulh.vv v26, v8, v10
+; CHECK-NEXT:    vmul.vv v28, v8, v10
+; CHECK-NEXT:    vsra.vi v30, v28, 31
+; CHECK-NEXT:    vmsne.vv v0, v26, v30
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
+  %b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %d
+}
+
+declare { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+
+define <vscale x 8 x i32> @smulo_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
+; CHECK-LABEL: smulo_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vmulh.vv v28, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vsra.vi v12, v8, 31
+; CHECK-NEXT:    vmsne.vv v0, v28, v12
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
+  %b = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i32> zeroinitializer, <vscale x 8 x i32> %b
+  ret <vscale x 8 x i32> %d
+}
+
+declare { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+
+define <vscale x 16 x i32> @smulo_nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y) {
+; CHECK-LABEL: smulo_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vmulh.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vsra.vi v16, v8, 31
+; CHECK-NEXT:    vmsne.vv v0, v24, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y)
+  %b = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32> %b
+  ret <vscale x 16 x i32> %d
+}
+
+declare { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
+
+define <vscale x 1 x i64> @smulo_nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y) {
+; CHECK-LABEL: smulo_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    addi a0, zero, 63
+; CHECK-NEXT:    vsra.vx v27, v26, a0
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y)
+  %b = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i64> zeroinitializer, <vscale x 1 x i64> %b
+  ret <vscale x 1 x i64> %d
+}
+
+declare { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+define <vscale x 2 x i64> @smulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
+; CHECK-LABEL: smulo_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vmulh.vv v26, v8, v10
+; CHECK-NEXT:    vmul.vv v28, v8, v10
+; CHECK-NEXT:    addi a0, zero, 63
+; CHECK-NEXT:    vsra.vx v30, v28, a0
+; CHECK-NEXT:    vmsne.vv v0, v26, v30
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
+  %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %d
+}
+
+declare { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+
+define <vscale x 4 x i64> @smulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y) {
+; CHECK-LABEL: smulo_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; CHECK-NEXT:    vmulh.vv v28, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    addi a0, zero, 63
+; CHECK-NEXT:    vsra.vx v12, v8, a0
+; CHECK-NEXT:    vmsne.vv v0, v28, v12
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
+  %b = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> zeroinitializer, <vscale x 4 x i64> %b
+  ret <vscale x 4 x i64> %d
+}
+
+declare { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+define <vscale x 8 x i64> @smulo_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y) {
+; CHECK-LABEL: smulo_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vmulh.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    addi a0, zero, 63
+; CHECK-NEXT:    vsra.vx v16, v8, a0
+; CHECK-NEXT:    vmsne.vv v0, v24, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y)
+  %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i64> zeroinitializer, <vscale x 8 x i64> %b
+  ret <vscale x 8 x i64> %d
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll
@@ -0,0 +1,398 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+declare { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
+
+define <vscale x 1 x i8> @umulo_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
+; CHECK-LABEL: umulo_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y)
+  %b = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i8> zeroinitializer, <vscale x 1 x i8> %b
+  ret <vscale x 1 x i8> %d
+}
+
+declare { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+
+define <vscale x 2 x i8> @umulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y) {
+; CHECK-LABEL: umulo_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
+  %b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8> %b
+  ret <vscale x 2 x i8> %d
+}
+
+declare { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+
+define <vscale x 4 x i8> @umulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y) {
+; CHECK-LABEL: umulo_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
+  %b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer, <vscale x 4 x i8> %b
+  ret <vscale x 4 x i8> %d
+}
+
+declare { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+
+define <vscale x 8 x i8> @umulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
+; CHECK-LABEL: umulo_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
+  %b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer, <vscale x 8 x i8> %b
+  ret <vscale x 8 x i8> %d
+}
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+
+define <vscale x 16 x i8> @umulo_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y) {
+; CHECK-LABEL: umulo_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v26, v8, v10
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    vmul.vv v26, v8, v10
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
+  %b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %d
+}
+
+declare { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>)
+
+define <vscale x 32 x i8> @umulo_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y) {
+; CHECK-LABEL: umulo_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT:    vmulhu.vv v28, v8, v12
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    vmul.vv v28, v8, v12
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
+  %b = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 0
+  %c = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 1
+  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i8> zeroinitializer, <vscale x 32 x i8> %b
+  ret <vscale x 32 x i8> %d
+}
+
+declare { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.umul.with.overflow.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
+
+define <vscale x 64 x i8> @umulo_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y) {
+; CHECK-LABEL: umulo_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
+; CHECK-NEXT:    vmulhu.vv v24, v8, v16
+; CHECK-NEXT:    vmsne.vi v0, v24, 0
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.umul.with.overflow.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y)
+  %b = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 0
+  %c = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 1
+  %d = select <vscale x 64 x i1> %c, <vscale x 64 x i8> zeroinitializer, <vscale x 64 x i8> %b
+  ret <vscale x 64 x i8> %d
+}
+
+declare { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
+
+define <vscale x 1 x i16> @umulo_nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y) {
+; CHECK-LABEL: umulo_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y)
+  %b = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i16> zeroinitializer, <vscale x 1 x i16> %b
+  ret <vscale x 1 x i16> %d
+}
+
+declare { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+
+define <vscale x 2 x i16> @umulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y) {
+; CHECK-LABEL: umulo_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
+  %b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i16> zeroinitializer, <vscale x 2 x i16> %b
+  ret <vscale x 2 x i16> %d
+}
+
+declare { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+
+define <vscale x 4 x i16> @umulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
+; CHECK-LABEL: umulo_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
+  %b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer, <vscale x 4 x i16> %b
+  ret <vscale x 4 x i16> %d
+}
+
+declare { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+
+define <vscale x 8 x i16> @umulo_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y) {
+; CHECK-LABEL: umulo_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v26, v8, v10
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    vmul.vv v26, v8, v10
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
+  %b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %d
+}
+
+declare { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+
+define <vscale x 16 x i16> @umulo_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y) {
+; CHECK-LABEL: umulo_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT:    vmulhu.vv v28, v8, v12
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    vmul.vv v28, v8, v12
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
+  %b = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i16> zeroinitializer, <vscale x 16 x i16> %b
+  ret <vscale x 16 x i16> %d
+}
+
+declare { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
+
+define <vscale x 32 x i16> @umulo_nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y) {
+; CHECK-LABEL: umulo_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
+; CHECK-NEXT:    vmulhu.vv v24, v8, v16
+; CHECK-NEXT:    vmsne.vi v0, v24, 0
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y)
+  %b = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 0
+  %c = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 1
+  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> %b
+  ret <vscale x 32 x i16> %d
+}
+
+declare { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
+
+define <vscale x 1 x i32> @umulo_nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y) {
+; CHECK-LABEL: umulo_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y)
+  %b = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i32> zeroinitializer, <vscale x 1 x i32> %b
+  ret <vscale x 1 x i32> %d
+}
+
+declare { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+
+define <vscale x 2 x i32> @umulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
+; CHECK-LABEL: umulo_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
+  %b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i32> zeroinitializer, <vscale x 2 x i32> %b
+  ret <vscale x 2 x i32> %d
+}
+
+declare { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+
+define <vscale x 4 x i32> @umulo_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y) {
+; CHECK-LABEL: umulo_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v26, v8, v10
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    vmul.vv v26, v8, v10
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
+  %b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %d
+}
+
+declare { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+
+define <vscale x 8 x i32> @umulo_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
+; CHECK-LABEL: umulo_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vmulhu.vv v28, v8, v12
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    vmul.vv v28, v8, v12
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
+  %b = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i32> zeroinitializer, <vscale x 8 x i32> %b
+  ret <vscale x 8 x i32> %d
+}
+
+declare { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+
+define <vscale x 16 x i32> @umulo_nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y) {
+; CHECK-LABEL: umulo_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vmulhu.vv v24, v8, v16
+; CHECK-NEXT:    vmsne.vi v0, v24, 0
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y)
+  %b = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32> %b
+  ret <vscale x 16 x i32> %d
+}
+
+declare { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
+
+define <vscale x 1 x i64> @umulo_nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y) {
+; CHECK-LABEL: umulo_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y)
+  %b = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i64> zeroinitializer, <vscale x 1 x i64> %b
+  ret <vscale x 1 x i64> %d
+}
+
+declare { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+define <vscale x 2 x i64> @umulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
+; CHECK-LABEL: umulo_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v26, v8, v10
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    vmul.vv v26, v8, v10
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
+  %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %d
+}
+
+declare { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+
+define <vscale x 4 x i64> @umulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y) {
+; CHECK-LABEL: umulo_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; CHECK-NEXT:    vmulhu.vv v28, v8, v12
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    vmul.vv v28, v8, v12
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
+  %b = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> zeroinitializer, <vscale x 4 x i64> %b
+  ret <vscale x 4 x i64> %d
+}
+
+declare { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+define <vscale x 8 x i64> @umulo_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y) {
+; CHECK-LABEL: umulo_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vmulhu.vv v24, v8, v16
+; CHECK-NEXT:    vmsne.vi v0, v24, 0
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y)
+  %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i64> zeroinitializer, <vscale x 8 x i64> %b
+  ret <vscale x 8 x i64> %d
+}