diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8558,7 +8558,7 @@
   EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2);
   if (VT.isVector())
     WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
-                              VT.getVectorNumElements());
+                              VT.getVectorElementCount());

  SDValue BottomHalf;
  SDValue TopHalf;
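// Note on the hunk above (a sketch of the reasoning, not part of the patch):
// getVectorNumElements() only describes fixed-width vectors, so building the
// widened multiply type from it would drop the fact that VT is scalable.
// getVectorElementCount() returns an ElementCount that carries the vscale
// flag, keeping the widened type scalable:
//
//   // assuming VT = <vscale x 4 x i32>
//   ElementCount EC = VT.getVectorElementCount(); // 4 elements, scalable
//   WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT, EC);
//   // WideVT is now <vscale x 4 x i64> rather than <4 x i64>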
diff --git a/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll b/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll
new file mode
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll
@@ -0,0 +1,494 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -verify-machineinstrs < %s | FileCheck %s
+
+declare { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+
+define <vscale x 2 x i8> @smulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y) {
+; CHECK-LABEL: smulo_nxv2i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    sxtb z1.d, p0/m, z1.d
+; CHECK-NEXT:    sxtb z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    asr z1.d, z0.d, #63
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    sxtb z3.d, p0/m, z0.d
+; CHECK-NEXT:    cmpne p1.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    cmpne p2.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    orr p0.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
+  %b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8> %b
+  ret <vscale x 2 x i8> %d
+}
+
+declare { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+
+define <vscale x 4 x i8> @smulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y) {
+; CHECK-LABEL: smulo_nxv4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    sxtb z1.s, p0/m, z1.s
+; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    asr z1.s, z0.s, #31
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    sxtb z3.s, p0/m, z0.s
+; CHECK-NEXT:    cmpne p1.s, p0/z, z2.s, z1.s
+; CHECK-NEXT:    cmpne p2.s, p0/z, z3.s, z0.s
+; CHECK-NEXT:    orr p0.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
+  %b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer, <vscale x 4 x i8> %b
+  ret <vscale x 4 x i8> %d
+}
+
+declare { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+
+define <vscale x 8 x i8> @smulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
+; CHECK-LABEL: smulo_nxv8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
+; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.h, p0/m, z2.h, z1.h
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    asr z1.h, z0.h, #15
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    sxtb z3.h, p0/m, z0.h
+; CHECK-NEXT:    cmpne p1.h, p0/z, z2.h, z1.h
+; CHECK-NEXT:    cmpne p2.h, p0/z, z3.h, z0.h
+; CHECK-NEXT:    orr p0.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    mov z0.h, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
+  %b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer, <vscale x 8 x i8> %b
+  ret <vscale x 8 x i8> %d
+}
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+
+define <vscale x 16 x i8> @smulo_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y) {
+; CHECK-LABEL: smulo_nxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.b, p0/m, z2.b, z1.b
+; CHECK-NEXT:    mul z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    asr z1.b, z0.b, #7
+; CHECK-NEXT:    cmpne p0.b, p0/z, z2.b, z1.b
+; CHECK-NEXT:    mov z0.b, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
+  %b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %d
+}
+
+declare { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>)
+
+define <vscale x 32 x i8> @smulo_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y) {
+; CHECK-LABEL: smulo_nxv32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    movprfx z4, z1
+; CHECK-NEXT:    smulh z4.b, p0/m, z4.b, z3.b
+; CHECK-NEXT:    mul z1.b, p0/m, z1.b, z3.b
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    smulh z3.b, p0/m, z3.b, z2.b
+; CHECK-NEXT:    mul z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    asr z2.b, z1.b, #7
+; CHECK-NEXT:    cmpne p1.b, p0/z, z4.b, z2.b
+; CHECK-NEXT:    asr z2.b, z0.b, #7
+; CHECK-NEXT:    cmpne p0.b, p0/z, z3.b, z2.b
+; CHECK-NEXT:    mov z0.b, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z1.b, p1/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
+  %b = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 0
+  %c = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 1
+  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i8> zeroinitializer, <vscale x 32 x i8> %b
+  ret <vscale x 32 x i8> %d
+}
+
+declare { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.smul.with.overflow.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
+
+define <vscale x 64 x i8> @smulo_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y) {
+; CHECK-LABEL: smulo_nxv64i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    movprfx z24, z3
+; CHECK-NEXT:    smulh z24.b, p0/m, z24.b, z7.b
+; CHECK-NEXT:    mul z3.b, p0/m, z3.b, z7.b
+; CHECK-NEXT:    movprfx z7, z2
+; CHECK-NEXT:    smulh z7.b, p0/m, z7.b, z6.b
+; CHECK-NEXT:    mul z2.b, p0/m, z2.b, z6.b
+; CHECK-NEXT:    movprfx z6, z1
+; CHECK-NEXT:    smulh z6.b, p0/m, z6.b, z5.b
+; CHECK-NEXT:    mul z1.b, p0/m, z1.b, z5.b
+; CHECK-NEXT:    movprfx z5, z0
+; CHECK-NEXT:    smulh z5.b, p0/m, z5.b, z4.b
+; CHECK-NEXT:    mul z0.b, p0/m, z0.b, z4.b
+; CHECK-NEXT:    asr z4.b, z3.b, #7
+; CHECK-NEXT:    cmpne p1.b, p0/z, z24.b, z4.b
+; CHECK-NEXT:    asr z4.b, z2.b, #7
+; CHECK-NEXT:    asr z24.b, z1.b, #7
+; CHECK-NEXT:    cmpne p2.b, p0/z, z7.b, z4.b
+; CHECK-NEXT:    asr z4.b, z0.b, #7
+; CHECK-NEXT:    cmpne p3.b, p0/z, z6.b, z24.b
+; CHECK-NEXT:    cmpne p0.b, p0/z, z5.b, z4.b
+; CHECK-NEXT:    mov z0.b, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z1.b, p3/m, #0 // =0x0
+; CHECK-NEXT:    mov z2.b, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z3.b, p1/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.smul.with.overflow.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y)
+  %b = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 0
+  %c = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 1
+  %d = select <vscale x 64 x i1> %c, <vscale x 64 x i8> zeroinitializer, <vscale x 64 x i8> %b
+  ret <vscale x 64 x i8> %d
+}
+
+declare { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+
+define <vscale x 2 x i16> @smulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y) {
+; CHECK-LABEL: smulo_nxv2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    sxth z1.d, p0/m, z1.d
+; CHECK-NEXT:    sxth z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    asr z1.d, z0.d, #63
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    sxth z3.d, p0/m, z0.d
+; CHECK-NEXT:    cmpne p1.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    cmpne p2.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    orr p0.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
+  %b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i16> zeroinitializer, <vscale x 2 x i16> %b
+  ret <vscale x 2 x i16> %d
+}
+
+declare { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+
+define <vscale x 4 x i16> @smulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
+; CHECK-LABEL: smulo_nxv4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
+; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    asr z1.s, z0.s, #31
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    sxth z3.s, p0/m, z0.s
+; CHECK-NEXT:    cmpne p1.s, p0/z, z2.s, z1.s
+; CHECK-NEXT:    cmpne p2.s, p0/z, z3.s, z0.s
+; CHECK-NEXT:    orr p0.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
+  %b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer, <vscale x 4 x i16> %b
+  ret <vscale x 4 x i16> %d
+}
+
+declare { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+
+define <vscale x 8 x i16> @smulo_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y) {
+; CHECK-LABEL: smulo_nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.h, p0/m, z2.h, z1.h
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    asr z1.h, z0.h, #15
+; CHECK-NEXT:    cmpne p0.h, p0/z, z2.h, z1.h
+; CHECK-NEXT:    mov z0.h, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
+  %b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %d
+}
+
+declare { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+
+define <vscale x 16 x i16> @smulo_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y) {
+; CHECK-LABEL: smulo_nxv16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    movprfx z4, z1
+; CHECK-NEXT:    smulh z4.h, p0/m, z4.h, z3.h
+; CHECK-NEXT:    mul z1.h, p0/m, z1.h, z3.h
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    smulh z3.h, p0/m, z3.h, z2.h
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    asr z2.h, z1.h, #15
+; CHECK-NEXT:    cmpne p1.h, p0/z, z4.h, z2.h
+; CHECK-NEXT:    asr z2.h, z0.h, #15
+; CHECK-NEXT:    cmpne p0.h, p0/z, z3.h, z2.h
+; CHECK-NEXT:    mov z0.h, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z1.h, p1/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
+  %b = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i16> zeroinitializer, <vscale x 16 x i16> %b
+  ret <vscale x 16 x i16> %d
+}
+
+declare { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
+
+define <vscale x 32 x i16> @smulo_nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y) {
+; CHECK-LABEL: smulo_nxv32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    movprfx z24, z3
+; CHECK-NEXT:    smulh z24.h, p0/m, z24.h, z7.h
+; CHECK-NEXT:    mul z3.h, p0/m, z3.h, z7.h
+; CHECK-NEXT:    movprfx z7, z2
+; CHECK-NEXT:    smulh z7.h, p0/m, z7.h, z6.h
+; CHECK-NEXT:    mul z2.h, p0/m, z2.h, z6.h
+; CHECK-NEXT:    movprfx z6, z1
+; CHECK-NEXT:    smulh z6.h, p0/m, z6.h, z5.h
+; CHECK-NEXT:    mul z1.h, p0/m, z1.h, z5.h
+; CHECK-NEXT:    movprfx z5, z0
+; CHECK-NEXT:    smulh z5.h, p0/m, z5.h, z4.h
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z4.h
+; CHECK-NEXT:    asr z4.h, z3.h, #15
+; CHECK-NEXT:    cmpne p1.h, p0/z, z24.h, z4.h
+; CHECK-NEXT:    asr z4.h, z2.h, #15
+; CHECK-NEXT:    asr z24.h, z1.h, #15
+; CHECK-NEXT:    cmpne p2.h, p0/z, z7.h, z4.h
+; CHECK-NEXT:    asr z4.h, z0.h, #15
+; CHECK-NEXT:    cmpne p3.h, p0/z, z6.h, z24.h
+; CHECK-NEXT:    cmpne p0.h, p0/z, z5.h, z4.h
+; CHECK-NEXT:    mov z0.h, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z1.h, p3/m, #0 // =0x0
+; CHECK-NEXT:    mov z2.h, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z3.h, p1/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y)
+  %b = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 0
+  %c = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 1
+  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> %b
+  ret <vscale x 32 x i16> %d
+}
+
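+; For element types narrower than the container lane (below, i32 held in
+; 64-bit lanes; i8/i16 above are analogous), the expansion appears to
+; sign-extend both inputs (sxtw/sxth/sxtb), multiply, and then check two
+; things: that smulh matches the sign of the low half, and that
+; re-sign-extending the truncated product (the extra sxtw/cmpne pair)
+; reproduces the full product.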
+declare { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+
+define <vscale x 2 x i32> @smulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
+; CHECK-LABEL: smulo_nxv2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
+; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    asr z1.d, z0.d, #63
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    sxtw z3.d, p0/m, z0.d
+; CHECK-NEXT:    cmpne p1.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    cmpne p2.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    orr p0.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
+  %b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i32> zeroinitializer, <vscale x 2 x i32> %b
+  ret <vscale x 2 x i32> %d
+}
+
+declare { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+
+define <vscale x 4 x i32> @smulo_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y) {
+; CHECK-LABEL: smulo_nxv4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    asr z1.s, z0.s, #31
+; CHECK-NEXT:    cmpne p0.s, p0/z, z2.s, z1.s
+; CHECK-NEXT:    mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
+  %b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %d
+}
+
+declare { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+
+define <vscale x 8 x i32> @smulo_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
+; CHECK-LABEL: smulo_nxv8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    movprfx z4, z1
+; CHECK-NEXT:    smulh z4.s, p0/m, z4.s, z3.s
+; CHECK-NEXT:    mul z1.s, p0/m, z1.s, z3.s
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    smulh z3.s, p0/m, z3.s, z2.s
+; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    asr z2.s, z1.s, #31
+; CHECK-NEXT:    cmpne p1.s, p0/z, z4.s, z2.s
+; CHECK-NEXT:    asr z2.s, z0.s, #31
+; CHECK-NEXT:    cmpne p0.s, p0/z, z3.s, z2.s
+; CHECK-NEXT:    mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z1.s, p1/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
+  %b = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i32> zeroinitializer, <vscale x 8 x i32> %b
+  ret <vscale x 8 x i32> %d
+}
+
+declare { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+
+define <vscale x 16 x i32> @smulo_nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y) {
+; CHECK-LABEL: smulo_nxv16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    movprfx z24, z3
+; CHECK-NEXT:    smulh z24.s, p0/m, z24.s, z7.s
+; CHECK-NEXT:    mul z3.s, p0/m, z3.s, z7.s
+; CHECK-NEXT:    movprfx z7, z2
+; CHECK-NEXT:    smulh z7.s, p0/m, z7.s, z6.s
+; CHECK-NEXT:    mul z2.s, p0/m, z2.s, z6.s
+; CHECK-NEXT:    movprfx z6, z1
+; CHECK-NEXT:    smulh z6.s, p0/m, z6.s, z5.s
+; CHECK-NEXT:    mul z1.s, p0/m, z1.s, z5.s
+; CHECK-NEXT:    movprfx z5, z0
+; CHECK-NEXT:    smulh z5.s, p0/m, z5.s, z4.s
+; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z4.s
+; CHECK-NEXT:    asr z4.s, z3.s, #31
+; CHECK-NEXT:    cmpne p1.s, p0/z, z24.s, z4.s
+; CHECK-NEXT:    asr z4.s, z2.s, #31
+; CHECK-NEXT:    asr z24.s, z1.s, #31
+; CHECK-NEXT:    cmpne p2.s, p0/z, z7.s, z4.s
+; CHECK-NEXT:    asr z4.s, z0.s, #31
+; CHECK-NEXT:    cmpne p3.s, p0/z, z6.s, z24.s
+; CHECK-NEXT:    cmpne p0.s, p0/z, z5.s, z4.s
+; CHECK-NEXT:    mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z1.s, p3/m, #0 // =0x0
+; CHECK-NEXT:    mov z2.s, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z3.s, p1/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y)
+  %b = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32> %b
+  ret <vscale x 16 x i32> %d
+}
+
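+; For element types that already fill the lane (i64 below, and i8/i16/i32
+; at their native lane widths above), no extends are needed: overflow is
+; presumably just smulh(x, y) != asr(mul(x, y), width - 1), i.e. the high
+; half must equal the sign of the low half.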
+declare { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+define <vscale x 2 x i64> @smulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
+; CHECK-LABEL: smulo_nxv2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    asr z1.d, z0.d, #63
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
+  %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %d
+}
+
+declare { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+
+define <vscale x 4 x i64> @smulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y) {
+; CHECK-LABEL: smulo_nxv4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z4, z1
+; CHECK-NEXT:    smulh z4.d, p0/m, z4.d, z3.d
+; CHECK-NEXT:    mul z1.d, p0/m, z1.d, z3.d
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    smulh z3.d, p0/m, z3.d, z2.d
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    asr z2.d, z1.d, #63
+; CHECK-NEXT:    cmpne p1.d, p0/z, z4.d, z2.d
+; CHECK-NEXT:    asr z2.d, z0.d, #63
+; CHECK-NEXT:    cmpne p0.d, p0/z, z3.d, z2.d
+; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z1.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
+  %b = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> zeroinitializer, <vscale x 4 x i64> %b
+  ret <vscale x 4 x i64> %d
+}
+
+declare { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+define <vscale x 8 x i64> @smulo_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y) {
+; CHECK-LABEL: smulo_nxv8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z24, z3
+; CHECK-NEXT:    smulh z24.d, p0/m, z24.d, z7.d
+; CHECK-NEXT:    mul z3.d, p0/m, z3.d, z7.d
+; CHECK-NEXT:    movprfx z7, z2
+; CHECK-NEXT:    smulh z7.d, p0/m, z7.d, z6.d
+; CHECK-NEXT:    mul z2.d, p0/m, z2.d, z6.d
+; CHECK-NEXT:    movprfx z6, z1
+; CHECK-NEXT:    smulh z6.d, p0/m, z6.d, z5.d
+; CHECK-NEXT:    mul z1.d, p0/m, z1.d, z5.d
+; CHECK-NEXT:    movprfx z5, z0
+; CHECK-NEXT:    smulh z5.d, p0/m, z5.d, z4.d
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z4.d
+; CHECK-NEXT:    asr z4.d, z3.d, #63
+; CHECK-NEXT:    cmpne p1.d, p0/z, z24.d, z4.d
+; CHECK-NEXT:    asr z4.d, z2.d, #63
+; CHECK-NEXT:    asr z24.d, z1.d, #63
+; CHECK-NEXT:    cmpne p2.d, p0/z, z7.d, z4.d
+; CHECK-NEXT:    asr z4.d, z0.d, #63
+; CHECK-NEXT:    cmpne p3.d, p0/z, z6.d, z24.d
+; CHECK-NEXT:    cmpne p0.d, p0/z, z5.d, z4.d
+; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
+; CHECK-NEXT:    mov z2.d, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z3.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y)
+  %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i64> zeroinitializer, <vscale x 8 x i64> %b
+  ret <vscale x 8 x i64> %d
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll b/llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll
new file mode
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll
@@ -0,0 +1,482 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -verify-machineinstrs < %s | FileCheck %s
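+; Unsigned overflow is simpler than the signed form: the product overflows
+; iff its high half (umulh) is non-zero. For sub-lane types the inputs are
+; zero-extended with AND and, in addition to umulh, any product bits above
+; the narrow width (exposed by lsr #8/#16/#32) also signal overflow.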
+
+declare { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+
+define <vscale x 2 x i8> @umulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y) {
+; CHECK-LABEL: umulo_nxv2i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    and z1.d, z1.d, #0xff
+; CHECK-NEXT:    and z0.d, z0.d, #0xff
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    umulh z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    lsr z1.d, z0.d, #8
+; CHECK-NEXT:    cmpne p1.d, p0/z, z2.d, #0
+; CHECK-NEXT:    cmpne p2.d, p0/z, z1.d, #0
+; CHECK-NEXT:    orr p0.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
+  %b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8> %b
+  ret <vscale x 2 x i8> %d
+}
+
+declare { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+
+define <vscale x 4 x i8> @umulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y) {
+; CHECK-LABEL: umulo_nxv4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    and z1.s, z1.s, #0xff
+; CHECK-NEXT:    and z0.s, z0.s, #0xff
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    umulh z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    lsr z1.s, z0.s, #8
+; CHECK-NEXT:    cmpne p1.s, p0/z, z2.s, #0
+; CHECK-NEXT:    cmpne p2.s, p0/z, z1.s, #0
+; CHECK-NEXT:    orr p0.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
+  %b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer, <vscale x 4 x i8> %b
+  ret <vscale x 4 x i8> %d
+}
+
+declare { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+
+define <vscale x 8 x i8> @umulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
+; CHECK-LABEL: umulo_nxv8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    and z1.h, z1.h, #0xff
+; CHECK-NEXT:    and z0.h, z0.h, #0xff
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    umulh z2.h, p0/m, z2.h, z1.h
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    lsr z1.h, z0.h, #8
+; CHECK-NEXT:    cmpne p1.h, p0/z, z2.h, #0
+; CHECK-NEXT:    cmpne p2.h, p0/z, z1.h, #0
+; CHECK-NEXT:    orr p0.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    mov z0.h, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
+  %b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer, <vscale x 8 x i8> %b
+  ret <vscale x 8 x i8> %d
+}
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+
+define <vscale x 16 x i8> @umulo_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y) {
+; CHECK-LABEL: umulo_nxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    mul z2.b, p0/m, z2.b, z1.b
+; CHECK-NEXT:    umulh z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #0
+; CHECK-NEXT:    mov z2.b, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
+  %b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %d
+}
+
+declare { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>)
+
+define <vscale x 32 x i8> @umulo_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y) {
+; CHECK-LABEL: umulo_nxv32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    movprfx z4, z1
+; CHECK-NEXT:    mul z4.b, p0/m, z4.b, z3.b
+; CHECK-NEXT:    umulh z3.b, p0/m, z3.b, z1.b
+; CHECK-NEXT:    movprfx z1, z0
+; CHECK-NEXT:    mul z1.b, p0/m, z1.b, z2.b
+; CHECK-NEXT:    umulh z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    cmpne p1.b, p0/z, z3.b, #0
+; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #0
+; CHECK-NEXT:    mov z1.b, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z4.b, p1/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    mov z1.d, z4.d
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
+  %b = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 0
+  %c = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 1
+  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i8> zeroinitializer, <vscale x 32 x i8> %b
+  ret <vscale x 32 x i8> %d
+}
+
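+; A <vscale x 64 x i8> operand does not fit in one Z register (a single
+; register holds <vscale x 16 x i8>), so legalization splits the operation
+; four ways; p1-p3 hold the per-part overflow predicates below.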
+declare { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.umul.with.overflow.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
+
+define <vscale x 64 x i8> @umulo_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y) {
+; CHECK-LABEL: umulo_nxv64i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    movprfx z24, z3
+; CHECK-NEXT:    mul z24.b, p0/m, z24.b, z7.b
+; CHECK-NEXT:    umulh z7.b, p0/m, z7.b, z3.b
+; CHECK-NEXT:    movprfx z3, z2
+; CHECK-NEXT:    mul z3.b, p0/m, z3.b, z6.b
+; CHECK-NEXT:    umulh z6.b, p0/m, z6.b, z2.b
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    mul z2.b, p0/m, z2.b, z5.b
+; CHECK-NEXT:    umulh z5.b, p0/m, z5.b, z1.b
+; CHECK-NEXT:    movprfx z1, z0
+; CHECK-NEXT:    mul z1.b, p0/m, z1.b, z4.b
+; CHECK-NEXT:    umulh z0.b, p0/m, z0.b, z4.b
+; CHECK-NEXT:    cmpne p1.b, p0/z, z7.b, #0
+; CHECK-NEXT:    cmpne p2.b, p0/z, z6.b, #0
+; CHECK-NEXT:    cmpne p3.b, p0/z, z5.b, #0
+; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #0
+; CHECK-NEXT:    mov z1.b, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z2.b, p3/m, #0 // =0x0
+; CHECK-NEXT:    mov z3.b, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z24.b, p1/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    mov z1.d, z2.d
+; CHECK-NEXT:    mov z2.d, z3.d
+; CHECK-NEXT:    mov z3.d, z24.d
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.umul.with.overflow.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y)
+  %b = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 0
+  %c = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 1
+  %d = select <vscale x 64 x i1> %c, <vscale x 64 x i8> zeroinitializer, <vscale x 64 x i8> %b
+  ret <vscale x 64 x i8> %d
+}
+
+declare { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+
+define <vscale x 2 x i16> @umulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y) {
+; CHECK-LABEL: umulo_nxv2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    and z1.d, z1.d, #0xffff
+; CHECK-NEXT:    and z0.d, z0.d, #0xffff
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    umulh z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    lsr z1.d, z0.d, #16
+; CHECK-NEXT:    cmpne p1.d, p0/z, z2.d, #0
+; CHECK-NEXT:    cmpne p2.d, p0/z, z1.d, #0
+; CHECK-NEXT:    orr p0.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
+  %b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i16> zeroinitializer, <vscale x 2 x i16> %b
+  ret <vscale x 2 x i16> %d
+}
+
+declare { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+
+define <vscale x 4 x i16> @umulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
+; CHECK-LABEL: umulo_nxv4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    and z1.s, z1.s, #0xffff
+; CHECK-NEXT:    and z0.s, z0.s, #0xffff
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    umulh z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    lsr z1.s, z0.s, #16
+; CHECK-NEXT:    cmpne p1.s, p0/z, z2.s, #0
+; CHECK-NEXT:    cmpne p2.s, p0/z, z1.s, #0
+; CHECK-NEXT:    orr p0.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
+  %b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer, <vscale x 4 x i16> %b
+  ret <vscale x 4 x i16> %d
+}
+
+declare { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+
+define <vscale x 8 x i16> @umulo_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y) {
+; CHECK-LABEL: umulo_nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    mul z2.h, p0/m, z2.h, z1.h
+; CHECK-NEXT:    umulh z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
+; CHECK-NEXT:    mov z2.h, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
+  %b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %d
+}
+
+declare { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+
+define <vscale x 16 x i16> @umulo_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y) {
+; CHECK-LABEL: umulo_nxv16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    movprfx z4, z1
+; CHECK-NEXT:    mul z4.h, p0/m, z4.h, z3.h
+; CHECK-NEXT:    umulh z3.h, p0/m, z3.h, z1.h
+; CHECK-NEXT:    movprfx z1, z0
+; CHECK-NEXT:    mul z1.h, p0/m, z1.h, z2.h
+; CHECK-NEXT:    umulh z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    cmpne p1.h, p0/z, z3.h, #0
+; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
+; CHECK-NEXT:    mov z1.h, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z4.h, p1/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    mov z1.d, z4.d
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
+  %b = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i16> zeroinitializer, <vscale x 16 x i16> %b
+  ret <vscale x 16 x i16> %d
+}
+
+declare { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
+
+define <vscale x 32 x i16> @umulo_nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y) {
+; CHECK-LABEL: umulo_nxv32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    movprfx z24, z3
+; CHECK-NEXT:    mul z24.h, p0/m, z24.h, z7.h
+; CHECK-NEXT:    umulh z7.h, p0/m, z7.h, z3.h
+; CHECK-NEXT:    movprfx z3, z2
+; CHECK-NEXT:    mul z3.h, p0/m, z3.h, z6.h
+; CHECK-NEXT:    umulh z6.h, p0/m, z6.h, z2.h
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    mul z2.h, p0/m, z2.h, z5.h
+; CHECK-NEXT:    umulh z5.h, p0/m, z5.h, z1.h
+; CHECK-NEXT:    movprfx z1, z0
+; CHECK-NEXT:    mul z1.h, p0/m, z1.h, z4.h
+; CHECK-NEXT:    umulh z0.h, p0/m, z0.h, z4.h
+; CHECK-NEXT:    cmpne p1.h, p0/z, z7.h, #0
+; CHECK-NEXT:    cmpne p2.h, p0/z, z6.h, #0
+; CHECK-NEXT:    cmpne p3.h, p0/z, z5.h, #0
+; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
+; CHECK-NEXT:    mov z1.h, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z2.h, p3/m, #0 // =0x0
+; CHECK-NEXT:    mov z3.h, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z24.h, p1/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    mov z1.d, z2.d
+; CHECK-NEXT:    mov z2.d, z3.d
+; CHECK-NEXT:    mov z3.d, z24.d
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y)
+  %b = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 0
+  %c = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 1
+  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> %b
+  ret <vscale x 32 x i16> %d
+}
+
+declare { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+
+define <vscale x 2 x i32> @umulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
+; CHECK-LABEL: umulo_nxv2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    and z1.d, z1.d, #0xffffffff
+; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    umulh z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    lsr z1.d, z0.d, #32
+; CHECK-NEXT:    cmpne p1.d, p0/z, z2.d, #0
+; CHECK-NEXT:    cmpne p2.d, p0/z, z1.d, #0
+; CHECK-NEXT:    orr p0.b, p0/z, p2.b, p1.b
+; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
+  %b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i32> zeroinitializer, <vscale x 2 x i32> %b
+  ret <vscale x 2 x i32> %d
+}
+
+declare { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+
+define <vscale x 4 x i32> @umulo_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y) {
+; CHECK-LABEL: umulo_nxv4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    mul z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT:    umulh z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
+; CHECK-NEXT:    mov z2.s, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
+  %b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %d
+}
+
+declare { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+
+define <vscale x 8 x i32> @umulo_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
+; CHECK-LABEL: umulo_nxv8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    movprfx z4, z1
+; CHECK-NEXT:    mul z4.s, p0/m, z4.s, z3.s
+; CHECK-NEXT:    umulh z3.s, p0/m, z3.s, z1.s
+; CHECK-NEXT:    movprfx z1, z0
+; CHECK-NEXT:    mul z1.s, p0/m, z1.s, z2.s
+; CHECK-NEXT:    umulh z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    cmpne p1.s, p0/z, z3.s, #0
+; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
+; CHECK-NEXT:    mov z1.s, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z4.s, p1/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    mov z1.d, z4.d
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
+  %b = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i32> zeroinitializer, <vscale x 8 x i32> %b
+  ret <vscale x 8 x i32> %d
+}
+
+declare { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+
+define <vscale x 16 x i32> @umulo_nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y) {
+; CHECK-LABEL: umulo_nxv16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    movprfx z24, z3
+; CHECK-NEXT:    mul z24.s, p0/m, z24.s, z7.s
+; CHECK-NEXT:    umulh z7.s, p0/m, z7.s, z3.s
+; CHECK-NEXT:    movprfx z3, z2
+; CHECK-NEXT:    mul z3.s, p0/m, z3.s, z6.s
+; CHECK-NEXT:    umulh z6.s, p0/m, z6.s, z2.s
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    mul z2.s, p0/m, z2.s, z5.s
+; CHECK-NEXT:    umulh z5.s, p0/m, z5.s, z1.s
+; CHECK-NEXT:    movprfx z1, z0
+; CHECK-NEXT:    mul z1.s, p0/m, z1.s, z4.s
+; CHECK-NEXT:    umulh z0.s, p0/m, z0.s, z4.s
+; CHECK-NEXT:    cmpne p1.s, p0/z, z7.s, #0
+; CHECK-NEXT:    cmpne p2.s, p0/z, z6.s, #0
+; CHECK-NEXT:    cmpne p3.s, p0/z, z5.s, #0
+; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
+; CHECK-NEXT:    mov z1.s, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z2.s, p3/m, #0 // =0x0
+; CHECK-NEXT:    mov z3.s, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z24.s, p1/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    mov z1.d, z2.d
+; CHECK-NEXT:    mov z2.d, z3.d
+; CHECK-NEXT:    mov z3.d, z24.d
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y)
+  %b = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32> %b
+  ret <vscale x 16 x i32> %d
+}
+
+declare { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+define <vscale x 2 x i64> @umulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
+; CHECK-LABEL: umulo_nxv2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    mul z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT:    umulh z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    mov z2.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
+  %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %d
+}
+
+declare { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+
+define <vscale x 4 x i64> @umulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y) {
+; CHECK-LABEL: umulo_nxv4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z4, z1
+; CHECK-NEXT:    mul z4.d, p0/m, z4.d, z3.d
+; CHECK-NEXT:    umulh z3.d, p0/m, z3.d, z1.d
+; CHECK-NEXT:    movprfx z1, z0
+; CHECK-NEXT:    mul z1.d, p0/m, z1.d, z2.d
+; CHECK-NEXT:    umulh z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    cmpne p1.d, p0/z, z3.d, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    mov z1.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z4.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    mov z1.d, z4.d
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
+  %b = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> zeroinitializer, <vscale x 4 x i64> %b
+  ret <vscale x 4 x i64> %d
+}
+
+declare { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+define <vscale x 8 x i64> @umulo_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y) {
+; CHECK-LABEL: umulo_nxv8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z24, z3
+; CHECK-NEXT:    mul z24.d, p0/m, z24.d, z7.d
+; CHECK-NEXT:    umulh z7.d, p0/m, z7.d, z3.d
+; CHECK-NEXT:    movprfx z3, z2
+; CHECK-NEXT:    mul z3.d, p0/m, z3.d, z6.d
+; CHECK-NEXT:    umulh z6.d, p0/m, z6.d, z2.d
+; CHECK-NEXT:    movprfx z2, z1
+; CHECK-NEXT:    mul z2.d, p0/m, z2.d, z5.d
+; CHECK-NEXT:    umulh z5.d, p0/m, z5.d, z1.d
+; CHECK-NEXT:    movprfx z1, z0
+; CHECK-NEXT:    mul z1.d, p0/m, z1.d, z4.d
+; CHECK-NEXT:    umulh z0.d, p0/m, z0.d, z4.d
+; CHECK-NEXT:    cmpne p1.d, p0/z, z7.d, #0
+; CHECK-NEXT:    cmpne p2.d, p0/z, z6.d, #0
+; CHECK-NEXT:    cmpne p3.d, p0/z, z5.d, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    mov z1.d, p0/m, #0 // =0x0
+; CHECK-NEXT:    mov z2.d, p3/m, #0 // =0x0
+; CHECK-NEXT:    mov z3.d, p2/m, #0 // =0x0
+; CHECK-NEXT:    mov z24.d, p1/m, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    mov z1.d, z2.d
+; CHECK-NEXT:    mov z2.d, z3.d
+; CHECK-NEXT:    mov z3.d, z24.d
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y)
+  %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i64> zeroinitializer, <vscale x 8 x i64> %b
+  ret <vscale x 8 x i64> %d
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
new file mode
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
@@ -0,0 +1,424 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
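+; The RVV lowering mirrors the SVE one: vmulh/vmul produce the high and low
+; halves, vsra broadcasts the sign of the low half, vmsne compares, and
+; vmerge zeroes the result under the overflow mask. vsetvli picks SEW/LMUL
+; per type (mf8 through m8). For i64 the shift amount 63 exceeds the 5-bit
+; vsra.vi immediate, hence the addi/vsra.vx pair in the i64 cases below.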
+
+declare { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
+
+define <vscale x 1 x i8> @smulo_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
+; CHECK-LABEL: smulo_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 7
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y)
+  %b = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i8> zeroinitializer, <vscale x 1 x i8> %b
+  ret <vscale x 1 x i8> %d
+}
+
+declare { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+
+define <vscale x 2 x i8> @smulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y) {
+; CHECK-LABEL: smulo_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 7
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
+  %b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8> %b
+  ret <vscale x 2 x i8> %d
+}
+
+declare { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+
+define <vscale x 4 x i8> @smulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y) {
+; CHECK-LABEL: smulo_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 7
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
+  %b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer, <vscale x 4 x i8> %b
+  ret <vscale x 4 x i8> %d
+}
+
+declare { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+
+define <vscale x 8 x i8> @smulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
+; CHECK-LABEL: smulo_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 7
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
+  %b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer, <vscale x 8 x i8> %b
+  ret <vscale x 8 x i8> %d
+}
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+
+define <vscale x 16 x i8> @smulo_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y) {
+; CHECK-LABEL: smulo_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT:    vmulh.vv v26, v8, v10
+; CHECK-NEXT:    vmul.vv v28, v8, v10
+; CHECK-NEXT:    vsra.vi v30, v28, 7
+; CHECK-NEXT:    vmsne.vv v0, v26, v30
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
+  %b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %d
+}
+
+declare { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>)
+
+define <vscale x 32 x i8> @smulo_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y) {
+; CHECK-LABEL: smulo_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT:    vmulh.vv v28, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vsra.vi v12, v8, 7
+; CHECK-NEXT:    vmsne.vv v0, v28, v12
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
+  %b = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 0
+  %c = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 1
+  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i8> zeroinitializer, <vscale x 32 x i8> %b
+  ret <vscale x 32 x i8> %d
+}
+
+declare { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.smul.with.overflow.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
+
+define <vscale x 64 x i8> @smulo_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y) {
+; CHECK-LABEL: smulo_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
+; CHECK-NEXT:    vmulh.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vsra.vi v16, v8, 7
+; CHECK-NEXT:    vmsne.vv v0, v24, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.smul.with.overflow.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y)
+  %b = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 0
+  %c = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 1
+  %d = select <vscale x 64 x i1> %c, <vscale x 64 x i8> zeroinitializer, <vscale x 64 x i8> %b
+  ret <vscale x 64 x i8> %d
+}
+
+declare { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
+
+define <vscale x 1 x i16> @smulo_nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y) {
+; CHECK-LABEL: smulo_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 15
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y)
+  %b = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i16> zeroinitializer, <vscale x 1 x i16> %b
+  ret <vscale x 1 x i16> %d
+}
+
+declare { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+
+define <vscale x 2 x i16> @smulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y) {
+; CHECK-LABEL: smulo_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 15
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
+  %b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i16> zeroinitializer, <vscale x 2 x i16> %b
+  ret <vscale x 2 x i16> %d
+}
+
+declare { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+
+define <vscale x 4 x i16> @smulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
+; CHECK-LABEL: smulo_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 15
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
+  %b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer, <vscale x 4 x i16> %b
+  ret <vscale x 4 x i16> %d
+}
+
+declare { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+
+define <vscale x 8 x i16> @smulo_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y) {
+; CHECK-LABEL: smulo_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vmulh.vv v26, v8, v10
+; CHECK-NEXT:    vmul.vv v28, v8, v10
+; CHECK-NEXT:    vsra.vi v30, v28, 15
+; CHECK-NEXT:    vmsne.vv v0, v26, v30
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
+  %b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %d
+}
+
+declare { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+
+define <vscale x 16 x i16> @smulo_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y) {
+; CHECK-LABEL: smulo_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT:    vmulh.vv v28, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vsra.vi v12, v8, 15
+; CHECK-NEXT:    vmsne.vv v0, v28, v12
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
+  %b = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i16> zeroinitializer, <vscale x 16 x i16> %b
+  ret <vscale x 16 x i16> %d
+}
+
+declare { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
+
+define <vscale x 32 x i16> @smulo_nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y) {
+; CHECK-LABEL: smulo_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
+; CHECK-NEXT:    vmulh.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vsra.vi v16, v8, 15
+; CHECK-NEXT:    vmsne.vv v0, v24, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y)
+  %b = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 0
+  %c = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 1
+  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> %b
+  ret <vscale x 32 x i16> %d
+}
+
+declare { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
+
+define <vscale x 1 x i32> @smulo_nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y) {
+; CHECK-LABEL: smulo_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 31
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y)
+  %b = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i32> zeroinitializer, <vscale x 1 x i32> %b
+  ret <vscale x 1 x i32> %d
+}
+
+declare { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+
+define <vscale x 2 x i32> @smulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
+; CHECK-LABEL: smulo_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    vsra.vi v27, v26, 31
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
+  %b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i32> zeroinitializer, <vscale x 2 x i32> %b
+  ret <vscale x 2 x i32> %d
+}
+
+declare { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+
+define <vscale x 4 x i32> @smulo_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y) {
+; CHECK-LABEL: smulo_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vmulh.vv v26, v8, v10
+; CHECK-NEXT:    vmul.vv v28, v8, v10
+; CHECK-NEXT:    vsra.vi v30, v28, 31
+; CHECK-NEXT:    vmsne.vv v0, v26, v30
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
+  %b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %d
+}
+
+declare { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+
+define <vscale x 8 x i32> @smulo_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
+; CHECK-LABEL: smulo_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vmulh.vv v28, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vsra.vi v12, v8, 31
+; CHECK-NEXT:    vmsne.vv v0, v28, v12
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
+  %b = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i32> zeroinitializer, <vscale x 8 x i32> %b
+  ret <vscale x 8 x i32> %d
+}
+
+declare { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+
+define <vscale x 16 x i32> @smulo_nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y) {
+; CHECK-LABEL: smulo_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vmulh.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vsra.vi v16, v8, 31
+; CHECK-NEXT:    vmsne.vv v0, v24, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y)
+  %b = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32> %b
+  ret <vscale x 16 x i32> %d
+}
+
+declare { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
+
+define <vscale x 1 x i64> @smulo_nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y) {
+; CHECK-LABEL: smulo_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vmulh.vv v25, v8, v9
+; CHECK-NEXT:    vmul.vv v26, v8, v9
+; CHECK-NEXT:    addi a0, zero, 63
+; CHECK-NEXT:    vsra.vx v27, v26, a0
+; CHECK-NEXT:    vmsne.vv v0, v25, v27
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y)
+  %b = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i64> zeroinitializer, <vscale x 1 x i64> %b
+  ret <vscale x 1 x i64> %d
+}
+
+declare { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+define <vscale x 2 x i64> @smulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
+; CHECK-LABEL: smulo_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vmulh.vv v26, v8, v10
+; CHECK-NEXT:    vmul.vv v28, v8, v10
+; CHECK-NEXT:    addi a0, zero, 63
+; CHECK-NEXT:    vsra.vx v30, v28, a0
+; CHECK-NEXT:    vmsne.vv v0, v26, v30
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
+  %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %d
+}
+
+declare { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+
+define <vscale x 4 x i64> @smulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y) {
+; CHECK-LABEL: smulo_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; CHECK-NEXT:    vmulh.vv v28, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    addi a0, zero, 63
+; CHECK-NEXT:    vsra.vx v12, v8, a0
+; CHECK-NEXT:    vmsne.vv v0, v28, v12
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
+  %b = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> zeroinitializer, <vscale x 4 x i64> %b
+  ret <vscale x 4 x i64> %d
+}
+
+declare { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+define <vscale x 8 x i64> @smulo_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y) {
+; CHECK-LABEL: smulo_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vmulh.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    addi a0, zero, 63
+; CHECK-NEXT:    vsra.vx v16, v8, a0
+; CHECK-NEXT:    vmsne.vv v0, v24, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y)
+  %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i64> zeroinitializer, <vscale x 8 x i64> %b
+  ret <vscale x 8 x i64> %d
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll
new file mode
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll
@@ -0,0 +1,398 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
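+; As on the SVE side, unsigned overflow needs no sign handling: the
+; overflow condition is simply vmulhu(x, y) != 0 (vmsne.vi against the
+; immediate 0), with vmerge.vim zeroing the result lanes where it holds.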
+
+declare { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
+
+define <vscale x 1 x i8> @umulo_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
+; CHECK-LABEL: umulo_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y)
+  %b = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i8> zeroinitializer, <vscale x 1 x i8> %b
+  ret <vscale x 1 x i8> %d
+}
+
+declare { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
+
+define <vscale x 2 x i8> @umulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y) {
+; CHECK-LABEL: umulo_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
+  %b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i8> zeroinitializer, <vscale x 2 x i8> %b
+  ret <vscale x 2 x i8> %d
+}
+
+declare { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
+
+define <vscale x 4 x i8> @umulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y) {
+; CHECK-LABEL: umulo_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
+  %b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer, <vscale x 4 x i8> %b
+  ret <vscale x 4 x i8> %d
+}
+
+declare { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
+
+define <vscale x 8 x i8> @umulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
+; CHECK-LABEL: umulo_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
+  %b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer, <vscale x 8 x i8> %b
+  ret <vscale x 8 x i8> %d
+}
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+
+define <vscale x 16 x i8> @umulo_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y) {
+; CHECK-LABEL: umulo_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v26, v8, v10
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    vmul.vv v26, v8, v10
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
+  %b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %d
+}
+
+declare { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>)
+
+define <vscale x 32 x i8> @umulo_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y) {
+; CHECK-LABEL: umulo_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT:    vmulhu.vv v28, v8, v12
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    vmul.vv v28, v8, v12
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
+  %b = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 0
+  %c = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 1
+  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i8> zeroinitializer, <vscale x 32 x i8> %b
+  ret <vscale x 32 x i8> %d
+}
+
+declare { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.umul.with.overflow.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
+
+define <vscale x 64 x i8> @umulo_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y) {
+; CHECK-LABEL: umulo_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
+; CHECK-NEXT:    vmulhu.vv v24, v8, v16
+; CHECK-NEXT:    vmsne.vi v0, v24, 0
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.umul.with.overflow.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y)
+  %b = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 0
+  %c = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 1
+  %d = select <vscale x 64 x i1> %c, <vscale x 64 x i8> zeroinitializer, <vscale x 64 x i8> %b
+  ret <vscale x 64 x i8> %d
+}
+
+declare { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
+
+define <vscale x 1 x i16> @umulo_nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y) {
+; CHECK-LABEL: umulo_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y)
+  %b = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i16> zeroinitializer, <vscale x 1 x i16> %b
+  ret <vscale x 1 x i16> %d
+}
+
+declare { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
+
+define <vscale x 2 x i16> @umulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y) {
+; CHECK-LABEL: umulo_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
+  %b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i16> zeroinitializer, <vscale x 2 x i16> %b
+  ret <vscale x 2 x i16> %d
+}
+
+declare { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
+
+define <vscale x 4 x i16> @umulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
+; CHECK-LABEL: umulo_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
+  %b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer, <vscale x 4 x i16> %b
+  ret <vscale x 4 x i16> %d
+}
+
+declare { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+
+define <vscale x 8 x i16> @umulo_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y) {
+; CHECK-LABEL: umulo_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v26, v8, v10
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    vmul.vv v26, v8, v10
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
+  %b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %d
+}
+
+declare { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
+
+define <vscale x 16 x i16> @umulo_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y) {
+; CHECK-LABEL: umulo_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT:    vmulhu.vv v28, v8, v12
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    vmul.vv v28, v8, v12
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
+  %b = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i16> zeroinitializer, <vscale x 16 x i16> %b
+  ret <vscale x 16 x i16> %d
+}
+
+declare { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
+
+define <vscale x 32 x i16> @umulo_nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y) {
+; CHECK-LABEL: umulo_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
+; CHECK-NEXT:    vmulhu.vv v24, v8, v16
+; CHECK-NEXT:    vmsne.vi v0, v24, 0
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y)
+  %b = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 0
+  %c = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 1
+  %d = select <vscale x 32 x i1> %c, <vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> %b
+  ret <vscale x 32 x i16> %d
+}
+
+declare { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
+
+define <vscale x 1 x i32> @umulo_nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y) {
+; CHECK-LABEL: umulo_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y)
+  %b = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i32> zeroinitializer, <vscale x 1 x i32> %b
+  ret <vscale x 1 x i32> %d
+}
+
+declare { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+
+define <vscale x 2 x i32> @umulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
+; CHECK-LABEL: umulo_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
+  %b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i32> zeroinitializer, <vscale x 2 x i32> %b
+  ret <vscale x 2 x i32> %d
+}
+
+declare { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+
+define <vscale x 4 x i32> @umulo_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y) {
+; CHECK-LABEL: umulo_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v26, v8, v10
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    vmul.vv v26, v8, v10
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
+  %b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %d
+}
+
+declare { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
+
+define <vscale x 8 x i32> @umulo_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
+; CHECK-LABEL: umulo_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vmulhu.vv v28, v8, v12
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    vmul.vv v28, v8, v12
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
+  %b = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i32> zeroinitializer, <vscale x 8 x i32> %b
+  ret <vscale x 8 x i32> %d
+}
+
+declare { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+
+define <vscale x 16 x i32> @umulo_nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y) {
+; CHECK-LABEL: umulo_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vmulhu.vv v24, v8, v16
+; CHECK-NEXT:    vmsne.vi v0, v24, 0
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y)
+  %b = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 0
+  %c = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 1
+  %d = select <vscale x 16 x i1> %c, <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32> %b
+  ret <vscale x 16 x i32> %d
+}
+
+declare { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
+
+define <vscale x 1 x i64> @umulo_nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y) {
+; CHECK-LABEL: umulo_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vmulhu.vv v25, v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmul.vv v25, v8, v9
+; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y)
+  %b = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 0
+  %c = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 1
+  %d = select <vscale x 1 x i1> %c, <vscale x 1 x i64> zeroinitializer, <vscale x 1 x i64> %b
+  ret <vscale x 1 x i64> %d
+}
+
+declare { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+define <vscale x 2 x i64> @umulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
+; CHECK-LABEL: umulo_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vmulhu.vv v26, v8, v10
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    vmul.vv v26, v8, v10
+; CHECK-NEXT:    vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
+  %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
+  %c = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 1
+  %d = select <vscale x 2 x i1> %c, <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %d
+}
+
+declare { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
+
+define <vscale x 4 x i64> @umulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y) {
+; CHECK-LABEL: umulo_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; CHECK-NEXT:    vmulhu.vv v28, v8, v12
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    vmul.vv v28, v8, v12
+; CHECK-NEXT:    vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
+  %b = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 0
+  %c = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 1
+  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> zeroinitializer, <vscale x 4 x i64> %b
+  ret <vscale x 4 x i64> %d
+}
+
+declare { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
+define <vscale x 8 x i64> @umulo_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y) {
+; CHECK-LABEL: umulo_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vmulhu.vv v24, v8, v16
+; CHECK-NEXT:    vmsne.vi v0, v24, 0
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT:    ret
+  %a = call { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y)
+  %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 0
+  %c = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 1
+  %d = select <vscale x 8 x i1> %c, <vscale x 8 x i64> zeroinitializer, <vscale x 8 x i64> %b
+  ret <vscale x 8 x i64> %d
+}