diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -345,10 +345,9 @@
 ///// Shuffles {
 
 // llvm.vp.select(mask,on_true,on_false,vlen)
-BEGIN_REGISTER_VP_INTRINSIC(vp_select, 0, 3)
-// BEGIN_REGISTER_VP_SDNODE(VP_SELECT, -1, vp_select, 0, 4)
-// END_REGISTER_CASES(vp_select, VP_SELECT)
-END_REGISTER_VP_INTRINSIC(vp_select)
+BEGIN_REGISTER_VP(vp_select, 0, 3, VP_SELECT, -1)
+VP_PROPERTY_FUNCTIONAL_OPC(Select)
+END_REGISTER_VP(vp_select, VP_SELECT)
 
 BEGIN_REGISTER_VP(experimental_vp_splice, 3, 5, EXPERIMENTAL_VP_SPLICE, -1)
 END_REGISTER_VP(experimental_vp_splice, EXPERIMENTAL_VP_SPLICE)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -471,12 +471,13 @@
         ISD::VP_XOR,        ISD::VP_ASHR,       ISD::VP_LSHR,
         ISD::VP_SHL,        ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
         ISD::VP_REDUCE_OR,  ISD::VP_REDUCE_XOR, ISD::VP_REDUCE_SMAX,
-        ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN};
+        ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
+        ISD::VP_SELECT};
 
     static const unsigned FloatingPointVPOps[] = {
         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,        ISD::VP_FDIV,
         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
-        ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX};
+        ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_SELECT};
 
     if (!Subtarget.is64Bit()) {
       // We must custom-lower certain vXi64 operations on RV32 due to the vector
@@ -3133,6 +3134,8 @@
     return lowerGET_ROUNDING(Op, DAG);
   case ISD::SET_ROUNDING:
     return lowerSET_ROUNDING(Op, DAG);
+  case ISD::VP_SELECT:
+    return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
   case ISD::VP_ADD:
     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
   case ISD::VP_SUB:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -0,0 +1,341 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+declare <2 x i8> @llvm.vp.select.v2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32)
+
+define <2 x i8> @select_v2i8(<2 x i1> %a, <2 x i8> %b, <2 x i8> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <2 x i8> @llvm.vp.select.v2i8(<2 x i1> %a, <2 x i8> %b, <2 x i8> %c, i32 %evl)
+  ret <2 x i8> %v
+}
+
+declare <4 x i8> @llvm.vp.select.v4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32)
+
+define <4 x i8> @select_v4i8(<4 x i1> %a, <4 x i8> %b, <4 x i8> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <4 x i8> @llvm.vp.select.v4i8(<4 x i1> %a, <4 x i8> %b, <4 x i8> %c, i32 %evl)
+  ret <4 x i8> %v
+}
+
+declare <8 x i8> @llvm.vp.select.v8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32)
+
+define <8 x i8> @select_v8i8(<8 x i1> %a, <8 x i8> %b, <8 x i8> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <8 x i8> @llvm.vp.select.v8i8(<8 x i1> %a, <8 x i8> %b, <8 x i8> %c, i32 %evl)
+  ret <8 x i8> %v
+}
+
+declare <16 x i8> @llvm.vp.select.v16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @select_v16i8(<16 x i1> %a, <16 x i8> %b, <16 x i8> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <16 x i8> @llvm.vp.select.v16i8(<16 x i1> %a, <16 x i8> %b, <16 x i8> %c, i32 %evl)
+  ret <16 x i8> %v
+}
+
+declare <2 x i16> @llvm.vp.select.v2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32)
+
+define <2 x i16> @select_v2i16(<2 x i1> %a, <2 x i16> %b, <2 x i16> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <2 x i16> @llvm.vp.select.v2i16(<2 x i1> %a, <2 x i16> %b, <2 x i16> %c, i32 %evl)
+  ret <2 x i16> %v
+}
+
+declare <4 x i16> @llvm.vp.select.v4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32)
+
+define <4 x i16> @select_v4i16(<4 x i1> %a, <4 x i16> %b, <4 x i16> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <4 x i16> @llvm.vp.select.v4i16(<4 x i1> %a, <4 x i16> %b, <4 x i16> %c, i32 %evl)
+  ret <4 x i16> %v
+}
+
+declare <8 x i16> @llvm.vp.select.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @select_v8i16(<8 x i1> %a, <8 x i16> %b, <8 x i16> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <8 x i16> @llvm.vp.select.v8i16(<8 x i1> %a, <8 x i16> %b, <8 x i16> %c, i32 %evl)
+  ret <8 x i16> %v
+}
+
+declare <16 x i16> @llvm.vp.select.v16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @select_v16i16(<16 x i1> %a, <16 x i16> %b, <16 x i16> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <16 x i16> @llvm.vp.select.v16i16(<16 x i1> %a, <16 x i16> %b, <16 x i16> %c, i32 %evl)
+  ret <16 x i16> %v
+}
+
+declare <2 x i32> @llvm.vp.select.v2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32)
+
+define <2 x i32> @select_v2i32(<2 x i1> %a, <2 x i32> %b, <2 x i32> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <2 x i32> @llvm.vp.select.v2i32(<2 x i1> %a, <2 x i32> %b, <2 x i32> %c, i32 %evl)
+  ret <2 x i32> %v
+}
+
+declare <4 x i32> @llvm.vp.select.v4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @select_v4i32(<4 x i1> %a, <4 x i32> %b, <4 x i32> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <4 x i32> @llvm.vp.select.v4i32(<4 x i1> %a, <4 x i32> %b, <4 x i32> %c, i32 %evl)
+  ret <4 x i32> %v
+}
+
+declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @select_v8i32(<8 x i1> %a, <8 x i32> %b, <8 x i32> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %a, <8 x i32> %b, <8 x i32> %c, i32 %evl)
+  ret <8 x i32> %v
+}
+
+declare <16 x i32> @llvm.vp.select.v16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32)
+
+define <16 x i32> @select_v16i32(<16 x i1> %a, <16 x i32> %b, <16 x i32> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <16 x i32> @llvm.vp.select.v16i32(<16 x i1> %a, <16 x i32> %b, <16 x i32> %c, i32 %evl)
+  ret <16 x i32> %v
+}
+
+declare <2 x i64> @llvm.vp.select.v2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @select_v2i64(<2 x i1> %a, <2 x i64> %b, <2 x i64> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <2 x i64> @llvm.vp.select.v2i64(<2 x i1> %a, <2 x i64> %b, <2 x i64> %c, i32 %evl)
+  ret <2 x i64> %v
+}
+
+declare <4 x i64> @llvm.vp.select.v4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @select_v4i64(<4 x i1> %a, <4 x i64> %b, <4 x i64> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <4 x i64> @llvm.vp.select.v4i64(<4 x i1> %a, <4 x i64> %b, <4 x i64> %c, i32 %evl)
+  ret <4 x i64> %v
+}
+
+declare <8 x i64> @llvm.vp.select.v8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32)
+
+define <8 x i64> @select_v8i64(<8 x i1> %a, <8 x i64> %b, <8 x i64> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <8 x i64> @llvm.vp.select.v8i64(<8 x i1> %a, <8 x i64> %b, <8 x i64> %c, i32 %evl)
+  ret <8 x i64> %v
+}
+
+declare <16 x i64> @llvm.vp.select.v16i64(<16 x i1>, <16 x i64>, <16 x i64>, i32)
+
+define <16 x i64> @select_v16i64(<16 x i1> %a, <16 x i64> %b, <16 x i64> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v16i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <16 x i64> @llvm.vp.select.v16i64(<16 x i1> %a, <16 x i64> %b, <16 x i64> %c, i32 %evl)
+  ret <16 x i64> %v
+}
+
+declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32)
+
+define <2 x half> @select_v2f16(<2 x i1> %a, <2 x half> %b, <2 x half> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %a, <2 x half> %b, <2 x half> %c, i32 %evl)
+  ret <2 x half> %v
+}
+
+declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32)
+
+define <4 x half> @select_v4f16(<4 x i1> %a, <4 x half> %b, <4 x half> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %a, <4 x half> %b, <4 x half> %c, i32 %evl)
+  ret <4 x half> %v
+}
+
+declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32)
+
+define <8 x half> @select_v8f16(<8 x i1> %a, <8 x half> %b, <8 x half> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %a, <8 x half> %b, <8 x half> %c, i32 %evl)
+  ret <8 x half> %v
+}
+
+declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32)
+
+define <16 x half> @select_v16f16(<16 x i1> %a, <16 x half> %b, <16 x half> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %a, <16 x half> %b, <16 x half> %c, i32 %evl)
+  ret <16 x half> %v
+}
+
+declare <2 x float> @llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32)
+
+define <2 x float> @select_v2f32(<2 x i1> %a, <2 x float> %b, <2 x float> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %a, <2 x float> %b, <2 x float> %c, i32 %evl)
+  ret <2 x float> %v
+}
+
+declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32)
+
+define <4 x float> @select_v4f32(<4 x i1> %a, <4 x float> %b, <4 x float> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %a, <4 x float> %b, <4 x float> %c, i32 %evl)
+  ret <4 x float> %v
+}
+
+declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32)
+
+define <8 x float> @select_v8f32(<8 x i1> %a, <8 x float> %b, <8 x float> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %a, <8 x float> %b, <8 x float> %c, i32 %evl)
+  ret <8 x float> %v
+}
+
+declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32)
+
+define <16 x float> @select_v16f32(<16 x i1> %a, <16 x float> %b, <16 x float> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %a, <16 x float> %b, <16 x float> %c, i32 %evl)
+  ret <16 x float> %v
+}
+
+declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32)
+
+define <2 x double> @select_v2f64(<2 x i1> %a, <2 x double> %b, <2 x double> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %a, <2 x double> %b, <2 x double> %c, i32 %evl)
+  ret <2 x double> %v
+}
+
+declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32)
+
+define <4 x double> @select_v4f64(<4 x i1> %a, <4 x double> %b, <4 x double> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %a, <4 x double> %b, <4 x double> %c, i32 %evl)
+  ret <4 x double> %v
+}
+
+declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32)
+
+define <8 x double> @select_v8f64(<8 x i1> %a, <8 x double> %b, <8 x double> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %a, <8 x double> %b, <8 x double> %c, i32 %evl)
+  ret <8 x double> %v
+}
+
+declare <16 x double> @llvm.vp.select.v16f64(<16 x i1>, <16 x double>, <16 x double>, i32)
+
+define <16 x double> @select_v16f64(<16 x i1> %a, <16 x double> %b, <16 x double> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <16 x double> @llvm.vp.select.v16f64(<16 x i1> %a, <16 x double> %b, <16 x double> %c, i32 %evl)
+  ret <16 x double> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -0,0 +1,449 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)
+
+define <vscale x 1 x i8> @select_nxv1i8(<vscale x 1 x i1> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, i32 %evl)
+  ret <vscale x 1 x i8> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32)
+
+define <vscale x 2 x i8> @select_nxv2i8(<vscale x 2 x i1> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32)
+
+define <vscale x 4 x i8> @select_nxv4i8(<vscale x 4 x i1> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, i32 %evl)
+  ret <vscale x 4 x i8> %v
+}
+
+declare <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32)
+
+define <vscale x 8 x i8> @select_nxv8i8(<vscale x 8 x i1> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, i32 %evl)
+  ret <vscale x 8 x i8> %v
+}
+
+declare <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
+
+define <vscale x 16 x i8> @select_nxv16i8(<vscale x 16 x i1> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, i32 %evl)
+  ret <vscale x 16 x i8> %v
+}
+
+declare <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1>, <vscale x 32 x i8>, <vscale x 32 x i8>, i32)
+
+define <vscale x 32 x i8> @select_nxv32i8(<vscale x 32 x i1> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, i32 %evl)
+  ret <vscale x 32 x i8> %v
+}
+
+declare <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1>, <vscale x 64 x i8>, <vscale x 64 x i8>, i32)
+
+define <vscale x 64 x i8> @select_nxv64i8(<vscale x 64 x i1> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, i32 %evl)
+  ret <vscale x 64 x i8> %v
+}
+
+declare <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32)
+
+define <vscale x 1 x i16> @select_nxv1i16(<vscale x 1 x i1> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, i32 %evl)
+  ret <vscale x 1 x i16> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32)
+
+define <vscale x 2 x i16> @select_nxv2i16(<vscale x 2 x i1> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32)
+
+define <vscale x 4 x i16> @select_nxv4i16(<vscale x 4 x i1> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, i32 %evl)
+  ret <vscale x 4 x i16> %v
+}
+
+declare <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+
+define <vscale x 8 x i16> @select_nxv8i16(<vscale x 8 x i1> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, i32 %evl)
+  ret <vscale x 8 x i16> %v
+}
+
+declare <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1>, <vscale x 16 x i16>, <vscale x 16 x i16>, i32)
+
+define <vscale x 16 x i16> @select_nxv16i16(<vscale x 16 x i1> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c, i32 %evl)
+  ret <vscale x 16 x i16> %v
+}
+
+declare <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1>, <vscale x 32 x i16>, <vscale x 32 x i16>, i32)
+
+define <vscale x 32 x i16> @select_nxv32i16(<vscale x 32 x i1> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, i32 %evl)
+  ret <vscale x 32 x i16> %v
+}
+
+declare <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32)
+
+define <vscale x 1 x i32> @select_nxv1i32(<vscale x 1 x i1> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, i32 %evl)
+  ret <vscale x 1 x i32> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
+
+define <vscale x 2 x i32> @select_nxv2i32(<vscale x 2 x i1> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
+
+define <vscale x 4 x i32> @select_nxv4i32(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, i32 %evl)
+  ret <vscale x 4 x i32> %v
+}
+
+declare <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1>, <vscale x 8 x i32>, <vscale x 8 x i32>, i32)
+
+define <vscale x 8 x i32> @select_nxv8i32(<vscale x 8 x i1> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, i32 %evl)
+  ret <vscale x 8 x i32> %v
+}
+
+declare <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1>, <vscale x 16 x i32>, <vscale x 16 x i32>, i32)
+
+define <vscale x 16 x i32> @select_nxv16i32(<vscale x 16 x i1> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, i32 %evl)
+  ret <vscale x 16 x i32> %v
+}
+
+declare <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32)
+
+define <vscale x 1 x i64> @select_nxv1i64(<vscale x 1 x i1> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, i32 %evl)
+  ret <vscale x 1 x i64> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
+
+define <vscale x 2 x i64> @select_nxv2i64(<vscale x 2 x i1> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1>, <vscale x 4 x i64>, <vscale x 4 x i64>, i32)
+
+define <vscale x 4 x i64> @select_nxv4i64(<vscale x 4 x i1> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, i32 %evl)
+  ret <vscale x 4 x i64> %v
+}
+
+declare <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1>, <vscale x 8 x i64>, <vscale x 8 x i64>, i32)
+
+define <vscale x 8 x i64> @select_nxv8i64(<vscale x 8 x i1> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+declare <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1>, <vscale x 1 x half>, <vscale x 1 x half>, i32)
+
+define <vscale x 1 x half> @select_nxv1f16(<vscale x 1 x i1> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %c, i32 %evl)
+  ret <vscale x 1 x half> %v
+}
+
+declare <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1>, <vscale x 2 x half>, <vscale x 2 x half>, i32)
+
+define <vscale x 2 x half> @select_nxv2f16(<vscale x 2 x i1> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %c, i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+declare <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1>, <vscale x 4 x half>, <vscale x 4 x half>, i32)
+
+define <vscale x 4 x half> @select_nxv4f16(<vscale x 4 x i1> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %c, i32 %evl)
+  ret <vscale x 4 x half> %v
+}
+
+declare <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
+
+define <vscale x 8 x half> @select_nxv8f16(<vscale x 8 x i1> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c, i32 %evl)
+  ret <vscale x 8 x half> %v
+}
+
+declare <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1>, <vscale x 16 x half>, <vscale x 16 x half>, i32)
+
+define <vscale x 16 x half> @select_nxv16f16(<vscale x 16 x i1> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %c, i32 %evl)
+  ret <vscale x 16 x half> %v
+}
+
+declare <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1>, <vscale x 32 x half>, <vscale x 32 x half>, i32)
+
+define <vscale x 32 x half> @select_nxv32f16(<vscale x 32 x i1> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 %evl)
+  ret <vscale x 32 x half> %v
+}
+
+declare <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1>, <vscale x 1 x float>, <vscale x 1 x float>, i32)
+
+define <vscale x 1 x float> @select_nxv1f32(<vscale x 1 x i1> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %c, i32 %evl)
+  ret <vscale x 1 x float> %v
+}
+
+declare <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>, <vscale x 2 x float>, i32)
+
+define <vscale x 2 x float> @select_nxv2f32(<vscale x 2 x i1> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %c, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+declare <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, i32)
+
+define <vscale x 4 x float> @select_nxv4f32(<vscale x 4 x i1> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, i32 %evl)
+  ret <vscale x 4 x float> %v
+}
+
+declare <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1>, <vscale x 8 x float>, <vscale x 8 x float>, i32)
+
+define <vscale x 8 x float> @select_nxv8f32(<vscale x 8 x i1> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %c, i32 %evl)
+  ret <vscale x 8 x float> %v
+}
+
+declare <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1>, <vscale x 16 x float>, <vscale x 16 x float>, i32)
+
+define <vscale x 16 x float> @select_nxv16f32(<vscale x 16 x i1> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %c, i32 %evl)
+  ret <vscale x 16 x float> %v
+}
+
+declare <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1>, <vscale x 1 x double>, <vscale x 1 x double>, i32)
+
+define <vscale x 1 x double> @select_nxv1f64(<vscale x 1 x i1> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, i32 %evl)
+  ret <vscale x 1 x double> %v
+}
+
+declare <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, i32)
+
+define <vscale x 2 x double> @select_nxv2f64(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c, i32 %evl)
+  ret <vscale x 2 x double> %v
+}
+
+declare <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1>, <vscale x 4 x double>, <vscale x 4 x double>, i32)
+
+define <vscale x 4 x double> @select_nxv4f64(<vscale x 4 x i1> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, i32 %evl)
+  ret <vscale x 4 x double> %v
+}
+
+declare <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1>, <vscale x 8 x double>, <vscale x 8 x double>, i32)
+
+define <vscale x 8 x double> @select_nxv8f64(<vscale x 8 x i1> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %c, i32 zeroext %evl) {
+; CHECK-LABEL: select_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %c, i32 %evl)
+  ret <vscale x 8 x double> %v
+}