diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15400,6 +15400,8 @@
 
 .. _int_overflow:
 
+.. _int_fshl:
+
 '``llvm.fshl.*``' Intrinsic
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -15446,6 +15448,8 @@
       %r = call i8 @llvm.fshl.i8(i8 15, i8 15, i8 11)  ; %r = i8: 120 (0b01111000)
       %r = call i8 @llvm.fshl.i8(i8 0, i8 255, i8 8)   ; %r = i8: 0   (0b00000000)
 
+.. _int_fshr:
+
 '``llvm.fshr.*``' Intrinsic
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -22096,6 +22100,100 @@
       %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
 
+.. _int_vp_fshl:
+
+'``llvm.vp.fshl.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+      declare <16 x i32> @llvm.vp.fshl.v16i32 (<16 x i32> <left_op>, <16 x i32> <middle_op>, <16 x i32> <right_op>, <16 x i1> <mask>, i32 <vector_length>)
+      declare <vscale x 4 x i32> @llvm.vp.fshl.nxv4i32 (<vscale x 4 x i32> <left_op>, <vscale x 4 x i32> <middle_op>, <vscale x 4 x i32> <right_op>, <vscale x 4 x i1> <mask>, i32 <vector_length>)
+      declare <256 x i64> @llvm.vp.fshl.v256i64 (<256 x i64> <left_op>, <256 x i64> <middle_op>, <256 x i64> <right_op>, <256 x i1> <mask>, i32 <vector_length>)
+
+Overview:
+"""""""""
+
+Predicated fshl of three vectors of integers.
+
+
+Arguments:
+""""""""""
+
+The first three operands and the result have the same vector of integer type. The
+fourth operand is the vector mask and has the same number of elements as the
+result vector type. The fifth operand is the explicit vector length of the
+operation.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.fshl``' intrinsic performs fshl (:ref:`fshl <int_fshl>`) of the first, second, and third
+vector operand on each enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
+
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+      %r = call <4 x i32> @llvm.vp.fshl.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %mask, i32 %evl)
+      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
+
+      %t = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
+      %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+
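A note on the semantics above: the shift amount is taken modulo the bit width, so the VP forms inherit the same corner cases as plain ``llvm.fshl``/``llvm.fshr``. A minimal scalar model in C++ (an illustration for this review, not code from the patch) that reproduces the ``llvm.fshl.i8`` examples quoted earlier in LangRef.rst:

```cpp
#include <cassert>
#include <cstdint>

// Per-lane model of fshl/fshr for i8 (BW = 8): conceptually concatenate
// {x:y} into a 16-bit value, shift, and keep the relevant byte.
static uint8_t fshl8(uint8_t x, uint8_t y, uint8_t z) {
  unsigned c = z % 8;               // shift amount is taken modulo BW
  if (c == 0)
    return x;                       // avoid a shift by BW below
  return (uint8_t)((x << c) | (y >> (8 - c)));
}

static uint8_t fshr8(uint8_t x, uint8_t y, uint8_t z) {
  unsigned c = z % 8;
  if (c == 0)
    return y;
  return (uint8_t)((x << (8 - c)) | (y >> c));
}

int main() {
  assert(fshl8(15, 15, 11) == 120);  // matches the LangRef example above
  assert(fshl8(0, 255, 8) == 0);     // c == 0: the result is the first operand
  assert(fshr8(15, 15, 11) == 0xE1); // {0x0F:0x0F} >> 3, low byte
  return 0;
}
```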
+'``llvm.vp.fshr.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+      declare <16 x i32> @llvm.vp.fshr.v16i32 (<16 x i32> <left_op>, <16 x i32> <middle_op>, <16 x i32> <right_op>, <16 x i1> <mask>, i32 <vector_length>)
+      declare <vscale x 4 x i32> @llvm.vp.fshr.nxv4i32 (<vscale x 4 x i32> <left_op>, <vscale x 4 x i32> <middle_op>, <vscale x 4 x i32> <right_op>, <vscale x 4 x i1> <mask>, i32 <vector_length>)
+      declare <256 x i64> @llvm.vp.fshr.v256i64 (<256 x i64> <left_op>, <256 x i64> <middle_op>, <256 x i64> <right_op>, <256 x i1> <mask>, i32 <vector_length>)
+
+Overview:
+"""""""""
+
+Predicated fshr of three vectors of integers.
+
+
+Arguments:
+""""""""""
+
+The first three operands and the result have the same vector of integer type. The
+fourth operand is the vector mask and has the same number of elements as the
+result vector type. The fifth operand is the explicit vector length of the
+operation.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.fshr``' intrinsic performs fshr (:ref:`fshr <int_fshr>`) of the first, second, and third
+vector operand on each enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
+
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+      %r = call <4 x i32> @llvm.vp.fshr.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %mask, i32 %evl)
+      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
+
+      %t = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
+      %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+
+
 .. _int_mload_mstore:
 
 Masked Vector Load and Store Intrinsics
 
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1562,6 +1562,18 @@
                              [ LLVMMatchType<0>,
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty]>;
+  def int_vp_fshl : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
+  def int_vp_fshr : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
 
   // Floating-point arithmetic
   def int_vp_fadd : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -219,6 +219,14 @@
 // llvm.vp.bswap(x,mask,vlen)
 BEGIN_REGISTER_VP(vp_bswap, 1, 2, VP_BSWAP, -1)
 END_REGISTER_VP(vp_bswap, VP_BSWAP)
+
+// llvm.vp.fshl(x,y,z,mask,vlen)
+BEGIN_REGISTER_VP(vp_fshl, 3, 4, VP_FSHL, -1)
+END_REGISTER_VP(vp_fshl, VP_FSHL)
+
+// llvm.vp.fshr(x,y,z,mask,vlen)
+BEGIN_REGISTER_VP(vp_fshr, 3, 4, VP_FSHR, -1)
+END_REGISTER_VP(vp_fshr, VP_FSHR)
 
 ///// } Integer Arithmetic
 
 ///// Floating-Point Arithmetic {
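The two registrations above record the operand layout once for all consumers: the three data operands come first, the mask is operand 3, and the explicit vector length is operand 4. A sketch of how generic code can recover that layout through the existing ``VPIntrinsic`` interface (the helper itself is illustrative, not part of this patch):

```cpp
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Print the mask and EVL of a VP call. For vp.fshl/vp.fshr these resolve to
// operands 3 and 4, exactly as registered in VPIntrinsics.def above.
static void printMaskAndEVL(const CallInst &CI) {
  const auto *VPI = dyn_cast<VPIntrinsic>(&CI);
  if (!VPI)
    return;
  if (const Value *Mask = VPI->getMaskParam())
    errs() << "mask: " << *Mask << "\n";
  if (const Value *EVL = VPI->getVectorLengthParam())
    errs() << "evl:  " << *EVL << "\n";
}
```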
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -809,7 +809,9 @@
     }
     break;
   case ISD::FSHL:
+  case ISD::VP_FSHL:
   case ISD::FSHR:
+  case ISD::VP_FSHR:
    if (SDValue Expanded = TLI.expandFunnelShift(Node, DAG)) {
      Results.push_back(Expanded);
      return;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1113,7 +1113,9 @@
     break;
   case ISD::FMA: case ISD::VP_FMA:
   case ISD::FSHL:
+  case ISD::VP_FSHL:
   case ISD::FSHR:
+  case ISD::VP_FSHR:
     SplitVecRes_TernaryOp(N, Lo, Hi);
     break;
@@ -4109,7 +4111,9 @@
     break;
   case ISD::FMA: case ISD::VP_FMA:
   case ISD::FSHL:
+  case ISD::VP_FSHL:
   case ISD::FSHR:
+  case ISD::VP_FSHR:
     Res = WidenVecRes_Ternary(N);
     break;
   }
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -7336,8 +7336,68 @@
                            true);
 }
 
+static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG) {
+  EVT VT = Node->getValueType(0);
+  SDValue ShX, ShY;
+  SDValue ShAmt, InvShAmt;
+  SDValue X = Node->getOperand(0);
+  SDValue Y = Node->getOperand(1);
+  SDValue Z = Node->getOperand(2);
+  SDValue Mask = Node->getOperand(3);
+  SDValue VL = Node->getOperand(4);
+
+  unsigned BW = VT.getScalarSizeInBits();
+  bool IsFSHL = Node->getOpcode() == ISD::VP_FSHL;
+  SDLoc DL(SDValue(Node, 0));
+
+  EVT ShVT = Z.getValueType();
+  if (isNonZeroModBitWidthOrUndef(Z, BW)) {
+    // fshl: X << C | Y >> (BW - C)
+    // fshr: X << (BW - C) | Y >> C
+    // where C = Z % BW is not zero
+    SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT);
+    ShAmt = DAG.getNode(ISD::VP_UREM, DL, ShVT, Z, BitWidthC, Mask, VL);
+    InvShAmt = DAG.getNode(ISD::VP_SUB, DL, ShVT, BitWidthC, ShAmt, Mask, VL);
+    ShX = DAG.getNode(ISD::VP_SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt, Mask,
+                      VL);
+    ShY = DAG.getNode(ISD::VP_LSHR, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt, Mask,
+                      VL);
+  } else {
+    // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW))
+    // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW)
+    SDValue BitMask = DAG.getConstant(BW - 1, DL, ShVT);
+    if (isPowerOf2_32(BW)) {
+      // Z % BW -> Z & (BW - 1)
+      ShAmt = DAG.getNode(ISD::VP_AND, DL, ShVT, Z, BitMask, Mask, VL);
+      // (BW - 1) - (Z % BW) -> ~Z & (BW - 1)
+      SDValue NotZ = DAG.getNode(ISD::VP_XOR, DL, ShVT, Z,
+                                 DAG.getAllOnesConstant(DL, ShVT), Mask, VL);
+      InvShAmt = DAG.getNode(ISD::VP_AND, DL, ShVT, NotZ, BitMask, Mask, VL);
+    } else {
+      SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT);
+      ShAmt = DAG.getNode(ISD::VP_UREM, DL, ShVT, Z, BitWidthC, Mask, VL);
+      InvShAmt = DAG.getNode(ISD::VP_SUB, DL, ShVT, BitMask, ShAmt, Mask, VL);
+    }
+
+    SDValue One = DAG.getConstant(1, DL, ShVT);
+    if (IsFSHL) {
+      ShX = DAG.getNode(ISD::VP_SHL, DL, VT, X, ShAmt, Mask, VL);
+      SDValue ShY1 = DAG.getNode(ISD::VP_LSHR, DL, VT, Y, One, Mask, VL);
+      ShY = DAG.getNode(ISD::VP_LSHR, DL, VT, ShY1, InvShAmt, Mask, VL);
+    } else {
+      SDValue ShX1 = DAG.getNode(ISD::VP_SHL, DL, VT, X, One, Mask, VL);
+      ShX = DAG.getNode(ISD::VP_SHL, DL, VT, ShX1, InvShAmt, Mask, VL);
+      ShY = DAG.getNode(ISD::VP_LSHR, DL, VT, Y, ShAmt, Mask, VL);
+    }
+  }
+  return DAG.getNode(ISD::VP_OR, DL, VT, ShX, ShY, Mask, VL);
+}
+
 SDValue TargetLowering::expandFunnelShift(SDNode *Node,
                                           SelectionDAG &DAG) const {
+  if (Node->isVPOpcode())
+    return expandVPFunnelShift(Node, DAG);
+
   EVT VT = Node->getValueType(0);
   if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) ||
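One subtlety in ``expandVPFunnelShift`` above: when the shift amount cannot be proven nonzero modulo ``BW``, the expansion must not emit a single shift by ``BW``, which would be an out-of-range shift. Splitting the second shift as ``Y >> 1 >> (BW - 1 - (Z % BW))`` keeps every individual shift amount at most ``BW - 1``. A scalar check of that identity for ``BW = 8``, written for this review rather than taken from the patch (``~z & 7`` is the same ``(BW - 1) - (Z % BW)`` rewrite the power-of-two path uses):

```cpp
#include <cassert>
#include <cstdint>

// fshl via the expansion's shift-splitting trick; no single shift amount
// ever reaches 8, even when z % 8 == 0.
static uint8_t fshl8_expanded(uint8_t x, uint8_t y, uint8_t z) {
  unsigned shamt = z & 7;  // Z % BW, BW a power of two
  unsigned inv = ~z & 7;   // (BW - 1) - (Z % BW)
  return (uint8_t)((x << shamt) | ((y >> 1) >> inv));
}

int main() {
  for (unsigned z = 0; z < 16; ++z) {
    unsigned c = z % 8;
    uint8_t ref = c ? (uint8_t)((0xAB << c) | (0xCD >> (8 - c))) : 0xAB;
    assert(fshl8_expanded(0xAB, 0xCD, (uint8_t)z) == ref);
  }
  return 0;
}
```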
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -591,6 +591,7 @@
       setOperationAction(ISD::BSWAP, VT, Expand);
       setOperationAction(ISD::VP_BSWAP, VT, Expand);
+      setOperationAction({ISD::VP_FSHL, ISD::VP_FSHR}, VT, Expand);
 
       // Custom-lower extensions and truncations from/to mask types.
       setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -571,6 +571,14 @@
       return Cost * LT.first;
     break;
   }
+  case Intrinsic::vp_fshl:
+  case Intrinsic::vp_fshr: {
+    unsigned Cost = 9;
+    auto LT = getTypeLegalizationCost(RetTy);
+    if (TLI->isOperationCustom(ISD::VP_FSHL, LT.second))
+      return Cost * LT.first;
+    break;
+  }
   }
 
   if (ST->hasVInstructions() && RetTy->isVectorTy()) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
@@ -0,0 +1,925 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <2 x i8> @llvm.vp.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>, <2 x i1>, i32)
+define <2 x i8> @fshr_v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 7, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <2 x i8> @llvm.vp.fshr.v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i32 %evl)
+  ret <2 x i8> %res
+}
+
+declare <2 x i8> @llvm.vp.fshl.v2i8(<2 x i8>, <2 x i8>, <2 x i8>, <2 x i1>, i32)
+define <2 x i8> @fshl_v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 7, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <2 x i8> @llvm.vp.fshl.v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i32 %evl)
+  ret <2 x i8> %res
+}
+
+declare <4 x i8> @llvm.vp.fshr.v4i8(<4 x i8>, <4 x i8>, <4 x i8>, <4 x i1>, i32)
+define <4 x i8> @fshr_v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 7, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <4 x i8> @llvm.vp.fshr.v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i32 %evl)
+  ret <4 x i8> %res
+}
+
+declare <4 x i8> @llvm.vp.fshl.v4i8(<4 x i8>, <4 x i8>, <4 x i8>, <4 x i1>, i32)
+define <4 x i8> @fshl_v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: 
fshl_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 7, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 7, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <4 x i8> @llvm.vp.fshl.v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i32 %evl) + ret <4 x i8> %res +} + +declare <8 x i8> @llvm.vp.fshr.v8i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i1>, i32) +define <8 x i8> @fshr_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 7, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 7, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.vp.fshr.v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i32 %evl) + ret <8 x i8> %res +} + +declare <8 x i8> @llvm.vp.fshl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i1>, i32) +define <8 x i8> @fshl_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 7, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 7, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.vp.fshl.v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i32 %evl) + ret <8 x i8> %res +} + +declare <16 x i8> @llvm.vp.fshr.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i1>, i32) +define <16 x i8> @fshr_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 7, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 7, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <16 x i8> @llvm.vp.fshr.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> %m, i32 %evl) + ret <16 x i8> %res +} + +declare <16 x i8> @llvm.vp.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i1>, i32) +define <16 x i8> @fshl_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 7, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 7, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <16 x i8> @llvm.vp.fshl.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> %m, i32 %evl) + ret <16 x i8> %res +} + +declare <32 x i8> @llvm.vp.fshr.v32i8(<32 x i8>, <32 x i8>, <32 x i8>, <32 x i1>, i32) +define <32 x i8> @fshr_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> %m, i32 zeroext %evl) { 
+; CHECK-LABEL: fshr_v32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vnot.v v14, v12, v0.t +; CHECK-NEXT: vand.vi v14, v14, 7, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v14, v0.t +; CHECK-NEXT: vand.vi v12, v12, 7, v0.t +; CHECK-NEXT: vsrl.vv v10, v10, v12, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %res = call <32 x i8> @llvm.vp.fshr.v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> %m, i32 %evl) + ret <32 x i8> %res +} + +declare <32 x i8> @llvm.vp.fshl.v32i8(<32 x i8>, <32 x i8>, <32 x i8>, <32 x i1>, i32) +define <32 x i8> @fshl_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vsrl.vi v10, v10, 1, v0.t +; CHECK-NEXT: vnot.v v14, v12, v0.t +; CHECK-NEXT: vand.vi v14, v14, 7, v0.t +; CHECK-NEXT: vsrl.vv v10, v10, v14, v0.t +; CHECK-NEXT: vand.vi v12, v12, 7, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %res = call <32 x i8> @llvm.vp.fshl.v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> %m, i32 %evl) + ret <32 x i8> %res +} + +declare <64 x i8> @llvm.vp.fshr.v64i8(<64 x i8>, <64 x i8>, <64 x i8>, <64 x i1>, i32) +define <64 x i8> @fshr_v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vnot.v v20, v16, v0.t +; CHECK-NEXT: vand.vi v20, v20, 7, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v20, v0.t +; CHECK-NEXT: vand.vi v16, v16, 7, v0.t +; CHECK-NEXT: vsrl.vv v12, v12, v16, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %res = call <64 x i8> @llvm.vp.fshr.v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> %m, i32 %evl) + ret <64 x i8> %res +} + +declare <64 x i8> @llvm.vp.fshl.v64i8(<64 x i8>, <64 x i8>, <64 x i8>, <64 x i1>, i32) +define <64 x i8> @fshl_v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vsrl.vi v12, v12, 1, v0.t +; CHECK-NEXT: vnot.v v20, v16, v0.t +; CHECK-NEXT: vand.vi v20, v20, 7, v0.t +; CHECK-NEXT: vsrl.vv v12, v12, v20, v0.t +; CHECK-NEXT: vand.vi v16, v16, 7, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %res = call <64 x i8> @llvm.vp.fshl.v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> %m, i32 %evl) + ret <64 x i8> %res +} + +declare <2 x i16> @llvm.vp.fshr.v2i16(<2 x i16>, <2 x i16>, <2 x i16>, <2 x i1>, i32) +define <2 x i16> @fshr_v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 15, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <2 x i16> @llvm.vp.fshr.v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> %m, i32 %evl) + ret <2 x i16> %res +} + +declare <2 x i16> @llvm.vp.fshl.v2i16(<2 x i16>, <2 x i16>, <2 x i16>, <2 x i1>, i32) +define <2 x i16> 
@fshl_v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 15, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <2 x i16> @llvm.vp.fshl.v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> %m, i32 %evl) + ret <2 x i16> %res +} + +declare <4 x i16> @llvm.vp.fshr.v4i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i1>, i32) +define <4 x i16> @fshr_v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 15, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.vp.fshr.v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> %m, i32 %evl) + ret <4 x i16> %res +} + +declare <4 x i16> @llvm.vp.fshl.v4i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i1>, i32) +define <4 x i16> @fshl_v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 15, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.vp.fshl.v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> %m, i32 %evl) + ret <4 x i16> %res +} + +declare <8 x i16> @llvm.vp.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32) +define <8 x i16> @fshr_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 15, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.vp.fshr.v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> %m, i32 %evl) + ret <8 x i16> %res +} + +declare <8 x i16> @llvm.vp.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32) +define <8 x i16> @fshl_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 15, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.vp.fshl.v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> %m, i32 %evl) + ret <8 x i16> %res +} + +declare <16 x i16> 
@llvm.vp.fshr.v16i16(<16 x i16>, <16 x i16>, <16 x i16>, <16 x i1>, i32) +define <16 x i16> @fshr_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vnot.v v14, v12, v0.t +; CHECK-NEXT: vand.vi v14, v14, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v14, v0.t +; CHECK-NEXT: vand.vi v12, v12, 15, v0.t +; CHECK-NEXT: vsrl.vv v10, v10, v12, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %res = call <16 x i16> @llvm.vp.fshr.v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i1> %m, i32 %evl) + ret <16 x i16> %res +} + +declare <16 x i16> @llvm.vp.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>, <16 x i1>, i32) +define <16 x i16> @fshl_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vsrl.vi v10, v10, 1, v0.t +; CHECK-NEXT: vnot.v v14, v12, v0.t +; CHECK-NEXT: vand.vi v14, v14, 15, v0.t +; CHECK-NEXT: vsrl.vv v10, v10, v14, v0.t +; CHECK-NEXT: vand.vi v12, v12, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %res = call <16 x i16> @llvm.vp.fshl.v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i1> %m, i32 %evl) + ret <16 x i16> %res +} + +declare <32 x i16> @llvm.vp.fshr.v32i16(<32 x i16>, <32 x i16>, <32 x i16>, <32 x i1>, i32) +define <32 x i16> @fshr_v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vnot.v v20, v16, v0.t +; CHECK-NEXT: vand.vi v20, v20, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v20, v0.t +; CHECK-NEXT: vand.vi v16, v16, 15, v0.t +; CHECK-NEXT: vsrl.vv v12, v12, v16, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %res = call <32 x i16> @llvm.vp.fshr.v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i1> %m, i32 %evl) + ret <32 x i16> %res +} + +declare <32 x i16> @llvm.vp.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>, <32 x i1>, i32) +define <32 x i16> @fshl_v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vsrl.vi v12, v12, 1, v0.t +; CHECK-NEXT: vnot.v v20, v16, v0.t +; CHECK-NEXT: vand.vi v20, v20, 15, v0.t +; CHECK-NEXT: vsrl.vv v12, v12, v20, v0.t +; CHECK-NEXT: vand.vi v16, v16, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %res = call <32 x i16> @llvm.vp.fshl.v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i1> %m, i32 %evl) + ret <32 x i16> %res +} + +declare <2 x i32> @llvm.vp.fshr.v2i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i1>, i32) +define <2 x i32> @fshr_v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 31 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vand.vx v11, v10, a1, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t +; CHECK-NEXT: vnot.v v10, v10, v0.t +; CHECK-NEXT: vand.vx v10, v10, a1, v0.t +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, 
v9, v0.t +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.vp.fshr.v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> %m, i32 %evl) + ret <2 x i32> %res +} + +declare <2 x i32> @llvm.vp.fshl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i1>, i32) +define <2 x i32> @fshl_v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 31 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vand.vx v11, v10, a1, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t +; CHECK-NEXT: vnot.v v10, v10, v0.t +; CHECK-NEXT: vand.vx v10, v10, a1, v0.t +; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.vp.fshl.v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> %m, i32 %evl) + ret <2 x i32> %res +} + +declare <4 x i32> @llvm.vp.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i1>, i32) +define <4 x i32> @fshr_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 31 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vand.vx v11, v10, a1, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t +; CHECK-NEXT: vnot.v v10, v10, v0.t +; CHECK-NEXT: vand.vx v10, v10, a1, v0.t +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.vp.fshr.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %m, i32 %evl) + ret <4 x i32> %res +} + +declare <4 x i32> @llvm.vp.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i1>, i32) +define <4 x i32> @fshl_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 31 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vand.vx v11, v10, a1, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t +; CHECK-NEXT: vnot.v v10, v10, v0.t +; CHECK-NEXT: vand.vx v10, v10, a1, v0.t +; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.vp.fshl.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %m, i32 %evl) + ret <4 x i32> %res +} + +declare <8 x i32> @llvm.vp.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>, <8 x i1>, i32) +define <8 x i32> @fshr_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 31 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vand.vx v14, v12, a1, v0.t +; CHECK-NEXT: vsrl.vv v10, v10, v14, v0.t +; CHECK-NEXT: vnot.v v12, v12, v0.t +; CHECK-NEXT: vand.vx v12, v12, a1, v0.t +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %res = call <8 x i32> @llvm.vp.fshr.v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> %m, i32 %evl) + ret <8 x i32> %res +} + +declare <8 x i32> @llvm.vp.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>, <8 x i1>, i32) +define <8 x i32> @fshl_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 31 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vand.vx v14, v12, a1, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v14, v0.t 
+; CHECK-NEXT: vnot.v v12, v12, v0.t +; CHECK-NEXT: vand.vx v12, v12, a1, v0.t +; CHECK-NEXT: vsrl.vi v10, v10, 1, v0.t +; CHECK-NEXT: vsrl.vv v10, v10, v12, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %res = call <8 x i32> @llvm.vp.fshl.v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> %m, i32 %evl) + ret <8 x i32> %res +} + +declare <16 x i32> @llvm.vp.fshr.v16i32(<16 x i32>, <16 x i32>, <16 x i32>, <16 x i1>, i32) +define <16 x i32> @fshr_v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 31 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vand.vx v20, v16, a1, v0.t +; CHECK-NEXT: vsrl.vv v12, v12, v20, v0.t +; CHECK-NEXT: vnot.v v16, v16, v0.t +; CHECK-NEXT: vand.vx v16, v16, a1, v0.t +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %res = call <16 x i32> @llvm.vp.fshr.v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i1> %m, i32 %evl) + ret <16 x i32> %res +} + +declare <16 x i32> @llvm.vp.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>, <16 x i1>, i32) +define <16 x i32> @fshl_v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 31 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vand.vx v20, v16, a1, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v20, v0.t +; CHECK-NEXT: vnot.v v16, v16, v0.t +; CHECK-NEXT: vand.vx v16, v16, a1, v0.t +; CHECK-NEXT: vsrl.vi v12, v12, 1, v0.t +; CHECK-NEXT: vsrl.vv v12, v12, v16, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %res = call <16 x i32> @llvm.vp.fshl.v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i1> %m, i32 %evl) + ret <16 x i32> %res +} + +declare <2 x i64> @llvm.vp.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i1>, i32) +define <2 x i64> @fshr_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: fshr_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 63 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vx v11, v10, a1, v0.t +; RV32-NEXT: vsrl.vv v9, v9, v11, v0.t +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.i v11, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vxor.vv v10, v10, v11, v0.t +; RV32-NEXT: vand.vx v10, v10, a1, v0.t +; RV32-NEXT: vsll.vi v8, v8, 1, v0.t +; RV32-NEXT: vsll.vv v8, v8, v10, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: fshr_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 63 +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vand.vx v11, v10, a1, v0.t +; RV64-NEXT: vsrl.vv v9, v9, v11, v0.t +; RV64-NEXT: vnot.v v10, v10, v0.t +; RV64-NEXT: vand.vx v10, v10, a1, v0.t +; RV64-NEXT: vsll.vi v8, v8, 1, v0.t +; RV64-NEXT: vsll.vv v8, v8, v10, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: ret + %res = call <2 x i64> @llvm.vp.fshr.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> %m, i32 %evl) + ret <2 x i64> %res +} + +declare <2 x i64> @llvm.vp.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i1>, i32) +define <2 x i64> @fshl_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: fshl_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 63 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vx v11, v10, a1, v0.t +; RV32-NEXT: vsll.vv 
v8, v8, v11, v0.t +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.i v11, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vxor.vv v10, v10, v11, v0.t +; RV32-NEXT: vand.vx v10, v10, a1, v0.t +; RV32-NEXT: vsrl.vi v9, v9, 1, v0.t +; RV32-NEXT: vsrl.vv v9, v9, v10, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: fshl_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 63 +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vand.vx v11, v10, a1, v0.t +; RV64-NEXT: vsll.vv v8, v8, v11, v0.t +; RV64-NEXT: vnot.v v10, v10, v0.t +; RV64-NEXT: vand.vx v10, v10, a1, v0.t +; RV64-NEXT: vsrl.vi v9, v9, 1, v0.t +; RV64-NEXT: vsrl.vv v9, v9, v10, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: ret + %res = call <2 x i64> @llvm.vp.fshl.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> %m, i32 %evl) + ret <2 x i64> %res +} + +declare <4 x i64> @llvm.vp.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>, <4 x i1>, i32) +define <4 x i64> @fshr_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: fshr_v4i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 63 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vx v14, v12, a1, v0.t +; RV32-NEXT: vsrl.vv v10, v10, v14, v0.t +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.i v14, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vxor.vv v12, v12, v14, v0.t +; RV32-NEXT: vand.vx v12, v12, a1, v0.t +; RV32-NEXT: vsll.vi v8, v8, 1, v0.t +; RV32-NEXT: vsll.vv v8, v8, v12, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: fshr_v4i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 63 +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vand.vx v14, v12, a1, v0.t +; RV64-NEXT: vsrl.vv v10, v10, v14, v0.t +; RV64-NEXT: vnot.v v12, v12, v0.t +; RV64-NEXT: vand.vx v12, v12, a1, v0.t +; RV64-NEXT: vsll.vi v8, v8, 1, v0.t +; RV64-NEXT: vsll.vv v8, v8, v12, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: ret + %res = call <4 x i64> @llvm.vp.fshr.v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> %m, i32 %evl) + ret <4 x i64> %res +} + +declare <4 x i64> @llvm.vp.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>, <4 x i1>, i32) +define <4 x i64> @fshl_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: fshl_v4i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 63 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vx v14, v12, a1, v0.t +; RV32-NEXT: vsll.vv v8, v8, v14, v0.t +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.i v14, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vxor.vv v12, v12, v14, v0.t +; RV32-NEXT: vand.vx v12, v12, a1, v0.t +; RV32-NEXT: vsrl.vi v10, v10, 1, v0.t +; RV32-NEXT: vsrl.vv v10, v10, v12, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: fshl_v4i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 63 +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vand.vx v14, v12, a1, v0.t +; RV64-NEXT: vsll.vv v8, v8, v14, v0.t +; RV64-NEXT: vnot.v v12, v12, v0.t +; RV64-NEXT: vand.vx v12, v12, a1, v0.t +; RV64-NEXT: vsrl.vi v10, v10, 1, v0.t +; RV64-NEXT: vsrl.vv v10, v10, v12, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: ret + %res = call <4 x i64> @llvm.vp.fshl.v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> %m, i32 %evl) + ret <4 x i64> %res +} + +declare <7 x i64> @llvm.vp.fshr.v7i64(<7 x i64>, <7 x i64>, <7 x 
i64>, <7 x i1>, i32) +define <7 x i64> @fshr_v7i64(<7 x i64> %a, <7 x i64> %b, <7 x i64> %c, <7 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: fshr_v7i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 63 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vx v20, v16, a1, v0.t +; RV32-NEXT: vsrl.vv v12, v12, v20, v0.t +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.i v20, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vxor.vv v16, v16, v20, v0.t +; RV32-NEXT: vand.vx v16, v16, a1, v0.t +; RV32-NEXT: vsll.vi v8, v8, 1, v0.t +; RV32-NEXT: vsll.vv v8, v8, v16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: fshr_v7i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 63 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vand.vx v20, v16, a1, v0.t +; RV64-NEXT: vsrl.vv v12, v12, v20, v0.t +; RV64-NEXT: vnot.v v16, v16, v0.t +; RV64-NEXT: vand.vx v16, v16, a1, v0.t +; RV64-NEXT: vsll.vi v8, v8, 1, v0.t +; RV64-NEXT: vsll.vv v8, v8, v16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: ret + %res = call <7 x i64> @llvm.vp.fshr.v7i64(<7 x i64> %a, <7 x i64> %b, <7 x i64> %c, <7 x i1> %m, i32 %evl) + ret <7 x i64> %res +} + +declare <7 x i64> @llvm.vp.fshl.v7i64(<7 x i64>, <7 x i64>, <7 x i64>, <7 x i1>, i32) +define <7 x i64> @fshl_v7i64(<7 x i64> %a, <7 x i64> %b, <7 x i64> %c, <7 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: fshl_v7i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 63 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vx v20, v16, a1, v0.t +; RV32-NEXT: vsll.vv v8, v8, v20, v0.t +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.i v20, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vxor.vv v16, v16, v20, v0.t +; RV32-NEXT: vand.vx v16, v16, a1, v0.t +; RV32-NEXT: vsrl.vi v12, v12, 1, v0.t +; RV32-NEXT: vsrl.vv v12, v12, v16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: fshl_v7i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 63 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vand.vx v20, v16, a1, v0.t +; RV64-NEXT: vsll.vv v8, v8, v20, v0.t +; RV64-NEXT: vnot.v v16, v16, v0.t +; RV64-NEXT: vand.vx v16, v16, a1, v0.t +; RV64-NEXT: vsrl.vi v12, v12, 1, v0.t +; RV64-NEXT: vsrl.vv v12, v12, v16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: ret + %res = call <7 x i64> @llvm.vp.fshl.v7i64(<7 x i64> %a, <7 x i64> %b, <7 x i64> %c, <7 x i1> %m, i32 %evl) + ret <7 x i64> %res +} + +declare <8 x i64> @llvm.vp.fshr.v8i64(<8 x i64>, <8 x i64>, <8 x i64>, <8 x i1>, i32) +define <8 x i64> @fshr_v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: fshr_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 63 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vx v20, v16, a1, v0.t +; RV32-NEXT: vsrl.vv v12, v12, v20, v0.t +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.i v20, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vxor.vv v16, v16, v20, v0.t +; RV32-NEXT: vand.vx v16, v16, a1, v0.t +; RV32-NEXT: vsll.vi v8, v8, 1, v0.t +; RV32-NEXT: vsll.vv v8, v8, v16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: fshr_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 63 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vand.vx v20, v16, a1, v0.t +; RV64-NEXT: vsrl.vv v12, v12, v20, v0.t +; RV64-NEXT: vnot.v v16, v16, v0.t +; RV64-NEXT: vand.vx v16, v16, a1, v0.t +; RV64-NEXT: vsll.vi v8, 
v8, 1, v0.t +; RV64-NEXT: vsll.vv v8, v8, v16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: ret + %res = call <8 x i64> @llvm.vp.fshr.v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> %m, i32 %evl) + ret <8 x i64> %res +} + +declare <8 x i64> @llvm.vp.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>, <8 x i1>, i32) +define <8 x i64> @fshl_v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: fshl_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 63 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vx v20, v16, a1, v0.t +; RV32-NEXT: vsll.vv v8, v8, v20, v0.t +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.i v20, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vxor.vv v16, v16, v20, v0.t +; RV32-NEXT: vand.vx v16, v16, a1, v0.t +; RV32-NEXT: vsrl.vi v12, v12, 1, v0.t +; RV32-NEXT: vsrl.vv v12, v12, v16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: fshl_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 63 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vand.vx v20, v16, a1, v0.t +; RV64-NEXT: vsll.vv v8, v8, v20, v0.t +; RV64-NEXT: vnot.v v16, v16, v0.t +; RV64-NEXT: vand.vx v16, v16, a1, v0.t +; RV64-NEXT: vsrl.vi v12, v12, 1, v0.t +; RV64-NEXT: vsrl.vv v12, v12, v16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: ret + %res = call <8 x i64> @llvm.vp.fshl.v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> %m, i32 %evl) + ret <8 x i64> %res +} + +declare <16 x i64> @llvm.vp.fshr.v16i64(<16 x i64>, <16 x i64>, <16 x i64>, <16 x i1>, i32) +define <16 x i64> @fshr_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: fshr_v16i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: slli a2, a2, 3 +; RV32-NEXT: sub sp, sp, a2 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV32-NEXT: vle64.v v24, (a0) +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; RV32-NEXT: li a0, 63 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vand.vx v8, v24, a0, v0.t +; RV32-NEXT: vsrl.vv v16, v16, v8, v0.t +; RV32-NEXT: li a2, 32 +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v8, -1 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v24, v8, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vsll.vi v24, v24, 1, v0.t +; RV32-NEXT: vsll.vv v8, v24, v8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: fshr_v16i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: slli a2, a2, 3 +; RV64-NEXT: sub sp, sp, a2 +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV64-NEXT: vle64.v v24, (a0) +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; RV64-NEXT: li a0, 63 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vand.vx v8, v24, a0, v0.t +; RV64-NEXT: vsrl.vv v16, v16, v8, v0.t +; RV64-NEXT: vnot.v v8, v24, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vsll.vi v24, 
v24, 1, v0.t
+; RV64-NEXT: vsll.vv v8, v24, v8, v0.t
+; RV64-NEXT: vor.vv v8, v8, v16, v0.t
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+  %res = call <16 x i64> @llvm.vp.fshr.v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 %evl)
+  ret <16 x i64> %res
+}
+
+declare <16 x i64> @llvm.vp.fshl.v16i64(<16 x i64>, <16 x i64>, <16 x i64>, <16 x i1>, i32)
+define <16 x i64> @fshl_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: fshl_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v24, (a0)
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vmv8r.v v16, v8
+; RV32-NEXT: li a0, 63
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v8, v24, a0, v0.t
+; RV32-NEXT: vsll.vv v8, v16, v8, v0.t
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v16, -1
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vxor.vv v16, v24, v16, v0.t
+; RV32-NEXT: vand.vx v16, v16, a0, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vi v24, v24, 1, v0.t
+; RV32-NEXT: vsrl.vv v16, v24, v16, v0.t
+; RV32-NEXT: vor.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fshl_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v24, (a0)
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vmv8r.v v16, v8
+; RV64-NEXT: li a0, 63
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vand.vx v8, v24, a0, v0.t
+; RV64-NEXT: vsll.vv v8, v16, v8, v0.t
+; RV64-NEXT: vnot.v v16, v24, v0.t
+; RV64-NEXT: vand.vx v16, v16, a0, v0.t
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsrl.vi v24, v24, 1, v0.t
+; RV64-NEXT: vsrl.vv v16, v24, v16, v0.t
+; RV64-NEXT: vor.vv v8, v8, v16, v0.t
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+  %res = call <16 x i64> @llvm.vp.fshl.v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 %evl)
+  ret <16 x i64> %res
+}
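The scalable-vector tests below exercise exactly the same expansion; only the types change from fixed `<N x iM>` to `<vscale x N x iM>`. For completeness, this is how a frontend or pass might create one of these calls programmatically; the helper is hypothetical and written for this review, while `CreateIntrinsic` is existing LLVM API:

```cpp
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
using namespace llvm;

// Hypothetical helper: emit llvm.vp.fshl for any (fixed or scalable) vector
// type. The intrinsic is overloaded only on the data type; the mask and the
// explicit vector length follow as operands 3 and 4.
static Value *emitVPFshl(IRBuilder<> &B, Value *X, Value *Y, Value *Z,
                         Value *Mask, Value *EVL) {
  return B.CreateIntrinsic(Intrinsic::vp_fshl, {X->getType()},
                           {X, Y, Z, Mask, EVL});
}
```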
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -0,0 +1,1326 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.vp.fshr.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+define <vscale x 1 x i8> @fshr_v1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 7, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 1 x i8> @llvm.vp.fshr.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %res
+}
+
+declare <vscale x 1 x i8> @llvm.vp.fshl.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+define <vscale x 1 x i8> @fshl_v1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 7, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 1 x i8> @llvm.vp.fshl.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %res
+}
+
+declare <vscale x 2 x i8> @llvm.vp.fshr.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+define <vscale x 2 x i8> @fshr_v2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 7, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i8> @llvm.vp.fshr.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %res
+}
+
+declare <vscale x 2 x i8> @llvm.vp.fshl.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+define <vscale x 2 x i8> @fshl_v2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 7, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i8> @llvm.vp.fshl.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %res
+}
+
+declare <vscale x 4 x i8> @llvm.vp.fshr.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+define <vscale x 4 x i8> @fshr_v4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 7, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i8> @llvm.vp.fshr.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i8> %res
+}
+
+declare <vscale x 4 x i8> @llvm.vp.fshl.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+define <vscale x 4 x i8> @fshl_v4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 7, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i8> @llvm.vp.fshl.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i8> %res
+}
+
+declare <vscale x 8 x i8> @llvm.vp.fshr.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+define <vscale x 8 x i8> @fshr_v8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 7, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i8> @llvm.vp.fshr.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i8> %res
+}
+
+declare <vscale x 8 x i8> @llvm.vp.fshl.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+define <vscale x 8 x i8> @fshl_v8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 7, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i8> @llvm.vp.fshl.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i8> %res
+}
+
+declare <vscale x 16 x i8> @llvm.vp.fshr.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+define <vscale x 16 x i8> @fshr_v16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vnot.v v14, v12, v0.t
+; CHECK-NEXT: vand.vi v14, v14, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v14, v0.t
+; CHECK-NEXT: vand.vi v12, v12, 7, v0.t
+; CHECK-NEXT: vsrl.vv v10, v10, v12, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.vp.fshr.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 16 x i8> @llvm.vp.fshl.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+define <vscale x 16 x i8> @fshl_v16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vsrl.vi v10, v10, 1, v0.t
+; CHECK-NEXT: vnot.v v14, v12, v0.t
+; CHECK-NEXT: vand.vi v14, v14, 7, v0.t
+; CHECK-NEXT: vsrl.vv v10, v10, v14, v0.t
+; CHECK-NEXT: vand.vi v12, v12, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.vp.fshl.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.vp.fshr.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+define <vscale x 32 x i8> @fshr_v32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vnot.v v20, v16, v0.t
+; CHECK-NEXT: vand.vi v20, v20, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v20, v0.t
+; CHECK-NEXT: vand.vi v16, v16, 7, v0.t
+; CHECK-NEXT: vsrl.vv v12, v12, v16, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 32 x i8> @llvm.vp.fshr.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.vp.fshl.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+define <vscale x 32 x i8> @fshl_v32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vsrl.vi v12, v12, 1, v0.t
+; CHECK-NEXT: vnot.v v20, v16, v0.t
+; CHECK-NEXT: vand.vi v20, v20, 7, v0.t
+; CHECK-NEXT: vsrl.vv v12, v12, v20, v0.t
+; CHECK-NEXT: vand.vi v16, v16, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 32 x i8> @llvm.vp.fshl.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 64 x i8> @llvm.vp.fshr.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+define <vscale x 64 x i8> @fshr_v64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsll.vi v16, v8, 1, v0.t
+; CHECK-NEXT: vnot.v v8, v24, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 7, v0.t
+; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vand.vi v16, v24, 7, v0.t
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsrl.vv v16, v24, v16, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %res = call <vscale x 64 x i8> @llvm.vp.fshr.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 %evl)
+  ret <vscale x 64 x i8> %res
+}
+
+declare <vscale x 64 x i8> @llvm.vp.fshl.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+define <vscale x 64 x i8> @fshl_v64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsrl.vi v16, v16, 1, v0.t
+; CHECK-NEXT: vnot.v v8, v24, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 7, v0.t
+; CHECK-NEXT: vsrl.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vand.vi v16, v24, 7, v0.t
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsll.vv v16, v24, v16, v0.t
+; CHECK-NEXT: vor.vv v8, v16, v8, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %res = call <vscale x 64 x i8> @llvm.vp.fshl.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 %evl)
+  ret <vscale x 64 x i8> %res
+}
+
+declare <vscale x 1 x i16> @llvm.vp.fshr.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+define <vscale x 1 x i16> @fshr_v1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 15, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 15, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 1 x i16> @llvm.vp.fshr.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.vp.fshl.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+define <vscale x 1 x i16> @fshl_v1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 15, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 15, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 1 x i16> @llvm.vp.fshl.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.vp.fshr.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+define <vscale x 2 x i16> @fshr_v2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vnot.v v11, v10, v0.t
+; CHECK-NEXT: vand.vi v11, v11, 15, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t
+; CHECK-NEXT: vand.vi v10, v10, 15, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i16> @llvm.vp.fshr.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.vp.fshl.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+define <vscale x 2 x i16> @fshl_v2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v2i16:
+; CHECK: # %bb.0:
e16, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 15, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call @llvm.vp.fshl.nxv2i16( %a, %b, %c, %m, i32 %evl) + ret %res +} + +declare @llvm.vp.fshr.nxv4i16(, , , , i32) +define @fshr_v4i16( %a, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 15, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call @llvm.vp.fshr.nxv4i16( %a, %b, %c, %m, i32 %evl) + ret %res +} + +declare @llvm.vp.fshl.nxv4i16(, , , , i32) +define @fshl_v4i16( %a, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t +; CHECK-NEXT: vnot.v v11, v10, v0.t +; CHECK-NEXT: vand.vi v11, v11, 15, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t +; CHECK-NEXT: vand.vi v10, v10, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call @llvm.vp.fshl.nxv4i16( %a, %b, %c, %m, i32 %evl) + ret %res +} + +declare @llvm.vp.fshr.nxv8i16(, , , , i32) +define @fshr_v8i16( %a, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vnot.v v14, v12, v0.t +; CHECK-NEXT: vand.vi v14, v14, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v14, v0.t +; CHECK-NEXT: vand.vi v12, v12, 15, v0.t +; CHECK-NEXT: vsrl.vv v10, v10, v12, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %res = call @llvm.vp.fshr.nxv8i16( %a, %b, %c, %m, i32 %evl) + ret %res +} + +declare @llvm.vp.fshl.nxv8i16(, , , , i32) +define @fshl_v8i16( %a, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vsrl.vi v10, v10, 1, v0.t +; CHECK-NEXT: vnot.v v14, v12, v0.t +; CHECK-NEXT: vand.vi v14, v14, 15, v0.t +; CHECK-NEXT: vsrl.vv v10, v10, v14, v0.t +; CHECK-NEXT: vand.vi v12, v12, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %res = call @llvm.vp.fshl.nxv8i16( %a, %b, %c, %m, i32 %evl) + ret %res +} + +declare @llvm.vp.fshr.nxv16i16(, , , , i32) +define @fshr_v16i16( %a, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vnot.v v20, v16, v0.t +; CHECK-NEXT: vand.vi v20, v20, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v20, v0.t +; CHECK-NEXT: vand.vi v16, v16, 15, v0.t +; CHECK-NEXT: vsrl.vv v12, v12, v16, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %res = call @llvm.vp.fshr.nxv16i16( %a, %b, %c, %m, i32 %evl) + ret %res +} + +declare @llvm.vp.fshl.nxv16i16(, , , , i32) +define @fshl_v16i16( %a, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vsrl.vi v12, v12, 1, v0.t +; CHECK-NEXT: vnot.v 
v20, v16, v0.t +; CHECK-NEXT: vand.vi v20, v20, 15, v0.t +; CHECK-NEXT: vsrl.vv v12, v12, v20, v0.t +; CHECK-NEXT: vand.vi v16, v16, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %res = call @llvm.vp.fshl.nxv16i16( %a, %b, %c, %m, i32 %evl) + ret %res +} + +declare @llvm.vp.fshr.nxv32i16(, , , , i32) +define @fshr_v32i16( %a, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: sub sp, sp, a2 +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsll.vi v16, v8, 1, v0.t +; CHECK-NEXT: vnot.v v8, v24, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t +; CHECK-NEXT: vand.vi v16, v24, 15, v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vsrl.vv v16, v24, v16, v0.t +; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call @llvm.vp.fshr.nxv32i16( %a, %b, %c, %m, i32 %evl) + ret %res +} + +declare @llvm.vp.fshl.nxv32i16(, , , , i32) +define @fshl_v32i16( %a, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: sub sp, sp, a2 +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsrl.vi v16, v16, 1, v0.t +; CHECK-NEXT: vnot.v v8, v24, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: vsrl.vv v8, v16, v8, v0.t +; CHECK-NEXT: vand.vi v16, v24, 15, v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vsll.vv v16, v24, v16, v0.t +; CHECK-NEXT: vor.vv v8, v16, v8, v0.t +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call @llvm.vp.fshl.nxv32i16( %a, %b, %c, %m, i32 %evl) + ret %res +} + +declare @llvm.vp.fshr.nxv1i32(, , , , i32) +define @fshr_v1i32( %a, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: fshr_v1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 31 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vand.vx v11, v10, a1, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t +; CHECK-NEXT: vnot.v v10, v10, v0.t +; CHECK-NEXT: vand.vx v10, v10, a1, v0.t +; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %res = call @llvm.vp.fshr.nxv1i32( %a, %b, %c, %m, i32 %evl) + ret %res +} + +declare @llvm.vp.fshl.nxv1i32(, , , , i32) +define @fshl_v1i32( %a, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: fshl_v1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 31 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vand.vx v11, v10, a1, v0.t +; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t +; CHECK-NEXT: vnot.v v10, v10, v0.t +; CHECK-NEXT: vand.vx v10, v10, a1, v0.t +; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t +; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t 
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 1 x i32> @llvm.vp.fshl.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i32> %res
+}
+
+declare <vscale x 2 x i32> @llvm.vp.fshr.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+define <vscale x 2 x i32> @fshr_v2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 31
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vand.vx v11, v10, a1, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t
+; CHECK-NEXT: vnot.v v10, v10, v0.t
+; CHECK-NEXT: vand.vx v10, v10, a1, v0.t
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i32> @llvm.vp.fshr.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %res
+}
+
+declare <vscale x 2 x i32> @llvm.vp.fshl.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+define <vscale x 2 x i32> @fshl_v2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 31
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vand.vx v11, v10, a1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t
+; CHECK-NEXT: vnot.v v10, v10, v0.t
+; CHECK-NEXT: vand.vx v10, v10, a1, v0.t
+; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i32> @llvm.vp.fshl.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %res
+}
+
+declare <vscale x 4 x i32> @llvm.vp.fshr.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+define <vscale x 4 x i32> @fshr_v4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 31
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vand.vx v14, v12, a1, v0.t
+; CHECK-NEXT: vsrl.vv v10, v10, v14, v0.t
+; CHECK-NEXT: vnot.v v12, v12, v0.t
+; CHECK-NEXT: vand.vx v12, v12, a1, v0.t
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.vp.fshr.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i32> %res
+}
+
+declare <vscale x 4 x i32> @llvm.vp.fshl.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+define <vscale x 4 x i32> @fshl_v4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 31
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vand.vx v14, v12, a1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v14, v0.t
+; CHECK-NEXT: vnot.v v12, v12, v0.t
+; CHECK-NEXT: vand.vx v12, v12, a1, v0.t
+; CHECK-NEXT: vsrl.vi v10, v10, 1, v0.t
+; CHECK-NEXT: vsrl.vv v10, v10, v12, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.vp.fshl.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i32> %res
+}
+
+declare <vscale x 8 x i32> @llvm.vp.fshr.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+define <vscale x 8 x i32> @fshr_v8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 31
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vand.vx v20, v16, a1, v0.t
+; CHECK-NEXT: vsrl.vv v12, v12, v20, v0.t
+; CHECK-NEXT: vnot.v v16, v16, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a1, v0.t
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i32> @llvm.vp.fshr.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %res
+}
+
+declare <vscale x 8 x i32> @llvm.vp.fshl.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+define <vscale x 8 x i32> @fshl_v8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 31
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vand.vx v20, v16, a1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v20, v0.t
+; CHECK-NEXT: vnot.v v16, v16, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a1, v0.t
+; CHECK-NEXT: vsrl.vi v12, v12, 1, v0.t
+; CHECK-NEXT: vsrl.vv v12, v12, v16, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i32> @llvm.vp.fshl.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %res
+}
+
+declare <vscale x 16 x i32> @llvm.vp.fshr.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+define <vscale x 16 x i32> @fshr_v16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: li a0, 31
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vand.vx v8, v24, a0, v0.t
+; CHECK-NEXT: vsrl.vv v16, v16, v8, v0.t
+; CHECK-NEXT: vnot.v v8, v24, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsll.vi v24, v24, 1, v0.t
+; CHECK-NEXT: vsll.vv v8, v24, v8, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i32> @llvm.vp.fshr.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i32> %res
+}
+
+declare <vscale x 16 x i32> @llvm.vp.fshl.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+define <vscale x 16 x i32> @fshl_v16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: li a0, 31
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vand.vx v8, v24, a0, v0.t
+; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vnot.v v16, v24, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a0, v0.t
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsrl.vi v24, v24, 1, v0.t
+; CHECK-NEXT: vsrl.vv v16, v24, v16, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i32> @llvm.vp.fshl.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i32> %res
+}
+
+declare <vscale x 1 x i64> @llvm.vp.fshr.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+define <vscale x 1 x i64> @fshr_v1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 63
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vand.vx v11, v10, a1, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t
+; CHECK-NEXT: vnot.v v10, v10, v0.t
+; CHECK-NEXT: vand.vx v10, v10, a1, v0.t
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 1 x i64> @llvm.vp.fshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i64> %res
+}
+
+declare <vscale x 1 x i64> @llvm.vp.fshl.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+define <vscale x 1 x i64> @fshl_v1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 63
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vand.vx v11, v10, a1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t
+; CHECK-NEXT: vnot.v v10, v10, v0.t
+; CHECK-NEXT: vand.vx v10, v10, a1, v0.t
+; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t
+; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 1 x i64> @llvm.vp.fshl.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i64> %res
+}
+
+declare <vscale x 2 x i64> @llvm.vp.fshr.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+define <vscale x 2 x i64> @fshr_v2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 63
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vand.vx v14, v12, a1, v0.t
+; CHECK-NEXT: vsrl.vv v10, v10, v14, v0.t
+; CHECK-NEXT: vnot.v v12, v12, v0.t
+; CHECK-NEXT: vand.vx v12, v12, a1, v0.t
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.vp.fshr.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %res
+}
+
+declare <vscale x 2 x i64> @llvm.vp.fshl.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+define <vscale x 2 x i64> @fshl_v2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 63
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vand.vx v14, v12, a1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v14, v0.t
+; CHECK-NEXT: vnot.v v12, v12, v0.t
+; CHECK-NEXT: vand.vx v12, v12, a1, v0.t
+; CHECK-NEXT: vsrl.vi v10, v10, 1, v0.t
+; CHECK-NEXT: vsrl.vv v10, v10, v12, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.vp.fshl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %res
+}
+
+declare <vscale x 4 x i64> @llvm.vp.fshr.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+define <vscale x 4 x i64> @fshr_v4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 63
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vand.vx v20, v16, a1, v0.t
+; CHECK-NEXT: vsrl.vv v12, v12, v20, v0.t
+; CHECK-NEXT: vnot.v v16, v16, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a1, v0.t
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i64> @llvm.vp.fshr.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i64> %res
+}
+
+declare <vscale x 4 x i64> @llvm.vp.fshl.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+define <vscale x 4 x i64> @fshl_v4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 63
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vand.vx v20, v16, a1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v20, v0.t
+; CHECK-NEXT: vnot.v v16, v16, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a1, v0.t
+; CHECK-NEXT: vsrl.vi v12, v12, 1, v0.t
+; CHECK-NEXT: vsrl.vv v12, v12, v16, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i64> @llvm.vp.fshl.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i64> %res
+}
+
+declare <vscale x 7 x i64> @llvm.vp.fshr.nxv7i64(<vscale x 7 x i64>, <vscale x 7 x i64>, <vscale x 7 x i64>, <vscale x 7 x i1>, i32)
+define <vscale x 7 x i64> @fshr_v7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64> %b, <vscale x 7 x i64> %c, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v7i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: li a0, 63
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vand.vx v8, v24, a0, v0.t
+; CHECK-NEXT: vsrl.vv v16, v16, v8, v0.t
+; CHECK-NEXT: vnot.v v8, v24, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsll.vi v24, v24, 1, v0.t
+; CHECK-NEXT: vsll.vv v8, v24, v8, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %res = call <vscale x 7 x i64> @llvm.vp.fshr.nxv7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64> %b, <vscale x 7 x i64> %c, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x i64> %res
+}
+
+declare <vscale x 7 x i64> @llvm.vp.fshl.nxv7i64(<vscale x 7 x i64>, <vscale x 7 x i64>, <vscale x 7 x i64>, <vscale x 7 x i1>, i32)
+define <vscale x 7 x i64> @fshl_v7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64> %b, <vscale x 7 x i64> %c, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v7i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: li a0, 63
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vand.vx v8, v24, a0, v0.t
+; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vnot.v v16, v24, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a0, v0.t
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsrl.vi v24, v24, 1, v0.t
+; CHECK-NEXT: vsrl.vv v16, v24, v16, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %res = call <vscale x 7 x i64> @llvm.vp.fshl.nxv7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64> %b, <vscale x 7 x i64> %c, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x i64> %res
+}
+
+declare <vscale x 8 x i64> @llvm.vp.fshr.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+define <vscale x 8 x i64> @fshr_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: li a0, 63
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vand.vx v8, v24, a0, v0.t
+; CHECK-NEXT: vsrl.vv v16, v16, v8, v0.t
+; CHECK-NEXT: vnot.v v8, v24, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsll.vi v24, v24, 1, v0.t
+; CHECK-NEXT: vsll.vv v8, v24, v8, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i64> @llvm.vp.fshr.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %res
+}
+
+declare <vscale x 8 x i64> @llvm.vp.fshl.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+define <vscale x 8 x i64> @fshl_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: li a0, 63
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vand.vx v8, v24, a0, v0.t
+; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vnot.v v16, v24, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a0, v0.t
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsrl.vi v24, v24, 1, v0.t
+; CHECK-NEXT: vsrl.vv v16, v24, v16, v0.t
+; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i64> @llvm.vp.fshl.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %res
+}
+
+declare <vscale x 16 x i64> @llvm.vp.fshr.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i64>, <vscale x 16 x i64>, <vscale x 16 x i1>, i32)
+define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i64> %b, <vscale x 16 x i64> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshr_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a3, 56
+; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a3, 40
+; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a3, a1, 3
+; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a3
+; CHECK-NEXT: slli a3, a1, 3
+; CHECK-NEXT: add a5, a0, a3
+; CHECK-NEXT: vl8re64.v v8, (a5)
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: slli a5, a5, 4
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
+; CHECK-NEXT: sub a5, a4, a1
+; CHECK-NEXT: sltu a6, a4, a5
+; CHECK-NEXT: addi a6, a6, -1
+; CHECK-NEXT: and a5, a6, a5
+; CHECK-NEXT: add a3, a2, a3
+; CHECK-NEXT: vl8re64.v v8, (a3)
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 5
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8re64.v v16, (a2)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a2, 48
+; CHECK-NEXT: mul a0, a0, a2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-NEXT: li a0, 63
+; CHECK-NEXT: vnot.v v16, v8, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a0, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8re8.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsll.vi v16, v16, 1, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8re8.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8re8.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsll.vv v16, v8, v16, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8re8.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsrl.vv v8, v16, v8, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8re8.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vor.vv v8, v16, v8, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: bltu a4, a1, .LBB46_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a4, a1
+; CHECK-NEXT: .LBB46_2:
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a2, 48
+; CHECK-NEXT: mul a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 5
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 5
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a2, 48
+; CHECK-NEXT: mul a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vnot.v v16, v8, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a0, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 40
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 5
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 56
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i64> @llvm.vp.fshr.nxv16i64(<vscale x 16 x i64> %a, <vscale x 16 x i64> %b, <vscale x 16 x i64> %c, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i64> %res
+}
+
+declare <vscale x 16 x i64> @llvm.vp.fshl.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i64>, <vscale x 16 x i64>, <vscale x 16 x i1>, i32)
+define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i64> %b, <vscale x 16 x i64> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: fshl_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a3, 56
+; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a3, 40
+; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a3, a1, 3
+; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a3
+; CHECK-NEXT: slli a3, a1, 3
+; CHECK-NEXT: add a5, a0, a3
+; CHECK-NEXT: vl8re64.v v8, (a5)
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: slli a5, a5, 4
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
+; CHECK-NEXT: sub a5, a4, a1
+; CHECK-NEXT: sltu a6, a4, a5
+; CHECK-NEXT: addi a6, a6, -1
+; CHECK-NEXT: and a5, a6, a5
+; CHECK-NEXT: add a3, a2, a3
+; CHECK-NEXT: vl8re64.v v8, (a3)
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 5
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8re64.v v16, (a2)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a2, 48
+; CHECK-NEXT: mul a0, a0, a2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-NEXT: li a0, 63
+; CHECK-NEXT: vnot.v v16, v8, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a0, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8re8.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsrl.vi v16, v16, 1, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8re8.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8re8.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsrl.vv v16, v8, v16, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8re8.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8re8.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: bltu a4, a1, .LBB47_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a4, a1
+; CHECK-NEXT: .LBB47_2:
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a2, 48
+; CHECK-NEXT: mul a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vand.vx v16, v8, a0, v0.t
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a2, 40
+; CHECK-NEXT: mul a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsll.vv v16, v16, v8, v0.t
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a2, 40
+; CHECK-NEXT: mul a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a2, 48
+; CHECK-NEXT: mul a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vnot.v v16, v8, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a0, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 5
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsrl.vi v8, v8, 1, v0.t
+; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 40
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vor.vv v8, v16, v8, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 56
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i64> @llvm.vp.fshl.nxv16i64(<vscale x 16 x i64> %a, <vscale x 16 x i64> %b, <vscale x 16 x i64> %c, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i64> %res
+}
diff --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp
--- a/llvm/unittests/IR/VPIntrinsicTest.cpp
+++ b/llvm/unittests/IR/VPIntrinsicTest.cpp
@@ -147,6 +147,10 @@
   Str << " declare <8 x i16> @llvm.vp.bswap.v8i16"
       << "(<8 x i16>, <8 x i1>, i32) ";
+  Str << " declare <8 x i16> @llvm.vp.fshl.v8i16"
+      << "(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32) ";
+  Str << " declare <8 x i16> @llvm.vp.fshr.v8i16"
+      << "(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32) ";
 
   return parseAssemblyString(Str.str(), Err, C);
 }