diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -943,6 +943,20 @@
   SDValue getVPLogicalNOT(const SDLoc &DL, SDValue Val, SDValue Mask,
                           SDValue EVL, EVT VT);
 
+  /// Convert a vector-predicated Op, which must be an integer vector, to the
+  /// vector type VT, by performing either a vector-predicated zext or a
+  /// vector-predicated truncation. Op is returned as-is if Op and VT are
+  /// integer vectors with the same element width.
+  SDValue getVPZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT,
+                           ArrayRef<SDValue> Ops);
+
+  /// Convert a vector-predicated Op, which must be an integer vector, to the
+  /// integer vector type VT, by either truncating it or performing the zero
+  /// or sign extension that is appropriate for the pointer's semantics. For
+  /// now this method simply calls getVPZExtOrTrunc.
+  SDValue getVPPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT,
+                             ArrayRef<SDValue> Ops);
+
   /// Returns sum of the base pointer and offset.
   /// Unlike getObjectPtrOffset this does not set NoUnsignedWrap by default.
   SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1482,6 +1482,22 @@
   return getNode(ISD::VP_XOR, DL, VT, Val, TrueValue, Mask, EVL);
 }
 
+SDValue SelectionDAG::getVPPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT,
+                                         ArrayRef<SDValue> Ops) {
+  return getVPZExtOrTrunc(Op, DL, VT, Ops);
+}
+
+SDValue SelectionDAG::getVPZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT,
+                                       ArrayRef<SDValue> Ops) {
+  if (VT.getVectorElementType().bitsEq(
+          Op.getValueType().getVectorElementType()))
+    return Op;
+  return VT.getVectorElementType().bitsGT(
+             Op.getValueType().getVectorElementType())
+             ? getNode(ISD::VP_ZERO_EXTEND, DL, VT, Ops)
+             : getNode(ISD::VP_TRUNCATE, DL, VT, Ops);
+}
+
 SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                       EVT OpVT) {
   if (!V)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7714,6 +7714,26 @@
     }
     break;
   }
+  case ISD::VP_INTTOPTR: {
+    SDValue N = getValue(VPIntrin.getOperand(0));
+    EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
+    EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType());
+    N = DAG.getVPPtrExtOrTrunc(N, getCurSDLoc(), DestVT, OpValues);
+    N = DAG.getVPZExtOrTrunc(N, getCurSDLoc(), PtrMemVT, OpValues);
+    setValue(&VPIntrin, N);
+    break;
+  }
+  case ISD::VP_PTRTOINT: {
+    SDValue N = getValue(VPIntrin.getOperand(0));
+    EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+                                                          VPIntrin.getType());
+    EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(),
+                                       VPIntrin.getOperand(0)->getType());
+    N = DAG.getVPPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT, OpValues);
+    N = DAG.getVPZExtOrTrunc(N, getCurSDLoc(), DestVT, OpValues);
+    setValue(&VPIntrin, N);
+    break;
+  }
   }
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-inttoptr-ptrtoint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-inttoptr-ptrtoint.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-inttoptr-ptrtoint.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-vector-bits-min=128 < %s \
+; RUN:   | FileCheck %s
+
+declare <4 x ptr> @llvm.vp.inttoptr.v4p0.v4i32(<4 x i32>, <4 x i1>, i32)
+
+define <4 x ptr> @inttoptr_v4p0_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: inttoptr_v4p0_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x ptr> @llvm.vp.inttoptr.v4p0.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x ptr> %v
+}
+
+declare <4 x ptr> @llvm.vp.inttoptr.v4p0.v4i64(<4 x i64>, <4 x i1>, i32)
+
+define <4 x ptr> @inttoptr_v4p0_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: inttoptr_v4p0_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
+  %v = call <4 x ptr> @llvm.vp.inttoptr.v4p0.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x ptr> %v
+}
+
+declare <4 x i32> @llvm.vp.ptrtoint.v4i32.v4p0(<4 x ptr>, <4 x i1>, i32)
+
+define <4 x i32> @ptrtoint_v4i32_v4p0(<4 x ptr> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: ptrtoint_v4i32_v4p0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x i32> @llvm.vp.ptrtoint.v4i32.v4p0(<4 x ptr> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i32> %v
+}
+
+declare <4 x i64> @llvm.vp.ptrtoint.v4i64.v4p0(<4 x ptr>, <4 x i1>, i32)
+
+define <4 x i64> @ptrtoint_v4i64_v4p0(<4 x ptr> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: ptrtoint_v4i64_v4p0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret
+  %v = call <4 x i64> @llvm.vp.ptrtoint.v4i64.v4p0(<4 x ptr> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i64> %v
+}
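
As used in this patch, getVPZExtOrTrunc and getVPPtrExtOrTrunc forward Ops straight to getNode, so Ops appears to be expected to already hold the source vector, the mask, and the EVL in that order, with Ops[0] being the same value as Op. Below is a minimal, hypothetical out-of-tree caller sketch under that assumption; the function name lowerVPIntCast is made up for illustration and is not part of the patch.

// Hypothetical sketch (illustration only): widen or narrow an integer vector
// VecOp to ResVT under Mask/EVL using the new SelectionDAG helper. Assumes the
// usual SelectionDAG headers are available in the including .cpp file.
static SDValue lowerVPIntCast(SelectionDAG &DAG, const SDLoc &DL, EVT ResVT,
                              SDValue VecOp, SDValue Mask, SDValue EVL) {
  // getVPZExtOrTrunc passes this array unchanged to
  // getNode(ISD::VP_ZERO_EXTEND/VP_TRUNCATE, ...), so it must be laid out as
  // {source, mask, EVL}, matching the VP node's operand order.
  SDValue Ops[] = {VecOp, Mask, EVL};
  return DAG.getVPZExtOrTrunc(VecOp, DL, ResVT, Ops);
}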