diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1358,37 +1358,17 @@
 /// MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's
 /// index parameter when calculating addresses.
 ///
-/// SIGNED_SCALED     Addr = Base + ((signed)Index * sizeof(element))
-/// SIGNED_UNSCALED   Addr = Base + (signed)Index
-/// UNSIGNED_SCALED   Addr = Base + ((unsigned)Index * sizeof(element))
-/// UNSIGNED_UNSCALED Addr = Base + (unsigned)Index
-enum MemIndexType {
-  SIGNED_SCALED = 0,
-  SIGNED_UNSCALED,
-  UNSIGNED_SCALED,
-  UNSIGNED_UNSCALED
-};
+/// SIGNED_SCALED     Addr = Base + ((signed)Index * Scale)
+/// UNSIGNED_SCALED   Addr = Base + ((unsigned)Index * Scale)
+///
+/// NOTE: The value of Scale is typically only known to the node owning the
+/// IndexType, with a value of 1 the equivalent of being unscaled.
+enum MemIndexType { SIGNED_SCALED = 0, UNSIGNED_SCALED };
 
-static const int LAST_MEM_INDEX_TYPE = UNSIGNED_UNSCALED + 1;
-
-inline bool isIndexTypeScaled(MemIndexType IndexType) {
-  return IndexType == SIGNED_SCALED || IndexType == UNSIGNED_SCALED;
-}
+static const int LAST_MEM_INDEX_TYPE = UNSIGNED_SCALED + 1;
 
 inline bool isIndexTypeSigned(MemIndexType IndexType) {
-  return IndexType == SIGNED_SCALED || IndexType == SIGNED_UNSCALED;
-}
-
-inline MemIndexType getSignedIndexType(MemIndexType IndexType) {
-  return isIndexTypeScaled(IndexType) ? SIGNED_SCALED : SIGNED_UNSCALED;
-}
-
-inline MemIndexType getUnsignedIndexType(MemIndexType IndexType) {
-  return isIndexTypeScaled(IndexType) ? UNSIGNED_SCALED : UNSIGNED_UNSCALED;
-}
-
-inline MemIndexType getUnscaledIndexType(MemIndexType IndexType) {
-  return isIndexTypeSigned(IndexType) ? SIGNED_UNSCALED : UNSIGNED_UNSCALED;
+  return IndexType == SIGNED_SCALED;
 }
 
 //===--------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -2702,7 +2702,9 @@
   ISD::MemIndexType getIndexType() const {
     return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
   }
-  bool isIndexScaled() const { return isIndexTypeScaled(getIndexType()); }
+  bool isIndexScaled() const {
+    return !cast<ConstantSDNode>(getScale())->isOne();
+  }
   bool isIndexSigned() const { return isIndexTypeSigned(getIndexType()); }
 
   // In the both nodes address is Op1, mask is Op2:
@@ -2784,7 +2786,9 @@
   ISD::MemIndexType getIndexType() const {
     return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
   }
-  bool isIndexScaled() const { return isIndexTypeScaled(getIndexType()); }
+  bool isIndexScaled() const {
+    return !cast<ConstantSDNode>(getScale())->isOne();
+  }
   bool isIndexSigned() const { return isIndexTypeSigned(getIndexType()); }
 
   // In the both nodes address is Op1, mask is Op2:
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -4891,10 +4891,6 @@
   // combiner can fold the new nodes.
   SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
 
-  /// Give targets the chance to reduce the number of distinct addresing modes.
-  ISD::MemIndexType getCanonicalIndexType(ISD::MemIndexType IndexType,
-                                          EVT MemVT, SDValue Offsets) const;
-
 private:
   SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                            const SDLoc &DL, DAGCombinerInfo &DCI) const;
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10446,12 +10446,12 @@
   if (Index.getOpcode() == ISD::ZERO_EXTEND) {
     SDValue Op = Index.getOperand(0);
     if (TLI.shouldRemoveExtendFromGSIndex(Op.getValueType())) {
-      IndexType = ISD::getUnsignedIndexType(IndexType);
+      IndexType = ISD::UNSIGNED_SCALED;
       Index = Op;
       return true;
     }
     if (ISD::isIndexTypeSigned(IndexType)) {
-      IndexType = ISD::getUnsignedIndexType(IndexType);
+      IndexType = ISD::UNSIGNED_SCALED;
       return true;
     }
   }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -8605,7 +8605,6 @@
     return SDValue(E, 0);
   }
 
-  IndexType = TLI->getCanonicalIndexType(IndexType, MemVT, Ops[4]);
   auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, MemVT, MMO, IndexType, ExtTy);
   createOperands(N, Ops);
@@ -8653,7 +8652,6 @@
     return SDValue(E, 0);
   }
 
-  IndexType = TLI->getCanonicalIndexType(IndexType, MemVT, Ops[4]);
   auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                            VTs, MemVT, MMO, IndexType, IsTrunc);
   createOperands(N, Ops);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4444,7 +4444,7 @@
   if (!UniformBase) {
     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
     Index = getValue(Ptr);
-    IndexType = ISD::SIGNED_UNSCALED;
+    IndexType = ISD::SIGNED_SCALED;
     Scale =
         DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
   }
@@ -4552,7 +4552,7 @@
   if (!UniformBase) {
     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
     Index = getValue(Ptr);
-    IndexType = ISD::SIGNED_UNSCALED;
+    IndexType = ISD::SIGNED_SCALED;
     Scale =
         DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
   }
@@ -7416,7 +7416,7 @@
   if (!UniformBase) {
     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
     Index = getValue(PtrOperand);
-    IndexType = ISD::SIGNED_UNSCALED;
+    IndexType = ISD::SIGNED_SCALED;
     Scale =
         DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
   }
@@ -7473,7 +7473,7 @@
   if (!UniformBase) {
     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
     Index = getValue(PtrOperand);
-    IndexType = ISD::SIGNED_UNSCALED;
+    IndexType = ISD::SIGNED_SCALED;
     Scale =
         DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
   }
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8631,18 +8631,6 @@
   return SDValue();
 }
 
-// Convert redundant addressing modes (e.g. scaling is redundant
-// when accessing bytes).
-ISD::MemIndexType
-TargetLowering::getCanonicalIndexType(ISD::MemIndexType IndexType, EVT MemVT,
-                                      SDValue Offsets) const {
-  // Scaling is unimportant for bytes, canonicalize to unscaled.
-  if (ISD::isIndexTypeScaled(IndexType) && MemVT.getScalarType() == MVT::i8)
-    return ISD::getUnscaledIndexType(IndexType);
-
-  return IndexType;
-}
-
 SDValue TargetLowering::expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const {
   SDValue Op0 = Node->getOperand(0);
   SDValue Op1 = Node->getOperand(1);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4695,7 +4695,6 @@
     Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
 
     SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
-    IndexType = getUnscaledIndexType(IndexType);
     return DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
                                MGT->getMemOperand(), IndexType, ExtType);
   }
@@ -4794,7 +4793,6 @@
     Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
 
     SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
-    IndexType = getUnscaledIndexType(IndexType);
     return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,
                                 MSC->getMemOperand(), IndexType,
                                 MSC->isTruncatingStore());
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8851,40 +8851,41 @@
                         DL, IndexVT, Index);
   }
 
-  unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
-  if (IsIndexScaled && Scale != 1) {
-    // Manually scale the indices by the element size.
+  if (IsIndexScaled) {
+    // Manually scale the indices.
     // TODO: Sanitize the scale operand here?
     // TODO: For VP nodes, should we use VP_SHL here?
+    unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
     assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
     SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
     Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
+    ScaleOp = DAG.getTargetConstant(1, DL, ScaleOp.getValueType());
   }
 
-  ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
+  ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_SCALED;
   if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
     return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
                            {VPGN->getChain(), VPGN->getBasePtr(), Index,
-                            VPGN->getScale(), VPGN->getMask(),
+                            ScaleOp, VPGN->getMask(),
                             VPGN->getVectorLength()},
                            VPGN->getMemOperand(), NewIndexTy);
   if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
     return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
                             {VPSN->getChain(), VPSN->getValue(),
-                             VPSN->getBasePtr(), Index, VPSN->getScale(),
+                             VPSN->getBasePtr(), Index, ScaleOp,
                              VPSN->getMask(), VPSN->getVectorLength()},
                             VPSN->getMemOperand(), NewIndexTy);
   if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
     return DAG.getMaskedGather(
         N->getVTList(), MGN->getMemoryVT(), DL,
         {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
-         MGN->getBasePtr(), Index, MGN->getScale()},
+         MGN->getBasePtr(), Index, ScaleOp},
        MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
   const auto *MSN = cast<MaskedScatterSDNode>(N);
   return DAG.getMaskedScatter(
       N->getVTList(), MSN->getMemoryVT(), DL,
       {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
-       Index, MSN->getScale()},
+       Index, ScaleOp},
       MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
 }
 case RISCVISD::SRA_VL:
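
Note: the following is a minimal standalone sketch, not part of the patch and not LLVM API, of the addressing semantics that remain after this change. With the *_UNSCALED enumerators removed, MemIndexType only records the signedness of the index; scaling lives entirely in the node's Scale operand, and Scale == 1 reproduces the old unscaled behaviour. All names below (computeAddress, its parameters) are hypothetical.

  #include <cstdint>

  enum MemIndexType { SIGNED_SCALED = 0, UNSIGNED_SCALED };

  // Addr = Base + ((signed|unsigned)Index * Scale), mirroring the updated
  // comment in ISDOpcodes.h; a 32-bit raw index is widened per the index type.
  uint64_t computeAddress(uint64_t Base, uint32_t Index, uint64_t Scale,
                          MemIndexType IndexType) {
    uint64_t Extended =
        (IndexType == SIGNED_SCALED)
            ? static_cast<uint64_t>(
                  static_cast<int64_t>(static_cast<int32_t>(Index))) // sext
            : static_cast<uint64_t>(Index);                          // zext
    return Base + Extended * Scale; // Scale == 1: the old *_UNSCALED forms.
  }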
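
Similarly, a sketch (hypothetical helper, not the RISCVISelLowering code itself) of the "manually scale the indices" step in the RISC-V hunk: a power-of-two Scale is folded into the indices with a left shift by Log2(Scale), after which the rewritten node carries a Scale operand of 1.

  #include <cassert>
  #include <cstdint>
  #include <vector>

  // Fold a power-of-two scale into the index vector, as the ISD::SHL with a
  // splat of Log2_32(Scale) does in the patch; the caller then rewrites
  // ScaleOp to 1 so the resulting node is effectively unscaled.
  std::vector<uint64_t> scaleIndices(std::vector<uint64_t> Indices,
                                     unsigned Scale) {
    assert(Scale != 0 && (Scale & (Scale - 1)) == 0 &&
           "Expecting power-of-two scale");
    unsigned ShAmt = 0;
    while ((1u << ShAmt) < Scale)
      ++ShAmt; // equivalent of Log2_32(Scale)
    for (uint64_t &I : Indices)
      I <<= ShAmt;
    return Indices;
  }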