diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1338,37 +1338,25 @@
 /// MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's
 /// index parameter when calculating addresses.
 ///
-/// SIGNED_SCALED     Addr = Base + ((signed)Index * sizeof(element))
-/// SIGNED_UNSCALED   Addr = Base + (signed)Index
-/// UNSIGNED_SCALED   Addr = Base + ((unsigned)Index * sizeof(element))
-/// UNSIGNED_UNSCALED Addr = Base + (unsigned)Index
-enum MemIndexType {
-  SIGNED_SCALED = 0,
-  SIGNED_UNSCALED,
-  UNSIGNED_SCALED,
-  UNSIGNED_UNSCALED
-};
-
-static const int LAST_MEM_INDEX_TYPE = UNSIGNED_UNSCALED + 1;
+/// SIGNED_SCALED     Addr = Base + ((signed)Index * Scale)
+/// UNSIGNED_SCALED   Addr = Base + ((unsigned)Index * Scale)
+///
+/// NOTE: The value of Scale is typically only known to the node owning the
+/// IndexType, with a value of 1 the equivalent of being unscaled.
+enum MemIndexType { SIGNED_SCALED = 0, UNSIGNED_SCALED };
 
-inline bool isIndexTypeScaled(MemIndexType IndexType) {
-  return IndexType == SIGNED_SCALED || IndexType == UNSIGNED_SCALED;
-}
+static const int LAST_MEM_INDEX_TYPE = UNSIGNED_SCALED + 1;
 
 inline bool isIndexTypeSigned(MemIndexType IndexType) {
-  return IndexType == SIGNED_SCALED || IndexType == SIGNED_UNSCALED;
+  return IndexType == SIGNED_SCALED;
 }
 
 inline MemIndexType getSignedIndexType(MemIndexType IndexType) {
-  return isIndexTypeScaled(IndexType) ? SIGNED_SCALED : SIGNED_UNSCALED;
+  return SIGNED_SCALED;
 }
 
 inline MemIndexType getUnsignedIndexType(MemIndexType IndexType) {
-  return isIndexTypeScaled(IndexType) ? UNSIGNED_SCALED : UNSIGNED_UNSCALED;
-}
-
-inline MemIndexType getUnscaledIndexType(MemIndexType IndexType) {
-  return isIndexTypeSigned(IndexType) ? SIGNED_UNSCALED : UNSIGNED_UNSCALED;
+  return UNSIGNED_SCALED;
 }
 
 //===--------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -2697,7 +2697,9 @@
   ISD::MemIndexType getIndexType() const {
     return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
   }
-  bool isIndexScaled() const { return isIndexTypeScaled(getIndexType()); }
+  bool isIndexScaled() const {
+    return !cast<ConstantSDNode>(getScale())->isOne();
+  }
   bool isIndexSigned() const { return isIndexTypeSigned(getIndexType()); }
 
   // In the both nodes address is Op1, mask is Op2:
@@ -2779,7 +2781,9 @@
   ISD::MemIndexType getIndexType() const {
     return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
   }
-  bool isIndexScaled() const { return isIndexTypeScaled(getIndexType()); }
+  bool isIndexScaled() const {
+    return !cast<ConstantSDNode>(getScale())->isOne();
+  }
   bool isIndexSigned() const { return isIndexTypeSigned(getIndexType()); }
 
   // In the both nodes address is Op1, mask is Op2:
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -4822,10 +4822,6 @@
   // combiner can fold the new nodes.
   SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
 
-  /// Give targets the chance to reduce the number of distinct addresing modes.
-  ISD::MemIndexType getCanonicalIndexType(ISD::MemIndexType IndexType,
-                                          EVT MemVT, SDValue Offsets) const;
-
 private:
   SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                            const SDLoc &DL, DAGCombinerInfo &DCI) const;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -8572,7 +8572,6 @@
     return SDValue(E, 0);
   }
 
-  IndexType = TLI->getCanonicalIndexType(IndexType, MemVT, Ops[4]);
   auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, MemVT, MMO, IndexType, ExtTy);
   createOperands(N, Ops);
@@ -8620,7 +8619,6 @@
     return SDValue(E, 0);
   }
 
-  IndexType = TLI->getCanonicalIndexType(IndexType, MemVT, Ops[4]);
   auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                            VTs, MemVT, MMO, IndexType, IsTrunc);
   createOperands(N, Ops);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4435,7 +4435,7 @@
   if (!UniformBase) {
     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
     Index = getValue(Ptr);
-    IndexType = ISD::SIGNED_UNSCALED;
+    IndexType = ISD::SIGNED_SCALED;
     Scale = DAG.getTargetConstant(
         1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
   }
@@ -4543,7 +4543,7 @@
   if (!UniformBase) {
     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
     Index = getValue(Ptr);
-    IndexType = ISD::SIGNED_UNSCALED;
+    IndexType = ISD::SIGNED_SCALED;
     Scale = DAG.getTargetConstant(
         1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
   }
@@ -7386,7 +7386,7 @@
   if (!UniformBase) {
     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
     Index = getValue(PtrOperand);
-    IndexType = ISD::SIGNED_UNSCALED;
+    IndexType = ISD::SIGNED_SCALED;
     Scale = DAG.getTargetConstant(
         1, DL, TLI.getPointerTy(DAG.getDataLayout()));
   }
@@ -7442,7 +7442,7 @@
   if (!UniformBase) {
     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
     Index = getValue(PtrOperand);
-    IndexType = ISD::SIGNED_UNSCALED;
+    IndexType = ISD::SIGNED_SCALED;
     Scale = DAG.getTargetConstant(
         1, DL, TLI.getPointerTy(DAG.getDataLayout()));
   }
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8299,18 +8299,6 @@
   return SDValue();
 }
 
-// Convert redundant addressing modes (e.g. scaling is redundant
-// when accessing bytes).
-ISD::MemIndexType
-TargetLowering::getCanonicalIndexType(ISD::MemIndexType IndexType, EVT MemVT,
-                                      SDValue Offsets) const {
-  // Scaling is unimportant for bytes, canonicalize to unscaled.
-  if (ISD::isIndexTypeScaled(IndexType) && MemVT.getScalarType() == MVT::i8)
-    return ISD::getUnscaledIndexType(IndexType);
-
-  return IndexType;
-}
-
 SDValue TargetLowering::expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const {
   SDValue Op0 = Node->getOperand(0);
   SDValue Op1 = Node->getOperand(1);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8704,40 +8704,41 @@
                           DL, IndexVT, Index);
     }
 
-    unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
-    if (IsIndexScaled && Scale != 1) {
-      // Manually scale the indices by the element size.
+    if (IsIndexScaled) {
+      // Manually scale the indices.
       // TODO: Sanitize the scale operand here?
       // TODO: For VP nodes, should we use VP_SHL here?
+      unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
+      ScaleOp = DAG.getTargetConstant(1, DL, ScaleOp.getValueType());
     }
 
-    ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
+    ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_SCALED;
     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
-                              VPGN->getScale(), VPGN->getMask(),
+                              ScaleOp, VPGN->getMask(),
                               VPGN->getVectorLength()},
                              VPGN->getMemOperand(), NewIndexTy);
     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
                               {VPSN->getChain(), VPSN->getValue(),
-                               VPSN->getBasePtr(), Index, VPSN->getScale(),
+                               VPSN->getBasePtr(), Index, ScaleOp,
                                VPSN->getMask(), VPSN->getVectorLength()},
                               VPSN->getMemOperand(), NewIndexTy);
     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
       return DAG.getMaskedGather(
           N->getVTList(), MGN->getMemoryVT(), DL,
           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
-           MGN->getBasePtr(), Index, MGN->getScale()},
+           MGN->getBasePtr(), Index, ScaleOp},
          MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
     const auto *MSN = cast<MaskedScatterSDNode>(N);
     return DAG.getMaskedScatter(
         N->getVTList(), MSN->getMemoryVT(), DL,
         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
-         Index, MSN->getScale()},
+         Index, ScaleOp},
         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
   }
   case RISCVISD::SRA_VL:
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
@@ -79,14 +79,12 @@
 ; CHECK-NEXT:    mov z0.d, #0 // =0x0
 ; CHECK-NEXT:    punpklo p1.h, p0.b
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
-; CHECK-NEXT:    st1w { z0.d }, p1, [x8, z0.d, lsl #2]
-; CHECK-NEXT:    st1w { z0.d }, p0, [x8, z0.d, lsl #2]
+; CHECK-NEXT:    st1w { z0.d }, p1, [z0.d]
+; CHECK-NEXT:    st1w { z0.d }, p0, [z0.d]
 ; CHECK-NEXT:    ret
 vector.body:
   call void @llvm.masked.scatter.nxv4i32.nxv4p0i32(<vscale x 4 x i32> undef,
-    <vscale x 4 x i32*> shufflevector (<vscale x 4 x i32*> insertelement (<vscale x 4 x i32*> poison, i32* undef, i32 0), <vscale x 4 x i32*> poison, <vscale x 4 x i32> zeroinitializer),
-    i32 4,
-    <vscale x 4 x i1> %pg)
+    <vscale x 4 x i32*> shufflevector (<vscale x 4 x i32*> insertelement (<vscale x 4 x i32*> poison, i32* null, i32 0), <vscale x 4 x i32*> poison, <vscale x 4 x i32> zeroinitializer), i32 4, <vscale x 4 x i1> %pg)
   ret void
 }
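
Illustration (not part of the patch): after this change, "unscaled" indexing is expressed purely through the Scale operand, with a value of 1 meaning no scaling, while MemIndexType only records signedness. Below is a minimal self-contained C++ sketch of that contract; GatherScatterInfo and effectiveAddress are hypothetical stand-ins, not the real SelectionDAG classes.

#include <cstdint>

// Hypothetical stand-in for a gather/scatter node. The real nodes expose
// getIndexType() and getScale(); here they are plain fields.
struct GatherScatterInfo {
  bool IndexSigned;  // ISD::isIndexTypeSigned(getIndexType())
  uint64_t Scale;    // cast<ConstantSDNode>(getScale())->getZExtValue()

  // Mirrors the new isIndexScaled(): scaling is a property of the Scale
  // operand, and a Scale of 1 means the index is effectively unscaled.
  bool isIndexScaled() const { return Scale != 1; }
};

// Addr = Base + ((signed|unsigned)Index * Scale), as documented for
// SIGNED_SCALED / UNSIGNED_SCALED. A 32-bit index is sign- or zero-extended
// before scaling, depending on the signedness recorded in the index type.
uint64_t effectiveAddress(const GatherScatterInfo &N, uint64_t Base,
                          uint32_t RawIndex) {
  int64_t SignedIdx = static_cast<int32_t>(RawIndex);      // sign-extend
  uint64_t Index = N.IndexSigned ? static_cast<uint64_t>(SignedIdx)
                                 : static_cast<uint64_t>(RawIndex); // zext
  return Base + Index * N.Scale;
}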