diff --git a/mlir/include/mlir/Conversion/NVGPUToNVVM/NVGPUToNVVM.h b/mlir/include/mlir/Conversion/NVGPUToNVVM/NVGPUToNVVM.h
--- a/mlir/include/mlir/Conversion/NVGPUToNVVM/NVGPUToNVVM.h
+++ b/mlir/include/mlir/Conversion/NVGPUToNVVM/NVGPUToNVVM.h
@@ -23,15 +23,15 @@
 #include "mlir/Conversion/Passes.h.inc"

 namespace nvgpu {
-class MBarrierType;
+class MBarrierGroupType;

 /// Returns the memory space attribute of the mbarrier object.
 Attribute getMbarrierMemorySpace(MLIRContext *context,
-                                 MBarrierType barrierType);
+                                 MBarrierGroupType barrierType);

 /// Return the memref type that can be used to represent an mbarrier object.
 MemRefType getMBarrierMemrefType(MLIRContext *context,
-                                 MBarrierType barrierType);
+                                 MBarrierGroupType barrierType);
 } // namespace nvgpu

 void populateNVGPUToNVVMConversionPatterns(LLVMTypeConverter &converter,
diff --git a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
--- a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
+++ b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
@@ -135,20 +135,26 @@
   }];
 }

-def NVGPU_MBarrier : NVGPU_Type<"MBarrier", "mbarrier.barrier", []> {
+def NVGPU_MBarrierGroup : NVGPU_Type<"MBarrierGroup", "mbarrier.group", []> {
   let summary = "mbarrier barrier type";
   let description = [{
-    This is the type for a mbarrier object in shared memory that is used
-    to synchronize a variable number of threads.
+    This is the type for one or more mbarrier objects in shared memory that
+    are used to synchronize a variable number of threads.

-    The mbarrier object is 64 bit with 8 byte alignment. The mbarrier object
-    can be initiated and invalidated.
+    If `num_barriers` is not set, the number of mbarrier objects is 1.

-    See for more details:
-    https://docs.nvidia.com/cuda/parallel-thread-execution/#size-and-alignment-of-mbarrier-object
+    An mbarrier object is 64 bits wide with 8-byte alignment. An mbarrier
+    object can be initialized and invalidated.
+
+    [See the PTX ISA for more details](https://docs.nvidia.com/cuda/parallel-thread-execution/#size-and-alignment-of-mbarrier-object)
   }];
-  let parameters = (ins "Attribute":$memorySpace);
+  let parameters = (ins "Attribute":$memorySpace, DefaultValuedParameter<"unsigned", "1">:$num_barriers);
   let assemblyFormat = "`<` struct(params) `>`";
+  let builders = [
+    TypeBuilder<(ins "Attribute":$memorySpace), [{
+      return $_get($_ctxt, memorySpace, 1);
+    }]>
+  ];
 }

 def NVGPU_MBarrierToken : NVGPU_Type<"MBarrierToken", "mbarrier.token", []> { }
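For reference, the two spellings of the new type as they appear in IR; the five-barrier form is taken from the tests further down, and the second form exercises the `num_barriers` default of 1:

```mlir
// A group of five mbarrier objects in shared memory.
!barGroup5 = !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>, num_barriers = 5>
// With num_barriers omitted, the group holds a single mbarrier object.
!barGroup1 = !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>>
```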
@@ -473,7 +479,7 @@
 def NVGPU_MBarrierCreateOp : NVGPU_Op<"mbarrier.create", []> {
   let summary = "Creates a `nvgpu.mbarrier` object.";
   let description = [{
-    The Op generates an `mbarrier` object, which is a barrier created in
-    shared memory and supports various synchronization behaviors for threads.
+    The Op generates one or more `mbarrier` objects, which are barriers created
+    in shared memory and support various synchronization behaviors for threads.

     The `mbarrier` object has the following type and alignment requirements:
@@ -485,9 +491,9 @@
     ```
   }];
   let arguments = (ins);
-  let results = (outs NVGPU_MBarrier:$barrier);
+  let results = (outs NVGPU_MBarrierGroup:$barriers);
   let assemblyFormat = [{
-    attr-dict `->` type($barrier)
+    attr-dict `->` type($barriers)
   }];
 }
@@ -503,8 +509,8 @@
-    nvgpu.mbarrier.init %barrier, %num_threads : !nvgpu.mbarrier.barrier<memorySpace = #gpu.address_space<workgroup>>
+    nvgpu.mbarrier.init %barriers[%mbarId], %num_threads : !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>>
     ```
   }];
-  let arguments = (ins NVGPU_MBarrier:$barrier, Index:$count);
-  let assemblyFormat = "$barrier `,` $count attr-dict `:` type($barrier)";
+  let arguments = (ins NVGPU_MBarrierGroup:$barriers, Index:$count, Index:$mbarId);
+  let assemblyFormat = "$barriers `[` $mbarId `]` `,` $count attr-dict `:` type($barriers)";
 }

 def NVGPU_MBarrierTestWaitOp : NVGPU_Op<"mbarrier.test.wait", []> {
@@ -518,9 +524,9 @@
-    %isComplete = nvgpu.mbarrier.test.wait %barrier, %token : !nvgpu.mbarrier.barrier<memorySpace = #gpu.address_space<workgroup>>, !nvgpu.mbarrier.token
+    %isComplete = nvgpu.mbarrier.test.wait %barriers[%mbarId], %token : !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>>, !nvgpu.mbarrier.token
     ```
   }];
-  let arguments = (ins NVGPU_MBarrier:$barrier, NVGPU_MBarrierToken:$token);
+  let arguments = (ins NVGPU_MBarrierGroup:$barriers, NVGPU_MBarrierToken:$token, Index:$mbarId);
   let results = (outs I1:$waitComplete);
-  let assemblyFormat = "$barrier `,` $token attr-dict `:` type($barrier) `,` type($token)";
+  let assemblyFormat = "$barriers `[` $mbarId `]` `,` $token attr-dict `:` type($barriers) `,` type($token)";
 }

 def NVGPU_MBarrierArriveOp : NVGPU_Op<"mbarrier.arrive", []> {
@@ -537,9 +543,9 @@
-    %token = nvgpu.mbarrier.arrive %barrier : !nvgpu.mbarrier.barrier<memorySpace = #gpu.address_space<workgroup>> -> !nvgpu.mbarrier.token
+    %token = nvgpu.mbarrier.arrive %barriers[%mbarId] : !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>> -> !nvgpu.mbarrier.token
     ```
   }];
-  let arguments = (ins NVGPU_MBarrier:$barrier);
+  let arguments = (ins NVGPU_MBarrierGroup:$barriers, Index:$mbarId);
   let results = (outs NVGPU_MBarrierToken:$token);
-  let assemblyFormat = "$barrier attr-dict `:` type($barrier) `->` type($token)";
+  let assemblyFormat = "$barriers `[` $mbarId `]` attr-dict `:` type($barriers) `->` type($token)";
 }

 def NVGPU_MBarrierArriveNoCompleteOp : NVGPU_Op<"mbarrier.arrive.nocomplete", []> {
@@ -555,10 +561,10 @@
-    %token = nvgpu.mbarrier.arrive.noComplete %barrier, %count : !nvgpu.mbarrier.barrier<memorySpace = #gpu.address_space<workgroup>> -> !nvgpu.mbarrier.token
+    %token = nvgpu.mbarrier.arrive.nocomplete %barriers[%mbarId], %count : !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>> -> !nvgpu.mbarrier.token
     ```
   }];
-  let arguments = (ins NVGPU_MBarrier:$barrier,
+  let arguments = (ins NVGPU_MBarrierGroup:$barriers, Index:$mbarId,
                        Index:$count);
   let results = (outs NVGPU_MBarrierToken:$token);
-  let assemblyFormat = "$barrier `,` $count attr-dict `:` type($barrier) `->` type($token)";
+  let assemblyFormat = "$barriers `[` $mbarId `]` `,` $count attr-dict `:` type($barriers) `->` type($token)";
 }

 def NVGPU_MBarrierArriveExpectTxOp : NVGPU_Op<"mbarrier.arrive.expect_tx", []> {
@@ -578,9 +584,8 @@
-    nvgpu.mbarrier.arrive.expect_tx %barrier, %ic0 : !nvgpu.mbarrier.barrier<memorySpace = #gpu.address_space<workgroup>>
+    nvgpu.mbarrier.arrive.expect_tx %barriers[%mbarId], %ic0 : !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>>
     ```
   }];
-  let arguments = (ins NVGPU_MBarrier:$barrier,
-                       Index:$txcount);
-  let assemblyFormat = "$barrier `,` $txcount attr-dict `:` type($barrier)";
+  let arguments = (ins NVGPU_MBarrierGroup:$barriers, Index:$txcount, Index:$mbarId);
+  let assemblyFormat = "$barriers `[` $mbarId `]` `,` $txcount attr-dict `:` type($barriers)";
 }

 def NVGPU_MBarrierTryWaitParityOp : NVGPU_Op<"mbarrier.try_wait.parity", []> {
@@ -597,8 +602,8 @@
     ```
   }];

-  let arguments = (ins NVGPU_MBarrier:$barrier, Index:$phase, Index:$ticks);
-  let assemblyFormat = "$barrier `,` $phase `,` $ticks attr-dict `:` type($barrier)";
+  let arguments = (ins NVGPU_MBarrierGroup:$barriers, Index:$phase, Index:$ticks, Index:$mbarId);
+  let assemblyFormat = "$barriers `[` $mbarId `]` `,` $phase `,` $ticks attr-dict `:` type($barriers)";
 }
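Taken together, the reworked ops address an individual mbarrier through an index into the group. A minimal sketch of the new surface syntax, not part of the patch (the alias, function name, and constants are illustrative):

```mlir
!barGroup = !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>, num_barriers = 2>

func.func @two_barriers(%num_threads: index) {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  // One create op materializes the whole group.
  %barriers = nvgpu.mbarrier.create -> !barGroup
  // Each mbarrier of the group is initialized through its index.
  nvgpu.mbarrier.init %barriers[%c0], %num_threads : !barGroup
  nvgpu.mbarrier.init %barriers[%c1], %num_threads : !barGroup
  // Arrive on, then poll, the first mbarrier of the group.
  %token = nvgpu.mbarrier.arrive %barriers[%c0] : !barGroup -> !nvgpu.mbarrier.token
  %isDone = nvgpu.mbarrier.test.wait %barriers[%c0], %token : !barGroup, !nvgpu.mbarrier.token
  return
}
```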
 def NVGPU_TmaAsyncLoadOp : NVGPU_Op<"tma.async.load", []> {
@@ -613,12 +618,13 @@
-    The Op uses `$barrier` mbarrier based completion mechanism.
+    The Op uses an mbarrier-based completion mechanism (`$barriers`).
   }];
   let arguments = (ins Arg<AnyMemRef, "", [MemWrite]>:$dst,
-                       NVGPU_MBarrier:$barrier,
+                       NVGPU_MBarrierGroup:$barriers,
                        NVGPU_TensorMapDescriptor:$tensorMapDescriptor,
-                       Variadic<Index>:$coordinates);
+                       Variadic<Index>:$coordinates,
+                       Index:$mbarId);
   let assemblyFormat = [{
-    $tensorMapDescriptor `[` $coordinates `]` `,` $barrier `to` $dst
-    attr-dict `:` type($tensorMapDescriptor) `,` type($barrier) `->` type($dst)
+    $tensorMapDescriptor `[` $coordinates `]` `,` $barriers `[` $mbarId `]` `to` $dst
+    attr-dict `:` type($tensorMapDescriptor) `,` type($barriers) `->` type($dst)
   }];

   let hasVerifier = 1;
diff --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
--- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
+++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
@@ -17,8 +17,10 @@
 #include "mlir/Dialect/LLVMIR/NVVMDialect.h"
 #include "mlir/Dialect/MemRef/IR/MemRef.h"
 #include "mlir/Dialect/NVGPU/IR/NVGPUDialect.h"
+#include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/IR/TypeUtilities.h"
+#include "mlir/IR/Value.h"
 #include "mlir/Pass/Pass.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
@@ -212,14 +214,14 @@
 }

 /// Returns whether mbarrier object has shared memory address space.
-static bool isMbarrierShared(nvgpu::MBarrierType barrierType) {
+static bool isMbarrierShared(nvgpu::MBarrierGroupType barrierType) {
   return (mlir::nvgpu::NVGPUDialect::isSharedMemoryAddressSpace(
       barrierType.getMemorySpace()));
 }

 /// Returns the memory space attribute of the mbarrier object.
 Attribute nvgpu::getMbarrierMemorySpace(MLIRContext *context,
-                                        nvgpu::MBarrierType barrierType) {
+                                        nvgpu::MBarrierGroupType barrierType) {
   Attribute memorySpace = {};
   if (isMbarrierShared(barrierType)) {
     memorySpace =
@@ -230,25 +232,13 @@
 }

 /// Returns memref type of the mbarrier object. The type is defined in the
-/// MBarrierType.
+/// MBarrierGroupType.
 MemRefType nvgpu::getMBarrierMemrefType(MLIRContext *context,
-                                        nvgpu::MBarrierType barrierType) {
+                                        nvgpu::MBarrierGroupType barrierType) {
   Attribute memorySpace = nvgpu::getMbarrierMemorySpace(context, barrierType);
   MemRefLayoutAttrInterface layout;
-  return MemRefType::get({1}, IntegerType::get(context, 64), layout,
-                         memorySpace);
-}
-
-/// Returns the base pointer of the mbarrier object.
-static Value getMbarrierPtr(ConversionPatternRewriter &rewriter,
-                            const LLVMTypeConverter &typeConverter,
-                            TypedValue<nvgpu::MBarrierType> barrier,
-                            Value barrierMemref) {
-  MemRefType memrefType =
-      nvgpu::getMBarrierMemrefType(rewriter.getContext(), barrier.getType());
-  MemRefDescriptor memRefDescriptor(barrierMemref);
-  return memRefDescriptor.bufferPtr(rewriter, barrier.getLoc(), typeConverter,
-                                    memrefType);
+  return MemRefType::get({barrierType.getNumBarriers()},
+                         IntegerType::get(context, 64), layout, memorySpace);
 }

 namespace {
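The only functional change above is the shape of the backing memref: the group now lowers to one i64 slot per mbarrier instead of always `{1}`. Under the shared memory space used throughout this patch, the mapping is (sketch):

```mlir
// Conversion of the group type to its backing memref; after lowering,
// shared memory is LLVM address space 3.
//   !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>>                   -> memref<1xi64, 3>
//   !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>, num_barriers = 5> -> memref<5xi64, 3>
```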
@@ -426,7 +416,7 @@
       [&](nvgpu::WarpgroupMatrixDescriptorType type) -> Type {
         return converter.convertType(IntegerType::get(type.getContext(), 64));
       });
-  converter.addConversion([&](nvgpu::MBarrierType type) -> Type {
+  converter.addConversion([&](nvgpu::MBarrierGroupType type) -> Type {
     return converter.convertType(
         nvgpu::getMBarrierMemrefType(rewriter.getContext(), type));
   });
@@ -762,7 +752,7 @@
                   ConversionPatternRewriter &rewriter) const override {
     Operation *funcOp = op->getParentOp();
     MemRefType barrierType = nvgpu::getMBarrierMemrefType(
-        rewriter.getContext(), op.getBarrier().getType());
+        rewriter.getContext(), op.getBarriers().getType());

     memref::GlobalOp global;
     if (auto moduleOp = funcOp->getParentOfType<gpu::GPUModuleOp>())
@@ -777,21 +767,37 @@
   }
 };

+/// Base class for lowering mbarrier operations to nvvm intrinsics.
+template <typename SourceOp>
+struct MBarrierBasePattern : public ConvertOpToLLVMPattern<SourceOp> {
+public:
+  using ConvertOpToLLVMPattern<SourceOp>::ConvertOpToLLVMPattern;
+  /// Returns the base pointer of the mbarrier object.
+  Value getMbarrierPtr(Operation *op, nvgpu::MBarrierGroupType mbarType,
+                       Value memrefDesc, Value mbarId,
+                       ConversionPatternRewriter &rewriter) const {
+    MemRefType mbarrierMemrefType =
+        nvgpu::getMBarrierMemrefType(rewriter.getContext(), mbarType);
+    return ConvertToLLVMPattern::getStridedElementPtr(
+        op->getLoc(), mbarrierMemrefType, memrefDesc, {mbarId}, rewriter);
+  }
+};
+
 /// Lowers `nvgpu.mbarrier.init` to `nvvm.mbarrier.init`
 struct NVGPUMBarrierInitLowering
-    : public ConvertOpToLLVMPattern<nvgpu::MBarrierInitOp> {
-  using ConvertOpToLLVMPattern<nvgpu::MBarrierInitOp>::ConvertOpToLLVMPattern;
+    : public MBarrierBasePattern<nvgpu::MBarrierInitOp> {
+  using MBarrierBasePattern<nvgpu::MBarrierInitOp>::MBarrierBasePattern;

   LogicalResult
   matchAndRewrite(nvgpu::MBarrierInitOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
+    nvgpu::MBarrierGroupType mbarrierType = op.getBarriers().getType();
     rewriter.setInsertionPoint(op);
-    Value barrier = getMbarrierPtr(rewriter, *getTypeConverter(),
-                                   op.getBarrier(), adaptor.getBarrier());
-
+    Value barrier = getMbarrierPtr(op, mbarrierType, adaptor.getBarriers(),
+                                   adaptor.getMbarId(), rewriter);
     Value count = truncToI32(rewriter, op->getLoc(), adaptor.getCount());
-
-    if (isMbarrierShared(op.getBarrier().getType())) {
+    if (isMbarrierShared(mbarrierType)) {
       rewriter.replaceOpWithNewOp<NVVM::MBarrierInitSharedOp>(op, barrier,
                                                               count);
     } else {
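Every mbarrier lowering now funnels through `getMbarrierPtr`, which emits a strided-element GEP into the group rather than handing back the buffer base. In LLVM-dialect terms the effect is roughly the following, matching the updated FileCheck captures in the tests below:

```mlir
// Before: the mbarrier pointer was the buffer base itself.
%ptr = llvm.extractvalue %barStr[1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
// After: the base is offset by the i64 mbarrier index within the group.
%base   = llvm.extractvalue %barStr[1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
%newPtr = llvm.getelementptr %base[%mbarId] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
```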
@@ -803,16 +809,17 @@
 /// Lowers `nvgpu.mbarrier.arrive` to `nvvm.mbarrier.arrive`
 struct NVGPUMBarrierArriveLowering
-    : public ConvertOpToLLVMPattern<nvgpu::MBarrierArriveOp> {
-  using ConvertOpToLLVMPattern<nvgpu::MBarrierArriveOp>::ConvertOpToLLVMPattern;
+    : public MBarrierBasePattern<nvgpu::MBarrierArriveOp> {
+  using MBarrierBasePattern<nvgpu::MBarrierArriveOp>::MBarrierBasePattern;
   LogicalResult
   matchAndRewrite(nvgpu::MBarrierArriveOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    Value barrier = getMbarrierPtr(rewriter, *getTypeConverter(),
-                                   op.getBarrier(), adaptor.getBarrier());
+    Value barrier =
+        getMbarrierPtr(op, op.getBarriers().getType(), adaptor.getBarriers(),
+                       adaptor.getMbarId(), rewriter);
     Type tokenType = getTypeConverter()->convertType(
         nvgpu::MBarrierTokenType::get(op->getContext()));
-    if (isMbarrierShared(op.getBarrier().getType())) {
+    if (isMbarrierShared(op.getBarriers().getType())) {
       rewriter.replaceOpWithNewOp<NVVM::MBarrierArriveSharedOp>(op, tokenType,
                                                                 barrier);
     } else {
@@ -826,19 +833,19 @@
 /// Lowers `nvgpu.mbarrier.arrive.nocomplete` to
 /// `nvvm.mbarrier.arrive.nocomplete`
 struct NVGPUMBarrierArriveNoCompleteLowering
-    : public ConvertOpToLLVMPattern<nvgpu::MBarrierArriveNoCompleteOp> {
-  using ConvertOpToLLVMPattern<
-      nvgpu::MBarrierArriveNoCompleteOp>::ConvertOpToLLVMPattern;
-
+    : public MBarrierBasePattern<nvgpu::MBarrierArriveNoCompleteOp> {
+  using MBarrierBasePattern<
+      nvgpu::MBarrierArriveNoCompleteOp>::MBarrierBasePattern;
   LogicalResult
   matchAndRewrite(nvgpu::MBarrierArriveNoCompleteOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    Value barrier = getMbarrierPtr(rewriter, *getTypeConverter(),
-                                   op.getBarrier(), adaptor.getBarrier());
+    Value barrier =
+        getMbarrierPtr(op, op.getBarriers().getType(), adaptor.getBarriers(),
+                       adaptor.getMbarId(), rewriter);
     Type tokenType = getTypeConverter()->convertType(
         nvgpu::MBarrierTokenType::get(op->getContext()));
     Value count = truncToI32(rewriter, op->getLoc(), adaptor.getCount());
-    if (isMbarrierShared(op.getBarrier().getType())) {
+    if (isMbarrierShared(op.getBarriers().getType())) {
       rewriter.replaceOpWithNewOp<NVVM::MBarrierArriveNocompleteSharedOp>(
           op, tokenType, barrier, count);
     } else {
@@ -851,17 +858,16 @@

 /// Lowers `nvgpu.mbarrier.test.wait` to `nvvm.mbarrier.test.wait`
 struct NVGPUMBarrierTestWaitLowering
-    : public ConvertOpToLLVMPattern<nvgpu::MBarrierTestWaitOp> {
-  using ConvertOpToLLVMPattern<
-      nvgpu::MBarrierTestWaitOp>::ConvertOpToLLVMPattern;
-
+    : public MBarrierBasePattern<nvgpu::MBarrierTestWaitOp> {
+  using MBarrierBasePattern<nvgpu::MBarrierTestWaitOp>::MBarrierBasePattern;
   LogicalResult
   matchAndRewrite(nvgpu::MBarrierTestWaitOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    Value barrier = getMbarrierPtr(rewriter, *getTypeConverter(),
-                                   op.getBarrier(), adaptor.getBarrier());
+    Value barrier =
+        getMbarrierPtr(op, op.getBarriers().getType(), adaptor.getBarriers(),
+                       adaptor.getMbarId(), rewriter);
     Type retType = rewriter.getI1Type();
-    if (isMbarrierShared(op.getBarrier().getType())) {
+    if (isMbarrierShared(op.getBarriers().getType())) {
       rewriter.replaceOpWithNewOp<NVVM::MBarrierTestWaitSharedOp>(
           op, retType, barrier, adaptor.getToken());
     } else {
@@ -873,18 +879,18 @@
 };

 struct NVGPUMBarrierArriveExpectTxLowering
-    : public ConvertOpToLLVMPattern<nvgpu::MBarrierArriveExpectTxOp> {
-  using ConvertOpToLLVMPattern<
-      nvgpu::MBarrierArriveExpectTxOp>::ConvertOpToLLVMPattern;
-
+    : public MBarrierBasePattern<nvgpu::MBarrierArriveExpectTxOp> {
+  using MBarrierBasePattern<
+      nvgpu::MBarrierArriveExpectTxOp>::MBarrierBasePattern;
   LogicalResult
   matchAndRewrite(nvgpu::MBarrierArriveExpectTxOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    Value barrier = getMbarrierPtr(rewriter, *getTypeConverter(),
-                                   op.getBarrier(), adaptor.getBarrier());
+    Value barrier =
+        getMbarrierPtr(op, op.getBarriers().getType(), adaptor.getBarriers(),
+                       adaptor.getMbarId(), rewriter);
     Value txcount = truncToI32(rewriter, op->getLoc(), adaptor.getTxcount());
-    if (isMbarrierShared(op.getBarrier().getType())) {
+    if (isMbarrierShared(op.getBarriers().getType())) {
       rewriter.replaceOpWithNewOp<NVVM::MBarrierArriveExpectTxSharedOp>(
           op, barrier, txcount);
       return success();
@@ -897,19 +903,19 @@
 };

 struct NVGPUMBarrierTryWaitParityLowering
-    : public ConvertOpToLLVMPattern<nvgpu::MBarrierTryWaitParityOp> {
-  using ConvertOpToLLVMPattern<
-      nvgpu::MBarrierTryWaitParityOp>::ConvertOpToLLVMPattern;
-
+    : public MBarrierBasePattern<nvgpu::MBarrierTryWaitParityOp> {
+  using MBarrierBasePattern<
+      nvgpu::MBarrierTryWaitParityOp>::MBarrierBasePattern;
   LogicalResult
   matchAndRewrite(nvgpu::MBarrierTryWaitParityOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    Value barrier = getMbarrierPtr(rewriter, *getTypeConverter(),
-                                   op.getBarrier(), adaptor.getBarrier());
+    Value barrier =
+        getMbarrierPtr(op, op.getBarriers().getType(), adaptor.getBarriers(),
+                       adaptor.getMbarId(), rewriter);
     Value ticks = truncToI32(rewriter, op->getLoc(), adaptor.getTicks());
     Value phase = truncToI32(rewriter, op->getLoc(), adaptor.getPhase());
-    if (isMbarrierShared(op.getBarrier().getType())) {
+    if (isMbarrierShared(op.getBarriers().getType())) {
       rewriter.replaceOpWithNewOp<NVVM::MBarrierTryWaitParitySharedOp>(
           op, barrier, phase, ticks);
       return success();
@@ -922,16 +928,17 @@
 };

 struct NVGPUTmaAsyncLoadOpLowering
-    : public ConvertOpToLLVMPattern<nvgpu::TmaAsyncLoadOp> {
-  using ConvertOpToLLVMPattern<nvgpu::TmaAsyncLoadOp>::ConvertOpToLLVMPattern;
+    : public MBarrierBasePattern<nvgpu::TmaAsyncLoadOp> {
+  using MBarrierBasePattern<nvgpu::TmaAsyncLoadOp>::MBarrierBasePattern;
   LogicalResult
   matchAndRewrite(nvgpu::TmaAsyncLoadOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     auto srcMemrefType = cast<MemRefType>(op.getDst().getType());
     Value dest = getStridedElementPtr(op->getLoc(), srcMemrefType,
                                       adaptor.getDst(), {}, rewriter);
-    Value barrier = getMbarrierPtr(rewriter, *getTypeConverter(),
-                                   op.getBarrier(), adaptor.getBarrier());
+    Value barrier =
+        getMbarrierPtr(op, op.getBarriers().getType(), adaptor.getBarriers(),
+                       adaptor.getMbarId(), rewriter);

     SmallVector<Value> coords = adaptor.getCoordinates();
     for (auto [index, value] : llvm::enumerate(coords)) {
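With the new `$mbarId` operand, a TMA load names its completion mbarrier by index. A minimal sketch, not from the test file; the `!tmap` descriptor alias mirrors the ones used there:

```mlir
!barGroup = !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>>
!tmap = !nvgpu.tensormap.descriptor<tensor = memref<32x32xf32,3>, swizzle=none, l2promo = none, oob = zero, interleave = none>

func.func @tma_load(%desc: !tmap, %barriers: !barGroup, %dst: memref<32x32xf32,3>) {
  %c0 = arith.constant 0 : index
  // The load signals completion on mbarrier 0 of the group.
  nvgpu.tma.async.load %desc[%c0, %c0], %barriers[%c0] to %dst : !tmap, !barGroup -> memref<32x32xf32,3>
  return
}
```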
diff --git a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
--- a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
+++ b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
@@ -60,7 +60,7 @@
     return llvmTypeConverter.convertType(
         IntegerType::get(type.getContext(), 64));
   });
-  llvmTypeConverter.addConversion([&](nvgpu::MBarrierType type) -> Type {
+  llvmTypeConverter.addConversion([&](nvgpu::MBarrierGroupType type) -> Type {
     return llvmTypeConverter.convertType(
         getMBarrierMemrefType(type.getContext(), type));
   });
@@ -803,7 +803,7 @@
   HopperBuilder(RewriterBase &rewriter, Location loc)
       : rewriter(rewriter), loc(loc) {}

-  TypedValue<nvgpu::MBarrierType>
+  TypedValue<nvgpu::MBarrierGroupType>
   buildAndInitBarrierInSharedMemory(OpFoldResult numThreads);

   /// Create tma descriptor op to initiate transfer from global to shared
@@ -817,9 +817,9 @@
   OpFoldResult
   buildTmaAsyncLoad(TypedValue<nvgpu::TensorMapDescriptorType> globalDesc,
                     TypedValue<MemRefType> sharedMemref,
-                    TypedValue<nvgpu::MBarrierType> barrier,
+                    TypedValue<nvgpu::MBarrierGroupType> barrier,
                     SmallVectorImpl<Operation *> &loadOps);
-  void buildBarrierArriveTx(TypedValue<nvgpu::MBarrierType> barrier,
+  void buildBarrierArriveTx(TypedValue<nvgpu::MBarrierGroupType> barrier,
                             ArrayRef<OpFoldResult> sizes);

   /// If threadIdx.x == 0 does TMA request + wait, else just wait.
@@ -828,9 +828,9 @@
   SmallVector<Operation *> buildPredicateLoadsOnThread0(
       ArrayRef<TypedValue<nvgpu::TensorMapDescriptorType>> globalDescriptors,
       ArrayRef<TypedValue<MemRefType>> sharedMemBuffers,
-      TypedValue<nvgpu::MBarrierType> barrier);
+      TypedValue<nvgpu::MBarrierGroupType> barrier);

-  void buildTryWaitParity(TypedValue<nvgpu::MBarrierType> barrier);
+  void buildTryWaitParity(TypedValue<nvgpu::MBarrierGroupType> barrier);

   RewriterBase &rewriter;
   Location loc;
@@ -839,7 +839,7 @@
 SmallVector<Operation *> HopperBuilder::buildPredicateLoadsOnThread0(
     ArrayRef<TypedValue<nvgpu::TensorMapDescriptorType>> globalDescriptors,
     ArrayRef<TypedValue<MemRefType>> sharedMemBuffers,
-    TypedValue<nvgpu::MBarrierType> barrier) {
+    TypedValue<nvgpu::MBarrierGroupType> barrier) {
   SmallVector<Operation *> loadOps;
   Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
   Value tidx = rewriter.create<gpu::ThreadIdOp>(loc, gpu::Dimension::x);
@@ -880,15 +880,18 @@
   //   return b.getI64IntegerAttr(static_cast<int64_t>(kSharedMemorySpace));
 }

-TypedValue<nvgpu::MBarrierType>
+TypedValue<nvgpu::MBarrierGroupType>
 HopperBuilder::buildAndInitBarrierInSharedMemory(OpFoldResult numThreads) {
   auto sharedMemorySpace = getSharedAddressSpaceAttribute(rewriter);
   Value barrier = rewriter.create<nvgpu::MBarrierCreateOp>(
-      loc, nvgpu::MBarrierType::get(rewriter.getContext(), sharedMemorySpace));
+      loc,
+      nvgpu::MBarrierGroupType::get(rewriter.getContext(), sharedMemorySpace));
+  Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
   rewriter.create<nvgpu::MBarrierInitOp>(
-      loc, barrier, getValueOrCreateConstantIndexOp(rewriter, loc, numThreads));
+      loc, barrier, getValueOrCreateConstantIndexOp(rewriter, loc, numThreads),
+      zero);
   rewriter.create<gpu::BarrierOp>(loc);
-  return cast<TypedValue<nvgpu::MBarrierType>>(barrier);
+  return cast<TypedValue<nvgpu::MBarrierGroupType>>(barrier);
 }

 TypedValue<nvgpu::TensorMapDescriptorType>
@@ -923,12 +926,12 @@
 OpFoldResult HopperBuilder::buildTmaAsyncLoad(
     TypedValue<nvgpu::TensorMapDescriptorType> globalDesc,
     TypedValue<MemRefType> sharedMemref,
-    TypedValue<nvgpu::MBarrierType> barrier,
+    TypedValue<nvgpu::MBarrierGroupType> barrier,
     SmallVectorImpl<Operation *> &loadOps) {
   MLIRContext *ctx = rewriter.getContext();
   Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
   Operation *loadOp = rewriter.create<nvgpu::TmaAsyncLoadOp>(
-      loc, sharedMemref, barrier, globalDesc, ValueRange{zero, zero});
+      loc, sharedMemref, barrier, globalDesc, ValueRange{zero, zero}, zero);
   loadOps.push_back(loadOp);
   auto mixedSizes = memref::getMixedSizes(rewriter, loc, sharedMemref);
   SmallVector<AffineExpr> symbols(mixedSizes.size());
@@ -942,7 +945,7 @@
 }

 void HopperBuilder::buildBarrierArriveTx(
-    TypedValue<nvgpu::MBarrierType> barrier,
+    TypedValue<nvgpu::MBarrierGroupType> barrier,
     ArrayRef<OpFoldResult> mixedSizes) {
-  assert(!mixedSizes.empty() && "expecte non-empty sizes");
+  assert(!mixedSizes.empty() && "expected non-empty sizes");
   MLIRContext *ctx = rewriter.getContext();
@@ -952,19 +955,21 @@
   OpFoldResult size =
       affine::makeComposedFoldedAffineApply(rewriter, loc, sumExpr, mixedSizes);
   Value sizeVal = getValueOrCreateConstantIndexOp(rewriter, loc, size);
-  rewriter.create<nvgpu::MBarrierArriveExpectTxOp>(loc, barrier, sizeVal);
+  Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+  rewriter.create<nvgpu::MBarrierArriveExpectTxOp>(loc, barrier, sizeVal, zero);
 }

 void HopperBuilder::buildTryWaitParity(
-    TypedValue<nvgpu::MBarrierType> barrier) {
+    TypedValue<nvgpu::MBarrierGroupType> barrier) {
   Value parity = rewriter.create<arith::ConstantIndexOp>(loc, 0);
   // 10M is an arbitrary, not too small or too big number to specify the number
   // of ticks before retry.
   // TODO: hoist this in a default dialect constant.
   Value ticksBeforeRetry =
       rewriter.create<arith::ConstantIndexOp>(loc, 10000000);
+  Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
   rewriter.create<nvgpu::MBarrierTryWaitParityOp>(loc, barrier, parity,
-                                                  ticksBeforeRetry);
+                                                  ticksBeforeRetry, zero);
 }

 //===----------------------------------------------------------------------===//
@@ -998,7 +1003,7 @@
       ArrayRef<OpFoldResult>{launchOp.getBlockSizeX(), launchOp.getBlockSizeY(),
                              launchOp.getBlockSizeZ()});

-  TypedValue<nvgpu::MBarrierType> barrier =
+  TypedValue<nvgpu::MBarrierGroupType> barrier =
       buildAndInitBarrierInSharedMemory(numThreads);

   SmallVector<TypedValue<MemRefType>> shmems;
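Since `HopperBuilder` only ever materializes a single-barrier group, each call site adapts to the new arity by passing a constant index 0. The IR it now emits looks roughly like this sketch (value names illustrative):

```mlir
// The builder creates a default (single-barrier) group and always selects
// mbarrier 0 of that group at every use.
%zero = arith.constant 0 : index
nvgpu.mbarrier.try_wait.parity %barrier[%zero], %parity, %ticks : !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>>
```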
diff --git a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
--- a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
@@ -470,28 +470,34 @@
   return %d : vector<2x2xi32>
 }

-!barrierType = !nvgpu.mbarrier.barrier<memorySpace = #gpu.address_space<workgroup>>
+!barrierType = !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>>
 !tokenType = !nvgpu.mbarrier.token

 // CHECK-LABEL: func @mbarrier
 func.func @mbarrier() {
   %num_threads = arith.constant 128 : index
+  // CHECK: %[[c0:.+]] = arith.constant 0 : index
+  // CHECK: %[[mid:.+]] = builtin.unrealized_conversion_cast %[[c0]] : index to i64
+  %c0 = arith.constant 0 : index

   // CHECK: %[[barMemref:.+]] = memref.get_global @__mbarrier{{.*}} : memref<1xi64, 3>
   %barrier = nvgpu.mbarrier.create -> !barrierType

   // CHECK: %[[barStr:.+]] = builtin.unrealized_conversion_cast %[[barMemref]] : memref<1xi64, 3> to !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
-  // CHECK: %[[barPtr:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[base:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[barPtr:.+]] = llvm.getelementptr %[[base]][%[[mid]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
   // CHECK: nvvm.mbarrier.init.shared %[[barPtr]]
-  nvgpu.mbarrier.init %barrier, %num_threads : !barrierType
+  nvgpu.mbarrier.init %barrier[%c0], %num_threads : !barrierType

-  // CHECK: %[[barPtr2:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[base2:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[barPtr2:.+]] = llvm.getelementptr %[[base2]][%[[mid]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
   // CHECK: %[[token:.+]] = nvvm.mbarrier.arrive.shared %[[barPtr2]]
-  %token = nvgpu.mbarrier.arrive %barrier : !barrierType -> !tokenType
+  %token = nvgpu.mbarrier.arrive %barrier[%c0] : !barrierType -> !tokenType

-  // CHECK: %[[barPtr3:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[base3:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[barPtr3:.+]] = llvm.getelementptr %[[base3]][%[[mid]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
   // CHECK: nvvm.mbarrier.test.wait.shared %[[barPtr3]], %[[token]]
-  %isDone = nvgpu.mbarrier.test.wait %barrier, %token : !barrierType, !tokenType
+  %isDone = nvgpu.mbarrier.test.wait %barrier[%c0], %token : !barrierType, !tokenType

   func.return
 }
@@ -500,63 +506,96 @@
 func.func @mbarrier_nocomplete() {
   %num_threads = arith.constant 128 : index
   %count = arith.constant 12 : index
+  // CHECK: %[[c0:.+]] = arith.constant 0 : index
+  // CHECK: %[[mid:.+]] = builtin.unrealized_conversion_cast %[[c0]] : index to i64
+  %c0 = arith.constant 0 : index

   // CHECK: %[[barMemref:.+]] = memref.get_global @__mbarrier{{.*}} : memref<1xi64, 3>
   %barrier = nvgpu.mbarrier.create -> !barrierType

   // CHECK: %[[barStr:.+]] = builtin.unrealized_conversion_cast %[[barMemref]] : memref<1xi64, 3> to !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
-  // CHECK: %[[barPtr:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[base:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[barPtr:.+]] = llvm.getelementptr %[[base]][%[[mid]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
   // CHECK: nvvm.mbarrier.init.shared %[[barPtr]]
-  nvgpu.mbarrier.init %barrier, %num_threads : !barrierType
+  nvgpu.mbarrier.init %barrier[%c0], %num_threads : !barrierType

-  // CHECK: %[[barPtr2:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[base2:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[barPtr2:.+]] = llvm.getelementptr %[[base2]][%[[mid]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
   // CHECK: %[[token:.+]] = nvvm.mbarrier.arrive.nocomplete.shared %[[barPtr2]]
-  %token = nvgpu.mbarrier.arrive.nocomplete %barrier, %count : !barrierType -> !tokenType
+  %token = nvgpu.mbarrier.arrive.nocomplete %barrier[%c0], %count : !barrierType -> !tokenType

-  // CHECK: %[[barPtr3:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[base3:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[barPtr3:.+]] = llvm.getelementptr %[[base3]][%[[mid]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
   // CHECK: nvvm.mbarrier.test.wait.shared %[[barPtr3]], %[[token]]
-  %isDone = nvgpu.mbarrier.test.wait %barrier, %token : !barrierType, !tokenType
+  %isDone = nvgpu.mbarrier.test.wait %barrier[%c0], %token : !barrierType, !tokenType

   func.return
 }

+// CHECK-LABEL: func @mbarrier_wait
+func.func @mbarrier_wait(%barriers : !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>, num_barriers = 5>, %token : !tokenType) {
+  %c0 = arith.constant 0 : index
+  %c1 = arith.constant 1 : index
+  %n = arith.constant 100 : index
+
+  %numBarriers = arith.constant 5 : index
+
+  scf.for %i = %c0 to %n step %c1 {
+// CHECK: %[[c5:.+]] = arith.constant 5 : index
+// CHECK: scf.for %[[i:.*]] =
+// CHECK: %[[S2:.+]] = arith.remui %[[i]], %[[c5]] : index
+// CHECK: %[[S3:.+]] = builtin.unrealized_conversion_cast %[[S2]] : index to i64
+// CHECK: %[[S4:.+]] = llvm.extractvalue %0[1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+// CHECK: %[[S5:.+]] = llvm.getelementptr %[[S4]][%[[S3]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
+    %mbarId = arith.remui %i, %numBarriers : index
+    %isDone = nvgpu.mbarrier.test.wait %barriers[%mbarId], %token : !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>, num_barriers = 5>, !tokenType
+  }
+  return
+}
+
 // CHECK-LABEL: func @mbarrier_txcount
 func.func @mbarrier_txcount() {
-  %num_threads = arith.constant 128 : index
+  %num_threads = arith.constant 128 : index
+  // CHECK: %[[c0:.+]] = arith.constant 0 : index
+  // CHECK: %[[mid:.+]] = builtin.unrealized_conversion_cast %[[c0]] : index to i64
+  %c0 = arith.constant 0 : index

   // CHECK: %[[barMemref:.+]] = memref.get_global @__mbarrier{{.*}} : memref<1xi64, 3>
   %barrier = nvgpu.mbarrier.create -> !barrierType

   // CHECK: %[[barStr:.+]] = builtin.unrealized_conversion_cast %[[barMemref]] : memref<1xi64, 3> to !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
-  // CHECK: %[[barPtr:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[base:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[barPtr:.+]] = llvm.getelementptr %[[base]][%[[mid]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
   // CHECK: nvvm.mbarrier.init.shared %[[barPtr]]
-  nvgpu.mbarrier.init %barrier, %num_threads : !barrierType
+  nvgpu.mbarrier.init %barrier[%c0], %num_threads : !barrierType

-  %c0 = arith.constant 0 : index
   %tidxreg = nvvm.read.ptx.sreg.tid.x : i32
   %tidx = arith.index_cast %tidxreg : i32 to index
   %cnd = arith.cmpi eq, %tidx, %c0 : index

   scf.if %cnd {
     %txcount = arith.constant 256 : index
-    // CHECK: %[[barPtr2:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+    // CHECK: %[[base2:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+    // CHECK: %[[barPtr2:.+]] = llvm.getelementptr %[[base2]][%[[mid]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
     // CHECK: nvvm.mbarrier.arrive.expect_tx.shared %[[barPtr2]]
-    nvgpu.mbarrier.arrive.expect_tx %barrier, %txcount : !barrierType
+    nvgpu.mbarrier.arrive.expect_tx %barrier[%c0], %txcount : !barrierType
     scf.yield
   } else {
     %txcount = arith.constant 0 : index
-    // CHECK: %[[barPtr2:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+    // CHECK: %[[base2:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+    // CHECK: %[[barPtr2:.+]] = llvm.getelementptr %[[base2]][%[[mid]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
     // CHECK: nvvm.mbarrier.arrive.expect_tx.shared %[[barPtr2]]
-    nvgpu.mbarrier.arrive.expect_tx %barrier, %txcount : !barrierType
+    nvgpu.mbarrier.arrive.expect_tx %barrier[%c0], %txcount : !barrierType
     scf.yield
   }

   %phase = arith.constant 0 : index
   %ticks = arith.constant 10000000 : index
-  // CHECK: %[[barPtr3:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[base3:.+]] = llvm.extractvalue %[[barStr]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[barPtr3:.+]] = llvm.getelementptr %[[base3]][%[[mid]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, i64
   // CHECK: nvvm.mbarrier.try_wait.parity.shared %[[barPtr3]]
-  nvgpu.mbarrier.try_wait.parity %barrier, %phase, %ticks : !barrierType
+  nvgpu.mbarrier.try_wait.parity %barrier[%c0], %phase, %ticks : !barrierType

   func.return
 }
@@ -567,7 +606,7 @@
 !tensorMap3d = !nvgpu.tensormap.descriptor<tensor = memref<2x32x32xf32,3>, swizzle=swizzle_64b, l2promo = l2promo_64b, oob = zero, interleave = none>
 !tensorMap4d = !nvgpu.tensormap.descriptor<tensor = memref<2x2x32x32xf32,3>, swizzle=swizzle_128b,l2promo = l2promo_128b,oob = zero, interleave = interleave_16b>
 !tensorMap5d = !nvgpu.tensormap.descriptor<tensor = memref<2x2x2x32x32xf32,3>, swizzle=none, l2promo = none, oob = zero, interleave = none>
-!mbarrier = !nvgpu.mbarrier.barrier<memorySpace = #gpu.address_space<workgroup>>
+!mbarrier = !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>>

 func.func @async_tma_load(%tensorMap1d: !tensorMap1d, %tensorMap2d: !tensorMap2d, %tensorMap3d: !tensorMap3d, %tensorMap4d: !tensorMap4d, %tensorMap5d: !tensorMap5d,
                           %buffer1d: memref<128xf32,3>, %buffer2d: memref<32x32xf32,3>,
@@ -575,18 +614,19 @@
                           %buffer4d: memref<2x2x32x32xf32,3>, %buffer5d: memref<2x2x2x32x32xf32,3>,
                           %mbarrier: !mbarrier) {
+  %c0 = arith.constant 0 : index
   %crd0 = arith.constant 0 : index
   %crd1 = arith.constant 0 : index
   // CHECK: nvvm.cp.async.bulk.tensor.shared.cluster.global %{{.*}}, %{{.*}}, %{{.*}}, box[%{{.*}}]
-  nvgpu.tma.async.load %tensorMap1d[%crd0], %mbarrier to %buffer1d : !tensorMap1d, !mbarrier -> memref<128xf32,3>
+  nvgpu.tma.async.load %tensorMap1d[%crd0], %mbarrier[%c0] to %buffer1d : !tensorMap1d, !mbarrier -> memref<128xf32,3>
   // CHECK: nvvm.cp.async.bulk.tensor.shared.cluster.global %{{.*}}, %{{.*}}, %{{.*}}, box[%{{.*}}, %{{.*}}]
-  nvgpu.tma.async.load %tensorMap2d[%crd0, %crd1], %mbarrier to %buffer2d : !tensorMap2d, !mbarrier -> memref<32x32xf32,3>
+  nvgpu.tma.async.load %tensorMap2d[%crd0, %crd1], %mbarrier[%c0] to %buffer2d : !tensorMap2d, !mbarrier -> memref<32x32xf32,3>
   // CHECK: nvvm.cp.async.bulk.tensor.shared.cluster.global %{{.*}}, %{{.*}}, %{{.*}}, box[%{{.*}}, %{{.*}}, %{{.*}}]
-  nvgpu.tma.async.load %tensorMap3d[%crd0, %crd1, %crd0], %mbarrier to %buffer3d : !tensorMap3d, !mbarrier -> memref<2x32x32xf32,3>
+  nvgpu.tma.async.load %tensorMap3d[%crd0, %crd1, %crd0], %mbarrier[%c0] to %buffer3d : !tensorMap3d, !mbarrier -> memref<2x32x32xf32,3>
   // CHECK: nvvm.cp.async.bulk.tensor.shared.cluster.global %{{.*}}, %{{.*}}, %{{.*}}, box[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}]
-  nvgpu.tma.async.load %tensorMap4d[%crd0, %crd1, %crd1, %crd0], %mbarrier to %buffer4d : !tensorMap4d, !mbarrier -> memref<2x2x32x32xf32,3>
+  nvgpu.tma.async.load %tensorMap4d[%crd0, %crd1, %crd1, %crd0], %mbarrier[%c0] to %buffer4d : !tensorMap4d, !mbarrier -> memref<2x2x32x32xf32,3>
   // CHECK: nvvm.cp.async.bulk.tensor.shared.cluster.global %{{.*}}, %{{.*}}, %{{.*}}, box[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}]
-  nvgpu.tma.async.load %tensorMap5d[%crd0, %crd1, %crd1, %crd0, %crd0], %mbarrier to %buffer5d : !tensorMap5d, !mbarrier -> memref<2x2x2x32x32xf32,3>
+  nvgpu.tma.async.load %tensorMap5d[%crd0, %crd1, %crd1, %crd0, %crd0], %mbarrier[%c0] to %buffer5d : !tensorMap5d, !mbarrier -> memref<2x2x2x32x32xf32,3>
   func.return
 }
@@ -621,12 +661,12 @@
   %rhsShmem3 = memref.subview %rhsShmem2[1,0,0][1, 64, 128][1, 1, 1] : memref<2x64x128xf16,3> to memref<1x64x128xf16, strided<[8192, 128, 1], offset: 8192>, 3>
   %rhsShmem = memref.subview %rhsShmem3[0,0,0][1, 64, 128][1, 1, 1] : memref<1x64x128xf16, strided<[8192, 128, 1], offset: 8192>, 3> to !shmemrhs
   // CHECK: nvvm.cp.async.bulk.tensor.shared.cluster.global
-  nvgpu.tma.async.load %lhsTensorMap[%c0, %c0], %mbarrier to %lhsShmem : !lhsTensorMap, !barrierType -> !shmemlhs
+  nvgpu.tma.async.load %lhsTensorMap[%c0, %c0], %mbarrier[%c0] to %lhsShmem : !lhsTensorMap, !barrierType -> !shmemlhs
   // CHECK: %[[desc:.+]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[c8192:.+]] = llvm.mlir.constant(8192 : index) : i64
   // CHECK: %[[shmemOfset:.+]] = llvm.getelementptr %[[desc]][%[[c8192]]] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f16
   // CHECK: nvvm.cp.async.bulk.tensor.shared.cluster.global %[[shmemOfset]], %{{.*}}, %{{.*}}, box[%{{.*}}, %{{.*}}] : !llvm.ptr<3>, !llvm.ptr, !llvm.ptr<3>, i32, i32
-  nvgpu.tma.async.load %rhsTensorMap[%c0, %c0], %mbarrier to %rhsShmem : !rhsTensorMap, !barrierType -> !shmemrhs
+  nvgpu.tma.async.load %rhsTensorMap[%c0, %c0], %mbarrier[%c0] to %rhsShmem : !rhsTensorMap, !barrierType -> !shmemrhs
   return
 }
}
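End to end, the indexed form composes with the phase-based wait as in the following sketch, which is not part of the test file and cycles a group of two mbarriers:

```mlir
!barGroup = !nvgpu.mbarrier.group<memorySpace = #gpu.address_space<workgroup>, num_barriers = 2>

func.func @double_buffer(%num_threads: index, %phase: index, %ticks: index) {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %barriers = nvgpu.mbarrier.create -> !barGroup
  nvgpu.mbarrier.init %barriers[%c0], %num_threads : !barGroup
  nvgpu.mbarrier.init %barriers[%c1], %num_threads : !barGroup
  // Wait on one mbarrier of the group while work tied to the other is in flight.
  nvgpu.mbarrier.try_wait.parity %barriers[%c0], %phase, %ticks : !barGroup
  nvgpu.mbarrier.try_wait.parity %barriers[%c1], %phase, %ticks : !barGroup
  return
}
```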