diff --git a/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td b/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td --- a/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td +++ b/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td @@ -164,4 +164,23 @@ let hasVerifier = 1; } +def AMDGPU_LDSBarrierOp : AMDGPU_Op<"lds_barrier"> { + let summary = "Barrier that includes a wait for LDS memory operations."; + let description = [{ + `amdgpu.lds_barrier` is both a barrier (all workitems in a workgroup must reach + the barrier before any of them may proceed past it) and a wait for all + operations that affect the Local Data Store (LDS) issued from that workgroup + to complete before the workgroup may continue. Since the LDS is per-workgroup + memory, this barrier may be used, for example, to ensure all workitems have + written data to LDS before any workitem attempts to read from it. + + Note that `lds_barrier` does **not** force reads to or from global memory + to complete before execution continues. Therefore, it should be used when + operations on global memory can be issued far in advance of when their results + are used (for example, by writing them to LDS). 
+ }]; + let assemblyFormat = "attr-dict"; +} + + #endif // AMDGPU diff --git a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp --- a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp +++ b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp @@ -241,6 +241,26 @@ } }; +struct LDSBarrierOpLowering : public ConvertOpToLLVMPattern<LDSBarrierOp> { + using ConvertOpToLLVMPattern<LDSBarrierOp>::ConvertOpToLLVMPattern; + + LogicalResult + matchAndRewrite(LDSBarrierOp op, LDSBarrierOp::Adaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto asmDialectAttr = LLVM::AsmDialectAttr::get(rewriter.getContext(), + LLVM::AsmDialect::AD_ATT); + const char *asmStr = "s_waitcnt lgkmcnt(0)\ns_barrier"; + const char *constraints = ""; + rewriter.replaceOpWithNewOp<LLVM::InlineAsmOp>( + op, + /*resultTypes=*/TypeRange(), /*operands=*/ValueRange(), + /*asm_string=*/asmStr, constraints, /*has_side_effects=*/true, + /*is_align_stack=*/false, /*asm_dialect=*/asmDialectAttr, + /*operand_attrs=*/ArrayAttr()); + return success(); + } +}; + struct ConvertAMDGPUToROCDLPass : public ConvertAMDGPUToROCDLBase<ConvertAMDGPUToROCDLPass> { ConvertAMDGPUToROCDLPass() = default; @@ -269,6 +289,7 @@ void mlir::populateAMDGPUToROCDLConversionPatterns(LLVMTypeConverter &converter, RewritePatternSet &patterns, Chipset chipset) { + patterns.add<LDSBarrierOpLowering>(converter); patterns.add< RawBufferOpLowering<RawBufferLoadOp, ROCDL::RawBufferLoadOp>, RawBufferOpLowering<RawBufferStoreOp, ROCDL::RawBufferStoreOp>, diff --git a/mlir/test/Conversion/AMDGPUToROCDL/amdgpu-to-rocdl.mlir b/mlir/test/Conversion/AMDGPUToROCDL/amdgpu-to-rocdl.mlir --- a/mlir/test/Conversion/AMDGPUToROCDL/amdgpu-to-rocdl.mlir +++ b/mlir/test/Conversion/AMDGPUToROCDL/amdgpu-to-rocdl.mlir @@ -101,3 +101,10 @@ amdgpu.raw_buffer_atomic_fadd {boundsCheck = true} %value -> %buf[%idx] : f32 -> memref<64xf32>, i32 func.return } + +// CHECK-LABEL: func @lds_barrier +func.func @lds_barrier() { + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "s_waitcnt lgkmcnt(0)\0As_barrier" + amdgpu.lds_barrier + func.return +} diff --git 
a/mlir/test/Dialect/AMDGPU/ops.mlir b/mlir/test/Dialect/AMDGPU/ops.mlir --- a/mlir/test/Dialect/AMDGPU/ops.mlir +++ b/mlir/test/Dialect/AMDGPU/ops.mlir @@ -59,3 +59,10 @@ amdgpu.raw_buffer_atomic_fadd {boundsCheck = true, indexOffset = 1 : i32} %value -> %dst[%idx0, %idx1, %idx2, %idx3] sgprOffset %offset : f32 -> memref<128x64x32x16xf32>, i32, i32, i32, i32 func.return } + +// CHECK-LABEL: func @lds_barrier +func.func @lds_barrier() { + // CHECK: amdgpu.lds_barrier + amdgpu.lds_barrier + func.return +}