Index: mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
===================================================================
--- mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -58,6 +58,9 @@
   }
 
 private:
+  /// Next ID to assign to a reduction buffer.
+  unsigned nextReduceBufferID = 0;
+
   /// Returns an accumulator factory using either the op attribute or the body
   /// region.
   AccumulatorFactory getFactory(gpu::AllReduceOp allReduce,
@@ -364,6 +367,11 @@
     return rewriter.getInsertionBlock()->getArgument(0);
   }
 
+  /// Returns the next ID to be used to give each reduce buffer a unique name.
+  unsigned getNextReduceBufferName() {
+    return nextReduceBufferID++;
+  }
+
   /// Creates a global array stored in shared memory.
   Value createSharedMemoryArray(Location loc, gpu::GPUModuleOp module,
                                 LLVM::LLVMType elementType, int numElements,
@@ -371,7 +379,10 @@
     OpBuilder builder(module.body());
 
     auto arrayType = LLVM::LLVMType::getArrayTy(elementType, numElements);
-    StringRef name = "reduce_buffer";
+    unsigned bufferID =
+        const_cast<GpuAllReduceRewriter *>(this)->getNextReduceBufferName();
+    SmallString<16> out;
+    StringRef name = ("reduce_buffer" + Twine(bufferID)).toStringRef(out);
     auto globalOp = builder.create<LLVM::GlobalOp>(
         loc, arrayType.cast<LLVM::LLVMType>(), /*isConstant=*/false,
         LLVM::Linkage::Internal, name,
Index: mlir/test/Dialect/GPU/multiple-all-reduce.mlir
===================================================================
--- /dev/null
+++ mlir/test/Dialect/GPU/multiple-all-reduce.mlir
@@ -0,0 +1,25 @@
+// RUN: mlir-opt --gpu-kernel-outlining --convert-gpu-to-nvvm %s | FileCheck %s
+
+func @main() {
+  %data = alloc() : memref<2x6xf32>
+  %sum = alloc() : memref<2xf32>
+  %mul = alloc() : memref<2xf32>
+  %c1 = constant 1 : index
+
+  // ADD + MUL
+  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1)
+             threads(%tx, %ty, %tz) in (%block_x = %c1, %block_y = %c1, %block_z = %c1) {
+    %val = load %data[%bx, %tx] : memref<2x6xf32>
+    %reduced0 = "gpu.all_reduce"(%val) ({}) { op = "add" } : (f32) -> (f32)
+    store %reduced0, %sum[%bx] : memref<2xf32>
+    %reduced1 = "gpu.all_reduce"(%val) ({}) { op = "mul" } : (f32) -> (f32)
+    store %reduced1, %mul[%bx] : memref<2xf32>
+    gpu.terminator
+  }
+
+// CHECK: gpu.module @main_kernel {
+// CHECK-NEXT: llvm.mlir.global internal @reduce_buffer1() {addr_space = 3 : i32} : !llvm<"[32 x float]">
+// CHECK-NEXT: llvm.mlir.global internal @reduce_buffer0() {addr_space = 3 : i32} : !llvm<"[32 x float]">
+
+  return
+}
Index: mlir/test/mlir-cuda-runner/multiple-all-reduce.mlir
===================================================================
--- /dev/null
+++ mlir/test/mlir-cuda-runner/multiple-all-reduce.mlir
@@ -0,0 +1,64 @@
+// RUN: mlir-cuda-runner %s --shared-libs=%cuda_wrapper_library_dir/libcuda-runtime-wrappers%shlibext,%linalg_test_lib_dir/libmlir_runner_utils%shlibext --entry-point-result=void | FileCheck %s
+
+func @main() {
+  %data = alloc() : memref<2x6xf32>
+  %sum = alloc() : memref<2xf32>
+  %mul = alloc() : memref<2xf32>
+  %cst0 = constant 0.0 : f32
+  %cst1 = constant 1.0 : f32
+  %cst2 = constant 2.0 : f32
+  %cst4 = constant 4.0 : f32
+  %cst8 = constant 8.0 : f32
+  %cst16 = constant 16.0 : f32
+
+  %cst3 = constant 3.0 : f32
+  %cst6 = constant 6.0 : f32
+  %cst7 = constant 7.0 : f32
+  %cst10 = constant 10.0 : f32
+  %cst11 = constant 11.0 : f32
+
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+  %c3 = constant 3 : index
+  %c4 = constant 4 : index
+  %c5 = constant 5 : index
+  %c6 = constant 6 : index
+
+  store %cst0, %data[%c0, %c0] : memref<2x6xf32>
+  store %cst1, %data[%c0, %c1] : memref<2x6xf32>
+  store %cst2, %data[%c0, %c2] : memref<2x6xf32>
+  store %cst4, %data[%c0, %c3] : memref<2x6xf32>
+  store %cst8, %data[%c0, %c4] : memref<2x6xf32>
+  store %cst16, %data[%c0, %c5] : memref<2x6xf32>
+
+  store %cst2, %data[%c1, %c0] : memref<2x6xf32>
+  store %cst3, %data[%c1, %c1] : memref<2x6xf32>
+  store %cst6, %data[%c1, %c2] : memref<2x6xf32>
+  store %cst7, %data[%c1, %c3] : memref<2x6xf32>
+  store %cst10, %data[%c1, %c4] : memref<2x6xf32>
+  store %cst11, %data[%c1, %c5] : memref<2x6xf32>
+
+  // ADD + MUL
+  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c2, %grid_y = %c1, %grid_z = %c1)
+             threads(%tx, %ty, %tz) in (%block_x = %c6, %block_y = %c1, %block_z = %c1) {
+    %val = load %data[%bx, %tx] : memref<2x6xf32>
+    %reduced0 = "gpu.all_reduce"(%val) ({}) { op = "add" } : (f32) -> (f32)
+    store %reduced0, %sum[%bx] : memref<2xf32>
+    %reduced1 = "gpu.all_reduce"(%val) ({}) { op = "mul" } : (f32) -> (f32)
+    store %reduced1, %mul[%bx] : memref<2xf32>
+    gpu.terminator
+  }
+
+  %ptr_sum = memref_cast %sum : memref<2xf32> to memref<*xf32>
+  call @print_memref_f32(%ptr_sum) : (memref<*xf32>) -> ()
+  // CHECK: [31, 39]
+
+  %ptr_mul = memref_cast %mul : memref<2xf32> to memref<*xf32>
+  call @print_memref_f32(%ptr_mul) : (memref<*xf32>) -> ()
+  // CHECK: [0, 27720]
+
+  return
+}
+
+func @print_memref_f32(memref<*xf32>)