diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -187,6 +187,9 @@
       inst->setSyncScopeID(llvmContext.getOrInsertSyncScopeID(*$syncscope));
     }
   }];
+  code setOrderingCode = [{
+    inst->setAtomic(convertAtomicOrderingToLLVM($ordering));
+  }];
   code setNonTemporalMetadataCode = [{
     if ($nontemporal) {
       llvm::MDNode *metadata = llvm::MDNode::get(
@@ -350,17 +353,48 @@
                    OptionalAttr<SymbolRefArrayAttr>:$alias_scopes,
                    OptionalAttr<SymbolRefArrayAttr>:$noalias_scopes,
                    OptionalAttr<SymbolRefArrayAttr>:$tbaa,
-                   OptionalAttr<I64Attr>:$alignment, UnitAttr:$volatile_,
-                   UnitAttr:$nontemporal);
+                   OptionalAttr<I64Attr>:$alignment,
+                   UnitAttr:$volatile_,
+                   UnitAttr:$nontemporal,
+                   DefaultValuedAttr<AtomicOrdering,
+                                     "AtomicOrdering::not_atomic">:$ordering,
+                   OptionalAttr<StrAttr>:$syncscope);
   let results = (outs LLVM_LoadableType:$res);
   string llvmInstName = "Load";
+  let description = [{
+    The `load` operation is used to read from memory. A load may be marked as
+    atomic, volatile, and/or nontemporal, and takes a number of optional
+    attributes that specify aliasing information.
+
+    An atomic load only supports a limited set of pointer, integer, and
+    floating point types, and requires an explicit alignment.
+
+    Examples:
+    ```mlir
+    // A volatile load of a float variable.
+    %0 = llvm.load volatile %ptr : !llvm.ptr -> f32
+
+    // A nontemporal load of a float variable.
+    %0 = llvm.load %ptr {nontemporal} : !llvm.ptr -> f32
+
+    // An atomic load of an integer variable.
+    %0 = llvm.load %ptr atomic monotonic {alignment = 8 : i64}
+        : !llvm.ptr -> i64
+    ```
+
+    See the following link for more details:
+    https://llvm.org/docs/LangRef.html#load-instruction
+  }];
   let assemblyFormat = [{
-    (`volatile` $volatile_^)? $addr attr-dict `:`
-    custom<LoadType>(type($addr), type($res))
+    (`volatile` $volatile_^)? $addr
+    (`atomic` (`syncscope` `(` $syncscope^ `)`)? $ordering^)?
+    attr-dict `:` custom<LoadType>(type($addr), type($res))
   }];
   string llvmBuilder = [{
     auto *inst = builder.CreateLoad($_resultType, $addr, $volatile_);
-  }] # setAlignmentCode
+  }] # setOrderingCode
+     # setSyncScopeCode
+     # setAlignmentCode
      # setNonTemporalMetadataCode
      # setAccessGroupsMetadataCode
      # setAliasScopeMetadataCode
@@ -373,22 +407,19 @@
     unsigned alignment = loadInst->getAlign().value();
     $res = $_builder.create<LLVM::LoadOp>(
         $_location, $_resultType, $addr, alignment, loadInst->isVolatile(),
-        loadInst->hasMetadata(llvm::LLVMContext::MD_nontemporal));
+        loadInst->hasMetadata(llvm::LLVMContext::MD_nontemporal),
+        convertAtomicOrderingFromLLVM(loadInst->getOrdering()),
+        getLLVMSyncScope(loadInst));
   }];
   let builders = [
     OpBuilder<(ins "Value":$addr, CArg<"unsigned", "0">:$alignment,
-               CArg<"bool", "false">:$isVolatile, CArg<"bool", "false">:$isNonTemporal),
-    [{
-      auto type = addr.getType().cast<LLVMPointerType>().getElementType();
-      assert(type && "must provide explicit element type to the constructor "
-                     "when the pointer type is opaque");
-      build($_builder, $_state, type, addr, alignment, isVolatile, isNonTemporal);
-    }]>,
+               CArg<"bool", "false">:$isVolatile, CArg<"bool", "false">:$isNonTemporal)>,
    OpBuilder<(ins "Type":$type, "Value":$addr,
                CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile,
-               CArg<"bool", "false">:$isNonTemporal)>
+               CArg<"bool", "false">:$isNonTemporal,
+               CArg<"AtomicOrdering", "AtomicOrdering::not_atomic">:$ordering,
+               CArg<"StringRef", "StringRef()">:$syncscope)>
   ];
-  let hasCustomAssemblyFormat = 1;
   let hasVerifier = 1;
 }
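As a reading aid for the format change above, here is a minimal sketch of how the optional pieces of the extended assembly format compose. This is not taken verbatim from the patch: `%ptr` is a placeholder value and `"agent"` is an arbitrary target-defined scope name (the patch's own tests use `"singlethread"`).

```mlir
// `atomic` is followed by an optional syncscope and a mandatory ordering;
// the verifier additionally requires an explicit alignment attribute.
%0 = llvm.load %ptr atomic acquire {alignment = 4 : i64} : !llvm.ptr -> i32

// `volatile` and `atomic` compose, and a target-defined scope may be named.
%1 = llvm.load volatile %ptr atomic syncscope("agent") monotonic
    {alignment = 8 : i64} : !llvm.ptr -> i64
```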
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -804,15 +804,65 @@
   return success();
 }
 
-LogicalResult LoadOp::verify() { return verifyMemOpMetadata(*this); }
+/// Returns true if the given type is supported by atomic operations. All
+/// integer and float types with limited bit width are supported. Additionally,
+/// depending on the operation, pointers may be supported as well.
+static bool isTypeCompatibleWithAtomicOp(Type type, bool isPointerTypeAllowed) {
+  if (type.isa<LLVMPointerType>())
+    return isPointerTypeAllowed;
+
+  std::optional<unsigned> bitWidth = std::nullopt;
+  if (auto floatType = type.dyn_cast<FloatType>()) {
+    if (!isCompatibleFloatingPointType(type))
+      return false;
+    bitWidth = floatType.getWidth();
+  }
+  if (auto integerType = type.dyn_cast<IntegerType>())
+    bitWidth = integerType.getWidth();
+  // The type is neither an integer, float, nor pointer type.
+  if (!bitWidth)
+    return false;
+  return *bitWidth == 8 || *bitWidth == 16 || *bitWidth == 32 ||
+         *bitWidth == 64;
+}
+
+LogicalResult LoadOp::verify() {
+  if (getOrdering() != AtomicOrdering::not_atomic) {
+    if (!isTypeCompatibleWithAtomicOp(getResult().getType(),
+                                      /*isPointerTypeAllowed=*/true))
+      return emitOpError("unsupported type ")
+             << getResult().getType() << " for atomic access";
+    if (getOrdering() == AtomicOrdering::release ||
+        getOrdering() == AtomicOrdering::acq_rel)
+      return emitOpError("unsupported ordering '")
+             << stringifyAtomicOrdering(getOrdering()) << "'";
+    if (!getAlignment())
+      return emitOpError("expected alignment for atomic access");
+  } else if (getSyncscope()) {
+    return emitOpError("expected syncscope to be null for non-atomic access");
+  }
+  return verifyMemOpMetadata(*this);
+}
+
+void LoadOp::build(OpBuilder &builder, OperationState &state, Value addr,
+                   unsigned alignment, bool isVolatile, bool isNonTemporal) {
+  auto type = addr.getType().cast<LLVMPointerType>().getElementType();
+  assert(type && "must provide explicit element type to the constructor "
+                 "when the pointer type is opaque");
+  build(builder, state, type, addr, alignment, isVolatile, isNonTemporal);
+}
 
 void LoadOp::build(OpBuilder &builder, OperationState &state, Type type,
                    Value addr, unsigned alignment, bool isVolatile,
-                   bool isNonTemporal) {
-  build(builder, state, type, addr, /*access_groups=*/nullptr,
-        /*alias_scopes=*/nullptr, /*noalias_scopes=*/nullptr, /*tbaa=*/nullptr,
+                   bool isNonTemporal, AtomicOrdering ordering,
+                   StringRef syncscope) {
+  build(builder, state, type, addr,
+        /*access_groups=*/nullptr,
+        /*alias_scopes=*/nullptr, /*noalias_scopes=*/nullptr,
+        /*tbaa=*/nullptr,
        alignment ? builder.getI64IntegerAttr(alignment) : nullptr, isVolatile,
-        isNonTemporal);
+        isNonTemporal, ordering,
+        syncscope.empty() ? nullptr : builder.getStringAttr(syncscope));
 }
 
 // Extract the pointee type from the LLVM pointer type wrapped in MLIR. Return
@@ -2266,12 +2316,7 @@
     if (!mlir::LLVM::isCompatibleFloatingPointType(valType))
       return emitOpError("expected LLVM IR floating point type");
   } else if (getBinOp() == AtomicBinOp::xchg) {
-    auto intType = valType.dyn_cast<IntegerType>();
-    unsigned intBitWidth = intType ? intType.getWidth() : 0;
-    if (intBitWidth != 8 && intBitWidth != 16 && intBitWidth != 32 &&
-        intBitWidth != 64 && !valType.isa<BFloat16Type>() &&
-        !valType.isa<Float16Type>() && !valType.isa<Float32Type>() &&
-        !valType.isa<Float64Type>())
+    if (!isTypeCompatibleWithAtomicOp(valType, /*isPointerTypeAllowed=*/false))
       return emitOpError("unexpected LLVM IR type for 'xchg' bin_op");
   } else {
     auto intType = valType.dyn_cast<IntegerType>();
@@ -2320,12 +2365,8 @@
   if (!ptrType.isOpaque() && valType != ptrType.getElementType())
     return emitOpError("expected LLVM IR element type for operand #0 to "
                        "match type for all other operands");
-  auto intType = valType.dyn_cast<IntegerType>();
-  unsigned intBitWidth = intType ? intType.getWidth() : 0;
-  if (!valType.isa<LLVMPointerType>() && intBitWidth != 8 &&
-      intBitWidth != 16 && intBitWidth != 32 && intBitWidth != 64 &&
-      !valType.isa<BFloat16Type>() && !valType.isa<Float16Type>() &&
-      !valType.isa<Float32Type>() && !valType.isa<Float64Type>())
+  if (!isTypeCompatibleWithAtomicOp(valType,
+                                    /*isPointerTypeAllowed=*/true))
     return emitOpError("unexpected LLVM IR type");
   if (getSuccessOrdering() < AtomicOrdering::monotonic ||
       getFailureOrdering() < AtomicOrdering::monotonic)
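Since `isTypeCompatibleWithAtomicOp` now centralizes the type legality check for `llvm.load`, `llvm.atomicrmw` (xchg), and `llvm.cmpxchg`, a short sketch of what it admits for loads may help. This is a hedged illustration, not from the patch's tests: `%ptr` is a placeholder, and the pointer-result case assumes opaque pointers as used throughout the tests below.

```mlir
// 8/16/32/64-bit integer and floating point results pass verification.
%0 = llvm.load %ptr atomic monotonic {alignment = 2 : i64} : !llvm.ptr -> i16
%1 = llvm.load %ptr atomic monotonic {alignment = 8 : i64} : !llvm.ptr -> f64

// Pointer results are admitted as well, because LoadOp::verify calls the
// helper with /*isPointerTypeAllowed=*/true (xchg passes false instead).
%2 = llvm.load %ptr atomic monotonic {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr
```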
diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir
--- a/mlir/test/Dialect/LLVMIR/invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/invalid.mlir
@@ -153,6 +153,41 @@
 
 // -----
 
+func.func @load_syncscope(%ptr : !llvm.ptr) {
+  // expected-error@below {{expected syncscope to be null for non-atomic access}}
+  %1 = "llvm.load"(%ptr) {syncscope = "singlethread"} : (!llvm.ptr) -> (f32)
+}
+
+// -----
+
+func.func @load_unsupported_ordering(%ptr : !llvm.ptr) {
+  // expected-error@below {{unsupported ordering 'release'}}
+  %1 = llvm.load %ptr atomic release {alignment = 4 : i64} : !llvm.ptr -> f32
+}
+
+// -----
+
+func.func @load_unsupported_type(%ptr : !llvm.ptr) {
+  // expected-error@below {{unsupported type 'f80' for atomic access}}
+  %1 = llvm.load %ptr atomic monotonic {alignment = 16 : i64} : !llvm.ptr -> f80
+}
+
+// -----
+
+func.func @load_unsupported_type(%ptr : !llvm.ptr) {
+  // expected-error@below {{unsupported type 'i1' for atomic access}}
+  %1 = llvm.load %ptr atomic monotonic {alignment = 16 : i64} : !llvm.ptr -> i1
+}
+
+// -----
+
+func.func @load_unaligned_atomic(%ptr : !llvm.ptr) {
+  // expected-error@below {{expected alignment for atomic access}}
+  %1 = llvm.load %ptr atomic monotonic : !llvm.ptr -> f32
+}
+
+// -----
+
 func.func @store_non_llvm_type(%foo : memref<f32>, %bar : f32) {
   // expected-error@+1 {{expected LLVM pointer type}}
   llvm.store %bar, %foo : memref<f32>
diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -339,6 +339,15 @@
   llvm.return
 }
 
+// CHECK-LABEL: @atomic_load
+func.func @atomic_load(%ptr : !llvm.ptr) {
+  // CHECK: llvm.load %{{.*}} atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  %0 = llvm.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  // CHECK: llvm.load volatile %{{.*}} atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr -> f32
+  %1 = llvm.load volatile %ptr atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr -> f32
+  llvm.return
+}
+
 // CHECK-LABEL: @atomicrmw
 func.func @atomicrmw(%ptr : !llvm.ptr, %val : f32) {
   // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} monotonic : !llvm.ptr, f32
diff --git a/mlir/test/Target/LLVMIR/Import/instructions.ll b/mlir/test/Target/LLVMIR/Import/instructions.ll
--- a/mlir/test/Target/LLVMIR/Import/instructions.ll
+++ b/mlir/test/Target/LLVMIR/Import/instructions.ll
@@ -368,6 +368,18 @@
 
 ; // -----
 
+; CHECK-LABEL: @atomic_load
+; CHECK-SAME:  %[[PTR:[a-zA-Z0-9]+]]
+define void @atomic_load(ptr %ptr) {
+  ; CHECK:  %[[V1:[0-9]+]] = llvm.load %[[PTR]] atomic acquire {alignment = 8 : i64} : !llvm.ptr -> f64
+  ; CHECK:  %[[V2:[0-9]+]] = llvm.load volatile %[[PTR]] atomic syncscope("singlethreaded") acquire {alignment = 16 : i64} : !llvm.ptr -> f64
+  %1 = load atomic double, ptr %ptr acquire, align 8
+  %2 = load atomic volatile double, ptr %ptr syncscope("singlethreaded") acquire, align 16
+  ret void
+}
+
+; // -----
+
 ; CHECK-LABEL: @atomic_rmw
 ; CHECK-SAME:  %[[PTR1:[a-zA-Z0-9]+]]
 ; CHECK-SAME:  %[[VAL1:[a-zA-Z0-9]+]]
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -1261,7 +1261,7 @@
 // CHECK-LABEL: @indexconstantarray
 llvm.func @indexconstantarray() -> vector<3xi32> {
   %1 = llvm.mlir.constant(dense<[0, 1, 2]> : vector<3xindex>) : vector<3xi32>
-  // CHECK: ret <3 x i32> <i32 0, i32 1, i32 2>
+  // CHECK: ret <3 x i32> <i32 0, i32 1, i32 2>
   llvm.return %1 : vector<3xi32>
 }
 
@@ -1780,6 +1780,18 @@
 
 // -----
 
+llvm.func @atomic_load(%ptr : !llvm.ptr) {
+  // CHECK: load atomic
+  // CHECK-SAME: monotonic, align 4
+  %1 = llvm.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  // CHECK: load atomic
+  // CHECK-SAME: syncscope("singlethread") monotonic, align 4
+  %2 = llvm.load %ptr atomic syncscope("singlethread") monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  llvm.return
+}
+
+// -----
+
 // Check that the translation does not crash in absence of a data layout.
 module {
   // CHECK: declare void @module_default_layout
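Closing the loop on the verifier tests above, here is a hedged sketch of the full set of orderings an atomic load is expected to accept; the keywords assume the stringified `AtomicOrdering` enum values, and `%ptr` is a placeholder. This matches the LangRef rule that atomic loads may use `unordered`, `monotonic`, `acquire`, and `seq_cst`, while `release` and `acq_rel` are rejected, as exercised in invalid.mlir.

```mlir
// All four load-compatible orderings; each still needs an explicit alignment.
%0 = llvm.load %ptr atomic unordered {alignment = 4 : i64} : !llvm.ptr -> i32
%1 = llvm.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> i32
%2 = llvm.load %ptr atomic acquire {alignment = 4 : i64} : !llvm.ptr -> i32
%3 = llvm.load %ptr atomic seq_cst {alignment = 4 : i64} : !llvm.ptr -> i32
```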