diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -430,16 +430,47 @@
                   OptionalAttr<SymbolRefArrayAttr>:$alias_scopes,
                   OptionalAttr<SymbolRefArrayAttr>:$noalias_scopes,
                   OptionalAttr<SymbolRefArrayAttr>:$tbaa,
-                  OptionalAttr<I64Attr>:$alignment, UnitAttr:$volatile_,
-                  UnitAttr:$nontemporal);
+                  OptionalAttr<I64Attr>:$alignment,
+                  UnitAttr:$volatile_,
+                  UnitAttr:$nontemporal,
+                  DefaultValuedAttr<AtomicOrdering, "AtomicOrdering::not_atomic">:$ordering,
+                  OptionalAttr<StrAttr>:$syncscope);
   string llvmInstName = "Store";
+  let description = [{
+    The `store` operation is used to write to memory. A store may be marked as
+    atomic, volatile, and/or nontemporal, and takes a number of optional
+    attributes that specify aliasing information.
+
+    An atomic store only supports a limited set of pointer, integer, and
+    floating point types, and requires an explicit alignment.
+
+    Examples:
+    ```mlir
+    // A volatile store of a float variable.
+    llvm.store volatile %val, %ptr : f32, !llvm.ptr
+
+    // A nontemporal store of a float variable.
+    llvm.store %val, %ptr {nontemporal} : f32, !llvm.ptr
+
+    // An atomic store of an integer variable.
+    llvm.store %val, %ptr atomic monotonic {alignment = 8 : i64}
+        : i64, !llvm.ptr
+    ```
+
+    See the following link for more details:
+    https://llvm.org/docs/LangRef.html#store-instruction
+  }];
   let assemblyFormat = [{
-    (`volatile` $volatile_^)? $value `,` $addr attr-dict `:`
-    custom<StoreType>(type($value), type($addr))
+    (`volatile` $volatile_^)? $value `,` $addr
+    (`atomic` (`syncscope` `(` $syncscope^ `)`)? $ordering^)?
+    attr-dict `:` custom<StoreType>(type($value), type($addr))
   }];
   string llvmBuilder = [{
     auto *inst = builder.CreateStore($value, $addr, $volatile_);
-  }] # setAlignmentCode
+  }] # setOrderingCode
+     # setSyncScopeCode
+     # setAlignmentCode
      # setNonTemporalMetadataCode
      # setAccessGroupsMetadataCode
      # setAliasScopeMetadataCode
@@ -449,12 +480,16 @@
     unsigned alignment = storeInst->getAlign().value();
     $_op = $_builder.create<LLVM::StoreOp>(
         $_location, $value, $addr, alignment, storeInst->isVolatile(),
-        storeInst->hasMetadata(llvm::LLVMContext::MD_nontemporal));
+        storeInst->hasMetadata(llvm::LLVMContext::MD_nontemporal),
+        convertAtomicOrderingFromLLVM(storeInst->getOrdering()),
+        getLLVMSyncScope(storeInst));
   }];
   let builders = [
     OpBuilder<(ins "Value":$value, "Value":$addr,
                CArg<"unsigned", "0">:$alignment,
                CArg<"bool", "false">:$isVolatile,
-               CArg<"bool", "false">:$isNonTemporal)>
+               CArg<"bool", "false">:$isNonTemporal,
+               CArg<"AtomicOrdering", "AtomicOrdering::not_atomic">:$ordering,
+               CArg<"StringRef", "StringRef()">:$syncscope)>
   ];
   let hasVerifier = 1;
 }
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -826,21 +826,35 @@
          *bitWidth == 64;
 }
 
-LogicalResult LoadOp::verify() {
-  if (getOrdering() != AtomicOrdering::not_atomic) {
-    if (!isTypeCompatibleWithAtomicOp(getResult().getType(),
+/// Verifies the attributes and the type of atomic memory access operations.
+template <typename OpTy>
+LogicalResult verifyAtomicMemOp(OpTy memOp, Type valueType,
+                                ArrayRef<AtomicOrdering> unsupportedOrderings) {
+  if (memOp.getOrdering() != AtomicOrdering::not_atomic) {
+    if (!isTypeCompatibleWithAtomicOp(valueType,
                                       /*isPointerTypeAllowed=*/true))
-      return emitOpError("unsupported type ")
-             << getResult().getType() << " for atomic access";
-    if (getOrdering() == AtomicOrdering::release ||
-        getOrdering() == AtomicOrdering::acq_rel)
-      return emitOpError("unsupported ordering '")
-             << stringifyAtomicOrdering(getOrdering()) << "'";
-    if (!getAlignment())
-      return emitOpError("expected alignment for atomic access");
-  } else if (getSyncscope()) {
-    return emitOpError("expected syncscope to be null for non-atomic access");
+      return memOp.emitOpError("unsupported type ")
+             << valueType << " for atomic access";
+    if (llvm::is_contained(unsupportedOrderings, memOp.getOrdering()))
+      return memOp.emitOpError("unsupported ordering '")
+             << stringifyAtomicOrdering(memOp.getOrdering()) << "'";
+    if (!memOp.getAlignment())
+      return memOp.emitOpError("expected alignment for atomic access");
+    return success();
   }
+  if (memOp.getSyncscope())
+    return memOp.emitOpError(
+        "expected syncscope to be null for non-atomic access");
+  return success();
+}
+
+LogicalResult LoadOp::verify() {
+  Type valueType = getResult().getType();
+  if (failed(verifyAtomicMemOp(
+          *this, valueType,
+          {AtomicOrdering::release, AtomicOrdering::acq_rel})))
+    return failure();
+
   return verifyMemOpMetadata(*this);
 }
@@ -914,15 +928,25 @@
 // StoreOp
 //===----------------------------------------------------------------------===//
 
-LogicalResult StoreOp::verify() { return verifyMemOpMetadata(*this); }
+LogicalResult StoreOp::verify() {
+  Type valueType = getValue().getType();
+  if (failed(verifyAtomicMemOp(
+          *this, valueType,
+          {AtomicOrdering::acquire, AtomicOrdering::acq_rel})))
+    return failure();
+
+  return verifyMemOpMetadata(*this);
+}
 
 void StoreOp::build(OpBuilder &builder, OperationState &state, Value value,
                     Value addr, unsigned alignment, bool isVolatile,
-                    bool isNonTemporal) {
+                    bool isNonTemporal, AtomicOrdering ordering,
+                    StringRef syncscope) {
   build(builder, state, value, addr, /*access_groups=*/nullptr,
         /*alias_scopes=*/nullptr, /*noalias_scopes=*/nullptr, /*tbaa=*/nullptr,
         alignment ? builder.getI64IntegerAttr(alignment) : nullptr, isVolatile,
-        isNonTemporal);
+        isNonTemporal, ordering,
+        syncscope.empty() ? nullptr : builder.getStringAttr(syncscope));
 }
 
 /// Parses the StoreOp type either using the typed or opaque pointer format.
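Note (not part of the patch): a minimal sketch of how the extended `StoreOp::build` overload above could be exercised from C++, for instance inside a lowering pass. The helper name `emitReleaseStore` and the surrounding setup are hypothetical; only the argument order (alignment, volatility, nontemporality, ordering, syncscope) follows the builder declared in the TableGen change.

```cpp
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Hypothetical helper: emits a release store of `value` to `addr` using the
// builder overload added by this patch. An explicit alignment is passed
// because the verifier requires one for atomic accesses.
static void emitReleaseStore(OpBuilder &builder, Location loc, Value value,
                             Value addr) {
  builder.create<LLVM::StoreOp>(loc, value, addr, /*alignment=*/4,
                                /*isVolatile=*/false,
                                /*isNonTemporal=*/false,
                                LLVM::AtomicOrdering::release,
                                /*syncscope=*/"singlethread");
}
```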
diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir
--- a/mlir/test/Dialect/LLVMIR/invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/invalid.mlir
@@ -209,6 +209,41 @@
 
 // -----
 
+func.func @store_syncscope(%val : f32, %ptr : !llvm.ptr) {
+  // expected-error@below {{expected syncscope to be null for non-atomic access}}
+  "llvm.store"(%val, %ptr) {syncscope = "singlethread"} : (f32, !llvm.ptr) -> ()
+}
+
+// -----
+
+func.func @store_unsupported_ordering(%val : f32, %ptr : !llvm.ptr) {
+  // expected-error@below {{unsupported ordering 'acquire'}}
+  llvm.store %val, %ptr atomic acquire {alignment = 4 : i64} : f32, !llvm.ptr
+}
+
+// -----
+
+func.func @store_unsupported_type(%val : f80, %ptr : !llvm.ptr) {
+  // expected-error@below {{unsupported type 'f80' for atomic access}}
+  llvm.store %val, %ptr atomic monotonic {alignment = 16 : i64} : f80, !llvm.ptr
+}
+
+// -----
+
+func.func @store_unsupported_type(%val : i1, %ptr : !llvm.ptr) {
+  // expected-error@below {{unsupported type 'i1' for atomic access}}
+  llvm.store %val, %ptr atomic monotonic {alignment = 16 : i64} : i1, !llvm.ptr
+}
+
+// -----
+
+func.func @store_unaligned_atomic(%val : f32, %ptr : !llvm.ptr) {
+  // expected-error@below {{expected alignment for atomic access}}
+  llvm.store %val, %ptr atomic monotonic : f32, !llvm.ptr
+}
+
+// -----
+
 func.func @invalid_call() {
   // expected-error@+1 {{'llvm.call' op must have either a `callee` attribute or at least an operand}}
   "llvm.call"() : () -> ()
diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -348,6 +348,15 @@
   llvm.return
 }
 
+// CHECK-LABEL: @atomic_store
+func.func @atomic_store(%val : f32, %ptr : !llvm.ptr) {
+  // CHECK: llvm.store %{{.*}}, %{{.*}} atomic monotonic {alignment = 4 : i64} : f32, !llvm.ptr
+  llvm.store %val, %ptr atomic monotonic {alignment = 4 : i64} : f32, !llvm.ptr
+  // CHECK: llvm.store volatile %{{.*}}, %{{.*}} atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : f32, !llvm.ptr
+  llvm.store volatile %val, %ptr atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : f32, !llvm.ptr
+  llvm.return
+}
+
 // CHECK-LABEL: @atomicrmw
 func.func @atomicrmw(%ptr : !llvm.ptr, %val : f32) {
   // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} monotonic : !llvm.ptr, f32
diff --git a/mlir/test/Target/LLVMIR/Import/instructions.ll b/mlir/test/Target/LLVMIR/Import/instructions.ll
--- a/mlir/test/Target/LLVMIR/Import/instructions.ll
+++ b/mlir/test/Target/LLVMIR/Import/instructions.ll
@@ -368,13 +368,18 @@
 
 ; // -----
 
-; CHECK-LABEL: @atomic_load
+; CHECK-LABEL: @atomic_load_store
 ; CHECK-SAME:  %[[PTR:[a-zA-Z0-9]+]]
-define void @atomic_load(ptr %ptr) {
+define void @atomic_load_store(ptr %ptr) {
   ; CHECK:  %[[V1:[0-9]+]] = llvm.load %[[PTR]] atomic acquire {alignment = 8 : i64} : !llvm.ptr -> f64
   ; CHECK:  %[[V2:[0-9]+]] = llvm.load volatile %[[PTR]] atomic syncscope("singlethreaded") acquire {alignment = 16 : i64} : !llvm.ptr -> f64
   %1 = load atomic double, ptr %ptr acquire, align 8
   %2 = load atomic volatile double, ptr %ptr syncscope("singlethreaded") acquire, align 16
+
+  ; CHECK:  llvm.store %[[V1]], %[[PTR]] atomic release {alignment = 8 : i64} : f64, !llvm.ptr
+  ; CHECK:  llvm.store volatile %[[V2]], %[[PTR]] atomic syncscope("singlethreaded") release {alignment = 16 : i64} : f64, !llvm.ptr
+  store atomic double %1, ptr %ptr release, align 8
+  store atomic volatile double %2, ptr %ptr syncscope("singlethreaded") release, align 16
   ret void
 }
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -1780,13 +1780,20 @@
 
 // -----
 
-llvm.func @atomic_load(%ptr : !llvm.ptr) {
+llvm.func @atomic_store_and_load(%ptr : !llvm.ptr) {
   // CHECK: load atomic
-  // CHECK-SAME:  monotonic, align 4
-  %1 = llvm.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  // CHECK-SAME:  acquire, align 4
+  %1 = llvm.load %ptr atomic acquire {alignment = 4 : i64} : !llvm.ptr -> f32
   // CHECK: load atomic
-  // CHECK-SAME:  syncscope("singlethread") monotonic, align 4
-  %2 = llvm.load %ptr atomic syncscope("singlethread") monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  // CHECK-SAME:  syncscope("singlethread") acquire, align 4
+  %2 = llvm.load %ptr atomic syncscope("singlethread") acquire {alignment = 4 : i64} : !llvm.ptr -> f32
+
+  // CHECK: store atomic
+  // CHECK-SAME:  release, align 4
+  llvm.store %1, %ptr atomic release {alignment = 4 : i64} : f32, !llvm.ptr
+  // CHECK: store atomic
+  // CHECK-SAME:  syncscope("singlethread") release, align 4
+  llvm.store %2, %ptr atomic syncscope("singlethread") release {alignment = 4 : i64} : f32, !llvm.ptr
   llvm.return
 }
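Note (not part of the patch): as a quick reference, the ordering rules the new verifier enforces can be summarized in a small predicate. This is a distillation of `verifyAtomicMemOp` above for readers, not code from the patch; the helper name `isSupportedAccessOrdering` and the `isStore` flag distinguishing `llvm.store` from `llvm.load` are hypothetical.

```cpp
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"

using mlir::LLVM::AtomicOrdering;

// Summarizes which orderings verifyAtomicMemOp accepts: atomic loads reject
// release/acq_rel, atomic stores reject acquire/acq_rel, and the remaining
// orderings (not_atomic, unordered, monotonic, seq_cst) pass for both.
static bool isSupportedAccessOrdering(AtomicOrdering ordering, bool isStore) {
  switch (ordering) {
  case AtomicOrdering::acquire:
    return !isStore; // acquire has no meaning on a store
  case AtomicOrdering::release:
    return isStore; // release has no meaning on a load
  case AtomicOrdering::acq_rel:
    return false; // reserved for read-modify-write operations
  default:
    return true;
  }
}
```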