diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -1666,7 +1666,8 @@
 
 def LLVM_AtomicRMWType : AnyTypeOf<[LLVM_AnyFloat, AnyInteger]>;
 
-// FIXME: Need to add alignment attribute to MLIR atomicrmw operation.
+// FIXME: Need to add alignment and syncscope attributes to the MLIR atomicrmw
+// operation.
 def LLVM_AtomicRMWOp : LLVM_Op<"atomicrmw"> {
   let arguments = (ins AtomicBinOp:$bin_op,
                    LLVM_PointerTo<LLVM_AtomicRMWType>:$ptr,
diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -631,8 +631,8 @@
       INST(Load, Load), INST(Store, Store),
       INST(Fence, Fence),
-      // FIXME: atomiccmpxchg
-      // FIXME: atomicrmw
+      // AtomicCmpXchg is handled specially.
+      // AtomicRMW is handled specially.
       // Getelementptr is handled specially.
       INST(Trunc, Trunc), INST(ZExt, ZExt),
@@ -761,6 +761,39 @@
   llvm_unreachable("incorrect atomic ordering");
 }
 
+static AtomicBinOp getLLVMAtomicBinOp(llvm::AtomicRMWInst::BinOp binOp) {
+  switch (binOp) {
+  case llvm::AtomicRMWInst::Xchg:
+    return LLVM::AtomicBinOp::xchg;
+  case llvm::AtomicRMWInst::Add:
+    return LLVM::AtomicBinOp::add;
+  case llvm::AtomicRMWInst::Sub:
+    return LLVM::AtomicBinOp::sub;
+  case llvm::AtomicRMWInst::And:
+    return LLVM::AtomicBinOp::_and;
+  case llvm::AtomicRMWInst::Nand:
+    return LLVM::AtomicBinOp::nand;
+  case llvm::AtomicRMWInst::Or:
+    return LLVM::AtomicBinOp::_or;
+  case llvm::AtomicRMWInst::Xor:
+    return LLVM::AtomicBinOp::_xor;
+  case llvm::AtomicRMWInst::Max:
+    return LLVM::AtomicBinOp::max;
+  case llvm::AtomicRMWInst::Min:
+    return LLVM::AtomicBinOp::min;
+  case llvm::AtomicRMWInst::UMax:
+    return LLVM::AtomicBinOp::umax;
+  case llvm::AtomicRMWInst::UMin:
+    return LLVM::AtomicBinOp::umin;
+  case llvm::AtomicRMWInst::FAdd:
+    return LLVM::AtomicBinOp::fadd;
+  case llvm::AtomicRMWInst::FSub:
+    return LLVM::AtomicBinOp::fsub;
+  default:
+    llvm_unreachable("unsupported atomic binary operation");
+  }
+}
+
 // `br` branches to `target`. Return the branch arguments to `br`, in the
 // same order of the PHIs in `target`.
 LogicalResult
@@ -1062,6 +1095,45 @@
                                        syncscope);
     return success();
   }
+  case llvm::Instruction::AtomicRMW: {
+    auto *atomicInst = cast<llvm::AtomicRMWInst>(inst);
+    Value ptr = processValue(atomicInst->getPointerOperand());
+    Value val = processValue(atomicInst->getValOperand());
+    if (!ptr || !val)
+      return failure();
+
+    LLVM::AtomicBinOp binOp = getLLVMAtomicBinOp(atomicInst->getOperation());
+    LLVM::AtomicOrdering ordering =
+        getLLVMAtomicOrdering(atomicInst->getOrdering());
+
+    Type type = processType(inst->getType());
+    if (!type)
+      return failure();
+
+    instMap[inst] = b.create<AtomicRMWOp>(loc, type, binOp, ptr, val, ordering);
+    return success();
+  }
+  case llvm::Instruction::AtomicCmpXchg: {
+    auto *cmpXchgInst = cast<llvm::AtomicCmpXchgInst>(inst);
+    Value ptr = processValue(cmpXchgInst->getPointerOperand());
+    Value cmpVal = processValue(cmpXchgInst->getCompareOperand());
+    Value newVal = processValue(cmpXchgInst->getNewValOperand());
+    if (!ptr || !cmpVal || !newVal)
+      return failure();
+
+    LLVM::AtomicOrdering ordering =
+        getLLVMAtomicOrdering(cmpXchgInst->getSuccessOrdering());
+    LLVM::AtomicOrdering failOrdering =
+        getLLVMAtomicOrdering(cmpXchgInst->getFailureOrdering());
+
+    Type type = processType(inst->getType());
+    if (!type)
+      return failure();
+
+    instMap[inst] = b.create<AtomicCmpXchgOp>(loc, type, ptr, cmpVal, newVal,
+                                              ordering, failOrdering);
+    return success();
+  }
   case llvm::Instruction::GetElementPtr: {
     // FIXME: Support inbounds GEPs.
     llvm::GetElementPtrInst *gep = cast<llvm::GetElementPtrInst>(inst);
diff --git a/mlir/test/Target/LLVMIR/Import/basic.ll b/mlir/test/Target/LLVMIR/Import/basic.ll
--- a/mlir/test/Target/LLVMIR/Import/basic.ll
+++ b/mlir/test/Target/LLVMIR/Import/basic.ll
@@ -668,3 +668,46 @@
 ; CHECK: llvm.return
   ret void
 }
+
+; CHECK-LABEL: llvm.func @atomic_rmw
+define void @atomic_rmw(i32* %ptr0, i32 %v, float* %ptr1, float %f) {
+  ; CHECK: llvm.atomicrmw add %arg0, %arg1 acquire : i32
+  %1 = atomicrmw add i32* %ptr0, i32 %v acquire
+  ; CHECK: llvm.atomicrmw add %arg0, %arg1 release : i32
+  %2 = atomicrmw add i32* %ptr0, i32 %v release
+
+  ; CHECK: llvm.atomicrmw sub %arg0, %arg1 acquire : i32
+  %3 = atomicrmw sub i32* %ptr0, i32 %v acquire
+  ; CHECK: llvm.atomicrmw xchg %arg0, %arg1 acquire : i32
+  %4 = atomicrmw xchg i32* %ptr0, i32 %v acquire
+  ; CHECK: llvm.atomicrmw _and %arg0, %arg1 acquire : i32
+  %5 = atomicrmw and i32* %ptr0, i32 %v acquire
+  ; CHECK: llvm.atomicrmw nand %arg0, %arg1 acquire : i32
+  %6 = atomicrmw nand i32* %ptr0, i32 %v acquire
+  ; CHECK: llvm.atomicrmw _or %arg0, %arg1 acquire : i32
+  %7 = atomicrmw or i32* %ptr0, i32 %v acquire
+  ; CHECK: llvm.atomicrmw _xor %arg0, %arg1 acquire : i32
+  %8 = atomicrmw xor i32* %ptr0, i32 %v acquire
+  ; CHECK: llvm.atomicrmw max %arg0, %arg1 acquire : i32
+  %9 = atomicrmw max i32* %ptr0, i32 %v acquire
+  ; CHECK: llvm.atomicrmw min %arg0, %arg1 acquire : i32
+  %10 = atomicrmw min i32* %ptr0, i32 %v acquire
+  ; CHECK: llvm.atomicrmw umax %arg0, %arg1 acquire : i32
+  %11 = atomicrmw umax i32* %ptr0, i32 %v acquire
+  ; CHECK: llvm.atomicrmw umin %arg0, %arg1 acquire : i32
+  %12 = atomicrmw umin i32* %ptr0, i32 %v acquire
+  ; CHECK: llvm.atomicrmw fadd %arg2, %arg3 acquire : f32
+  %13 = atomicrmw fadd float* %ptr1, float %f acquire
+  ; CHECK: llvm.atomicrmw fsub %arg2, %arg3 acquire : f32
+  %14 = atomicrmw fsub float* %ptr1, float %f acquire
+  ret void
+}
+
+; CHECK-LABEL: llvm.func @atomic_cmpxchg
+define void @atomic_cmpxchg(i32* %ptr0, i32 %v, i32 %c) {
+  ; CHECK: llvm.cmpxchg %arg0, %arg2, %arg1 seq_cst seq_cst : i32
+  %1 = cmpxchg i32* %ptr0, i32 %c, i32 %v seq_cst seq_cst
+  ; CHECK: llvm.cmpxchg %arg0, %arg2, %arg1 monotonic seq_cst : i32
+  %2 = cmpxchg i32* %ptr0, i32 %c, i32 %v monotonic seq_cst
+  ret void
+}