Index: mlir/include/mlir/Dialect/LLVMIR/CMakeLists.txt =================================================================== --- mlir/include/mlir/Dialect/LLVMIR/CMakeLists.txt +++ mlir/include/mlir/Dialect/LLVMIR/CMakeLists.txt @@ -8,6 +8,8 @@ mlir_tablegen(LLVMOpsEnums.cpp.inc -gen-enum-defs) add_public_tablegen_target(MLIRLLVMOpsIncGen) +add_mlir_interface(LLVMOpsInterfaces) + set(LLVM_TARGET_DEFINITIONS LLVMOps.td) mlir_tablegen(LLVMConversions.inc -gen-llvmir-conversions) mlir_tablegen(LLVMConversionEnumsToLLVM.inc -gen-enum-to-llvmir-conversions) Index: mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h =================================================================== --- mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h +++ mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h @@ -29,6 +29,7 @@ #include "llvm/IR/Type.h" #include "mlir/Dialect/LLVMIR/LLVMOpsEnums.h.inc" +#include "mlir/Dialect/LLVMIR/LLVMOpsInterfaces.h.inc" namespace llvm { class Type; @@ -48,6 +49,20 @@ struct LLVMDialectImpl; } // namespace detail +class FMFAttr + : public Attribute::AttrBase { +public: + using Base::Base; + + static FMFAttr get(FastmathFlags flags, MLIRContext *context); + + FastmathFlags getFlags() const; + + void print(DialectAsmPrinter &p) const; + static Attribute parse(StringRef kind, DialectAsmParser &parser); +}; + } // namespace LLVM } // namespace mlir Index: mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td =================================================================== --- mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td +++ mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td @@ -14,10 +14,39 @@ #define LLVMIR_OPS include "mlir/Dialect/LLVMIR/LLVMOpBase.td" +include "mlir/Dialect/LLVMIR/LLVMOpsInterfaces.td" include "mlir/IR/SymbolInterfaces.td" include "mlir/Interfaces/ControlFlowInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" +def FMFnnan : BitEnumAttrCase<"nnan", 0x1>; +def FMFninf : BitEnumAttrCase<"ninf", 0x2>; +def FMFnsz : BitEnumAttrCase<"nsz", 0x4>; +def FMFarcp : 
BitEnumAttrCase<"arcp", 0x8>; +def FMFcontract : BitEnumAttrCase<"contract", 0x10>; +def FMFafn : BitEnumAttrCase<"afn", 0x20>; +def FMFreassoc : BitEnumAttrCase<"reassoc", 0x40>; +def FMFfast : BitEnumAttrCase<"fast", 0x80>; + +def FastmathFlags : BitEnumAttr< + "FastmathFlags", + "fastmath flags", + [FMFnnan, FMFninf, FMFnsz, FMFarcp, FMFcontract, FMFafn, FMFreassoc, FMFfast + ]> { + let cppNamespace = "::mlir::LLVM"; +} + +def LLVM_FMFAttr : DialectAttr< + LLVM_Dialect, + CPred<"$_self.isa<::mlir::LLVM::FMFAttr>()">, + "LLVM fastmath flags"> { + let storageType = "::mlir::LLVM::FMFAttr"; + let returnType = "::mlir::LLVM::FastmathFlags"; + let convertFromStorage = "$_self.getFlags()"; + let constBuilderCall = + "::mlir::LLVM::FMFAttr::get($0, $_builder.getContext())"; +} + class LLVM_Builder { string llvmBuilder = builder; } @@ -92,26 +121,32 @@ LLVM_OneResultOp, LLVM_Builder<"$res = builder." # builderFunc # "($lhs, $rhs);"> { - let arguments = (ins LLVM_ScalarOrVectorOf:$lhs, - LLVM_ScalarOrVectorOf:$rhs); + dag opArgs = (ins LLVM_ScalarOrVectorOf:$lhs, + LLVM_ScalarOrVectorOf:$rhs); let parser = [{ return impl::parseOneResultSameOperandTypeOp(parser, result); }]; let printer = [{ mlir::impl::printOneResultOp(this->getOperation(), p); }]; } class LLVM_IntArithmeticOp traits = []> : - LLVM_ArithmeticOpBase; + LLVM_ArithmeticOpBase { + let arguments = opArgs; +} class LLVM_FloatArithmeticOp traits = []> : - LLVM_ArithmeticOpBase; + LLVM_ArithmeticOpBase { + dag fmfArg = (ins DefaultValuedAttr:$fmf); + let arguments = !con(opArgs, fmfArg); +} // Class for arithmetic unary operations. -class LLVM_UnaryArithmeticOp traits = []> : LLVM_OneResultOp, LLVM_Builder<"$res = builder." 
# builderFunc # "($operand);"> { - let arguments = (ins type:$operand); + let arguments = (ins type:$operand, + DefaultValuedAttr:$fmf); let parser = [{ return impl::parseOneResultSameOperandTypeOp(parser, result); }]; let printer = [{ mlir::impl::printOneResultOp(this->getOperation(), p); }]; @@ -200,31 +235,41 @@ } // Other integer operations. -def LLVM_FCmpOp : LLVM_OneResultOp<"fcmp", [NoSideEffect]> { +def LLVM_FCmpOp : LLVM_OneResultOp<"fcmp", [ + NoSideEffect, DeclareOpInterfaceMethods]> { let arguments = (ins FCmpPredicate:$predicate, LLVM_ScalarOrVectorOf:$lhs, - LLVM_ScalarOrVectorOf:$rhs); + LLVM_ScalarOrVectorOf:$rhs, + DefaultValuedAttr:$fmf); let llvmBuilder = [{ $res = builder.CreateFCmp(getLLVMCmpPredicate($predicate), $lhs, $rhs); }]; let builders = [ - OpBuilderDAG<(ins "FCmpPredicate":$predicate, "Value":$lhs, "Value":$rhs), + OpBuilderDAG<(ins "FCmpPredicate":$predicate, "Value":$lhs, "Value":$rhs, + CArg<"FastmathFlags", "{}">:$fmf), [{ build($_builder, $_state, LLVMType::getInt1Ty(lhs.getType().getContext()), - $_builder.getI64IntegerAttr(static_cast(predicate)), lhs, rhs); + $_builder.getI64IntegerAttr(static_cast(predicate)), lhs, rhs, + ::mlir::LLVM::FMFAttr::get(fmf, $_builder.getContext())); }]>]; let parser = [{ return parseCmpOp(parser, result); }]; let printer = [{ printFCmpOp(p, *this); }]; } // Floating point binary operations. 
-def LLVM_FAddOp : LLVM_FloatArithmeticOp<"fadd", "CreateFAdd">; -def LLVM_FSubOp : LLVM_FloatArithmeticOp<"fsub", "CreateFSub">; -def LLVM_FMulOp : LLVM_FloatArithmeticOp<"fmul", "CreateFMul">; -def LLVM_FDivOp : LLVM_FloatArithmeticOp<"fdiv", "CreateFDiv">; -def LLVM_FRemOp : LLVM_FloatArithmeticOp<"frem", "CreateFRem">; -def LLVM_FNegOp : LLVM_UnaryArithmeticOp, - "fneg", "CreateFNeg">; +def LLVM_FAddOp : LLVM_FloatArithmeticOp<"fadd", "CreateFAdd", [ + DeclareOpInterfaceMethods]>; +def LLVM_FSubOp : LLVM_FloatArithmeticOp<"fsub", "CreateFSub", [ + DeclareOpInterfaceMethods]>; +def LLVM_FMulOp : LLVM_FloatArithmeticOp<"fmul", "CreateFMul", [ + DeclareOpInterfaceMethods]>; +def LLVM_FDivOp : LLVM_FloatArithmeticOp<"fdiv", "CreateFDiv", [ + DeclareOpInterfaceMethods]>; +def LLVM_FRemOp : LLVM_FloatArithmeticOp<"frem", "CreateFRem", [ + DeclareOpInterfaceMethods]>; +def LLVM_FNegOp : LLVM_UnaryFloatArithmeticOp< + LLVM_ScalarOrVectorOf, "fneg", "CreateFNeg", [ + DeclareOpInterfaceMethods]>; // Common code definition that is used to verify and set the alignment attribute // of LLVM ops that accept such an attribute. @@ -405,10 +450,12 @@ let printer = [{ printLandingpadOp(p, *this); }]; } -def LLVM_CallOp : LLVM_Op<"call">, +def LLVM_CallOp : LLVM_Op<"call", + [DeclareOpInterfaceMethods]>, Results<(outs Variadic)> { let arguments = (ins OptionalAttr:$callee, - Variadic); + Variadic, + DefaultValuedAttr:$fmf); let builders = [ OpBuilderDAG<(ins "LLVMFuncOp":$func, "ValueRange":$operands, CArg<"ArrayRef", "{}">:$attributes), Index: mlir/include/mlir/Dialect/LLVMIR/LLVMOpsInterfaces.td =================================================================== --- /dev/null +++ mlir/include/mlir/Dialect/LLVMIR/LLVMOpsInterfaces.td @@ -0,0 +1,30 @@ +//===-- LLVMOpsInterfaces.td - LLVM IR dialect op interfaces -*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is the LLVM IR interfaces definition file. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_OPS_INTERFACES +#define LLVM_OPS_INTERFACES + +include "mlir/IR/OpBase.td" + +def FastmathFlagsInterface : OpInterface<"FastmathFlagsInterface"> { + let description = [{ + An interface for operations that carry LLVM dialect fastmath flags. + }]; + + let cppNamespace = "::mlir::LLVM"; + + let methods = [ + InterfaceMethod<"Get fastmath flags", "::mlir::LLVM::FastmathFlags", "fmf">, + ]; +} + +#endif // LLVM_OPS_INTERFACES Index: mlir/lib/Conversion/SPIRVToLLVM/ConvertSPIRVToLLVM.cpp =================================================================== --- mlir/lib/Conversion/SPIRVToLLVM/ConvertSPIRVToLLVM.cpp +++ mlir/lib/Conversion/SPIRVToLLVM/ConvertSPIRVToLLVM.cpp @@ -833,7 +833,8 @@ rewriter.template replaceOpWithNewOp( operation, dstType, rewriter.getI64IntegerAttr(static_cast(predicate)), - operation.operand1(), operation.operand2()); + operation.operand1(), operation.operand2(), + LLVM::FMFAttr::get({}, operation.getContext())); return success(); } }; Index: mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp =================================================================== --- mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp +++ mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp @@ -1844,10 +1844,11 @@ auto result = ComplexStructBuilder::undef(rewriter, loc, structType); // Emit IR to add complex numbers.
+ auto fmf = LLVM::FMFAttr::get({}, op.getContext()); Value real = - rewriter.create(loc, arg.lhs.real(), arg.rhs.real()); + rewriter.create(loc, arg.lhs.real(), arg.rhs.real(), fmf); Value imag = - rewriter.create(loc, arg.lhs.imag(), arg.rhs.imag()); + rewriter.create(loc, arg.lhs.imag(), arg.rhs.imag(), fmf); result.setReal(rewriter, loc, real); result.setImaginary(rewriter, loc, imag); @@ -1871,10 +1872,11 @@ auto result = ComplexStructBuilder::undef(rewriter, loc, structType); // Emit IR to substract complex numbers. + auto fmf = LLVM::FMFAttr::get({}, op.getContext()); Value real = - rewriter.create(loc, arg.lhs.real(), arg.rhs.real()); + rewriter.create(loc, arg.lhs.real(), arg.rhs.real(), fmf); Value imag = - rewriter.create(loc, arg.lhs.imag(), arg.rhs.imag()); + rewriter.create(loc, arg.lhs.imag(), arg.rhs.imag(), fmf); result.setReal(rewriter, loc, real); result.setImaginary(rewriter, loc, imag); @@ -3169,11 +3171,12 @@ ConversionPatternRewriter &rewriter) const override { CmpFOpAdaptor transformed(operands); + auto fmf = LLVM::FMFAttr::get({}, cmpfOp.getContext()); rewriter.replaceOpWithNewOp( cmpfOp, typeConverter->convertType(cmpfOp.getResult().getType()), rewriter.getI64IntegerAttr(static_cast( convertCmpPredicate(cmpfOp.getPredicate()))), - transformed.lhs(), transformed.rhs()); + transformed.lhs(), transformed.rhs(), fmf); return success(); } Index: mlir/lib/Dialect/LLVMIR/CMakeLists.txt =================================================================== --- mlir/lib/Dialect/LLVMIR/CMakeLists.txt +++ mlir/lib/Dialect/LLVMIR/CMakeLists.txt @@ -10,6 +10,7 @@ DEPENDS MLIRLLVMOpsIncGen + MLIRLLVMOpsInterfacesIncGen MLIRLLVMConversionsIncGen MLIROpenMPOpsIncGen intrinsics_gen Index: mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp =================================================================== --- mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -11,6 +11,7 @@ // 
//===----------------------------------------------------------------------===// #include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "../../IR/AttributeDetail.h" // TODO: relative include of an MLIR-internal header; replace with a dedicated attribute storage class. #include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" @@ -36,6 +37,7 @@ static constexpr const char kNonTemporalAttrName[] = "nontemporal"; #include "mlir/Dialect/LLVMIR/LLVMOpsEnums.cpp.inc" +#include "mlir/Dialect/LLVMIR/LLVMOpsInterfaces.cpp.inc" //===----------------------------------------------------------------------===// // Printing/parsing for LLVM::CmpOp. @@ -1757,6 +1759,8 @@ //===----------------------------------------------------------------------===// void LLVMDialect::initialize() { + addAttributes(); + // clang-format off addTypeshasTrait() && op->hasTrait(); } + +FMFAttr FMFAttr::get(FastmathFlags flags, MLIRContext *context) { + auto type = + IntegerType::get(64, IntegerType::SignednessSemantics::Unsigned, context); + APInt value(64, static_cast(flags)); + return Base::get(context, type, value); +} + +FastmathFlags FMFAttr::getFlags() const { + return static_cast(getImpl()->getValue().getZExtValue()); +} + +static constexpr const FastmathFlags FastmathFlagsList[] = { + // clang-format off + FastmathFlags::nnan, + FastmathFlags::ninf, + FastmathFlags::nsz, + FastmathFlags::arcp, + FastmathFlags::contract, + FastmathFlags::afn, + FastmathFlags::reassoc, + FastmathFlags::fast, + // clang-format on +}; + +void FMFAttr::print(DialectAsmPrinter &printer) const { + printer << "fmfattr<"; + auto flags = llvm::make_filter_range(FastmathFlagsList, [&](auto flag) { + return bitEnumContains(getFlags(), flag); + }); + llvm::interleaveComma(flags, printer, + [&](auto flag) { printer << stringifyEnum(flag); }); + printer << ">"; +} + +Attribute FMFAttr::parse(StringRef kind, DialectAsmParser &parser) { + if (kind != "fmfattr") + return {}; + + if (parser.parseLess()) + return {}; + + FastmathFlags flags = {}; + if
(parser.parseOptionalGreater()) { + do { + StringRef elemName; + if (parser.parseKeyword(&elemName)) + return {}; + + auto elem = symbolizeFastmathFlags(elemName); + if (!elem) + return {}; + + flags = flags | *elem; + } while (succeeded(parser.parseOptionalComma())); + + if (parser.parseGreater()) + return {}; + } + + return FMFAttr::get(flags, parser.getBuilder().getContext()); +} + +Attribute LLVMDialect::parseAttribute(DialectAsmParser &parser, + Type type) const { + if (type) { + parser.emitError(parser.getNameLoc(), "unexpected type"); + return {}; + } + StringRef attrKind; + if (parser.parseKeyword(&attrKind)) + return {}; + + return FMFAttr::parse(attrKind, parser); +} + +void LLVMDialect::printAttribute(Attribute attr, DialectAsmPrinter &os) const { + if (auto fmf = attr.dyn_cast()) + fmf.print(os); + else + llvm_unreachable("Unknown attribute type"); +} Index: mlir/lib/Target/LLVMIR/ModuleTranslation.cpp =================================================================== --- mlir/lib/Target/LLVMIR/ModuleTranslation.cpp +++ mlir/lib/Target/LLVMIR/ModuleTranslation.cpp @@ -570,6 +570,28 @@ }); } +static llvm::FastMathFlags getFastmathFlags(FastmathFlagsInterface &op) { + using llvmFMF = llvm::FastMathFlags; + using FuncT = void (llvmFMF::*)(bool); + const std::pair handlers[] = { + // clang-format off + {FastmathFlags::nnan, &llvmFMF::setNoNaNs}, + {FastmathFlags::ninf, &llvmFMF::setNoInfs}, + {FastmathFlags::nsz, &llvmFMF::setNoSignedZeros}, + {FastmathFlags::arcp, &llvmFMF::setAllowReciprocal}, + {FastmathFlags::contract, &llvmFMF::setAllowContract}, + {FastmathFlags::afn, &llvmFMF::setApproxFunc}, + {FastmathFlags::reassoc, &llvmFMF::setAllowReassoc}, + {FastmathFlags::fast, &llvmFMF::setFast}, + // clang-format on + }; + llvm::FastMathFlags ret; + for (auto it : handlers) + if (bitEnumContains(op.fmf(), it.first)) + (ret.*(it.second))(true); + return ret; +} + /// Given a single MLIR operation, create the corresponding LLVM IR operation /// using the 
`builder`. LLVM IR Builder does not have a generic interface so /// this has to be a long chain of `if`s calling different functions with a @@ -584,6 +606,10 @@ return position; }; + llvm::IRBuilder<>::FastMathFlagGuard fmfGuard(builder); + if (auto fmf = dyn_cast(opInst)) + builder.setFastMathFlags(getFastmathFlags(fmf)); + #include "mlir/Dialect/LLVMIR/LLVMConversions.inc" // Emit function calls. If the "callee" attribute is present, this is a Index: mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir =================================================================== --- mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir +++ mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir @@ -148,9 +148,9 @@ // CHECK-LABEL: func @gpu_fabs func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.absf %arg_f32 : f32 - // CHECK: llvm.call @__nv_fabsf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__nv_fabsf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.absf %arg_f64 : f64 - // CHECK: llvm.call @__nv_fabs(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_fabs(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -163,9 +163,9 @@ // CHECK-LABEL: func @gpu_ceil func @gpu_ceil(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.ceilf %arg_f32 : f32 - // CHECK: llvm.call @__nv_ceilf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__nv_ceilf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.ceilf %arg_f64 : f64 - // CHECK: llvm.call @__nv_ceil(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_ceil(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -178,9 +178,9 @@ // CHECK-LABEL: func @gpu_floor func @gpu_floor(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.floorf %arg_f32 : f32 - // CHECK: llvm.call @__nv_floorf(%{{.*}}) : (!llvm.float) -> !llvm.float + // 
CHECK: llvm.call @__nv_floorf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.floorf %arg_f64 : f64 - // CHECK: llvm.call @__nv_floor(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_floor(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -193,9 +193,9 @@ // CHECK-LABEL: func @gpu_cos func @gpu_cos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.cos %arg_f32 : f32 - // CHECK: llvm.call @__nv_cosf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__nv_cosf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.cos %arg_f64 : f64 - // CHECK: llvm.call @__nv_cos(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_cos(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -207,9 +207,9 @@ // CHECK-LABEL: func @gpu_exp func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.exp %arg_f32 : f32 - // CHECK: llvm.call @__nv_expf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__nv_expf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.exp %arg_f64 : f64 - // CHECK: llvm.call @__nv_exp(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_exp(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -222,9 +222,9 @@ // CHECK-LABEL: func @gpu_log func @gpu_log(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.log %arg_f32 : f32 - // CHECK: llvm.call @__nv_logf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__nv_logf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.log %arg_f64 : f64 - // CHECK: llvm.call @__nv_log(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_log(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -237,9 +237,9 @@ // CHECK-LABEL: func @gpu_log10 func 
@gpu_log10(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.log10 %arg_f32 : f32 - // CHECK: llvm.call @__nv_log10f(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__nv_log10f(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.log10 %arg_f64 : f64 - // CHECK: llvm.call @__nv_log10(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_log10(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -252,9 +252,9 @@ // CHECK-LABEL: func @gpu_log2 func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.log2 %arg_f32 : f32 - // CHECK: llvm.call @__nv_log2f(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__nv_log2f(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.log2 %arg_f64 : f64 - // CHECK: llvm.call @__nv_log2(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_log2(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -267,9 +267,9 @@ // CHECK-LABEL: func @gpu_sin func @gpu_sin(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.sin %arg_f32 : f32 - // CHECK: llvm.call @__nv_sinf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__nv_sinf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.sin %arg_f64 : f64 - // CHECK: llvm.call @__nv_sin(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_sin(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -283,12 +283,12 @@ func @gpu_tanh(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { %result16 = std.tanh %arg_f16 : f16 // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float - // CHECK-NEXT: llvm.call @__nv_tanhf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK-NEXT: llvm.call @__nv_tanhf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half 
%result32 = std.tanh %arg_f32 : f32 - // CHECK: llvm.call @__nv_tanhf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__nv_tanhf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.tanh %arg_f64 : f64 - // CHECK: llvm.call @__nv_tanh(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_tanh(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result16, %result32, %result64 : f16, f32, f64 } } @@ -303,12 +303,12 @@ -> (f16, f32, f64) { %result16 = std.rsqrt %arg_f16 : f16 // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float - // CHECK-NEXT: llvm.call @__nv_rsqrtf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK-NEXT: llvm.call @__nv_rsqrtf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half %result32 = std.rsqrt %arg_f32 : f32 - // CHECK: llvm.call @__nv_rsqrtf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__nv_rsqrtf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.rsqrt %arg_f64 : f64 - // CHECK: llvm.call @__nv_rsqrt(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_rsqrt(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result16, %result32, %result64 : f16, f32, f64 } } @@ -323,12 +323,12 @@ -> (f16, f32, f64) { %result16 = std.sqrt %arg_f16 : f16 // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float - // CHECK-NEXT: llvm.call @__nv_sqrtf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK-NEXT: llvm.call @__nv_sqrtf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half %result32 = std.sqrt %arg_f32 : f32 - // CHECK: llvm.call @__nv_sqrtf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__nv_sqrtf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.sqrt %arg_f64 : f64 - // CHECK: llvm.call @__nv_sqrt(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_sqrt(%{{.*}}) {{.*}}: (!llvm.double) -> 
!llvm.double std.return %result16, %result32, %result64 : f16, f32, f64 } } @@ -344,9 +344,9 @@ // CHECK-LABEL: func @gpu_exp func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.exp %arg_f32 : f32 - // CHECK: llvm.call @__nv_expf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__nv_expf(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.exp %arg_f64 : f64 - // CHECK: llvm.call @__nv_exp(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__nv_exp(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } "test.finish" () : () -> () Index: mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir =================================================================== --- mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir +++ mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir @@ -90,9 +90,9 @@ // CHECK-LABEL: func @gpu_fabs func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.absf %arg_f32 : f32 - // CHECK: llvm.call @__ocml_fabs_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_fabs_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.absf %arg_f64 : f64 - // CHECK: llvm.call @__ocml_fabs_f64(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__ocml_fabs_f64(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -105,9 +105,9 @@ // CHECK-LABEL: func @gpu_ceil func @gpu_ceil(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.ceilf %arg_f32 : f32 - // CHECK: llvm.call @__ocml_ceil_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_ceil_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.ceilf %arg_f64 : f64 - // CHECK: llvm.call @__ocml_ceil_f64(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__ocml_ceil_f64(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } 
@@ -120,9 +120,9 @@ // CHECK-LABEL: func @gpu_floor func @gpu_floor(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.floorf %arg_f32 : f32 - // CHECK: llvm.call @__ocml_floor_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_floor_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.floorf %arg_f64 : f64 - // CHECK: llvm.call @__ocml_floor_f64(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__ocml_floor_f64(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -135,9 +135,9 @@ // CHECK-LABEL: func @gpu_cos func @gpu_cos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.cos %arg_f32 : f32 - // CHECK: llvm.call @__ocml_cos_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_cos_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.cos %arg_f64 : f64 - // CHECK: llvm.call @__ocml_cos_f64(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__ocml_cos_f64(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -149,11 +149,11 @@ // CHECK-LABEL: func @gpu_exp func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %exp_f32 = std.exp %arg_f32 : f32 - // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result32 = std.exp %exp_f32 : f32 - // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.exp %arg_f64 : f64 - // CHECK: llvm.call @__ocml_exp_f64(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__ocml_exp_f64(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -170,11 +170,11 @@ // CHECK-LABEL: func @gpu_exp func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) 
{ %exp_f32 = std.exp %arg_f32 : f32 - // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result32 = std.exp %exp_f32 : f32 - // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.exp %arg_f64 : f64 - // CHECK: llvm.call @__ocml_exp_f64(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__ocml_exp_f64(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } "test.finish" () : () -> () @@ -189,9 +189,9 @@ // CHECK-LABEL: func @gpu_log func @gpu_log(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.log %arg_f32 : f32 - // CHECK: llvm.call @__ocml_log_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_log_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.log %arg_f64 : f64 - // CHECK: llvm.call @__ocml_log_f64(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__ocml_log_f64(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -204,9 +204,9 @@ // CHECK-LABEL: func @gpu_log10 func @gpu_log10(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.log10 %arg_f32 : f32 - // CHECK: llvm.call @__ocml_log10_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_log10_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.log10 %arg_f64 : f64 - // CHECK: llvm.call @__ocml_log10_f64(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__ocml_log10_f64(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -219,9 +219,9 @@ // CHECK-LABEL: func @gpu_log2 func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.log2 %arg_f32 : f32 - // CHECK: llvm.call @__ocml_log2_f32(%{{.*}}) : 
(!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_log2_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.log2 %arg_f64 : f64 - // CHECK: llvm.call @__ocml_log2_f64(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__ocml_log2_f64(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } @@ -236,12 +236,12 @@ -> (f16, f32, f64) { %result16 = std.rsqrt %arg_f16 : f16 // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float - // CHECK-NEXT: llvm.call @__ocml_rsqrt_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK-NEXT: llvm.call @__ocml_rsqrt_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half %result32 = std.rsqrt %arg_f32 : f32 - // CHECK: llvm.call @__ocml_rsqrt_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_rsqrt_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.rsqrt %arg_f64 : f64 - // CHECK: llvm.call @__ocml_rsqrt_f64(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__ocml_rsqrt_f64(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result16, %result32, %result64 : f16, f32, f64 } } @@ -256,12 +256,12 @@ -> (f16, f32, f64) { %result16 = std.sqrt %arg_f16 : f16 // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float - // CHECK-NEXT: llvm.call @__ocml_sqrt_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK-NEXT: llvm.call @__ocml_sqrt_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half %result32 = std.sqrt %arg_f32 : f32 - // CHECK: llvm.call @__ocml_sqrt_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_sqrt_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.sqrt %arg_f64 : f64 - // CHECK: llvm.call @__ocml_sqrt_f64(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__ocml_sqrt_f64(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double 
std.return %result16, %result32, %result64 : f16, f32, f64 } } @@ -274,9 +274,9 @@ // CHECK-LABEL: func @gpu_tanh func @gpu_tanh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = std.tanh %arg_f32 : f32 - // CHECK: llvm.call @__ocml_tanh_f32(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK: llvm.call @__ocml_tanh_f32(%{{.*}}) {{.*}}: (!llvm.float) -> !llvm.float %result64 = std.tanh %arg_f64 : f64 - // CHECK: llvm.call @__ocml_tanh_f64(%{{.*}}) : (!llvm.double) -> !llvm.double + // CHECK: llvm.call @__ocml_tanh_f64(%{{.*}}) {{.*}}: (!llvm.double) -> !llvm.double std.return %result32, %result64 : f32, f64 } } Index: mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir =================================================================== --- mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir +++ mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir @@ -2,18 +2,18 @@ // CHECK: llvm.mlir.global internal constant @kernel_spv_entry_point_name // CHECK: llvm.mlir.global internal constant @SPIRV_BIN -// CHECK: %[[Vulkan_Runtime_ptr:.*]] = llvm.call @initVulkan() : () -> !llvm.ptr +// CHECK: %[[Vulkan_Runtime_ptr:.*]] = llvm.call @initVulkan() {{.*}}: () -> !llvm.ptr // CHECK: %[[addressof_SPIRV_BIN:.*]] = llvm.mlir.addressof @SPIRV_BIN // CHECK: %[[SPIRV_BIN_ptr:.*]] = llvm.getelementptr %[[addressof_SPIRV_BIN]] // CHECK: %[[SPIRV_BIN_size:.*]] = llvm.mlir.constant -// CHECK: llvm.call @bindMemRef1DFloat(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.i32, !llvm.i32, !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>>) -> !llvm.void -// CHECK: llvm.call @setBinaryShader(%[[Vulkan_Runtime_ptr]], %[[SPIRV_BIN_ptr]], %[[SPIRV_BIN_size]]) : (!llvm.ptr, !llvm.ptr, !llvm.i32) -> !llvm.void +// CHECK: llvm.call @bindMemRef1DFloat(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!llvm.ptr, !llvm.i32, !llvm.i32, !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>>) -> !llvm.void +// CHECK: llvm.call 
@setBinaryShader(%[[Vulkan_Runtime_ptr]], %[[SPIRV_BIN_ptr]], %[[SPIRV_BIN_size]]) {{.*}}: (!llvm.ptr, !llvm.ptr, !llvm.i32) -> !llvm.void // CHECK: %[[addressof_entry_point:.*]] = llvm.mlir.addressof @kernel_spv_entry_point_name // CHECK: %[[entry_point_ptr:.*]] = llvm.getelementptr %[[addressof_entry_point]] -// CHECK: llvm.call @setEntryPoint(%[[Vulkan_Runtime_ptr]], %[[entry_point_ptr]]) : (!llvm.ptr, !llvm.ptr) -> !llvm.void -// CHECK: llvm.call @setNumWorkGroups(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.i64, !llvm.i64, !llvm.i64) -> !llvm.void -// CHECK: llvm.call @runOnVulkan(%[[Vulkan_Runtime_ptr]]) : (!llvm.ptr) -> !llvm.void -// CHECK: llvm.call @deinitVulkan(%[[Vulkan_Runtime_ptr]]) : (!llvm.ptr) -> !llvm.void +// CHECK: llvm.call @setEntryPoint(%[[Vulkan_Runtime_ptr]], %[[entry_point_ptr]]) {{.*}}: (!llvm.ptr, !llvm.ptr) -> !llvm.void +// CHECK: llvm.call @setNumWorkGroups(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!llvm.ptr, !llvm.i64, !llvm.i64, !llvm.i64) -> !llvm.void +// CHECK: llvm.call @runOnVulkan(%[[Vulkan_Runtime_ptr]]) {{.*}}: (!llvm.ptr) -> !llvm.void +// CHECK: llvm.call @deinitVulkan(%[[Vulkan_Runtime_ptr]]) {{.*}}: (!llvm.ptr) -> !llvm.void // CHECK: llvm.func @bindMemRef1DHalf(!llvm.ptr, !llvm.i32, !llvm.i32, !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>>) Index: mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir =================================================================== --- mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir +++ mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir @@ -164,7 +164,7 @@ spv.func @tan(%arg0: f32) "None" { // CHECK: %[[SIN:.*]] = "llvm.intr.sin"(%{{.*}}) : (!llvm.float) -> !llvm.float // CHECK: %[[COS:.*]] = "llvm.intr.cos"(%{{.*}}) : (!llvm.float) -> !llvm.float - // CHECK: llvm.fdiv %[[SIN]], %[[COS]] : !llvm.float + // CHECK: llvm.fdiv %[[SIN]], %[[COS]] {{.*}}: !llvm.float %0 = spv.GLSL.Tan %arg0 : f32 spv.Return } @@ -179,9 +179,9 
@@ // CHECK: %[[X2:.*]] = llvm.fmul %[[TWO]], %{{.*}} : !llvm.float // CHECK: %[[EXP:.*]] = "llvm.intr.exp"(%[[X2]]) : (!llvm.float) -> !llvm.float // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float - // CHECK: %[[T0:.*]] = llvm.fsub %[[EXP]], %[[ONE]] : !llvm.float - // CHECK: %[[T1:.*]] = llvm.fadd %[[EXP]], %[[ONE]] : !llvm.float - // CHECK: llvm.fdiv %[[T0]], %[[T1]] : !llvm.float + // CHECK: %[[T0:.*]] = llvm.fsub %[[EXP]], %[[ONE]] {{.*}}: !llvm.float + // CHECK: %[[T1:.*]] = llvm.fadd %[[EXP]], %[[ONE]] {{.*}}: !llvm.float + // CHECK: llvm.fdiv %[[T0]], %[[T1]] {{.*}}: !llvm.float %0 = spv.GLSL.Tanh %arg0 : f32 spv.Return } @@ -194,7 +194,7 @@ spv.func @inverse_sqrt(%arg0: f32) "None" { // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%{{.*}}) : (!llvm.float) -> !llvm.float - // CHECK: llvm.fdiv %[[ONE]], %[[SQRT]] : !llvm.float + // CHECK: llvm.fdiv %[[ONE]], %[[SQRT]] {{.*}}: !llvm.float %0 = spv.GLSL.InverseSqrt %arg0 : f32 spv.Return } Index: mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir =================================================================== --- mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir +++ mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir @@ -35,7 +35,7 @@ // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr // CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr // CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (!llvm.i64) -> !llvm.ptr +// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) {{.*}}: (!llvm.i64) -> !llvm.ptr // CHECK-NEXT: llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr // CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> // CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr, ptr, 
i64, array<3 x i64>, array<3 x i64>)> @@ -57,7 +57,7 @@ func @mixed_dealloc(%arg0: memref) { // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> // CHECK-NEXT: %[[ptri8:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr to !llvm.ptr -// CHECK-NEXT: llvm.call @free(%[[ptri8]]) : (!llvm.ptr) -> () +// CHECK-NEXT: llvm.call @free(%[[ptri8]]) {{.*}}: (!llvm.ptr) -> () dealloc %arg0 : memref // CHECK-NEXT: llvm.return return @@ -71,7 +71,7 @@ // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr // CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr // CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (!llvm.i64) -> !llvm.ptr +// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) {{.*}}: (!llvm.i64) -> !llvm.ptr // CHECK-NEXT: llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr // CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> @@ -124,7 +124,7 @@ func @dynamic_dealloc(%arg0: memref) { // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: %[[ptri8:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr to !llvm.ptr -// CHECK-NEXT: llvm.call @free(%[[ptri8]]) : (!llvm.ptr) -> () +// CHECK-NEXT: llvm.call @free(%[[ptri8]]) {{.*}}: (!llvm.ptr) -> () dealloc %arg0 : memref return } @@ -140,7 +140,7 @@ // ALIGNED-ALLOC-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr // ALIGNED-ALLOC-NEXT: %[[bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 // ALIGNED-ALLOC-NEXT: %[[alignment:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64 -// ALIGNED-ALLOC-NEXT: %[[allocated:.*]] = llvm.call @aligned_alloc(%[[alignment]], 
%[[bytes]]) : (!llvm.i64, !llvm.i64) -> !llvm.ptr +// ALIGNED-ALLOC-NEXT: %[[allocated:.*]] = llvm.call @aligned_alloc(%[[alignment]], %[[bytes]]) {{.*}}: (!llvm.i64, !llvm.i64) -> !llvm.ptr // ALIGNED-ALLOC-NEXT: llvm.bitcast %[[allocated]] : !llvm.ptr to !llvm.ptr %0 = alloc() {alignment = 32} : memref<32x18xf32> // Do another alloc just to test that we have a unique declaration for Index: mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir =================================================================== --- mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir +++ mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir @@ -74,7 +74,7 @@ // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr // CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr // CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// CHECK-NEXT: llvm.call @malloc(%[[size_bytes]]) : (!llvm.i64) -> !llvm.ptr +// CHECK-NEXT: llvm.call @malloc(%[[size_bytes]]) {{.*}}: (!llvm.i64) -> !llvm.ptr // CHECK-NEXT: %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr // CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64)> // CHECK-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64)> @@ -86,7 +86,7 @@ // BAREPTR-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr // BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr // BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// BAREPTR-NEXT: llvm.call @malloc(%[[size_bytes]]) : (!llvm.i64) -> !llvm.ptr +// BAREPTR-NEXT: llvm.call @malloc(%[[size_bytes]]) {{.*}}: (!llvm.i64) -> !llvm.ptr // BAREPTR-NEXT: %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr // BAREPTR-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64)> // BAREPTR-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64)> @@ -104,11 
+104,11 @@ func @zero_d_dealloc(%arg0: memref) { // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64)> // CHECK-NEXT: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr to !llvm.ptr -// CHECK-NEXT: llvm.call @free(%[[bc]]) : (!llvm.ptr) -> () +// CHECK-NEXT: llvm.call @free(%[[bc]]) {{.*}}: (!llvm.ptr) -> () // BAREPTR: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64)> // BAREPTR-NEXT: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr to !llvm.ptr -// BAREPTR-NEXT: llvm.call @free(%[[bc]]) : (!llvm.ptr) -> () +// BAREPTR-NEXT: llvm.call @free(%[[bc]]) {{.*}}: (!llvm.ptr) -> () dealloc %arg0 : memref return } @@ -125,7 +125,7 @@ // CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 // CHECK-NEXT: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : !llvm.i64 // CHECK-NEXT: %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : !llvm.i64 -// CHECK-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) : (!llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) {{.*}}: (!llvm.i64) -> !llvm.ptr // CHECK-NEXT: %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr // CHECK-NEXT: %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr to !llvm.i64 // CHECK-NEXT: %[[one_1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 @@ -147,7 +147,7 @@ // BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 // BAREPTR-NEXT: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : !llvm.i64 // BAREPTR-NEXT: %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : !llvm.i64 -// BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) : (!llvm.i64) -> !llvm.ptr +// BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) {{.*}}: (!llvm.i64) -> !llvm.ptr // BAREPTR-NEXT: %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr // BAREPTR-NEXT: %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : 
!llvm.ptr to !llvm.i64 // BAREPTR-NEXT: %[[one_2:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 @@ -174,14 +174,14 @@ // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr // CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr // CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// CHECK-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) : (!llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) {{.*}}: (!llvm.i64) -> !llvm.ptr // CHECK-NEXT: llvm.bitcast %[[allocated]] : !llvm.ptr to !llvm.ptr // BAREPTR: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : !llvm.i64 // BAREPTR-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr // BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr // BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) : (!llvm.i64) -> !llvm.ptr +// BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) {{.*}}: (!llvm.i64) -> !llvm.ptr // BAREPTR-NEXT: llvm.bitcast %[[allocated]] : !llvm.ptr to !llvm.ptr %0 = alloc() : memref<32x18xf32> return %0 : memref<32x18xf32> @@ -219,11 +219,11 @@ func @static_dealloc(%static: memref<10x8xf32>) { // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr to !llvm.ptr -// CHECK-NEXT: llvm.call @free(%[[bc]]) : (!llvm.ptr) -> () +// CHECK-NEXT: llvm.call @free(%[[bc]]) {{.*}}: (!llvm.ptr) -> () // BAREPTR: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // BAREPTR-NEXT: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr to !llvm.ptr -// BAREPTR-NEXT: llvm.call @free(%[[bc]]) : (!llvm.ptr) -> () +// BAREPTR-NEXT: llvm.call 
@free(%[[bc]]) {{.*}}: (!llvm.ptr) -> () dealloc %static : memref<10x8xf32> return } Index: mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir =================================================================== --- mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir +++ mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir @@ -89,8 +89,8 @@ // CHECK-DAG: %[[A_IMAG:.*]] = llvm.extractvalue %[[A]][1] : !llvm.struct<(double, double)> // CHECK-DAG: %[[B_IMAG:.*]] = llvm.extractvalue %[[B]][1] : !llvm.struct<(double, double)> // CHECK: %[[C0:.*]] = llvm.mlir.undef : !llvm.struct<(double, double)> -// CHECK-DAG: %[[C_REAL:.*]] = llvm.fadd %[[A_REAL]], %[[B_REAL]] : !llvm.double -// CHECK-DAG: %[[C_IMAG:.*]] = llvm.fadd %[[A_IMAG]], %[[B_IMAG]] : !llvm.double +// CHECK-DAG: %[[C_REAL:.*]] = llvm.fadd %[[A_REAL]], %[[B_REAL]] {{.*}}: !llvm.double +// CHECK-DAG: %[[C_IMAG:.*]] = llvm.fadd %[[A_IMAG]], %[[B_IMAG]] {{.*}}: !llvm.double // CHECK: %[[C1:.*]] = llvm.insertvalue %[[C_REAL]], %[[C0]][0] : !llvm.struct<(double, double)> // CHECK: %[[C2:.*]] = llvm.insertvalue %[[C_IMAG]], %[[C1]][1] : !llvm.struct<(double, double)> func @complex_addition() { @@ -110,8 +110,8 @@ // CHECK-DAG: %[[A_IMAG:.*]] = llvm.extractvalue %[[A]][1] : !llvm.struct<(double, double)> // CHECK-DAG: %[[B_IMAG:.*]] = llvm.extractvalue %[[B]][1] : !llvm.struct<(double, double)> // CHECK: %[[C0:.*]] = llvm.mlir.undef : !llvm.struct<(double, double)> -// CHECK-DAG: %[[C_REAL:.*]] = llvm.fsub %[[A_REAL]], %[[B_REAL]] : !llvm.double -// CHECK-DAG: %[[C_IMAG:.*]] = llvm.fsub %[[A_IMAG]], %[[B_IMAG]] : !llvm.double +// CHECK-DAG: %[[C_REAL:.*]] = llvm.fsub %[[A_REAL]], %[[B_REAL]] {{.*}}: !llvm.double +// CHECK-DAG: %[[C_IMAG:.*]] = llvm.fsub %[[A_IMAG]], %[[B_IMAG]] {{.*}}: !llvm.double // CHECK: %[[C1:.*]] = llvm.insertvalue %[[C_REAL]], %[[C0]][0] : !llvm.struct<(double, double)> // CHECK: %[[C2:.*]] = llvm.insertvalue %[[C_IMAG]], %[[C1]][1] : !llvm.struct<(double, double)> func 
@complex_substraction() { @@ -481,7 +481,7 @@ // CHECK: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64 %2 = addi %0#0, %1 : i64 %3 = constant 42.0 : f32 -// CHECK: {{.*}} = llvm.fadd {{.*}}, {{.*}} : !llvm.float +// CHECK: {{.*}} = llvm.fadd {{.*}}, {{.*}} {{.*}}: !llvm.float %4 = addf %0#1, %3 : f32 %5 = constant 0 : index return @@ -793,20 +793,20 @@ // CHECK-LABEL: func @fcmp(%arg0: !llvm.float, %arg1: !llvm.float) { func @fcmp(f32, f32) -> () { ^bb0(%arg0: f32, %arg1: f32): - // CHECK: llvm.fcmp "oeq" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "ogt" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "oge" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "olt" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "ole" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "one" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "ord" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "ueq" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "ugt" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "uge" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "ult" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "ule" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "une" %arg0, %arg1 : !llvm.float - // CHECK-NEXT: llvm.fcmp "uno" %arg0, %arg1 : !llvm.float + // CHECK: llvm.fcmp "oeq" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp "ogt" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp "oge" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp "olt" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp "ole" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp "one" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp "ord" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp "ueq" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp "ugt" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp "uge" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp 
"ult" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp "ule" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp "une" %arg0, %arg1 {{.*}}: !llvm.float + // CHECK-NEXT: llvm.fcmp "uno" %arg0, %arg1 {{.*}}: !llvm.float // CHECK-NEXT: llvm.return %1 = cmpf "oeq", %arg0, %arg1 : f32 %2 = cmpf "ogt", %arg0, %arg1 : f32 @@ -1447,7 +1447,7 @@ return %dim : index } // CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(i64, ptr)> -// CHECK-NEXT: llvm.insertvalue +// CHECK-NEXT: llvm.insertvalue // CHECK-NEXT: %[[UNRANKED_DESC:.*]] = llvm.insertvalue // CHECK-NEXT: %[[C0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 Index: mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir =================================================================== --- mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir +++ mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir @@ -17,7 +17,7 @@ func @rsqrt(%arg0 : f32) { // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%arg0) : (!llvm.float) -> !llvm.float - // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : !llvm.float + // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] {{.*}}: !llvm.float %0 = rsqrt %arg0 : f32 std.return } @@ -60,7 +60,7 @@ func @rsqrt_double(%arg0 : f64) { // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f64) : !llvm.double // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%arg0) : (!llvm.double) -> !llvm.double - // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : !llvm.double + // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] {{.*}}: !llvm.double %0 = rsqrt %arg0 : f64 std.return } @@ -72,7 +72,7 @@ func @rsqrt_vector(%arg0 : vector<4xf32>) { // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : !llvm.vec<4 x float> // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%arg0) : (!llvm.vec<4 x float>) -> !llvm.vec<4 x float> - // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : !llvm.vec<4 
x float> + // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] {{.*}}: !llvm.vec<4 x float> %0 = rsqrt %arg0 : vector<4xf32> std.return } @@ -85,7 +85,7 @@ // CHECK: %[[EXTRACT:.*]] = llvm.extractvalue %arg0[0] : !llvm.array<4 x vec<3 x float>> // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<3xf32>) : !llvm.vec<3 x float> // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%[[EXTRACT]]) : (!llvm.vec<3 x float>) -> !llvm.vec<3 x float> - // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : !llvm.vec<3 x float> + // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] {{.*}}: !llvm.vec<3 x float> // CHECK: %[[INSERT:.*]] = llvm.insertvalue %[[DIV]], %0[0] : !llvm.array<4 x vec<3 x float>> %0 = rsqrt %arg0 : vector<4x3xf32> std.return Index: mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir =================================================================== --- mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir +++ mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir @@ -439,8 +439,8 @@ // CHECK-LABEL: llvm.func @vector_print_scalar_i1( // CHECK-SAME: %[[A:.*]]: !llvm.i1) // CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i1 to !llvm.i64 -// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printI64(%[[S]]) {{.*}}: (!llvm.i64) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_scalar_i4(%arg0: i4) { vector.print %arg0 : i4 @@ -449,8 +449,8 @@ // CHECK-LABEL: llvm.func @vector_print_scalar_i4( // CHECK-SAME: %[[A:.*]]: !llvm.i4) // CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i4 to !llvm.i64 -// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printI64(%[[S]]) {{.*}}: (!llvm.i64) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_scalar_si4(%arg0: si4) { vector.print %arg0 : si4 @@ -459,8 +459,8 @@ // CHECK-LABEL: llvm.func 
@vector_print_scalar_si4( // CHECK-SAME: %[[A:.*]]: !llvm.i4) // CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i4 to !llvm.i64 -// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printI64(%[[S]]) {{.*}}: (!llvm.i64) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_scalar_ui4(%arg0: ui4) { vector.print %arg0 : ui4 @@ -469,8 +469,8 @@ // CHECK-LABEL: llvm.func @vector_print_scalar_ui4( // CHECK-SAME: %[[A:.*]]: !llvm.i4) // CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i4 to !llvm.i64 -// CHECK: llvm.call @printU64(%[[S]]) : (!llvm.i64) -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printU64(%[[S]]) {{.*}}: (!llvm.i64) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_scalar_i32(%arg0: i32) { vector.print %arg0 : i32 @@ -479,8 +479,8 @@ // CHECK-LABEL: llvm.func @vector_print_scalar_i32( // CHECK-SAME: %[[A:.*]]: !llvm.i32) // CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i32 to !llvm.i64 -// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printI64(%[[S]]) {{.*}}: (!llvm.i64) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_scalar_ui32(%arg0: ui32) { vector.print %arg0 : ui32 @@ -489,7 +489,7 @@ // CHECK-LABEL: llvm.func @vector_print_scalar_ui32( // CHECK-SAME: %[[A:.*]]: !llvm.i32) // CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i32 to !llvm.i64 -// CHECK: llvm.call @printU64(%[[S]]) : (!llvm.i64) -> () +// CHECK: llvm.call @printU64(%[[S]]) {{.*}}: (!llvm.i64) -> () func @vector_print_scalar_i40(%arg0: i40) { vector.print %arg0 : i40 @@ -498,8 +498,8 @@ // CHECK-LABEL: llvm.func @vector_print_scalar_i40( // CHECK-SAME: %[[A:.*]]: !llvm.i40) // CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i40 to !llvm.i64 -// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> () -// CHECK: llvm.call @printNewline() : () 
-> () +// CHECK: llvm.call @printI64(%[[S]]) {{.*}}: (!llvm.i64) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_scalar_si40(%arg0: si40) { vector.print %arg0 : si40 @@ -508,8 +508,8 @@ // CHECK-LABEL: llvm.func @vector_print_scalar_si40( // CHECK-SAME: %[[A:.*]]: !llvm.i40) // CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i40 to !llvm.i64 -// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printI64(%[[S]]) {{.*}}: (!llvm.i64) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_scalar_ui40(%arg0: ui40) { vector.print %arg0 : ui40 @@ -518,8 +518,8 @@ // CHECK-LABEL: llvm.func @vector_print_scalar_ui40( // CHECK-SAME: %[[A:.*]]: !llvm.i40) // CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i40 to !llvm.i64 -// CHECK: llvm.call @printU64(%[[S]]) : (!llvm.i64) -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printU64(%[[S]]) {{.*}}: (!llvm.i64) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_scalar_i64(%arg0: i64) { vector.print %arg0 : i64 @@ -527,8 +527,8 @@ } // CHECK-LABEL: llvm.func @vector_print_scalar_i64( // CHECK-SAME: %[[A:.*]]: !llvm.i64) -// CHECK: llvm.call @printI64(%[[A]]) : (!llvm.i64) -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printI64(%[[A]]) {{.*}}: (!llvm.i64) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_scalar_ui64(%arg0: ui64) { vector.print %arg0 : ui64 @@ -536,8 +536,8 @@ } // CHECK-LABEL: llvm.func @vector_print_scalar_ui64( // CHECK-SAME: %[[A:.*]]: !llvm.i64) -// CHECK: llvm.call @printU64(%[[A]]) : (!llvm.i64) -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printU64(%[[A]]) {{.*}}: (!llvm.i64) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_scalar_index(%arg0: index) { vector.print %arg0 : index @@ -545,8 +545,8 @@ } // CHECK-LABEL: 
llvm.func @vector_print_scalar_index( // CHECK-SAME: %[[A:.*]]: !llvm.i64) -// CHECK: llvm.call @printU64(%[[A]]) : (!llvm.i64) -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printU64(%[[A]]) {{.*}}: (!llvm.i64) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_scalar_f32(%arg0: f32) { vector.print %arg0 : f32 @@ -554,8 +554,8 @@ } // CHECK-LABEL: llvm.func @vector_print_scalar_f32( // CHECK-SAME: %[[A:.*]]: !llvm.float) -// CHECK: llvm.call @printF32(%[[A]]) : (!llvm.float) -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printF32(%[[A]]) {{.*}}: (!llvm.float) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_scalar_f64(%arg0: f64) { vector.print %arg0 : f64 @@ -563,8 +563,8 @@ } // CHECK-LABEL: llvm.func @vector_print_scalar_f64( // CHECK-SAME: %[[A:.*]]: !llvm.double) -// CHECK: llvm.call @printF64(%[[A]]) : (!llvm.double) -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printF64(%[[A]]) {{.*}}: (!llvm.double) -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @vector_print_vector(%arg0: vector<2x2xf32>) { vector.print %arg0 : vector<2x2xf32> @@ -572,30 +572,30 @@ } // CHECK-LABEL: llvm.func @vector_print_vector( // CHECK-SAME: %[[A:.*]]: !llvm.array<2 x vec<2 x float>>) -// CHECK: llvm.call @printOpen() : () -> () +// CHECK: llvm.call @printOpen() {{.*}}: () -> () // CHECK: %[[x0:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<2 x vec<2 x float>> -// CHECK: llvm.call @printOpen() : () -> () +// CHECK: llvm.call @printOpen() {{.*}}: () -> () // CHECK: %[[x1:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 // CHECK: %[[x2:.*]] = llvm.extractelement %[[x0]][%[[x1]] : !llvm.i64] : !llvm.vec<2 x float> -// CHECK: llvm.call @printF32(%[[x2]]) : (!llvm.float) -> () -// CHECK: llvm.call @printComma() : () -> () +// CHECK: llvm.call @printF32(%[[x2]]) {{.*}}: (!llvm.float) -> () +// CHECK: llvm.call @printComma() 
{{.*}}: () -> () // CHECK: %[[x3:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 // CHECK: %[[x4:.*]] = llvm.extractelement %[[x0]][%[[x3]] : !llvm.i64] : !llvm.vec<2 x float> -// CHECK: llvm.call @printF32(%[[x4]]) : (!llvm.float) -> () -// CHECK: llvm.call @printClose() : () -> () -// CHECK: llvm.call @printComma() : () -> () +// CHECK: llvm.call @printF32(%[[x4]]) {{.*}}: (!llvm.float) -> () +// CHECK: llvm.call @printClose() {{.*}}: () -> () +// CHECK: llvm.call @printComma() {{.*}}: () -> () // CHECK: %[[x5:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<2 x vec<2 x float>> -// CHECK: llvm.call @printOpen() : () -> () +// CHECK: llvm.call @printOpen() {{.*}}: () -> () // CHECK: %[[x6:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 // CHECK: %[[x7:.*]] = llvm.extractelement %[[x5]][%[[x6]] : !llvm.i64] : !llvm.vec<2 x float> -// CHECK: llvm.call @printF32(%[[x7]]) : (!llvm.float) -> () -// CHECK: llvm.call @printComma() : () -> () +// CHECK: llvm.call @printF32(%[[x7]]) {{.*}}: (!llvm.float) -> () +// CHECK: llvm.call @printComma() {{.*}}: () -> () // CHECK: %[[x8:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 // CHECK: %[[x9:.*]] = llvm.extractelement %[[x5]][%[[x8]] : !llvm.i64] : !llvm.vec<2 x float> -// CHECK: llvm.call @printF32(%[[x9]]) : (!llvm.float) -> () -// CHECK: llvm.call @printClose() : () -> () -// CHECK: llvm.call @printClose() : () -> () -// CHECK: llvm.call @printNewline() : () -> () +// CHECK: llvm.call @printF32(%[[x9]]) {{.*}}: (!llvm.float) -> () +// CHECK: llvm.call @printClose() {{.*}}: () -> () +// CHECK: llvm.call @printClose() {{.*}}: () -> () +// CHECK: llvm.call @printNewline() {{.*}}: () -> () func @extract_strided_slice1(%arg0: vector<4xf32>) -> vector<2xf32> { %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4xf32> to vector<2xf32> Index: mlir/test/Dialect/LLVMIR/roundtrip.mlir =================================================================== --- 
mlir/test/Dialect/LLVMIR/roundtrip.mlir +++ mlir/test/Dialect/LLVMIR/roundtrip.mlir @@ -353,3 +353,32 @@ llvm.return } + +// CHECK-LABEL: @fastmathFlags +func @fastmathFlags(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.i32) { +// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float +// CHECK: {{.*}} = llvm.fsub %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float +// CHECK: {{.*}} = llvm.fmul %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float +// CHECK: {{.*}} = llvm.fdiv %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float +// CHECK: {{.*}} = llvm.frem %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float + %0 = llvm.fadd %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float + %1 = llvm.fsub %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float + %2 = llvm.fmul %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float + %3 = llvm.fdiv %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float + %4 = llvm.frem %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float + +// CHECK: {{.*}} = llvm.fcmp "oeq" %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float + %5 = llvm.fcmp "oeq" %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float + +// CHECK: {{.*}} = llvm.fneg %arg0 {fmf = #llvm.fmfattr} : !llvm.float + %6 = llvm.fneg %arg0 {fmf = #llvm.fmfattr} : !llvm.float + +// CHECK: {{.*}} = llvm.call @foo(%arg2) {fmf = #llvm.fmfattr} : (!llvm.i32) -> !llvm.struct<(i32, double, i32)> + %7 = llvm.call @foo(%arg2) {fmf = #llvm.fmfattr} : (!llvm.i32) -> !llvm.struct<(i32, double, i32)> + +// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 {fmf = #llvm.fmfattr<>} : !llvm.float + %8 = llvm.fadd %arg0, %arg1 {fmf = #llvm.fmfattr<>} : !llvm.float +// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float + %9 = llvm.fadd %arg0, %arg1 {fmf = #llvm.fmfattr} : !llvm.float + return +} Index: mlir/test/Target/import.ll =================================================================== --- mlir/test/Target/import.ll +++ mlir/test/Target/import.ll @@ -95,7 +95,7 @@ %cc = getelementptr double, double* @g2, 
i32 2 ; CHECK: %[[b:[0-9]+]] = llvm.trunc %arg0 : !llvm.i64 to !llvm.i32 %b = trunc i64 %a to i32 -; CHECK: %[[c:[0-9]+]] = llvm.call @fe(%[[b]]) : (!llvm.i32) -> !llvm.float +; CHECK: %[[c:[0-9]+]] = llvm.call @fe(%[[b]]) {{.*}}: (!llvm.i32) -> !llvm.float %c = call float @fe(i32 %b) ; CHECK: %[[d:[0-9]+]] = llvm.fptosi %[[c]] : !llvm.float to !llvm.i32 %d = fptosi float %c to i32 @@ -109,7 +109,7 @@ if.then: ; CHECK: llvm.return %[[c42]] : !llvm.i32 ret i32 42 - + ; CHECK: ^bb2: if.end: ; CHECK: %[[orcond:[0-9]+]] = llvm.or %[[e]], %[[c1]] : !llvm.i1 Index: mlir/test/Target/llvmir.mlir =================================================================== --- mlir/test/Target/llvmir.mlir +++ mlir/test/Target/llvmir.mlir @@ -1358,3 +1358,10 @@ llvm.return } +// CHECK-LABEL: @fastmathFlags +llvm.func @fastmathFlags(%arg0: !llvm.float) { +// CHECK: {{.*}} = fadd nnan ninf float {{.*}}, {{.*}} + %0 = llvm.fadd %arg0, %arg0 {fmf = #llvm.fmfattr} : !llvm.float + llvm.return +} +