diff --git a/mlir/include/mlir/Dialect/GPU/Transforms/Passes.h b/mlir/include/mlir/Dialect/GPU/Transforms/Passes.h
--- a/mlir/include/mlir/Dialect/GPU/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/GPU/Transforms/Passes.h
@@ -110,6 +110,9 @@
                             ::llvm::cl::desc("Target architecture")};
   Option<std::string> features{*this, "features",
                                ::llvm::cl::desc("Target features")};
+  Option<int> optLevel{*this, "opt-level",
+                       llvm::cl::desc("Optimization level for compilation"),
+                       llvm::cl::init(2)};
   Option<std::string> gpuBinaryAnnotation{
       *this, "gpu-binary-annotation",
       llvm::cl::desc("Annotation attribute string for GPU binary"),
@@ -130,10 +133,11 @@ void registerGpuSerializeToHsacoPass();
 
 /// Create an instance of the GPU kernel function to CUBIN binary serialization
-/// pass.
+/// pass with default opt level 2.
 std::unique_ptr<Pass> createGpuSerializeToCubinPass(StringRef triple,
                                                     StringRef chip,
-                                                    StringRef features);
+                                                    StringRef features,
+                                                    int optLevel = 2);
 
 /// Create an instance of the GPU kernel function to HSAco binary serialization
 /// pass.
diff --git a/mlir/lib/Dialect/GPU/CMakeLists.txt b/mlir/lib/Dialect/GPU/CMakeLists.txt
--- a/mlir/lib/Dialect/GPU/CMakeLists.txt
+++ b/mlir/lib/Dialect/GPU/CMakeLists.txt
@@ -124,6 +124,7 @@
 target_link_libraries(MLIRGPUTransforms
   PRIVATE
+  MLIRExecutionEngine
   MLIRNVVMToLLVMIRTranslation
   ${CUDA_DRIVER_LIBRARY}
 )
diff --git a/mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp b/mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp
--- a/mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "mlir/Dialect/GPU/Transforms/Passes.h"
+#include "mlir/ExecutionEngine/OptUtils.h"
 #include "mlir/Pass/Pass.h"
 #include "mlir/Target/LLVMIR/Dialect/GPU/GPUToLLVMIRTranslation.h"
 #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
@@ -102,8 +103,24 @@
 LogicalResult
 gpu::SerializeToBlobPass::optimizeLlvm(llvm::Module &llvmModule,
                                        llvm::TargetMachine &targetMachine) {
-  // TODO: If serializeToCubin ends up defining optimizations, factor them
-  // into here from SerializeToHsaco
+  int optLevel = this->optLevel.getValue();
+  if (optLevel < 0 || optLevel > 3)
+    return getOperation().emitError()
+           << "Invalid optimization level" << optLevel << "\n";
+
+  targetMachine.setOptLevel(static_cast<llvm::CodeGenOpt::Level>(optLevel));
+
+  auto transformer =
+      makeOptimizingTransformer(optLevel, /*sizeLevel=*/0, &targetMachine);
+  auto error = transformer(&llvmModule);
+  if (error) {
+    InFlightDiagnostic mlirError = getOperation()->emitError();
+    llvm::handleAllErrors(
+        std::move(error), [&mlirError](const llvm::ErrorInfoBase &ei) {
+          mlirError << "Could not optimize LLVM IR: " << ei.message() << "\n";
+        });
+    return mlirError;
+  }
   return success();
 }
diff --git a/mlir/lib/Dialect/GPU/Transforms/SerializeToCubin.cpp b/mlir/lib/Dialect/GPU/Transforms/SerializeToCubin.cpp
--- a/mlir/lib/Dialect/GPU/Transforms/SerializeToCubin.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/SerializeToCubin.cpp
@@ -49,7 +49,8 @@
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(SerializeToCubinPass)
 
   SerializeToCubinPass(StringRef triple = "nvptx64-nvidia-cuda",
-                       StringRef chip = "sm_35", StringRef features = "+ptx60");
+                       StringRef chip = "sm_35", StringRef features = "+ptx60",
+                       int optLevel = 2);
 
   StringRef getArgument() const override { return "gpu-to-cubin"; }
   StringRef getDescription() const override {
@@ -72,10 +73,12 @@
 }
 
 SerializeToCubinPass::SerializeToCubinPass(StringRef triple, StringRef chip,
-                                           StringRef features) {
+                                           StringRef features, int optLevel) {
   maybeSetOption(this->triple, triple);
   maybeSetOption(this->chip, chip);
   maybeSetOption(this->features, features);
+  if (this->optLevel.getNumOccurrences() == 0)
+    this->optLevel.setValue(optLevel);
 }
 
 void SerializeToCubinPass::getDependentDialects(
@@ -147,8 +150,10 @@
 std::unique_ptr<Pass>
 mlir::createGpuSerializeToCubinPass(StringRef triple, StringRef arch,
-                                    StringRef features) {
-  return std::make_unique<SerializeToCubinPass>(triple, arch, features);
+                                    StringRef features,
+                                    int optLevel) {
+  return std::make_unique<SerializeToCubinPass>(triple, arch, features,
+                                                optLevel);
 }
 
 #else  // MLIR_GPU_TO_CUBIN_PASS_ENABLE
diff --git a/mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp b/mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp
--- a/mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp
@@ -74,11 +74,6 @@
   }
 
 protected:
-  Option<int> optLevel{
-      *this, "opt-level",
-      llvm::cl::desc("Optimization level for HSACO compilation"),
-      llvm::cl::init(2)};
-
   Option<std::string> rocmPath{*this, "rocm-path",
                                llvm::cl::desc("Path to ROCm install")};
 
@@ -86,10 +81,6 @@
   std::unique_ptr<llvm::Module>
   translateToLLVMIR(llvm::LLVMContext &llvmContext) override;
 
-  /// Adds LLVM optimization passes
-  LogicalResult optimizeLlvm(llvm::Module &llvmModule,
-                             llvm::TargetMachine &targetMachine) override;
-
 private:
   void getDependentDialects(DialectRegistry &registry) const override;
 
@@ -320,30 +311,6 @@
   return ret;
 }
 
-LogicalResult
-SerializeToHsacoPass::optimizeLlvm(llvm::Module &llvmModule,
-                                   llvm::TargetMachine &targetMachine) {
-  int optLevel = this->optLevel.getValue();
-  if (optLevel < 0 || optLevel > 3)
-    return getOperation().emitError()
-           << "Invalid HSA optimization level" << optLevel << "\n";
-
-  targetMachine.setOptLevel(static_cast<llvm::CodeGenOpt::Level>(optLevel));
-
-  auto transformer =
-      makeOptimizingTransformer(optLevel, /*sizeLevel=*/0, &targetMachine);
-  auto error = transformer(&llvmModule);
-  if (error) {
-    InFlightDiagnostic mlirError = getOperation()->emitError();
-    llvm::handleAllErrors(
-        std::move(error), [&mlirError](const llvm::ErrorInfoBase &ei) {
-          mlirError << "Could not optimize LLVM IR: " << ei.message() << "\n";
-        });
-    return mlirError;
-  }
-  return success();
-}
-
 std::unique_ptr<SmallVectorImpl<char>>
 SerializeToHsacoPass::assembleIsa(const std::string &isa) {
   auto loc = getOperation().getLoc();