diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -700,23 +700,23 @@
     Option<"reassociateFPReductions", "reassociate-fp-reductions",
            "bool", /*default=*/"false",
            "Allows llvm to reassociate floating-point reductions for speed">,
-    Option<"enableIndexOptimizations", "enable-index-optimizations",
+    Option<"indexOptimizations", "enable-index-optimizations",
            "bool", /*default=*/"true",
            "Allows compiler to assume indices fit in 32-bit if that yields "
            "faster code">,
-    Option<"enableAMX", "enable-amx",
+    Option<"amx", "enable-amx",
            "bool", /*default=*/"false",
            "Enables the use of AMX dialect while lowering the vector "
            "dialect.">,
-    Option<"enableArmNeon", "enable-arm-neon",
+    Option<"armNeon", "enable-arm-neon",
            "bool", /*default=*/"false",
            "Enables the use of ArmNeon dialect while lowering the vector "
            "dialect.">,
-    Option<"enableArmSVE", "enable-arm-sve",
+    Option<"armSVE", "enable-arm-sve",
            "bool", /*default=*/"false",
            "Enables the use of ArmSVE dialect while lowering the vector "
            "dialect.">,
-    Option<"enableX86Vector", "enable-x86vector",
+    Option<"x86Vector", "enable-x86vector",
            "bool", /*default=*/"false",
            "Enables the use of X86Vector dialect while lowering the vector "
            "dialect.">
diff --git a/mlir/include/mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h b/mlir/include/mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h
--- a/mlir/include/mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h
+++ b/mlir/include/mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h
@@ -22,41 +22,40 @@
 /// ConvertVectorToLLVM pass in include/mlir/Conversion/Passes.td
 struct LowerVectorToLLVMOptions {
   LowerVectorToLLVMOptions()
-      : reassociateFPReductions(false), enableIndexOptimizations(true),
-        enableArmNeon(false), enableArmSVE(false), enableAMX(false),
-        enableX86Vector(false) {}
+      : reassociateFPReductions(false), indexOptimizations(true),
+        armNeon(false), armSVE(false), amx(false), x86Vector(false) {}

-  LowerVectorToLLVMOptions &setReassociateFPReductions(bool b) {
+  LowerVectorToLLVMOptions &enableReassociateFPReductions(bool b = true) {
     reassociateFPReductions = b;
     return *this;
   }
-  LowerVectorToLLVMOptions &setEnableIndexOptimizations(bool b) {
-    enableIndexOptimizations = b;
+  LowerVectorToLLVMOptions &enableIndexOptimizations(bool b = true) {
+    indexOptimizations = b;
     return *this;
   }
-  LowerVectorToLLVMOptions &setEnableArmNeon(bool b) {
-    enableArmNeon = b;
+  LowerVectorToLLVMOptions &enableArmNeon(bool b = true) {
+    armNeon = b;
     return *this;
   }
-  LowerVectorToLLVMOptions &setEnableArmSVE(bool b) {
-    enableArmSVE = b;
+  LowerVectorToLLVMOptions &enableArmSVE(bool b = true) {
+    armSVE = b;
     return *this;
   }
-  LowerVectorToLLVMOptions &setEnableAMX(bool b) {
-    enableAMX = b;
+  LowerVectorToLLVMOptions &enableAMX(bool b = true) {
+    amx = b;
     return *this;
   }
-  LowerVectorToLLVMOptions &setEnableX86Vector(bool b) {
-    enableX86Vector = b;
+  LowerVectorToLLVMOptions &enableX86Vector(bool b = true) {
+    x86Vector = b;
     return *this;
   }

   bool reassociateFPReductions;
-  bool enableIndexOptimizations;
-  bool enableArmNeon;
-  bool enableArmSVE;
-  bool enableAMX;
-  bool enableX86Vector;
+  bool indexOptimizations;
+  bool armNeon;
+  bool armSVE;
+  bool amx;
+  bool x86Vector;
 };

 /// Collect a set of patterns to convert from Vector contractions to LLVM Matrix
diff --git a/mlir/include/mlir/Dialect/Vector/VectorOps.h b/mlir/include/mlir/Dialect/Vector/VectorOps.h
--- a/mlir/include/mlir/Dialect/Vector/VectorOps.h
+++ b/mlir/include/mlir/Dialect/Vector/VectorOps.h
@@ -83,7 +83,7 @@
 /// These patterns materialize masks for various vector ops such as transfers.
 void populateVectorMaskMaterializationPatterns(RewritePatternSet &patterns,
-                                               bool enableIndexOptimizations);
+                                               bool indexOptimizations);

 /// Collect a set of patterns to propagate insert_map/extract_map in the ssa
 /// chain.
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
@@ -34,24 +34,24 @@
     : public ConvertVectorToLLVMBase<LowerVectorToLLVMPass> {
   LowerVectorToLLVMPass(const LowerVectorToLLVMOptions &options) {
     this->reassociateFPReductions = options.reassociateFPReductions;
-    this->enableIndexOptimizations = options.enableIndexOptimizations;
-    this->enableArmNeon = options.enableArmNeon;
-    this->enableArmSVE = options.enableArmSVE;
-    this->enableAMX = options.enableAMX;
-    this->enableX86Vector = options.enableX86Vector;
+    this->indexOptimizations = options.indexOptimizations;
+    this->armNeon = options.armNeon;
+    this->armSVE = options.armSVE;
+    this->amx = options.amx;
+    this->x86Vector = options.x86Vector;
   }
   // Override explicitly to allow conditional dialect dependence.
   void getDependentDialects(DialectRegistry &registry) const override {
     registry.insert();
     registry.insert();
     registry.insert();
-    if (enableArmNeon)
+    if (armNeon)
       registry.insert();
-    if (enableArmSVE)
+    if (armSVE)
       registry.insert();
-    if (enableAMX)
+    if (amx)
       registry.insert();
-    if (enableX86Vector)
+    if (x86Vector)
       registry.insert();
   }
   void runOnOperation() override;
@@ -77,7 +77,7 @@
   // Convert to the LLVM IR dialect.
   LLVMTypeConverter converter(&getContext());
   RewritePatternSet patterns(&getContext());
-  populateVectorMaskMaterializationPatterns(patterns, enableIndexOptimizations);
+  populateVectorMaskMaterializationPatterns(patterns, indexOptimizations);
   populateVectorTransferLoweringPatterns(patterns);
   populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
   populateVectorToLLVMConversionPatterns(converter, patterns,
@@ -90,21 +90,21 @@
   target.addLegalDialect();
   target.addLegalDialect();
   target.addLegalOp();
-  if (enableArmNeon) {
+  if (armNeon) {
     // TODO: we may or may not want to include in-dialect lowering to
     // LLVM-compatible operations here. So far, all operations in the dialect
     // can be translated to LLVM IR so there is no conversion necessary.
     target.addLegalDialect();
   }
-  if (enableArmSVE) {
+  if (armSVE) {
     configureArmSVELegalizeForExportTarget(target);
     populateArmSVELegalizeForLLVMExportPatterns(converter, patterns);
   }
-  if (enableAMX) {
+  if (amx) {
     configureAMXLegalizeForExportTarget(target);
     populateAMXLegalizeForLLVMExportPatterns(converter, patterns);
   }
-  if (enableX86Vector) {
+  if (x86Vector) {
     configureX86VectorLegalizeForExportTarget(target);
     populateX86VectorLegalizeForLLVMExportPatterns(converter, patterns);
   }
diff --git a/mlir/lib/Dialect/Vector/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
--- a/mlir/lib/Dialect/Vector/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
@@ -3402,7 +3402,7 @@
 // generates more elaborate instructions for this intrinsic since it
 // is very conservative on the boundary conditions.
 static Value buildVectorComparison(PatternRewriter &rewriter, Operation *op,
-                                   bool enableIndexOptimizations, int64_t dim,
+                                   bool indexOptimizations, int64_t dim,
                                    Value b, Value *off = nullptr) {
   auto loc = op->getLoc();
   // If we can assume all indices fit in 32-bit, we perform the vector
@@ -3410,7 +3410,7 @@
   // Otherwise we perform the vector comparison using 64-bit indices.
   Value indices;
   Type idxType;
-  if (enableIndexOptimizations) {
+  if (indexOptimizations) {
     indices = rewriter.create(
         loc, rewriter.getI32VectorAttr(
                  llvm::to_vector<4>(llvm::seq<int32_t>(0, dim))));
@@ -3439,7 +3439,7 @@
 public:
   explicit MaterializeTransferMask(MLIRContext *context, bool enableIndexOpt)
       : mlir::OpRewritePattern<ConcreteOp>(context),
-        enableIndexOptimizations(enableIndexOpt) {}
+        indexOptimizations(enableIndexOpt) {}

   LogicalResult matchAndRewrite(ConcreteOp xferOp,
                                 PatternRewriter &rewriter) const override {
@@ -3466,8 +3466,8 @@
     Value off = xferOp.indices()[lastIndex];
     Value dim =
         vector::createOrFoldDimOp(rewriter, loc, xferOp.source(), lastIndex);
-    Value mask = buildVectorComparison(
-        rewriter, xferOp, enableIndexOptimizations, vecWidth, dim, &off);
+    Value mask = buildVectorComparison(rewriter, xferOp, indexOptimizations,
+                                       vecWidth, dim, &off);

     if (xferOp.mask()) {
       // Intersect the in-bounds with the mask specified as an op parameter.
@@ -3483,7 +3483,7 @@
   }

 private:
-  const bool enableIndexOptimizations;
+  const bool indexOptimizations;
 };

 /// Conversion pattern for a vector.create_mask (1-D only).
@@ -3493,7 +3493,7 @@
   explicit VectorCreateMaskOpConversion(MLIRContext *context,
                                         bool enableIndexOpt)
       : mlir::OpRewritePattern<vector::CreateMaskOp>(context),
-        enableIndexOptimizations(enableIndexOpt) {}
+        indexOptimizations(enableIndexOpt) {}

   LogicalResult matchAndRewrite(vector::CreateMaskOp op,
                                 PatternRewriter &rewriter) const override {
@@ -3501,7 +3501,7 @@
     int64_t rank = dstType.getRank();
     if (rank == 1) {
       rewriter.replaceOp(
-          op, buildVectorComparison(rewriter, op, enableIndexOptimizations,
+          op, buildVectorComparison(rewriter, op, indexOptimizations,
                                     dstType.getDimSize(0), op.getOperand(0)));
       return success();
     }
@@ -3509,7 +3509,7 @@
   }

 private:
-  const bool enableIndexOptimizations;
+  const bool indexOptimizations;
 };

 // Drop inner most contiguous unit dimensions from transfer_read operand.
@@ -3587,11 +3587,11 @@
 };

 void mlir::vector::populateVectorMaskMaterializationPatterns(
-    RewritePatternSet &patterns, bool enableIndexOptimizations) {
+    RewritePatternSet &patterns, bool indexOptimizations) {
   patterns.add,
                MaterializeTransferMask>(
-      patterns.getContext(), enableIndexOptimizations);
+      patterns.getContext(), indexOptimizations);
 }

 void mlir::vector::populatePropagateVectorDistributionPatterns(
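Note on the header change above: the LowerVectorToLLVMOptions setters move from setEnableFoo(bool) to enableFoo(bool b = true), so call sites read as feature toggles. Below is a minimal usage sketch under the renamed API; the pass-manager wiring and the createConvertVectorToLLVMPass entry point are assumed from the surrounding tree rather than shown in this diff.

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Pass/PassManager.h"

using namespace mlir;

// Sketch: configure the vector-to-LLVM lowering with the renamed fluent API.
// Every enable* setter now defaults its argument to true, so enabling a
// feature no longer needs an explicit `true`; passing false still disables it.
static void addVectorLoweringPasses(OpPassManager &pm) {
  LowerVectorToLLVMOptions options;
  options.enableIndexOptimizations()   // assume indices fit in 32 bits
      .enableReassociateFPReductions() // allow FP reduction reassociation
      .enableX86Vector()               // lower through the X86Vector dialect
      .enableAMX(false);               // keep AMX lowering off explicitly
  // Assumed entry point from the surrounding tree (not part of this diff).
  pm.addPass(createConvertVectorToLLVMPass(options));
}

Defaulting the argument to true is what lets the rename drop the set/enable prefix pair without making call sites more verbose.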
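The VectorOps.h and VectorTransforms.cpp hunks only rename the boolean that makes buildVectorComparison emit 32-bit index vectors. A sketch of driving the mask-materialization patterns directly, for example from a custom pipeline; the MLIRContext setup and the pattern application driver are assumed, not shown in this diff.

#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Sketch: populate only the mask-materialization patterns. Passing true lets
// buildVectorComparison build i32 index vectors when materializing in-bounds
// masks; passing false keeps 64-bit indices for very large dimensions.
static void collectMaskMaterializationPatterns(MLIRContext *ctx) {
  RewritePatternSet patterns(ctx);
  vector::populateVectorMaskMaterializationPatterns(
      patterns, /*indexOptimizations=*/true);
  // The populated set would then be handed to a pattern driver such as
  // applyPatternsAndFoldGreedily; that wiring is omitted here.
}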