diff --git a/mlir/examples/toy/Ch1/parser/AST.cpp b/mlir/examples/toy/Ch1/parser/AST.cpp
--- a/mlir/examples/toy/Ch1/parser/AST.cpp
+++ b/mlir/examples/toy/Ch1/parser/AST.cpp
@@ -118,7 +118,7 @@
 /// <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch1/toyc.cpp b/mlir/examples/toy/Ch1/toyc.cpp
--- a/mlir/examples/toy/Ch1/toyc.cpp
+++ b/mlir/examples/toy/Ch1/toyc.cpp
@@ -27,7 +27,7 @@
                                           cl::value_desc("filename"));
 namespace {
 enum Action { None, DumpAST };
-}
+} // namespace
 static cl::opt<enum Action> emitAction("emit",
     cl::desc("Select the kind of output desired"),
diff --git a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -371,7 +371,7 @@
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -398,7 +398,7 @@
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch2/parser/AST.cpp b/mlir/examples/toy/Ch2/parser/AST.cpp
--- a/mlir/examples/toy/Ch2/parser/AST.cpp
+++ b/mlir/examples/toy/Ch2/parser/AST.cpp
@@ -118,7 +118,7 @@
 /// <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch2/toyc.cpp b/mlir/examples/toy/Ch2/toyc.cpp
--- a/mlir/examples/toy/Ch2/toyc.cpp
+++ b/mlir/examples/toy/Ch2/toyc.cpp
@@ -38,7 +38,7 @@
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -47,7 +47,7 @@
 namespace {
 enum Action { None, DumpAST, DumpMLIR };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -89,8 +89,8 @@
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
diff --git a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -371,7 +371,7 @@
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -398,7 +398,7 @@
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch3/parser/AST.cpp b/mlir/examples/toy/Ch3/parser/AST.cpp
--- a/mlir/examples/toy/Ch3/parser/AST.cpp
+++ b/mlir/examples/toy/Ch3/parser/AST.cpp
@@ -118,7 +118,7 @@
 /// <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch3/toyc.cpp b/mlir/examples/toy/Ch3/toyc.cpp
--- a/mlir/examples/toy/Ch3/toyc.cpp
+++ b/mlir/examples/toy/Ch3/toyc.cpp
@@ -40,7 +40,7 @@
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -49,7 +49,7 @@
 namespace {
 enum Action { None, DumpAST, DumpMLIR };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -86,8 +86,8 @@
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
diff --git a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -375,7 +375,7 @@
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -402,7 +402,7 @@
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch4/parser/AST.cpp b/mlir/examples/toy/Ch4/parser/AST.cpp
--- a/mlir/examples/toy/Ch4/parser/AST.cpp
+++ b/mlir/examples/toy/Ch4/parser/AST.cpp
@@ -118,7 +118,7 @@
 /// <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch4/toyc.cpp b/mlir/examples/toy/Ch4/toyc.cpp
--- a/mlir/examples/toy/Ch4/toyc.cpp
+++ b/mlir/examples/toy/Ch4/toyc.cpp
@@ -41,7 +41,7 @@
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -50,7 +50,7 @@
 namespace {
 enum Action { None, DumpAST, DumpMLIR };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -87,8 +87,8 @@
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
diff --git a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -375,7 +375,7 @@
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -402,7 +402,7 @@
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch5/parser/AST.cpp b/mlir/examples/toy/Ch5/parser/AST.cpp
--- a/mlir/examples/toy/Ch5/parser/AST.cpp
+++ b/mlir/examples/toy/Ch5/parser/AST.cpp
@@ -118,7 +118,7 @@
 /// <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch5/toyc.cpp b/mlir/examples/toy/Ch5/toyc.cpp
--- a/mlir/examples/toy/Ch5/toyc.cpp
+++ b/mlir/examples/toy/Ch5/toyc.cpp
@@ -43,7 +43,7 @@
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -52,7 +52,7 @@
 namespace {
 enum Action { None, DumpAST, DumpMLIR, DumpMLIRAffine };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -91,8 +91,8 @@
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
      llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
diff --git a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -375,7 +375,7 @@
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -402,7 +402,7 @@
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch6/parser/AST.cpp b/mlir/examples/toy/Ch6/parser/AST.cpp
--- a/mlir/examples/toy/Ch6/parser/AST.cpp
+++ b/mlir/examples/toy/Ch6/parser/AST.cpp
@@ -118,7 +118,7 @@
 /// <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch6/toyc.cpp b/mlir/examples/toy/Ch6/toyc.cpp
--- a/mlir/examples/toy/Ch6/toyc.cpp
+++ b/mlir/examples/toy/Ch6/toyc.cpp
@@ -49,7 +49,7 @@
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -66,7 +66,7 @@
   DumpLLVMIR,
   RunJIT
 };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -110,8 +110,8 @@
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
diff --git a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
@@ -169,14 +169,14 @@
         return nullptr;
       argTypes.push_back(type);
     }
-    auto func_type = builder.getFunctionType(argTypes, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    SymbolTableScopeT var_scope(symbolTable);
+    SymbolTableScopeT varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -286,7 +286,7 @@
       return llvm::None;
     auto structVars = structAST->getVariables();
 
-    auto it = llvm::find_if(structVars, [&](auto &var) {
+    const auto *it = llvm::find_if(structVars, [&](auto &var) {
       return var->getName() == name->getName();
     });
     if (it == structVars.end())
@@ -569,7 +569,7 @@
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -612,7 +612,7 @@
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    SymbolTableScopeT var_scope(symbolTable);
+    SymbolTableScopeT varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch7/parser/AST.cpp b/mlir/examples/toy/Ch7/parser/AST.cpp
--- a/mlir/examples/toy/Ch7/parser/AST.cpp
+++ b/mlir/examples/toy/Ch7/parser/AST.cpp
@@ -121,7 +121,7 @@
 /// <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch7/toyc.cpp b/mlir/examples/toy/Ch7/toyc.cpp
--- a/mlir/examples/toy/Ch7/toyc.cpp
+++ b/mlir/examples/toy/Ch7/toyc.cpp
@@ -49,7 +49,7 @@
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -66,7 +66,7 @@
   DumpLLVMIR,
   RunJIT
 };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -110,8 +110,8 @@
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
diff --git a/mlir/lib/Analysis/SliceAnalysis.cpp b/mlir/lib/Analysis/SliceAnalysis.cpp
--- a/mlir/lib/Analysis/SliceAnalysis.cpp
+++ b/mlir/lib/Analysis/SliceAnalysis.cpp
@@ -168,7 +168,7 @@
 };
 } // namespace
 
-static void DFSPostorder(Operation *root, DFSState *state) {
+static void dfsPostorder(Operation *root, DFSState *state) {
   SmallVector<Operation *> queue(1, root);
   std::vector<Operation *> ops;
   while (!queue.empty()) {
@@ -200,7 +200,7 @@
   DFSState state(toSort);
   for (auto *s : toSort) {
     assert(toSort.count(s) == 1 && "NYI: multi-sets not supported");
-    DFSPostorder(s, &state);
+    dfsPostorder(s, &state);
   }
 
   // Reorder and return.
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -1278,10 +1278,10 @@
 /// Returns the number of surrounding loops common to 'loopsA' and 'loopsB',
 /// where each lists loops from outer-most to inner-most in loop nest.
-unsigned mlir::getNumCommonSurroundingLoops(Operation &A, Operation &B) {
+unsigned mlir::getNumCommonSurroundingLoops(Operation &a, Operation &b) {
   SmallVector<AffineForOp, 4> loopsA, loopsB;
-  getLoopIVs(A, &loopsA);
-  getLoopIVs(B, &loopsB);
+  getLoopIVs(a, &loopsA);
+  getLoopIVs(b, &loopsB);
 
   unsigned minNumLoops = std::min(loopsA.size(), loopsB.size());
   unsigned numCommonLoops = 0;
diff --git a/mlir/lib/Bindings/Python/IRAttributes.cpp b/mlir/lib/Bindings/Python/IRAttributes.cpp
--- a/mlir/lib/Bindings/Python/IRAttributes.cpp
+++ b/mlir/lib/Bindings/Python/IRAttributes.cpp
@@ -17,7 +17,6 @@
 using namespace mlir;
 using namespace mlir::python;
 
-using llvm::None;
 using llvm::Optional;
 using llvm::SmallVector;
 using llvm::Twine;
@@ -510,7 +509,8 @@
     if (mlirTypeIsAF32(elementType)) {
       // f32
       return bufferInfo(shapedType);
-    } else if (mlirTypeIsAF64(elementType)) {
+    }
+    if (mlirTypeIsAF64(elementType)) {
       // f64
       return bufferInfo(shapedType);
     } else if (mlirTypeIsAF16(elementType)) {
@@ -712,12 +712,12 @@
     SmallVector mlirNamedAttributes;
     mlirNamedAttributes.reserve(attributes.size());
     for (auto &it : attributes) {
-      auto &mlir_attr = it.second.cast();
+      auto &mlirAttr = it.second.cast();
       auto name = it.first.cast();
       mlirNamedAttributes.push_back(mlirNamedAttributeGet(
-          mlirIdentifierGet(mlirAttributeGetContext(mlir_attr),
+          mlirIdentifierGet(mlirAttributeGetContext(mlirAttr),
                             toMlirStringRef(name)),
-          mlir_attr));
+          mlirAttr));
     }
     MlirAttribute attr =
         mlirDictionaryAttrGet(context->get(), mlirNamedAttributes.size(),
diff --git a/mlir/lib/Bindings/Python/IRCore.cpp b/mlir/lib/Bindings/Python/IRCore.cpp
--- a/mlir/lib/Bindings/Python/IRCore.cpp
+++ b/mlir/lib/Bindings/Python/IRCore.cpp
@@ -1267,7 +1267,7 @@
     if (segmentSpec == 1 || segmentSpec == 0) {
       // Unpack unary element.
       try {
-        auto operandValue = py::cast(std::get<0>(it.value()));
+        auto *operandValue = py::cast(std::get<0>(it.value()));
         if (operandValue) {
           operands.push_back(operandValue);
           operandSegmentLengths.push_back(1);
@@ -2286,10 +2286,10 @@
       .def_property_readonly(
           "body",
          [](PyModule &self) {
-            PyOperationRef module_op = PyOperation::forOperation(
+            PyOperationRef moduleOp = PyOperation::forOperation(
                 self.getContext(), mlirModuleGetOperation(self.get()),
                 self.getRef().releaseObject());
-            PyBlock returnBlock(module_op, mlirModuleGetBody(self.get()));
+            PyBlock returnBlock(moduleOp, mlirModuleGetBody(self.get()));
             return returnBlock;
           },
          "Return the block for this module")
diff --git a/mlir/lib/Bindings/Python/IRModule.cpp b/mlir/lib/Bindings/Python/IRModule.cpp
--- a/mlir/lib/Bindings/Python/IRModule.cpp
+++ b/mlir/lib/Bindings/Python/IRModule.cpp
@@ -51,9 +51,8 @@
     } catch (py::error_already_set &e) {
       if (e.matches(PyExc_ModuleNotFoundError)) {
         continue;
-      } else {
-        throw;
       }
+      throw;
     }
     break;
   }
@@ -136,11 +135,10 @@
     // Positive cache.
     rawOpViewClassMapCache[operationName] = foundIt->second;
     return foundIt->second;
-  } else {
-    // Negative cache.
-    rawOpViewClassMap[operationName] = py::none();
-    return llvm::None;
   }
+  // Negative cache.
+  rawOpViewClassMap[operationName] = py::none();
+  return llvm::None;
 }
 }
diff --git a/mlir/lib/Bindings/Python/PybindUtils.cpp b/mlir/lib/Bindings/Python/PybindUtils.cpp
--- a/mlir/lib/Bindings/Python/PybindUtils.cpp
+++ b/mlir/lib/Bindings/Python/PybindUtils.cpp
@@ -8,8 +8,6 @@
 
 #include "PybindUtils.h"
 
-namespace py = pybind11;
-
 pybind11::error_already_set
 mlir::python::SetPyError(PyObject *excClass, const llvm::Twine &message) {
   auto messageStr = message.str();
diff --git a/mlir/lib/Bindings/Python/Transforms/Transforms.cpp b/mlir/lib/Bindings/Python/Transforms/Transforms.cpp
--- a/mlir/lib/Bindings/Python/Transforms/Transforms.cpp
+++ b/mlir/lib/Bindings/Python/Transforms/Transforms.cpp
@@ -10,8 +10,6 @@
 
 #include
 
-namespace py = pybind11;
-
 // -----------------------------------------------------------------------------
 // Module initialization.
 // -----------------------------------------------------------------------------
diff --git a/mlir/lib/CAPI/IR/IR.cpp b/mlir/lib/CAPI/IR/IR.cpp
--- a/mlir/lib/CAPI/IR/IR.cpp
+++ b/mlir/lib/CAPI/IR/IR.cpp
@@ -818,7 +818,7 @@
 MlirLogicalResult mlirSymbolTableReplaceAllSymbolUses(MlirStringRef oldSymbol,
                                                       MlirStringRef newSymbol,
                                                       MlirOperation from) {
-  auto cppFrom = unwrap(from);
+  auto *cppFrom = unwrap(from);
   auto *context = cppFrom->getContext();
   auto oldSymbolAttr = StringAttr::get(unwrap(oldSymbol), context);
   auto newSymbolAttr = StringAttr::get(unwrap(newSymbol), context);
diff --git a/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp b/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
--- a/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
@@ -468,10 +468,10 @@
   Value structPtr =
       builder.create(loc, structPtrTy, memRefDescPtr);
-  Type int32_type = typeConverter.convertType(builder.getI32Type());
+  Type int32Type = typeConverter.convertType(builder.getI32Type());
   Value zero =
       createIndexAttrConstant(builder, loc, typeConverter.getIndexType(), 0);
-  Value three = builder.create(loc, int32_type,
+  Value three = builder.create(loc, int32Type,
                                builder.getI32IntegerAttr(3));
   return builder.create(loc, LLVM::LLVMPointerType::get(indexTy),
                         structPtr, ValueRange({zero, three}));
diff --git a/mlir/lib/Conversion/PDLToPDLInterp/RootOrdering.cpp b/mlir/lib/Conversion/PDLToPDLInterp/RootOrdering.cpp
--- a/mlir/lib/Conversion/PDLToPDLInterp/RootOrdering.cpp
+++ b/mlir/lib/Conversion/PDLToPDLInterp/RootOrdering.cpp
@@ -90,8 +90,8 @@
   DenseMap &costs = outer->second;
   Value bestSource;
   std::pair bestCost;
-  auto inner = costs.begin(), inner_e = costs.end();
-  while (inner != inner_e) {
+  auto inner = costs.begin(), innerE = costs.end();
+  while (inner != innerE) {
     Value source = inner->first;
     if (cycleSet.contains(source)) {
       // Going-away edge => get its cost and erase it.
diff --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
--- a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
+++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
@@ -259,8 +259,8 @@
   // from 0 to N with step 1. Therefore, loop induction variables are replaced
   // with (gpu-thread/block-id * S) + LB.
   builder.setInsertionPointToStart(&launchOp.body().front());
-  auto lbArgumentIt = lbs.begin();
-  auto stepArgumentIt = steps.begin();
+  auto *lbArgumentIt = lbs.begin();
+  auto *stepArgumentIt = steps.begin();
   for (auto en : llvm::enumerate(ivs)) {
     Value id =
         en.index() < numBlockDims
@@ -640,7 +640,7 @@
     } else if (op == launchOp.getOperation()) {
      // Found our sentinel value. We have finished the operations from one
      // nesting level, pop one level back up.
-      auto parent = rewriter.getInsertionPoint()->getParentOp();
+      auto *parent = rewriter.getInsertionPoint()->getParentOp();
       rewriter.setInsertionPointAfter(parent);
       leftNestingScope = true;
       seenSideeffects = false;
diff --git a/mlir/lib/Conversion/SCFToStandard/SCFToStandard.cpp b/mlir/lib/Conversion/SCFToStandard/SCFToStandard.cpp
--- a/mlir/lib/Conversion/SCFToStandard/SCFToStandard.cpp
+++ b/mlir/lib/Conversion/SCFToStandard/SCFToStandard.cpp
@@ -455,11 +455,11 @@
   ivs.reserve(parallelOp.getNumLoops());
   bool first = true;
   SmallVector loopResults(iterArgs);
-  for (auto loop_operands :
+  for (auto loopOperands :
        llvm::zip(parallelOp.getInductionVars(), parallelOp.getLowerBound(),
                  parallelOp.getUpperBound(), parallelOp.getStep())) {
     Value iv, lower, upper, step;
-    std::tie(iv, lower, upper, step) = loop_operands;
+    std::tie(iv, lower, upper, step) = loopOperands;
     ForOp forOp = rewriter.create(loc, lower, upper, step, iterArgs);
     ivs.push_back(forOp.getInductionVar());
     auto iterRange = forOp.getRegionIterArgs();
diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
@@ -1390,7 +1390,7 @@
     auto dstType = typeConverter.convertType(op.getType());
     auto scalarType = dstType.cast().getElementType();
     auto componentsArray = components.getValue();
-    auto context = rewriter.getContext();
+    auto *context = rewriter.getContext();
     auto llvmI32Type = IntegerType::get(context, 32);
     Value targetOp = rewriter.create(loc, dstType);
     for (unsigned i = 0; i < componentsArray.size(); i++) {
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -2173,16 +2173,16 @@
       rewriter.create(loc, result);
       return success();
-    } else {
-      y0x0 = rewriter.create(loc, resultElementTy, y0x0);
-      y0x1 = rewriter.create(loc, resultElementTy, y0x1);
-      y1x0 = rewriter.create(loc, resultElementTy, y1x0);
-      y1x1 = rewriter.create(loc, resultElementTy, y1x1);
-
-      if (resultElementTy.getIntOrFloatBitWidth() > 32) {
-        dx = rewriter.create(loc, resultElementTy, dx);
-        dy = rewriter.create(loc, resultElementTy, dy);
-      }
+    }
+    y0x0 = rewriter.create(loc, resultElementTy, y0x0);
+    y0x1 = rewriter.create(loc, resultElementTy, y0x1);
+    y1x0 = rewriter.create(loc, resultElementTy, y1x0);
+    y1x1 = rewriter.create(loc, resultElementTy, y1x1);
+
+    if (resultElementTy.getIntOrFloatBitWidth() > 32) {
+      dx = rewriter.create(loc, resultElementTy, dx);
+      dy = rewriter.create(loc, resultElementTy, dy);
+    }
 
     auto unitVal = rewriter.create(
         loc, rewriter.getIntegerAttr(resultElementTy, 1 << shift));
@@ -2206,7 +2206,6 @@
       rewriter.create(loc, result);
       return success();
-    }
   }
 
   return failure();
diff --git a/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp b/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
--- a/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
@@ -28,9 +28,9 @@
 struct GpuAllReduceRewriter {
   using AccumulatorFactory = std::function;
 
-  GpuAllReduceRewriter(gpu::GPUFuncOp funcOp_, gpu::AllReduceOp reduceOp_,
-                       PatternRewriter &rewriter_)
-      : funcOp(funcOp_), reduceOp(reduceOp_), rewriter(rewriter_),
+  GpuAllReduceRewriter(gpu::GPUFuncOp funcOp, gpu::AllReduceOp reduceOp,
+                       PatternRewriter &rewriter)
+      : funcOp(funcOp), reduceOp(reduceOp), rewriter(rewriter),
         loc(reduceOp.getLoc()), valueType(reduceOp.value().getType()),
         indexType(IndexType::get(reduceOp.getContext())),
         int32Type(IntegerType::get(reduceOp.getContext(), /*width=*/32)) {}
diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
--- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -313,7 +313,7 @@
     // a SymbolTable by the caller. SymbolTable needs to be refactored to
     // prevent manual building of Ops with symbols in code using SymbolTables
    // and then this needs to use the OpBuilder.
-    auto context = getOperation().getContext();
+    auto *context = getOperation().getContext();
     OpBuilder builder(context);
     auto kernelModule = builder.create(kernelFunc.getLoc(),
                                        kernelFunc.getName());
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -266,13 +266,14 @@
   unsigned size = kDefaultPointerSizeBits;
   unsigned abi = kDefaultPointerAlignment;
   auto newType = newEntry.getKey().get().cast();
-  auto it = llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
-    if (auto type = entry.getKey().dyn_cast()) {
-      return type.cast().getAddressSpace() ==
-             newType.getAddressSpace();
-    }
-    return false;
-  });
+  const auto *it =
+      llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
+        if (auto type = entry.getKey().dyn_cast()) {
+          return type.cast().getAddressSpace() ==
+                 newType.getAddressSpace();
+        }
+        return false;
+      });
   if (it == oldLayout.end()) {
     llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
       if (auto type = entry.getKey().dyn_cast()) {
@@ -440,14 +441,15 @@
 namespace {
 enum class StructDLEntryPos { Abi = 0, Preferred = 1 };
-}
+} // namespace
 
 static Optional
 getStructDataLayoutEntry(DataLayoutEntryListRef params, LLVMStructType type,
                          StructDLEntryPos pos) {
-  auto currentEntry = llvm::find_if(params, [](DataLayoutEntryInterface entry) {
-    return entry.isTypeEntry();
-  });
+  const auto *currentEntry =
+      llvm::find_if(params, [](DataLayoutEntryInterface entry) {
+        return entry.isTypeEntry();
+      });
   if (currentEntry == params.end())
     return llvm::None;
@@ -509,7 +511,7 @@
     if (!newEntry.isTypeEntry())
       continue;
-    auto previousEntry =
+    const auto *previousEntry =
         llvm::find_if(oldLayout, [](DataLayoutEntryInterface entry) {
           return entry.isTypeEntry();
         });
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -228,6 +228,7 @@
     return operand;
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__add(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -237,6 +238,7 @@
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__exp(Value x) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(x))
@@ -244,6 +246,7 @@
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__log(Value x) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(x))
@@ -251,6 +254,7 @@
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__sub(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -260,6 +264,7 @@
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__mul(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -269,6 +274,7 @@
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__max(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -278,6 +284,7 @@
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__max_unsigned(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -287,6 +294,7 @@
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__min(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -296,6 +304,7 @@
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__min_unsigned(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -1829,12 +1838,12 @@
     return failure();
 
   // Parse input tensors.
-  SmallVector inputs, input_region_args;
+  SmallVector inputs, inputRegionArgs;
   SmallVector inputTypes;
   if (succeeded(parser.parseOptionalKeyword("ins"))) {
     llvm::SMLoc inputsOperandsLoc = parser.getCurrentLocation();
 
-    if (parser.parseAssignmentListWithTypes(input_region_args, inputs,
+    if (parser.parseAssignmentListWithTypes(inputRegionArgs, inputs,
                                             inputTypes))
       return failure();
@@ -1844,12 +1853,12 @@
   }
 
   // Parse output tensors.
-  SmallVector outputs, output_region_args;
+  SmallVector outputs, outputRegionArgs;
   SmallVector outputTypes;
   if (succeeded(parser.parseOptionalKeyword("outs"))) {
     llvm::SMLoc outputsOperandsLoc = parser.getCurrentLocation();
 
-    if (parser.parseAssignmentListWithTypes(output_region_args, outputs,
+    if (parser.parseAssignmentListWithTypes(outputRegionArgs, outputs,
                                             outputTypes))
       return failure();
@@ -1905,15 +1914,15 @@
   // Parse the body.
   Region *body = result.addRegion();
 
-  SmallVector region_types(ivs.size(), builder.getIndexType());
-  region_types.append(inputTypes);
-  region_types.append(outputTypes);
+  SmallVector regionTypes(ivs.size(), builder.getIndexType());
+  regionTypes.append(inputTypes);
+  regionTypes.append(outputTypes);
 
-  SmallVector region_args(ivs);
-  region_args.append(input_region_args);
-  region_args.append(output_region_args);
+  SmallVector regionArgs(ivs);
+  regionArgs.append(inputRegionArgs);
+  regionArgs.append(outputRegionArgs);
 
-  if (parser.parseRegion(*body, region_args, region_types))
+  if (parser.parseRegion(*body, regionArgs, regionTypes))
     return failure();
 
   // Parse optional attributes.
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp
@@ -127,7 +127,7 @@
     : public ConvertElementwiseToLinalgBase {
   void runOnOperation() final {
-    auto func = getOperation();
+    auto *func = getOperation();
     auto *context = &getContext();
     ConversionTarget target(*context);
     RewritePatternSet patterns(context);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1426,9 +1426,9 @@
 /// Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c}, {n, w, c}}
 /// ```
 /// kw is unrolled, w is unrolled iff dilationW > 1.
-struct Conv1D_NWC_Generator : public StructuredGenerator {
-  Conv1D_NWC_Generator(OpBuilder &builder, LinalgOp linalgOp, int strideW,
-                       int dilationW)
+struct Conv1DNwcGenerator : public StructuredGenerator {
+  Conv1DNwcGenerator(OpBuilder &builder, LinalgOp linalgOp, int strideW,
+                     int dilationW)
       : StructuredGenerator(builder, linalgOp), valid(false),
         strideW(strideW), dilationW(dilationW) {
     // Determine whether `linalgOp` can be generated with this generator
@@ -1594,7 +1594,7 @@
   /// ```
   /// kw is always unrolled.
   /// TODO: w (resp. kw) is unrolled when the strideW ( resp. dilationW) is > 1.
-  FailureOr dilated_conv() {
+  FailureOr dilatedConv() {
     if (!valid)
       return failure();
@@ -1730,7 +1730,7 @@
     if (layout({/*lhsIndex*/ {n, strideW * w + dilationW * kw, c},
                 /*rhsIndex*/ {kw, c},
                 /*resIndex*/ {n, w, c}}))
-      return dilated_conv();
+      return dilatedConv();
     return failure();
   }
@@ -1752,7 +1752,7 @@
   auto stride = strides ? *strides.getValues().begin() : 1;
   auto dilation = dilations ? *dilations.getValues().begin() : 1;
   LinalgOp linalgOp = cast(convOp.getOperation());
-  Conv1D_NWC_Generator e(b, linalgOp, stride, dilation);
+  Conv1DNwcGenerator e(b, linalgOp, stride, dilation);
   auto res = e.generateConv();
   if (succeeded(res))
     return res;
diff --git a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
--- a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
@@ -195,7 +195,7 @@
 // Decomposes given floating point value `arg` into a normalized fraction and
 // an integral power of two (see std::frexp). Returned values have float type.
 static std::pair frexp(ImplicitLocOpBuilder &builder, Value arg,
-                       bool is_positive = false) {
+                       bool isPositive = false) {
   assert(getElementTypeOrSelf(arg).isF32() && "arg must be f32 type");
   ArrayRef shape = vectorShape(arg);
@@ -222,7 +222,7 @@
   Value normalizedFraction = builder.create(f32Vec, tmp1);
 
   // Compute exponent.
-  Value arg0 = is_positive ? arg : builder.create(arg);
+  Value arg0 = isPositive ? arg : builder.create(arg);
   Value biasedExponentBits = builder.create(
       builder.create(i32Vec, arg0),
       bcast(i32Cst(builder, 23)));
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -375,13 +375,13 @@
 /// Print Reduction clause
 static void printReductionVarList(OpAsmPrinter &p,
                                   Optional reductions,
-                                  OperandRange reduction_vars) {
+                                  OperandRange reductionVars) {
   p << "reduction(";
   for (unsigned i = 0, e = reductions->size(); i < e; ++i) {
     if (i != 0)
       p << ", ";
-    p << (*reductions)[i] << " -> " << reduction_vars[i] << " : "
-      << reduction_vars[i].getType();
+    p << (*reductions)[i] << " -> " << reductionVars[i] << " : "
+      << reductionVars[i].getType();
   }
   p << ") ";
 }
@@ -389,9 +389,9 @@
 /// Verifies Reduction Clause
 static LogicalResult
 verifyReductionVarList(Operation *op, Optional reductions,
-                       OperandRange reduction_vars) {
-  if (reduction_vars.size() != 0) {
-    if (!reductions || reductions->size() != reduction_vars.size())
+                       OperandRange reductionVars) {
+  if (reductionVars.size() != 0) {
+    if (!reductions || reductions->size() != reductionVars.size())
       return op->emitOpError()
              << "expected as many reduction symbol references "
                "as reduction variables";
@@ -402,7 +402,7 @@
   }
 
   DenseSet accumulators;
-  for (auto args : llvm::zip(reduction_vars, *reductions)) {
+  for (auto args : llvm::zip(reductionVars, *reductions)) {
     Value accum = std::get<0>(args);
 
     if (!accumulators.insert(accum).second)
diff --git a/mlir/lib/Dialect/PDL/IR/PDL.cpp b/mlir/lib/Dialect/PDL/IR/PDL.cpp
--- a/mlir/lib/Dialect/PDL/IR/PDL.cpp
+++ b/mlir/lib/Dialect/PDL/IR/PDL.cpp
@@ -271,8 +271,8 @@
 static LogicalResult verify(PatternOp pattern) {
   Region &body = pattern.body();
   Operation *term = body.front().getTerminator();
-  auto rewrite_op = dyn_cast(term);
-  if (!rewrite_op) {
+  auto rewriteOp = dyn_cast(term);
+  if (!rewriteOp) {
     return pattern.emitOpError("expected body to terminate with `pdl.rewrite`")
         .attachNote(term->getLoc())
         .append("see terminator defined here");
diff --git a/mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp b/mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp
--- a/mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp
+++ b/mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp
@@ -74,9 +74,9 @@
   build(builder, state, range, successor);
   if (initLoop) {
     // Create the block and the loop variable.
-    auto range_type = range.getType().cast();
+    auto rangeType = range.getType().cast();
     state.regions.front()->emplaceBlock();
-    state.regions.front()->addArgument(range_type.getElementType());
+    state.regions.front()->addArgument(rangeType.getElementType());
   }
 }
diff --git a/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp b/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
--- a/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
+++ b/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
@@ -104,11 +104,13 @@
   if (candidateType == getStorageType()) {
     // i.e. i32 -> quant<"uniform[i8:f32]{1.0}">
     return *this;
-  } else if (candidateType.isa()) {
+  }
+  if (candidateType.isa()) {
     // i.e. tensor<4xi8> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
     return RankedTensorType::get(
         candidateType.cast().getShape(), getStorageType());
-  } else if (candidateType.isa()) {
+  }
+  if (candidateType.isa()) {
     // i.e. tensor -> tensor>
     return UnrankedTensorType::get(getStorageType());
   } else if (candidateType.isa()) {
@@ -124,7 +126,8 @@
   if (quantizedType.isa()) {
     // i.e. quant<"uniform[i8:f32]{1.0}"> -> i8
     return quantizedType.cast().getStorageType();
-  } else if (quantizedType.isa()) {
+  }
+  if (quantizedType.isa()) {
     // i.e. tensor<4xi8> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
     ShapedType sType = quantizedType.cast();
     if (!sType.getElementType().isa()) {
@@ -134,7 +137,8 @@
         sType.getElementType().cast().getStorageType();
     if (quantizedType.isa()) {
       return RankedTensorType::get(sType.getShape(), storageType);
-    } else if (quantizedType.isa()) {
+    }
+    if (quantizedType.isa()) {
       return UnrankedTensorType::get(storageType);
     } else if (quantizedType.isa()) {
       return VectorType::get(sType.getShape(), storageType);
@@ -148,7 +152,8 @@
   if (candidateType == getExpressedType()) {
     // i.e. f32 -> quant<"uniform[i8:f32]{1.0}">
     return *this;
-  } else if (candidateType.isa()) {
+  }
+  if (candidateType.isa()) {
     ShapedType candidateShapedType = candidateType.cast();
     if (candidateShapedType.getElementType() != getExpressedType()) {
       return nullptr;
@@ -157,7 +162,8 @@
     if (candidateType.isa()) {
       // i.e. tensor<4xf32> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
       return RankedTensorType::get(candidateShapedType.getShape(), *this);
-    } else if (candidateType.isa()) {
+    }
+    if (candidateType.isa()) {
       // i.e. tensor -> tensor>
       return UnrankedTensorType::get(*this);
     } else if (candidateType.isa()) {
@@ -173,7 +179,8 @@
   if (quantizedType.isa()) {
     // i.e. quant<"uniform[i8:f32]{1.0}"> -> f32
     return quantizedType.cast().getExpressedType();
-  } else if (quantizedType.isa()) {
+  }
+  if (quantizedType.isa()) {
     // i.e. tensor<4xi8> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
     ShapedType sType = quantizedType.cast();
     if (!sType.getElementType().isa()) {
@@ -183,7 +190,8 @@
         sType.getElementType().cast().getExpressedType();
     if (quantizedType.isa()) {
       return RankedTensorType::get(sType.getShape(), expressedType);
-    } else if (quantizedType.isa()) {
+    }
+    if (quantizedType.isa()) {
       return UnrankedTensorType::get(expressedType);
     } else if (quantizedType.isa()) {
       return VectorType::get(sType.getShape(), expressedType);
diff --git a/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp b/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
--- a/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
+++ b/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
@@ -126,7 +126,7 @@
   bool hadFailure = false;
   auto func = getFunction();
   RewritePatternSet patterns(func.getContext());
-  auto ctx = func.getContext();
+  auto *ctx = func.getContext();
   patterns.add(
       ctx, &hadFailure);
   (void)applyPatternsAndFoldGreedily(func, std::move(patterns));
diff --git a/mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp b/mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp
--- a/mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp
+++ b/mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp
@@ -140,10 +140,10 @@
     Location loc, unsigned numBits, int32_t quantizedDimension,
     ArrayRef rmins, ArrayRef rmaxs, bool narrowRange,
     Type expressedType, bool isSigned) {
-  size_t axis_size = rmins.size();
-  if (axis_size != rmaxs.size()) {
+  size_t axisSize = rmins.size();
+  if (axisSize != rmaxs.size()) {
     return (emitError(loc, "mismatched per-axis min and max size: ")
-                << axis_size << " vs. " << rmaxs.size(),
+                << axisSize << " vs. " << rmaxs.size(),
" << rmaxs.size(), nullptr); } @@ -159,9 +159,9 @@ SmallVector scales; SmallVector zeroPoints; - scales.reserve(axis_size); - zeroPoints.reserve(axis_size); - for (size_t axis = 0; axis != axis_size; ++axis) { + scales.reserve(axisSize); + zeroPoints.reserve(axisSize); + for (size_t axis = 0; axis != axisSize; ++axis) { double rmin = rmins[axis]; double rmax = rmaxs[axis]; if (std::fabs(rmax - rmin) < std::numeric_limits::epsilon()) { diff --git a/mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp b/mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp --- a/mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp +++ b/mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp @@ -106,17 +106,17 @@ realValue.cast(), quantizedElementType, converter); outConvertedType = converted.getType(); return converted; - } else if (realValue.isa()) { + } + if (realValue.isa()) { // Sparse tensor or vector constant. auto converted = convertSparseElementsAttr( realValue.cast(), quantizedElementType, converter); outConvertedType = converted.getType(); return converted; - } else { - // Nothing else matched: try to convert a primitive. - return convertPrimitiveValueAttr(realValue, quantizedElementType, converter, - outConvertedType); } + // Nothing else matched: try to convert a primitive. + return convertPrimitiveValueAttr(realValue, quantizedElementType, converter, + outConvertedType); } /// Convert an attribute from a type based on @@ -132,9 +132,9 @@ UniformQuantizedValueConverter converter(uniformQuantized); return quantizeAttrUniform(realValue, uniformQuantized, converter, outConvertedType); - - } else if (auto uniformQuantizedPerAxis = - quantizedElementType.dyn_cast()) { + } + if (auto uniformQuantizedPerAxis = + quantizedElementType.dyn_cast()) { UniformQuantizedPerAxisValueConverter converter(uniformQuantizedPerAxis); auto converted = converter.convert(realValue); // TODO: why we need this outConvertedType? remove it? @@ -142,7 +142,6 @@ outConvertedType = converted.getType(); } return converted; - } else { - return nullptr; } + return nullptr; } diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp @@ -74,7 +74,7 @@ namespace { #include "SPIRVCanonicalization.inc" -} +} // namespace //===----------------------------------------------------------------------===// // spv.AccessChainOp diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp @@ -3250,13 +3250,13 @@ return success(); } -static void print(spirv::CooperativeMatrixLoadNVOp M, OpAsmPrinter &printer) { - printer << " " << M.pointer() << ", " << M.stride() << ", " - << M.columnmajor(); +static void print(spirv::CooperativeMatrixLoadNVOp m, OpAsmPrinter &printer) { + printer << " " << m.pointer() << ", " << m.stride() << ", " + << m.columnmajor(); // Print optional memory access attribute. 
-  if (auto memAccess = M.memory_access())
+  if (auto memAccess = m.memory_access())
     printer << " [\"" << stringifyMemoryAccess(*memAccess) << "\"]";
-  printer << " : " << M.pointer().getType() << " as " << M.getType();
+  printer << " : " << m.pointer().getType() << " as " << m.getType();
 }
 
 static LogicalResult verifyPointerAndCoopMatrixType(Operation *op, Type pointer,
diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -31,7 +31,7 @@
 namespace {
 #include "ShapeCanonicalization.inc"
-}
+} // namespace
 
 RankedTensorType shape::getExtentTensorType(MLIRContext *ctx, int64_t rank) {
   return RankedTensorType::get({rank}, IndexType::get(ctx));
@@ -50,7 +50,8 @@
       return failure();
     shapeValues = llvm::to_vector<6>(type.getShape());
     return success();
-  } else if (auto inputOp = input.getDefiningOp()) {
+  }
+  if (auto inputOp = input.getDefiningOp()) {
     shapeValues = llvm::to_vector<6>(inputOp.getShape().getValues());
     return success();
   } else if (auto inputOp = input.getDefiningOp()) {
diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -540,7 +540,8 @@
       rewriter.replaceOpWithNewOp(condbr, condbr.getTrueDest(),
                                   condbr.getTrueOperands());
       return success();
-    } else if (matchPattern(condbr.getCondition(), m_Zero())) {
+    }
+    if (matchPattern(condbr.getCondition(), m_Zero())) {
      // False branch taken.
       rewriter.replaceOpWithNewOp(condbr, condbr.getFalseDest(),
                                   condbr.getFalseOperands());
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
@@ -152,11 +152,11 @@
   reifyResultShapes(Operation *op, OpBuilder &b,
                     ReifiedRankedShapedTypeDims &reifiedReturnShapes) const {
     auto loc = op->getLoc();
-    auto reshape_op = cast(op);
-    auto result_shape = getReshapeOutputShapeFromInputShape(
-        b, loc, reshape_op.src(), reshape_op.getResultType().getShape(),
-        reshape_op.getReassociationMaps());
-    reifiedReturnShapes.push_back(getAsValues(b, loc, result_shape));
+    auto reshapeOp = cast(op);
+    auto resultShape = getReshapeOutputShapeFromInputShape(
+        b, loc, reshapeOp.src(), reshapeOp.getResultType().getShape(),
+        reshapeOp.getReassociationMaps());
+    reifiedReturnShapes.push_back(getAsValues(b, loc, resultShape));
     return success();
   }
 };
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -634,7 +634,7 @@
 // ReshapeOp
 //===----------------------------------------------------------------------===//
 
-static int64_t GetNumElements(ShapedType type) {
+static int64_t getNumElements(ShapedType type) {
   int64_t numElements = 1;
   for (auto dim : type.getShape())
     numElements *= dim;
@@ -657,7 +657,7 @@
   if (resultRankedType) {
     if (operandRankedType && resultRankedType.hasStaticShape() &&
         operandRankedType.hasStaticShape()) {
-      if (GetNumElements(operandRankedType) != GetNumElements(resultRankedType))
+      if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
         return op.emitOpError("source and destination tensor should have the "
                               "same number of elements");
     }
diff --git a/mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp
--- a/mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp
@@ -97,9 +97,9 @@
     // Traverse all `elements` and create `memref.store` ops.
     ImplicitLocOpBuilder b(loc, rewriter);
-    auto element_it = adaptor.elements().begin();
+    auto elementIt = adaptor.elements().begin();
     SmallVector indices(tensorType.getRank(), constants[0]);
-    CreateStores(/*dim=*/0, buffer, shape, constants, element_it, indices, b);
+    createStores(/*dim=*/0, buffer, shape, constants, elementIt, indices, b);
     rewriter.replaceOp(op, {buffer});
 
     return success();
@@ -108,21 +108,21 @@
 private:
   // Implements backtracking to traverse indices of the output buffer while
   // iterating over op.elements().
-  void CreateStores(int dim, Value buffer, ArrayRef shape,
-                    ArrayRef constants, ValueRange::iterator &element_it,
-                    SmallVectorImpl &indices, ImplicitLocOpBuilder b) const {
+  void createStores(int dim, Value buffer, ArrayRef shape,
+                    ArrayRef constants, ValueRange::iterator &elementIt,
+                    SmallVectorImpl &indices, ImplicitLocOpBuilder b) const {
     if (dim == static_cast(shape.size()) - 1) {
       for (int i = 0; i < shape.back(); ++i) {
         indices.back() = constants[i];
-        b.create(*element_it, buffer, indices);
-        ++element_it;
+        b.create(*elementIt, buffer, indices);
+        ++elementIt;
       }
       return;
     }
     for (int i = 0; i < shape[dim]; ++i) {
       indices[dim] = constants[i];
-      CreateStores(dim + 1, buffer, shape, constants, element_it, indices, b);
+      createStores(dim + 1, buffer, shape, constants, elementIt, indices, b);
     }
   }
 };
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -771,8 +771,8 @@
                                OperationState &result, Type outputType,
                                Value input, Value paddings,
-                               Value pad_const) {
-  result.addOperands({input, paddings, pad_const});
+                               Value padConst) {
+  result.addOperands({input, paddings, padConst});
   auto quantAttr = buildPadOpQuantizationAttr(builder, input);
   if (quantAttr)
     result.addAttribute("quantization_info", quantAttr);
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -33,9 +33,9 @@
 }
 
 template <typename TosaOp, typename... Args>
-TosaOp CreateOpAndInfer(PatternRewriter &rewriter, Location loc, Type result_ty,
+TosaOp createOpAndInfer(PatternRewriter &rewriter, Location loc, Type resultTy,
                         Args &&...args) {
-  auto op = rewriter.create<TosaOp>(loc, result_ty, args...);
+  auto op = rewriter.create<TosaOp>(loc, resultTy, args...);
 
   InferShapedTypeOpInterface shapeInterface =
       dyn_cast(op.getOperation());
@@ -57,12 +57,12 @@
   auto result = op->getResult(0);
   auto predictedShape = returnedShapes[0];
   auto currentKnowledge =
-      mlir::tosa::ValueKnowledge::getKnowledgeFromType(result_ty);
+      mlir::tosa::ValueKnowledge::getKnowledgeFromType(resultTy);
 
   // Compute the knowledge based on the inferred type.
   auto inferredKnowledge =
       mlir::tosa::ValueKnowledge::getPessimisticValueState();
-  inferredKnowledge.dtype = result_ty.cast().getElementType();
+  inferredKnowledge.dtype = resultTy.cast().getElementType();
   inferredKnowledge.hasRank = predictedShape.hasRank();
   if (predictedShape.hasRank()) {
     for (auto dim : predictedShape.getDims()) {
@@ -73,8 +73,8 @@
   // Compute the new type based on the joined version.
   auto newKnowledge =
       mlir::tosa::ValueKnowledge::join(currentKnowledge, inferredKnowledge);
-  auto new_ty = newKnowledge.getType();
-  result.setType(new_ty);
+  auto newTy = newKnowledge.getType();
+  result.setType(newTy);
   return op;
 }

@@ -205,19 +205,19 @@
       weightWidth % stride[1] ? stride[1] - weightWidth % stride[1] : 0;
   DenseElementsAttr weightPaddingAttr = DenseIntElementsAttr::get(
       RankedTensorType::get({4, 2}, rewriter.getI32Type()), weightPadding);
-  Value weightPaddingVal = CreateOpAndInfer(
+  Value weightPaddingVal = createOpAndInfer(
       rewriter, loc, weightPaddingAttr.getType(), weightPaddingAttr);

   if (op.quantization_info().hasValue()) {
     auto quantInfo = op.quantization_info().getValue();
-    weight = CreateOpAndInfer(
+    weight = createOpAndInfer(
         rewriter, loc, UnrankedTensorType::get(weightETy), weight,
         weightPaddingVal, nullptr,
         PadOpQuantizationAttr::get(quantInfo.weight_zp(),
                                    rewriter.getContext()));

   } else {
-    weight = CreateOpAndInfer(rewriter, loc,
+    weight = createOpAndInfer(rewriter, loc,
                               UnrankedTensorType::get(weightETy), weight,
                               weightPaddingVal);
   }
@@ -231,7 +231,7 @@
       outputChannels, weightHeight / stride[0], stride[0],
       weightWidth / stride[1], stride[1], inputChannels};
-  weight = CreateOpAndInfer(
+  weight = createOpAndInfer(
       rewriter, loc, UnrankedTensorType::get(weightETy), weight,
       rewriter.getI64ArrayAttr(weightReshapeDims0));

@@ -240,7 +240,7 @@
       loc, RankedTensorType::get({6}, rewriter.getI32Type()),
       rewriter.getI32TensorAttr({2, 4, 0, 1, 3, 5}));

-  weight = CreateOpAndInfer(
+  weight = createOpAndInfer(
       rewriter, loc, UnrankedTensorType::get(weightETy), weight,
       transposeWeightVal);

@@ -248,15 +248,15 @@
   llvm::SmallVector weightReshapeDims1 = {
       outputChannels * stride[0] * stride[1], weightHeight / stride[0],
       weightWidth / stride[1], inputChannels};
-  weight = CreateOpAndInfer(
+  weight = createOpAndInfer(
       rewriter, loc, UnrankedTensorType::get(weightETy), weight,
       rewriter.getI64ArrayAttr(weightReshapeDims1));
   ShapedType restridedWeightTy = weight.getType().cast();

-  weight = CreateOpAndInfer(
+  weight = createOpAndInfer(
       rewriter, loc, UnrankedTensorType::get(weightETy), weight,
       rewriter.getI64IntegerAttr(1));
-  weight = CreateOpAndInfer(
+  weight = createOpAndInfer(
       rewriter, loc, UnrankedTensorType::get(weightETy), weight,
       rewriter.getI64IntegerAttr(2));

@@ -270,18 +270,18 @@
   DenseElementsAttr inputPaddingAttr = DenseIntElementsAttr::get(
       RankedTensorType::get({4, 2}, rewriter.getI32Type()), inputPadding);

-  Value inputPaddingVal = CreateOpAndInfer(
+  Value inputPaddingVal = createOpAndInfer(
       rewriter, loc, inputPaddingAttr.getType(), inputPaddingAttr);

   if (op.quantization_info().hasValue()) {
     auto quantInfo = op.quantization_info().getValue();
-    input = CreateOpAndInfer(
+    input = createOpAndInfer(
         rewriter, loc, UnrankedTensorType::get(inputETy), input,
         inputPaddingVal, nullptr,
         PadOpQuantizationAttr::get(quantInfo.input_zp(),
                                    rewriter.getContext()));
   } else {
-    input = CreateOpAndInfer(rewriter, loc,
+    input = createOpAndInfer(rewriter, loc,
                              UnrankedTensorType::get(inputETy), input,
                              inputPaddingVal);
   }
@@ -299,7 +299,7 @@
   // Perform the convolution using the zero bias.
   Value conv2d;
   if (op.quantization_info().hasValue()) {
-    conv2d = CreateOpAndInfer(
+    conv2d = createOpAndInfer(
                  rewriter, loc, UnrankedTensorType::get(resultETy), input,
                  weight, zeroBias,
                  /*pad=*/rewriter.getI64ArrayAttr({0, 0, 0, 0}),
@@ -308,7 +308,7 @@
                  op.quantization_info().getValue())
                  .getResult();
   } else {
-    conv2d = CreateOpAndInfer(
+    conv2d = createOpAndInfer(
                  rewriter, loc, UnrankedTensorType::get(resultETy), input,
                  weight, zeroBias,
                  /*pad=*/rewriter.getI64ArrayAttr({0, 0, 0, 0}),
@@ -327,7 +327,7 @@
   // Factor striding out of the convolution result.
   llvm::SmallVector convReshapeDims0 = {
       batch, convHeight, convWidth, stride[0], stride[1], outputChannels};
-  conv2d = CreateOpAndInfer(
+  conv2d = createOpAndInfer(
       rewriter, loc, UnrankedTensorType::get(resultETy), conv2d,
       rewriter.getI64ArrayAttr(convReshapeDims0));

@@ -336,14 +336,14 @@
       loc, RankedTensorType::get({6}, rewriter.getI32Type()),
       rewriter.getI32TensorAttr({0, 1, 3, 2, 4, 5}));

-  conv2d = CreateOpAndInfer(
+  conv2d = createOpAndInfer(
       rewriter, loc, UnrankedTensorType::get(convETy), conv2d,
       transposeConvVal);

   // Fuse striding behavior back into width / height.
   llvm::SmallVector convReshapeDims1 = {
       batch, convHeight * stride[0], convWidth * stride[1], outputChannels};
-  conv2d = CreateOpAndInfer(
+  conv2d = createOpAndInfer(
       rewriter, loc, UnrankedTensorType::get(resultETy), conv2d,
       rewriter.getI64ArrayAttr(convReshapeDims1));

@@ -354,14 +354,14 @@
   sliceBegin[1] = pad[0];
   sliceBegin[2] = pad[1];

-  auto slice = CreateOpAndInfer(
+  auto slice = createOpAndInfer(
                    rewriter, loc, UnrankedTensorType::get(resultETy), conv2d,
                    rewriter.getI64ArrayAttr(sliceBegin),
                    rewriter.getI64ArrayAttr(resultTy.getShape()))
                    .getResult();

   auto addBias =
-      CreateOpAndInfer(rewriter, loc, op.getType(), slice, bias);
+      createOpAndInfer(rewriter, loc, op.getType(), slice, bias);

   rewriter.replaceOp(op, addBias.getResult());
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
@@ -223,7 +223,7 @@
     // Check whether this use case is replaceable. We define an op as
     // being replaceable if it is used by a ReturnOp or a TosaOp.
     bool replaceable = true;
-    for (auto user : result.getUsers()) {
+    for (auto *user : result.getUsers()) {
       if (isa(user))
         continue;
       if (user->getDialect()->getNamespace() ==
diff --git a/mlir/lib/Dialect/Vector/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
--- a/mlir/lib/Dialect/Vector/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
@@ -1179,7 +1179,7 @@
     return builder.create(loc, v, perm);
   }

-  Value outer_prod(Value lhs, Value rhs, Value res, int reductionSize) {
+  Value outerProd(Value lhs, Value rhs, Value res, int reductionSize) {
     assert(reductionSize > 0);
     for (int64_t k = 0; k < reductionSize; ++k) {
       Value a = builder.create(loc, lhs, k);
@@ -1199,31 +1199,31 @@
     bindDims(builder.getContext(), m, n, k);
     // Classical row-major matmul: Just permute the lhs.
     if (layout({{m, k}, {k, n}, {m, n}}))
-      return outer_prod(t(lhs), rhs, res, lhsType.getDimSize(1));
+      return outerProd(t(lhs), rhs, res, lhsType.getDimSize(1));
     // TODO: may be better to fail and use some vector -> scalar reduction.
     if (layout({{m, k}, {n, k}, {m, n}})) {
       Value tlhs = t(lhs);
-      return outer_prod(tlhs, t(rhs), res, lhsType.getDimSize(1));
+      return outerProd(tlhs, t(rhs), res, lhsType.getDimSize(1));
     }
     // No need to permute anything.
     if (layout({{k, m}, {k, n}, {m, n}}))
-      return outer_prod(lhs, rhs, res, lhsType.getDimSize(0));
+      return outerProd(lhs, rhs, res, lhsType.getDimSize(0));
     // Just permute the rhs.
     if (layout({{k, m}, {n, k}, {m, n}}))
-      return outer_prod(lhs, t(rhs), res, lhsType.getDimSize(0));
+      return outerProd(lhs, t(rhs), res, lhsType.getDimSize(0));
     // Transposed output: swap RHS and LHS.
     // Classical row-major matmul: permute the lhs.
     if (layout({{m, k}, {k, n}, {n, m}}))
-      return outer_prod(rhs, t(lhs), res, lhsType.getDimSize(1));
+      return outerProd(rhs, t(lhs), res, lhsType.getDimSize(1));
     // TODO: may be better to fail and use some vector -> scalar reduction.
     if (layout({{m, k}, {n, k}, {n, m}})) {
       Value trhs = t(rhs);
-      return outer_prod(trhs, t(lhs), res, lhsType.getDimSize(1));
+      return outerProd(trhs, t(lhs), res, lhsType.getDimSize(1));
     }
     if (layout({{k, m}, {k, n}, {n, m}}))
-      return outer_prod(rhs, lhs, res, lhsType.getDimSize(0));
+      return outerProd(rhs, lhs, res, lhsType.getDimSize(0));
     if (layout({{k, m}, {n, k}, {n, m}}))
-      return outer_prod(t(rhs), lhs, res, lhsType.getDimSize(0));
+      return outerProd(t(rhs), lhs, res, lhsType.getDimSize(0));
     return failure();
   }

@@ -1236,16 +1236,16 @@
     // Case mat-vec: transpose.
     if (layout({{m, k}, {k}, {m}}))
-      return outer_prod(t(lhs), rhs, res, lhsType.getDimSize(1));
+      return outerProd(t(lhs), rhs, res, lhsType.getDimSize(1));
     // Case mat-trans-vec: ready to go.
     if (layout({{k, m}, {k}, {m}}))
-      return outer_prod(lhs, rhs, res, lhsType.getDimSize(0));
+      return outerProd(lhs, rhs, res, lhsType.getDimSize(0));
     // Case vec-mat: swap and transpose.
     if (layout({{k}, {m, k}, {m}}))
-      return outer_prod(t(rhs), lhs, res, lhsType.getDimSize(0));
+      return outerProd(t(rhs), lhs, res, lhsType.getDimSize(0));
     // Case vec-mat-trans: swap and ready to go.
     if (layout({{k}, {k, m}, {m}}))
-      return outer_prod(rhs, lhs, res, lhsType.getDimSize(0));
+      return outerProd(rhs, lhs, res, lhsType.getDimSize(0));
     return failure();
   }

@@ -1260,16 +1260,16 @@
     // Case mat-vec: transpose.
     if (layout({{m, k}, {k}, {m}}))
-      return outer_prod(t(lhs), rhs, res, lhsType.getDimSize(1));
+      return outerProd(t(lhs), rhs, res, lhsType.getDimSize(1));
     // Case mat-trans-vec: ready to go.
     if (layout({{k, m}, {k}, {m}}))
-      return outer_prod(lhs, rhs, res, lhsType.getDimSize(0));
+      return outerProd(lhs, rhs, res, lhsType.getDimSize(0));
     // Case vec-mat: swap and transpose.
     if (layout({{k}, {m, k}, {m}}))
-      return outer_prod(t(rhs), lhs, res, lhsType.getDimSize(0));
+      return outerProd(t(rhs), lhs, res, lhsType.getDimSize(0));
     // Case vec-mat-trans: swap and ready to go.
     if (layout({{k}, {k, m}, {m}}))
-      return outer_prod(rhs, lhs, res, lhsType.getDimSize(0));
+      return outerProd(rhs, lhs, res, lhsType.getDimSize(0));
     return failure();
   }

diff --git a/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp b/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
--- a/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
+++ b/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
@@ -31,8 +31,9 @@
     ImplicitLocOpBuilder &b, Value v1, Value v2, uint8_t mask) {
   auto asmDialectAttr =
       LLVM::AsmDialectAttr::get(b.getContext(), LLVM::AsmDialect::AD_Intel);
-  auto asmTp = "vblendps $0, $1, $2, {0}";
-  auto asmCstr = "=x,x,x"; // Careful: constraint parser is very brittle: no ws!
+  const auto *asmTp = "vblendps $0, $1, $2, {0}";
+  const auto *asmCstr =
+      "=x,x,x"; // Careful: constraint parser is very brittle: no ws!
   SmallVector asmVals{v1, v2};
   auto asmStr =
       llvm::formatv(asmTp, llvm::format_hex(mask, /*width=*/2)).str();
   auto asmOp = b.create(
@@ -116,18 +117,18 @@
          "expects all types to be vector<8xf32>");
 #endif

-  Value T0 = mm256UnpackLoPs(ib, vs[0], vs[1]);
-  Value T1 = mm256UnpackHiPs(ib, vs[0], vs[1]);
-  Value T2 = mm256UnpackLoPs(ib, vs[2], vs[3]);
-  Value T3 = mm256UnpackHiPs(ib, vs[2], vs[3]);
-  Value S0 = mm256ShufflePs(ib, T0, T2, MaskHelper::shuffle<1, 0, 1, 0>());
-  Value S1 = mm256ShufflePs(ib, T0, T2, MaskHelper::shuffle<3, 2, 3, 2>());
-  Value S2 = mm256ShufflePs(ib, T1, T3, MaskHelper::shuffle<1, 0, 1, 0>());
-  Value S3 = mm256ShufflePs(ib, T1, T3, MaskHelper::shuffle<3, 2, 3, 2>());
-  vs[0] = mm256Permute2f128Ps(ib, S0, S1, MaskHelper::permute<2, 0>());
-  vs[1] = mm256Permute2f128Ps(ib, S2, S3, MaskHelper::permute<2, 0>());
-  vs[2] = mm256Permute2f128Ps(ib, S0, S1, MaskHelper::permute<3, 1>());
-  vs[3] = mm256Permute2f128Ps(ib, S2, S3, MaskHelper::permute<3, 1>());
+  Value t0 = mm256UnpackLoPs(ib, vs[0], vs[1]);
+  Value t1 = mm256UnpackHiPs(ib, vs[0], vs[1]);
+  Value t2 = mm256UnpackLoPs(ib, vs[2], vs[3]);
+  Value t3 = mm256UnpackHiPs(ib, vs[2], vs[3]);
+  Value s0 = mm256ShufflePs(ib, t0, t2, MaskHelper::shuffle<1, 0, 1, 0>());
+  Value s1 = mm256ShufflePs(ib, t0, t2, MaskHelper::shuffle<3, 2, 3, 2>());
+  Value s2 = mm256ShufflePs(ib, t1, t3, MaskHelper::shuffle<1, 0, 1, 0>());
+  Value s3 = mm256ShufflePs(ib, t1, t3, MaskHelper::shuffle<3, 2, 3, 2>());
+  vs[0] = mm256Permute2f128Ps(ib, s0, s1, MaskHelper::permute<2, 0>());
+  vs[1] = mm256Permute2f128Ps(ib, s2, s3, MaskHelper::permute<2, 0>());
+  vs[2] = mm256Permute2f128Ps(ib, s0, s1, MaskHelper::permute<3, 1>());
+  vs[3] = mm256Permute2f128Ps(ib, s2, s3, MaskHelper::permute<3, 1>());
 }

 /// AVX2 8x8xf32-specific transpose lowering using a "C intrinsics" model.
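For orientation between these two hunks: the 4-row helper above is the standard AVX 32-bit transpose step, where unpack interleaves adjacent rows, shuffle regroups 64-bit pairs, and permute2f128 swaps 128-bit lanes. A minimal sketch of the same sequence written against the raw intrinsics follows; the function name and in-place convention are assumptions for illustration, not code from this patch.

    #include <immintrin.h>

    // Illustrative sketch only: mirrors the mm256UnpackLoPs/mm256ShufflePs/
    // mm256Permute2f128Ps sequence above with raw AVX intrinsics.
    static void transpose4x8f32(__m256 &v0, __m256 &v1, __m256 &v2, __m256 &v3) {
      __m256 t0 = _mm256_unpacklo_ps(v0, v1); // interleave rows 0/1
      __m256 t1 = _mm256_unpackhi_ps(v0, v1);
      __m256 t2 = _mm256_unpacklo_ps(v2, v3); // interleave rows 2/3
      __m256 t3 = _mm256_unpackhi_ps(v2, v3);
      __m256 s0 = _mm256_shuffle_ps(t0, t2, _MM_SHUFFLE(1, 0, 1, 0));
      __m256 s1 = _mm256_shuffle_ps(t0, t2, _MM_SHUFFLE(3, 2, 3, 2));
      __m256 s2 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0));
      __m256 s3 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2));
      v0 = _mm256_permute2f128_ps(s0, s1, 0x20); // low lanes, cf. permute<2, 0>
      v1 = _mm256_permute2f128_ps(s2, s3, 0x20);
      v2 = _mm256_permute2f128_ps(s0, s1, 0x31); // high lanes, cf. permute<3, 1>
      v3 = _mm256_permute2f128_ps(s2, s3, 0x31);
    }

The 8x8 variant in the next hunk extends this with a blend stage (via inline asm, per the brittle-constraint comment above) before the final lane permutes.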
@@ -140,46 +141,46 @@
          [&](Type t) { return t == vt; }) &&
          "expects all types to be vector<8xf32>");

-  Value T0 = mm256UnpackLoPs(ib, vs[0], vs[1]);
-  Value T1 = mm256UnpackHiPs(ib, vs[0], vs[1]);
-  Value T2 = mm256UnpackLoPs(ib, vs[2], vs[3]);
-  Value T3 = mm256UnpackHiPs(ib, vs[2], vs[3]);
-  Value T4 = mm256UnpackLoPs(ib, vs[4], vs[5]);
-  Value T5 = mm256UnpackHiPs(ib, vs[4], vs[5]);
-  Value T6 = mm256UnpackLoPs(ib, vs[6], vs[7]);
-  Value T7 = mm256UnpackHiPs(ib, vs[6], vs[7]);
+  Value t0 = mm256UnpackLoPs(ib, vs[0], vs[1]);
+  Value t1 = mm256UnpackHiPs(ib, vs[0], vs[1]);
+  Value t2 = mm256UnpackLoPs(ib, vs[2], vs[3]);
+  Value t3 = mm256UnpackHiPs(ib, vs[2], vs[3]);
+  Value t4 = mm256UnpackLoPs(ib, vs[4], vs[5]);
+  Value t5 = mm256UnpackHiPs(ib, vs[4], vs[5]);
+  Value t6 = mm256UnpackLoPs(ib, vs[6], vs[7]);
+  Value t7 = mm256UnpackHiPs(ib, vs[6], vs[7]);

   using inline_asm::mm256BlendPsAsm;
-  Value sh0 = mm256ShufflePs(ib, T0, T2, MaskHelper::shuffle<1, 0, 3, 2>());
-  Value sh2 = mm256ShufflePs(ib, T1, T3, MaskHelper::shuffle<1, 0, 3, 2>());
-  Value sh4 = mm256ShufflePs(ib, T4, T6, MaskHelper::shuffle<1, 0, 3, 2>());
-  Value sh6 = mm256ShufflePs(ib, T5, T7, MaskHelper::shuffle<1, 0, 3, 2>());
-
-  Value S0 =
-      mm256BlendPsAsm(ib, T0, sh0, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
-  Value S1 =
-      mm256BlendPsAsm(ib, T2, sh0, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
-  Value S2 =
-      mm256BlendPsAsm(ib, T1, sh2, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
-  Value S3 =
-      mm256BlendPsAsm(ib, T3, sh2, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
-  Value S4 =
-      mm256BlendPsAsm(ib, T4, sh4, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
-  Value S5 =
-      mm256BlendPsAsm(ib, T6, sh4, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
-  Value S6 =
-      mm256BlendPsAsm(ib, T5, sh6, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
-  Value S7 =
-      mm256BlendPsAsm(ib, T7, sh6, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
-
-  vs[0] = mm256Permute2f128Ps(ib, S0, S4, MaskHelper::permute<2, 0>());
-  vs[1] = mm256Permute2f128Ps(ib, S1, S5, MaskHelper::permute<2, 0>());
-  vs[2] = mm256Permute2f128Ps(ib, S2, S6, MaskHelper::permute<2, 0>());
-  vs[3] = mm256Permute2f128Ps(ib, S3, S7, MaskHelper::permute<2, 0>());
-  vs[4] = mm256Permute2f128Ps(ib, S0, S4, MaskHelper::permute<3, 1>());
-  vs[5] = mm256Permute2f128Ps(ib, S1, S5, MaskHelper::permute<3, 1>());
-  vs[6] = mm256Permute2f128Ps(ib, S2, S6, MaskHelper::permute<3, 1>());
-  vs[7] = mm256Permute2f128Ps(ib, S3, S7, MaskHelper::permute<3, 1>());
+  Value sh0 = mm256ShufflePs(ib, t0, t2, MaskHelper::shuffle<1, 0, 3, 2>());
+  Value sh2 = mm256ShufflePs(ib, t1, t3, MaskHelper::shuffle<1, 0, 3, 2>());
+  Value sh4 = mm256ShufflePs(ib, t4, t6, MaskHelper::shuffle<1, 0, 3, 2>());
+  Value sh6 = mm256ShufflePs(ib, t5, t7, MaskHelper::shuffle<1, 0, 3, 2>());
+
+  Value s0 =
+      mm256BlendPsAsm(ib, t0, sh0, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
+  Value s1 =
+      mm256BlendPsAsm(ib, t2, sh0, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
+  Value s2 =
+      mm256BlendPsAsm(ib, t1, sh2, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
+  Value s3 =
+      mm256BlendPsAsm(ib, t3, sh2, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
+  Value s4 =
+      mm256BlendPsAsm(ib, t4, sh4, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
+  Value s5 =
+      mm256BlendPsAsm(ib, t6, sh4, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
+  Value s6 =
+      mm256BlendPsAsm(ib, t5, sh6, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
+  Value s7 =
+      mm256BlendPsAsm(ib, t7, sh6, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
+
+  vs[0] = mm256Permute2f128Ps(ib, s0, s4, MaskHelper::permute<2, 0>());
+  vs[1] = mm256Permute2f128Ps(ib, s1, s5, MaskHelper::permute<2, 0>());
+  vs[2] = mm256Permute2f128Ps(ib, s2, s6, MaskHelper::permute<2, 0>());
+  vs[3] = mm256Permute2f128Ps(ib, s3, s7, MaskHelper::permute<2, 0>());
+  vs[4] = mm256Permute2f128Ps(ib, s0, s4, MaskHelper::permute<3, 1>());
+  vs[5] = mm256Permute2f128Ps(ib, s1, s5, MaskHelper::permute<3, 1>());
+  vs[6] = mm256Permute2f128Ps(ib, s2, s6, MaskHelper::permute<3, 1>());
+  vs[7] = mm256Permute2f128Ps(ib, s3, s7, MaskHelper::permute<3, 1>());
 }

 /// Rewrite avx2-specific 2-D vector.transpose, for the supported cases and
diff --git a/mlir/lib/ExecutionEngine/AsyncRuntime.cpp b/mlir/lib/ExecutionEngine/AsyncRuntime.cpp
--- a/mlir/lib/ExecutionEngine/AsyncRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/AsyncRuntime.cpp
@@ -463,8 +463,10 @@
 // https://developercommunity.visualstudio.com/content/problem/475494/clexe-error-with-lambda-inside-function-templates.html
 // The bug is fixed in VS2019 16.1. Separating the declaration and definition is
 // a work around for older versions of Visual Studio.
+// NOLINTNEXTLINE(*-identifier-naming): externally called.
 extern "C" API void __mlir_runner_init(llvm::StringMap &exportSymbols);

+// NOLINTNEXTLINE(*-identifier-naming): externally called.
 void __mlir_runner_init(llvm::StringMap &exportSymbols) {
   auto exportSymbol = [&](llvm::StringRef name, auto ptr) {
     assert(exportSymbols.count(name) == 0 && "symbol already exists");
@@ -517,6 +519,7 @@
       &mlir::runtime::mlirAsyncRuntimePrintCurrentThreadId);
 }

+// NOLINTNEXTLINE(*-identifier-naming): externally called.
 extern "C" API void __mlir_runner_destroy() { resetDefaultAsyncRuntime(); }

 } // namespace runtime
diff --git a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
--- a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -58,27 +58,27 @@
 using llvm::orc::TMOwningSimpleCompiler;

 /// Wrap a string into an llvm::StringError.
-static Error make_string_error(const Twine &message) {
+static Error makeStringError(const Twine &message) {
   return llvm::make_error(message.str(),
                           llvm::inconvertibleErrorCode());
 }

-void SimpleObjectCache::notifyObjectCompiled(const Module *M,
-                                             MemoryBufferRef ObjBuffer) {
-  cachedObjects[M->getModuleIdentifier()] = MemoryBuffer::getMemBufferCopy(
-      ObjBuffer.getBuffer(), ObjBuffer.getBufferIdentifier());
+void SimpleObjectCache::notifyObjectCompiled(const Module *m,
+                                             MemoryBufferRef objBuffer) {
+  cachedObjects[m->getModuleIdentifier()] = MemoryBuffer::getMemBufferCopy(
+      objBuffer.getBuffer(), objBuffer.getBufferIdentifier());
 }

-std::unique_ptr SimpleObjectCache::getObject(const Module *M) {
-  auto I = cachedObjects.find(M->getModuleIdentifier());
-  if (I == cachedObjects.end()) {
-    LLVM_DEBUG(dbgs() << "No object for " << M->getModuleIdentifier()
+std::unique_ptr SimpleObjectCache::getObject(const Module *m) {
+  auto i = cachedObjects.find(m->getModuleIdentifier());
+  if (i == cachedObjects.end()) {
+    LLVM_DEBUG(dbgs() << "No object for " << m->getModuleIdentifier()
                       << " in cache. Compiling.\n");
     return nullptr;
   }

-  LLVM_DEBUG(dbgs() << "Object for " << M->getModuleIdentifier()
+  LLVM_DEBUG(dbgs() << "Object for " << m->getModuleIdentifier()
                     << " loaded from cache.\n");
-  return MemoryBuffer::getMemBuffer(I->second->getMemBufferRef());
+  return MemoryBuffer::getMemBuffer(i->second->getMemBufferRef());
 }

 void SimpleObjectCache::dumpToObjectFile(StringRef outputFilename) {
@@ -114,7 +114,8 @@
   // Setup the machine properties from the current architecture.
   auto targetTriple = llvm::sys::getDefaultTargetTriple();
   std::string errorMessage;
-  auto target = llvm::TargetRegistry::lookupTarget(targetTriple, errorMessage);
+  const auto *target =
+      llvm::TargetRegistry::lookupTarget(targetTriple, errorMessage);
   if (!target) {
     errs() << "NO target: " << errorMessage << "\n";
     return true;
@@ -160,7 +161,7 @@
   // Given a function `foo(<...>)`, define the interface function
   // `mlir_foo(i8**)`.
-  auto newType = llvm::FunctionType::get(
+  auto *newType = llvm::FunctionType::get(
       builder.getVoidTy(), builder.getInt8PtrTy()->getPointerTo(),
       /*isVarArg=*/false);
   auto newName = makePackedFunctionName(func.getName());
@@ -170,7 +171,7 @@
   // Extract the arguments from the type-erased argument list and cast them to
   // the proper types.
-  auto bb = llvm::BasicBlock::Create(ctx);
+  auto *bb = llvm::BasicBlock::Create(ctx);
   bb->insertInto(interfaceFunc);
   builder.SetInsertPoint(bb);
   llvm::Value *argList = interfaceFunc->arg_begin();
@@ -237,7 +238,7 @@
   auto llvmModule = llvmModuleBuilder ? llvmModuleBuilder(m, *ctx)
                                       : translateModuleToLLVMIR(m, *ctx);
   if (!llvmModule)
-    return make_string_error("could not convert to LLVM IR");
+    return makeStringError("could not convert to LLVM IR");
   // FIXME: the triple should be passed to the translation or dialect conversion
   // instead of this. Currently, the LLVM module created above has no triple
   // associated with it.
@@ -249,7 +250,7 @@
   // Callback to create the object layer with symbol resolution to current
   // process and dynamically linked libraries.
   auto objectLinkingLayerCreator = [&](ExecutionSession &session,
-                                       const Triple &TT) {
+                                       const Triple &tt) {
     auto objectLayer = std::make_unique(
         session, []() { return std::make_unique(); });
@@ -276,7 +277,7 @@
                << "\nError: " << mb.getError().message() << "\n";
         continue;
       }
-      auto &JD = session.createBareJITDylib(std::string(libPath));
+      auto &jd = session.createBareJITDylib(std::string(libPath));
       auto loaded = DynamicLibrarySearchGenerator::Load(
           libPath.data(), dataLayout.getGlobalPrefix());
       if (!loaded) {
@@ -284,8 +285,8 @@
                << "\n";
         continue;
       }
-      JD.addGenerator(std::move(*loaded));
-      cantFail(objectLayer->add(JD, std::move(mb.get())));
+      jd.addGenerator(std::move(*loaded));
+      cantFail(objectLayer->add(jd, std::move(mb.get())));
     }

     return objectLayer;
@@ -293,14 +294,14 @@
   // Callback to inspect the cache and recompile on demand. This follows Lang's
   // LLJITWithObjectCache example.
-  auto compileFunctionCreator = [&](JITTargetMachineBuilder JTMB)
+  auto compileFunctionCreator = [&](JITTargetMachineBuilder jtmb)
       -> Expected> {
     if (jitCodeGenOptLevel)
-      JTMB.setCodeGenOptLevel(jitCodeGenOptLevel.getValue());
-    auto TM = JTMB.createTargetMachine();
-    if (!TM)
-      return TM.takeError();
-    return std::make_unique(std::move(*TM),
+      jtmb.setCodeGenOptLevel(jitCodeGenOptLevel.getValue());
+    auto tm = jtmb.createTargetMachine();
+    if (!tm)
+      return tm.takeError();
+    return std::make_unique(std::move(*tm),
                             engine->cache.get());
   };

@@ -350,13 +351,13 @@
     llvm::raw_string_ostream os(errorMessage);
     llvm::handleAllErrors(expectedSymbol.takeError(),
                           [&os](llvm::ErrorInfoBase &ei) { ei.log(os); });
-    return make_string_error(os.str());
+    return makeStringError(os.str());
   }

   auto rawFPtr = expectedSymbol->getAddress();
-  auto fptr = reinterpret_cast(rawFPtr);
+  auto *fptr = reinterpret_cast(rawFPtr);
   if (!fptr)
-    return make_string_error("looked up function is null");
+    return makeStringError("looked up function is null");
   return fptr;
 }

diff --git a/mlir/lib/ExecutionEngine/JitRunner.cpp b/mlir/lib/ExecutionEngine/JitRunner.cpp
--- a/mlir/lib/ExecutionEngine/JitRunner.cpp
+++ b/mlir/lib/ExecutionEngine/JitRunner.cpp
@@ -125,7 +125,7 @@
   return OwningModuleRef(parseSourceFile(sourceMgr, context));
 }

-static inline Error make_string_error(const Twine &message) {
+static inline Error makeStringError(const Twine &message) {
   return llvm::make_error(message.str(),
                           llvm::inconvertibleErrorCode());
 }
@@ -239,7 +239,7 @@
                                CompileAndExecuteConfig config) {
   auto mainFunction = module.lookupSymbol(entryPoint);
   if (!mainFunction || mainFunction.empty())
-    return make_string_error("entry point not found");
+    return makeStringError("entry point not found");
   void *empty = nullptr;
   return compileAndExecute(options, module, entryPoint, config, &empty);
 }
@@ -253,7 +253,7 @@
           .getReturnType()
           .dyn_cast();
   if (!resultType || resultType.getWidth() != 32)
-    return make_string_error("only single i32 function result supported");
+    return makeStringError("only single i32 function result supported");
   return Error::success();
 }
 template <>
@@ -263,7 +263,7 @@
           .getReturnType()
           .dyn_cast();
   if (!resultType || resultType.getWidth() != 64)
-    return make_string_error("only single i64 function result supported");
+    return makeStringError("only single i64 function result supported");
   return Error::success();
 }
 template <>
@@ -272,7 +272,7 @@
           .cast()
           .getReturnType()
           .isa())
-    return make_string_error("only single f32 function result supported");
+    return makeStringError("only single f32 function result supported");
   return Error::success();
 }
 template
@@ -281,10 +281,10 @@
                                 CompileAndExecuteConfig config) {
   auto mainFunction = module.lookupSymbol(entryPoint);
   if (!mainFunction || mainFunction.isExternal())
-    return make_string_error("entry point not found");
+    return makeStringError("entry point not found");

   if (mainFunction.getType().cast().getNumParams() != 0)
-    return make_string_error("function inputs not supported");
+    return makeStringError("function inputs not supported");

   if (Error error = checkCompatibleReturnType(mainFunction))
     return error;
@@ -384,7 +384,7 @@
           ? compileAndExecuteFn(options, m.get(),
                                 options.mainFuncName.getValue(),
                                 compileAndExecuteConfig)
-          : make_string_error("unsupported function type");
+          : makeStringError("unsupported function type");

   int exitCode = EXIT_SUCCESS;
   llvm::handleAllErrors(std::move(error),
diff --git a/mlir/lib/ExecutionEngine/RunnerUtils.cpp b/mlir/lib/ExecutionEngine/RunnerUtils.cpp
--- a/mlir/lib/ExecutionEngine/RunnerUtils.cpp
+++ b/mlir/lib/ExecutionEngine/RunnerUtils.cpp
@@ -16,6 +16,8 @@
 #include "mlir/ExecutionEngine/RunnerUtils.h"
 #include

+// NOLINTBEGIN(*-identifier-naming)
+
 extern "C" void
 _mlir_ciface_print_memref_shape_i8(UnrankedMemRefType *M) {
   std::cout << "Unranked Memref ";
@@ -163,3 +165,5 @@
   UnrankedMemRefType expectedDesc = {rank, expectedPtr};
   return _mlir_ciface_verifyMemRefF64(&actualDesc, &expectedDesc);
 }
+
+// NOLINTEND(*-identifier-naming)
diff --git a/mlir/lib/IR/AffineMap.cpp b/mlir/lib/IR/AffineMap.cpp
--- a/mlir/lib/IR/AffineMap.cpp
+++ b/mlir/lib/IR/AffineMap.cpp
@@ -209,7 +209,7 @@
   SmallVector affExprs;
   for (auto index : permutation)
     affExprs.push_back(getAffineDimExpr(index, context));
-  auto m = std::max_element(permutation.begin(), permutation.end());
+  const auto *m = std::max_element(permutation.begin(), permutation.end());
   auto permutationMap = AffineMap::get(*m + 1, 0, affExprs, context);
   assert(permutationMap.isPermutation() && "Invalid permutation vector");
   return permutationMap;
diff --git a/mlir/lib/IR/AsmPrinter.cpp b/mlir/lib/IR/AsmPrinter.cpp
--- a/mlir/lib/IR/AsmPrinter.cpp
+++ b/mlir/lib/IR/AsmPrinter.cpp
@@ -1105,7 +1105,7 @@
   // Find the correct index using a binary search, as the groups are ordered.
   ArrayRef resultGroups = resultGroupIt->second;
-  auto it = llvm::upper_bound(resultGroups, resultNo);
+  const auto *it = llvm::upper_bound(resultGroups, resultNo);
   int groupResultNo = 0, groupSize = 0;

   // If there are no smaller elements, the last result group is the lookup.
@@ -1240,8 +1240,8 @@
   raw_ostream &getStream() { return os; }

   template
-  inline void interleaveComma(const Container &c, UnaryFunctor each_fn) const {
-    llvm::interleaveComma(c, os, each_fn);
+  inline void interleaveComma(const Container &c, UnaryFunctor eachFn) const {
+    llvm::interleaveComma(c, os, eachFn);
   }

   /// This enum describes the different kinds of elision for the type of an
diff --git a/mlir/lib/IR/Block.cpp b/mlir/lib/IR/Block.cpp
--- a/mlir/lib/IR/Block.cpp
+++ b/mlir/lib/IR/Block.cpp
@@ -316,7 +316,7 @@
 Block *Block::splitBlock(iterator splitBefore) {
   // Start by creating a new basic block, and insert it immediate after this
   // one in the containing region.
-  auto newBB = new Block();
+  auto *newBB = new Block();
   getParent()->getBlocks().insert(std::next(Region::iterator(this)), newBB);

   // Move all of the operations from the split point to the end of the region
diff --git a/mlir/lib/IR/BuiltinAttributes.cpp b/mlir/lib/IR/BuiltinAttributes.cpp
--- a/mlir/lib/IR/BuiltinAttributes.cpp
+++ b/mlir/lib/IR/BuiltinAttributes.cpp
@@ -121,10 +121,10 @@
   if (value.size() == 2)
     return value[0].getName() == value[1].getName() ? value[0] : none;

-  auto it = std::adjacent_find(value.begin(), value.end(),
-                               [](NamedAttribute l, NamedAttribute r) {
-                                 return l.getName() == r.getName();
-                               });
+  const auto *it = std::adjacent_find(value.begin(), value.end(),
+                                      [](NamedAttribute l, NamedAttribute r) {
+                                        return l.getName() == r.getName();
+                                      });
   return it != value.end() ? *it : none;
 }
diff --git a/mlir/lib/IR/MLIRContext.cpp b/mlir/lib/IR/MLIRContext.cpp
--- a/mlir/lib/IR/MLIRContext.cpp
+++ b/mlir/lib/IR/MLIRContext.cpp
@@ -44,7 +44,6 @@
 using namespace mlir::detail;

 using llvm::hash_combine;
-using llvm::hash_combine_range;

 //===----------------------------------------------------------------------===//
 // MLIRContext CommandLine Options
diff --git a/mlir/lib/IR/Operation.cpp b/mlir/lib/IR/Operation.cpp
--- a/mlir/lib/IR/Operation.cpp
+++ b/mlir/lib/IR/Operation.cpp
@@ -349,28 +349,28 @@
 auto llvm::ilist_detail::SpecificNodeAccess<
     typename llvm::ilist_detail::compute_node_options<
-        ::mlir::Operation>::type>::getNodePtr(pointer N) -> node_type * {
-  return NodeAccess::getNodePtr(N);
+        ::mlir::Operation>::type>::getNodePtr(pointer n) -> node_type * {
+  return NodeAccess::getNodePtr(n);
 }

 auto llvm::ilist_detail::SpecificNodeAccess<
     typename llvm::ilist_detail::compute_node_options<
-        ::mlir::Operation>::type>::getNodePtr(const_pointer N)
+        ::mlir::Operation>::type>::getNodePtr(const_pointer n)
     -> const node_type * {
-  return NodeAccess::getNodePtr(N);
+  return NodeAccess::getNodePtr(n);
 }

 auto llvm::ilist_detail::SpecificNodeAccess<
     typename llvm::ilist_detail::compute_node_options<
-        ::mlir::Operation>::type>::getValuePtr(node_type *N) -> pointer {
-  return NodeAccess::getValuePtr(N);
+        ::mlir::Operation>::type>::getValuePtr(node_type *n) -> pointer {
+  return NodeAccess::getValuePtr(n);
 }

 auto llvm::ilist_detail::SpecificNodeAccess<
     typename llvm::ilist_detail::compute_node_options<
-        ::mlir::Operation>::type>::getValuePtr(const node_type *N)
+        ::mlir::Operation>::type>::getValuePtr(const node_type *n)
     -> const_pointer {
-  return NodeAccess::getValuePtr(N);
+  return NodeAccess::getValuePtr(n);
 }

 void llvm::ilist_traits<::mlir::Operation>::deleteNode(Operation *op) {
@@ -378,9 +378,9 @@
 }

 Block *llvm::ilist_traits<::mlir::Operation>::getContainingBlock() {
-  size_t Offset(size_t(&((Block *)nullptr->*Block::getSublistAccess(nullptr))));
-  iplist *Anchor(static_cast *>(this));
-  return reinterpret_cast(reinterpret_cast(Anchor) - Offset);
+  size_t offset(size_t(&((Block *)nullptr->*Block::getSublistAccess(nullptr))));
+  iplist *anchor(static_cast *>(this));
+  return reinterpret_cast(reinterpret_cast(anchor) - offset);
 }

 /// This is a trait method invoked when an operation is added to a block. We
@@ -1024,8 +1024,7 @@
       if (op->getNumRegions() > 1)
         return op->emitOpError("region #")
                << region.getRegionNumber() << " should have no arguments";
-      else
-        return op->emitOpError("region should have no arguments");
+      return op->emitOpError("region should have no arguments");
     }
   }
   return success();
diff --git a/mlir/lib/IR/OperationSupport.cpp b/mlir/lib/IR/OperationSupport.cpp
--- a/mlir/lib/IR/OperationSupport.cpp
+++ b/mlir/lib/IR/OperationSupport.cpp
@@ -34,8 +34,8 @@
   dictionarySorted.setPointerAndInt(attributes, true);
 }

-NamedAttrList::NamedAttrList(const_iterator in_start, const_iterator in_end) {
-  assign(in_start, in_end);
+NamedAttrList::NamedAttrList(const_iterator inStart, const_iterator inEnd) {
+  assign(inStart, inEnd);
 }

 ArrayRef NamedAttrList::getAttrs() const { return attrs; }
@@ -66,8 +66,8 @@
 }

 /// Replaces the attributes with new list of attributes.
-void NamedAttrList::assign(const_iterator in_start, const_iterator in_end) {
-  DictionaryAttr::sort(ArrayRef{in_start, in_end}, attrs);
+void NamedAttrList::assign(const_iterator inStart, const_iterator inEnd) {
+  DictionaryAttr::sort(ArrayRef{inStart, inEnd}, attrs);
   dictionarySorted.setPointerAndInt(nullptr, true);
 }

diff --git a/mlir/lib/IR/Region.cpp b/mlir/lib/IR/Region.cpp
--- a/mlir/lib/IR/Region.cpp
+++ b/mlir/lib/IR/Region.cpp
@@ -152,10 +152,10 @@
 }

 Region *llvm::ilist_traits<::mlir::Block>::getParentRegion() {
-  size_t Offset(
+  size_t offset(
       size_t(&((Region *)nullptr->*Region::getSublistAccess(nullptr))));
-  iplist *Anchor(static_cast *>(this));
-  return reinterpret_cast(reinterpret_cast(Anchor) - Offset);
+  iplist *anchor(static_cast *>(this));
+  return reinterpret_cast(reinterpret_cast(anchor) - offset);
 }

 /// This is a trait method invoked when a basic block is added to a region.
diff --git a/mlir/lib/Interfaces/SideEffectInterfaces.cpp b/mlir/lib/Interfaces/SideEffectInterfaces.cpp
--- a/mlir/lib/Interfaces/SideEffectInterfaces.cpp
+++ b/mlir/lib/Interfaces/SideEffectInterfaces.cpp
@@ -76,9 +76,9 @@
       // Otherwise, if the op has recursive side effects we can treat the
       // operation itself as having no effects.
-    } else if (hasRecursiveEffects) {
-      continue;
     }
+    if (hasRecursiveEffects)
+      continue;

     // If there were no effect interfaces, we treat this op as conservatively
     // having effects.
diff --git a/mlir/lib/Parser/AffineParser.cpp b/mlir/lib/Parser/AffineParser.cpp
--- a/mlir/lib/Parser/AffineParser.cpp
+++ b/mlir/lib/Parser/AffineParser.cpp
@@ -525,13 +525,14 @@
   bool isColon = getToken().is(Token::colon);
   if (!isArrow && !isColon) {
     return emitError("expected '->' or ':'");
-  } else if (isArrow) {
+  }
+  if (isArrow) {
     parseToken(Token::arrow, "expected '->' or '['");
     map = parseAffineMapRange(numDims, numSymbols);
     return map ? success() : failure();
-  } else if (parseToken(Token::colon, "expected ':' or '['")) {
-    return failure();
   }
+  if (parseToken(Token::colon, "expected ':' or '['"))
+    return failure();

   if ((set = parseIntegerSetConstraints(numDims, numSymbols)))
     return success();
diff --git a/mlir/lib/Pass/Pass.cpp b/mlir/lib/Pass/Pass.cpp
--- a/mlir/lib/Pass/Pass.cpp
+++ b/mlir/lib/Pass/Pass.cpp
@@ -358,8 +358,8 @@
   PassInstrumentor *pi = am.getPassInstrumentor();
   PassInstrumentation::PipelineParentInfo parentInfo = {llvm::get_threadid(),
                                                         pass};
-  auto dynamic_pipeline_callback = [&](OpPassManager &pipeline,
-                                       Operation *root) -> LogicalResult {
+  auto dynamicPipelineCallback = [&](OpPassManager &pipeline,
+                                     Operation *root) -> LogicalResult {
     if (!op->isAncestor(root))
       return root->emitOpError()
              << "Trying to schedule a dynamic pipeline on an "
@@ -379,7 +379,7 @@
                            verifyPasses, parentInitGeneration, pi, &parentInfo);
   };
-  pass->passState.emplace(op, am, dynamic_pipeline_callback);
+  pass->passState.emplace(op, am, dynamicPipelineCallback);

   // Instrument before the pass has run.
   if (pi)
@@ -437,7 +437,7 @@
     const PassInstrumentation::PipelineParentInfo *parentInfo) {
   assert((!instrumentor || parentInfo) &&
          "expected parent info if instrumentor is provided");
-  auto scope_exit = llvm::make_scope_exit([&] {
+  auto scopeExit = llvm::make_scope_exit([&] {
     // Clear out any computed operation analyses. These analyses won't be used
     // any more in this pipeline, and this helps reduce the current working set
     // of memory. If preserving these analyses becomes important in the future
@@ -460,7 +460,7 @@
 /// type, or nullptr if one does not exist.
 static OpPassManager *findPassManagerFor(MutableArrayRef mgrs,
                                          StringRef name) {
-  auto it = llvm::find_if(
+  auto *it = llvm::find_if(
       mgrs, [&](OpPassManager &mgr) { return mgr.getOpName() == name; });
   return it == mgrs.end() ? nullptr : &*it;
 }
@@ -470,7 +470,7 @@
 static OpPassManager *findPassManagerFor(MutableArrayRef mgrs,
                                          StringAttr name,
                                          MLIRContext &context) {
-  auto it = llvm::find_if(
+  auto *it = llvm::find_if(
       mgrs, [&](OpPassManager &mgr) { return mgr.getOpName(context) == name; });
   return it == mgrs.end() ? nullptr : &*it;
 }
diff --git a/mlir/lib/TableGen/Attribute.cpp b/mlir/lib/TableGen/Attribute.cpp
--- a/mlir/lib/TableGen/Attribute.cpp
+++ b/mlir/lib/TableGen/Attribute.cpp
@@ -253,7 +253,7 @@
 }

 Attribute StructFieldAttr::getType() const {
-  auto init = def->getValueInit("type");
+  auto *init = def->getValueInit("type");
   return Attribute(cast(init));
 }
diff --git a/mlir/lib/TableGen/Dialect.cpp b/mlir/lib/TableGen/Dialect.cpp
--- a/mlir/lib/TableGen/Dialect.cpp
+++ b/mlir/lib/TableGen/Dialect.cpp
@@ -38,7 +38,7 @@
 static StringRef getAsStringOrEmpty(const llvm::Record &record,
                                     StringRef fieldName) {
-  if (auto valueInit = record.getValueInit(fieldName)) {
+  if (auto *valueInit = record.getValueInit(fieldName)) {
     if (llvm::isa(valueInit))
       return record.getValueAsString(fieldName);
   }
diff --git a/mlir/lib/TableGen/Operator.cpp b/mlir/lib/TableGen/Operator.cpp
--- a/mlir/lib/TableGen/Operator.cpp
+++ b/mlir/lib/TableGen/Operator.cpp
@@ -346,10 +346,9 @@
     if (getArg(*mi).is()) {
       // TODO: Handle attributes.
       continue;
-    } else {
-      resultTypeMapping[i].emplace_back(*mi);
-      found = true;
     }
+    resultTypeMapping[i].emplace_back(*mi);
+    found = true;
   }
   return found;
 };
diff --git a/mlir/lib/TableGen/Pattern.cpp b/mlir/lib/TableGen/Pattern.cpp
--- a/mlir/lib/TableGen/Pattern.cpp
+++ b/mlir/lib/TableGen/Pattern.cpp
@@ -649,7 +649,7 @@
   std::vector ret;
   ret.reserve(listInit->size());

-  for (auto it : *listInit) {
+  for (auto *it : *listInit) {
     auto *dagInit = dyn_cast(it);
     if (!dagInit)
       PrintFatalError(&def, "all elements in Pattern multi-entity "
diff --git a/mlir/lib/TableGen/Predicate.cpp b/mlir/lib/TableGen/Predicate.cpp
--- a/mlir/lib/TableGen/Predicate.cpp
+++ b/mlir/lib/TableGen/Predicate.cpp
@@ -188,7 +188,7 @@
   // Build child subtrees.
   auto combined = static_cast(root);
   for (const auto *record : combined.getChildren()) {
-    auto childTree =
+    auto *childTree =
         buildPredicateTree(Pred(record), allocator, allSubstitutions);
     rootNode->children.push_back(childTree);
   }
@@ -241,7 +241,7 @@
   for (auto &child : children) {
     // First, simplify the child. This maintains the predicate as it was.
-    auto simplifiedChild =
+    auto *simplifiedChild =
         propagateGroundTruth(child, knownTruePreds, knownFalsePreds);

     // Just add the child if we don't know how to simplify the current node.
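A large share of the hunks in this commit, like the two findPassManagerFor fixes above, only add a `*` (or `const auto *`) where `auto` deduces a pointer. That is the spelling clang-tidy's llvm-qualified-auto check enforces alongside the camelBack renames from readability-identifier-naming. A small self-contained sketch of the rule, with invented names; this is illustrative only, not code from the patch:

    // Both declarations deduce 'int *'; LLVM style wants the pointer-ness
    // (and, where applicable, constness) visible at the declaration site.
    void qualifiedAutoExample() {
      static int data[3] = {1, 2, 3};
      auto unqualified = &data[0];     // flagged by llvm-qualified-auto
      auto *qualified = &data[0];      // preferred: pointer deduction is explicit
      const auto *readOnly = &data[0]; // const-qualified when never written through
      (void)unqualified;
      (void)qualified;
      (void)readOnly;
    }

The same reasoning explains the `const auto *it` changes on iterator-returning algorithms such as llvm::find_if and std::adjacent_find earlier in this diff: on contiguous containers those iterators are raw pointers.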
@@ -273,8 +273,9 @@
       node->kind = collapseKind;
       node->children.clear();
       return node;
-    } else if (simplifiedChild->kind == eraseKind ||
-               eraseList.count(simplifiedChild->predicate) != 0) {
+    }
+    if (simplifiedChild->kind == eraseKind ||
+        eraseList.count(simplifiedChild->predicate) != 0) {
       continue;
     }
     node->children.push_back(simplifiedChild);
@@ -350,7 +351,7 @@
 std::string CombinedPred::getConditionImpl() const {
   llvm::SpecificBumpPtrAllocator allocator;
-  auto predicateTree = buildPredicateTree(*this, allocator, {});
+  auto *predicateTree = buildPredicateTree(*this, allocator, {});
   predicateTree =
       propagateGroundTruth(predicateTree,
                            /*knownTruePreds=*/llvm::SmallPtrSet(),
diff --git a/mlir/lib/TableGen/Trait.cpp b/mlir/lib/TableGen/Trait.cpp
--- a/mlir/lib/TableGen/Trait.cpp
+++ b/mlir/lib/TableGen/Trait.cpp
@@ -26,7 +26,7 @@
 //===----------------------------------------------------------------------===//

 Trait Trait::create(const llvm::Init *init) {
-  auto def = cast(init)->getDef();
+  auto *def = cast(init)->getDef();
   if (def->isSubClassOf("PredTrait"))
     return Trait(Kind::Pred, def);
   if (def->isSubClassOf("GenInternalTrait"))
diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -61,7 +61,7 @@
   LogicalResult processFunction(llvm::Function *f);

   /// Imports GV as a GlobalOp, creating it if it doesn't exist.
-  GlobalOp processGlobal(llvm::GlobalVariable *GV);
+  GlobalOp processGlobal(llvm::GlobalVariable *gv);

 private:
   /// Returns personality of `f` as a FlatSymbolRefAttr.
@@ -145,7 +145,8 @@
     os << "llvm-imported-inst-%";
     inst->printAsOperand(os, /*PrintType=*/false);
     return FileLineColLoc::get(context, os.str(), 0, 0);
-  } else if (!loc) {
+  }
+  if (!loc) {
     return unknownLoc;
   }
   // FIXME: Obtain the filename from DILocationInfo.
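The control-flow rewrites here (and in Shape.cpp, AffineParser.cpp, and SideEffectInterfaces.cpp earlier) apply the LLVM coding standard's "don't use else after a return" rule: once a branch returns or continues, the trailing else only adds nesting. A schematic before/after, where parseArrow and parseColon are hypothetical stand-ins for the real calls, not part of this patch:

    // Illustrative only: the stand-in helpers are trivially defined so the
    // sketch compiles on its own.
    static bool parseArrow() { return true; }
    static bool parseColon() { return true; }

    // Before: the else chain keeps indenting even though every branch exits.
    bool parseBefore(bool isArrow, bool isColon) {
      if (!isArrow && !isColon) {
        return false;
      } else if (isArrow) { // flagged: else after return
        return parseArrow();
      }
      return parseColon();
    }

    // After: early returns; each case reads at the same indentation level.
    bool parseAfter(bool isArrow, bool isColon) {
      if (!isArrow && !isColon)
        return false;
      if (isArrow)
        return parseArrow();
      return parseColon();
    }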
@@ -304,47 +305,47 @@
   return nullptr;
 }

-GlobalOp Importer::processGlobal(llvm::GlobalVariable *GV) {
-  auto it = globals.find(GV);
+GlobalOp Importer::processGlobal(llvm::GlobalVariable *gv) {
+  auto it = globals.find(gv);
   if (it != globals.end())
     return it->second;

   OpBuilder b(module.getBody(), getGlobalInsertPt());
   Attribute valueAttr;
-  if (GV->hasInitializer())
-    valueAttr = getConstantAsAttr(GV->getInitializer());
-  Type type = processType(GV->getValueType());
+  if (gv->hasInitializer())
+    valueAttr = getConstantAsAttr(gv->getInitializer());
+  Type type = processType(gv->getValueType());
   if (!type)
     return nullptr;

   uint64_t alignment = 0;
-  llvm::MaybeAlign maybeAlign = GV->getAlign();
+  llvm::MaybeAlign maybeAlign = gv->getAlign();
   if (maybeAlign.hasValue()) {
     llvm::Align align = maybeAlign.getValue();
     alignment = align.value();
   }

   GlobalOp op =
-      b.create(UnknownLoc::get(context), type, GV->isConstant(),
-               convertLinkageFromLLVM(GV->getLinkage()),
-               GV->getName(), valueAttr, alignment);
+      b.create(UnknownLoc::get(context), type, gv->isConstant(),
+               convertLinkageFromLLVM(gv->getLinkage()),
+               gv->getName(), valueAttr, alignment);

-  if (GV->hasInitializer() && !valueAttr) {
+  if (gv->hasInitializer() && !valueAttr) {
     Region &r = op.getInitializerRegion();
     currentEntryBlock = b.createBlock(&r);
     b.setInsertionPoint(currentEntryBlock, currentEntryBlock->begin());
-    Value v = processConstant(GV->getInitializer());
+    Value v = processConstant(gv->getInitializer());
     if (!v)
       return nullptr;
     b.create(op.getLoc(), ArrayRef({v}));
   }
-  if (GV->hasAtLeastLocalUnnamedAddr())
+  if (gv->hasAtLeastLocalUnnamedAddr())
     op.setUnnamedAddrAttr(UnnamedAddrAttr::get(
-        context, convertUnnamedAddrFromLLVM(GV->getUnnamedAddr())));
-  if (GV->hasSection())
-    op.setSectionAttr(b.getStringAttr(GV->getSection()));
+        context, convertUnnamedAddrFromLLVM(gv->getUnnamedAddr())));
+  if (gv->hasSection())
+    op.setSectionAttr(b.getStringAttr(gv->getSection()));

-  return globals[GV] = op;
+  return globals[gv] = op;
 }

 Value Importer::processConstant(llvm::Constant *c) {
@@ -366,9 +367,9 @@
       return nullptr;
     return instMap[c] = bEntry.create(unknownLoc, type);
   }
-  if (auto *GV = dyn_cast(c))
+  if (auto *gv = dyn_cast(c))
     return bEntry.create(UnknownLoc::get(context),
-                         processGlobal(GV));
+                         processGlobal(gv));

   if (auto *ce = dyn_cast(c)) {
     llvm::Instruction *i = ce->getAsInstruction();
@@ -526,8 +527,8 @@
 Importer::processBranchArgs(llvm::Instruction *br, llvm::BasicBlock *target,
                             SmallVectorImpl &blockArguments) {
   for (auto inst = target->begin(); isa(inst); ++inst) {
-    auto *PN = cast(&*inst);
-    Value value = processValue(PN->getIncomingValueForBlock(br->getParent()));
+    auto *pn = cast(&*inst);
+    Value value = processValue(pn->getIncomingValueForBlock(br->getParent()));
     if (!value)
       return failure();
     blockArguments.push_back(value);
@@ -777,10 +778,10 @@
   // If it doesn't have a name, currently, only function pointers that are
   // bitcast to i8* are parsed.
-  if (auto ce = dyn_cast(pf)) {
+  if (auto *ce = dyn_cast(pf)) {
     if (ce->getOpcode() == llvm::Instruction::BitCast &&
         ce->getType() == llvm::Type::getInt8PtrTy(f->getContext())) {
-      if (auto func = dyn_cast(ce->getOperand(0)))
+      if (auto *func = dyn_cast(ce->getOperand(0)))
         return SymbolRefAttr::get(b.getContext(), func->getName());
     }
   }
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
@@ -55,12 +55,11 @@
     unsigned lineNo = fileLoc.getLine();
     unsigned colNo = fileLoc.getColumn();
     return builder.getOrCreateSrcLocStr(name, fileName, lineNo, colNo);
-  } else {
-    std::string locStr;
-    llvm::raw_string_ostream locOS(locStr);
-    locOS << loc;
-    return builder.getOrCreateSrcLocStr(locOS.str());
   }
+  std::string locStr;
+  llvm::raw_string_ostream locOS(locStr);
+  locOS << loc;
+  return builder.getOrCreateSrcLocStr(locOS.str());
 }

 /// Create the location struct from the operation location information.
@@ -81,9 +80,8 @@
   if (auto nameLoc = loc.dyn_cast()) {
     StringRef name = nameLoc.getName();
     return createSourceLocStrFromLocation(nameLoc.getChildLoc(), builder, name);
-  } else {
-    return createSourceLocStrFromLocation(loc, builder, "unknown");
   }
+  return createSourceLocStrFromLocation(loc, builder, "unknown");
 }

 /// Return the runtime function used to lower the given operation.
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -861,11 +861,11 @@
 }

 // Convert an Atomic Ordering attribute to llvm::AtomicOrdering.
-llvm::AtomicOrdering convertAtomicOrdering(Optional AOAttr) {
-  if (!AOAttr.hasValue())
+llvm::AtomicOrdering convertAtomicOrdering(Optional aoAttr) {
+  if (!aoAttr.hasValue())
     return llvm::AtomicOrdering::Monotonic; // Default Memory Ordering

-  return StringSwitch(AOAttr.getValue())
+  return StringSwitch(aoAttr.getValue())
       .Case("seq_cst", llvm::AtomicOrdering::SequentiallyConsistent)
       .Case("acq_rel", llvm::AtomicOrdering::AcquireRelease)
       .Case("acquire", llvm::AtomicOrdering::Acquire)
@@ -889,7 +889,7 @@
       moduleTranslation.translateLoc(opInst.getLoc(), subprogram);
   llvm::OpenMPIRBuilder::LocationDescription ompLoc(builder.saveIP(),
                                                     llvm::DebugLoc(diLoc));
-  llvm::AtomicOrdering AO = convertAtomicOrdering(readOp.memory_order());
+  llvm::AtomicOrdering ao = convertAtomicOrdering(readOp.memory_order());
   llvm::Value *address = moduleTranslation.lookupValue(readOp.address());
   llvm::OpenMPIRBuilder::InsertPointTy currentIP = builder.saveIP();
@@ -903,9 +903,9 @@
   // Restore the IP and insert Atomic Read.
   builder.restoreIP(currentIP);
-  llvm::OpenMPIRBuilder::AtomicOpValue V = {v, false, false};
-  llvm::OpenMPIRBuilder::AtomicOpValue X = {address, false, false};
-  builder.restoreIP(ompBuilder->createAtomicRead(ompLoc, X, V, AO));
+  llvm::OpenMPIRBuilder::AtomicOpValue atomicV = {v, false, false};
+  llvm::OpenMPIRBuilder::AtomicOpValue x = {address, false, false};
+  builder.restoreIP(ompBuilder->createAtomicRead(ompLoc, x, atomicV, ao));
   return success();
 }

diff --git a/mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp
--- a/mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp
@@ -29,17 +29,17 @@
 // take a single int32 argument. It is likely that the interface of this
 // function will change to make it more generic.
 static llvm::Value *createDeviceFunctionCall(llvm::IRBuilderBase &builder,
-                                             StringRef fn_name, int parameter) {
+                                             StringRef fnName, int parameter) {
   llvm::Module *module = builder.GetInsertBlock()->getModule();
-  llvm::FunctionType *function_type = llvm::FunctionType::get(
+  llvm::FunctionType *functionType = llvm::FunctionType::get(
       llvm::Type::getInt64Ty(module->getContext()), // return type.
       llvm::Type::getInt32Ty(module->getContext()), // parameter type.
       false);                                       // no variadic arguments.
   llvm::Function *fn = dyn_cast(
-      module->getOrInsertFunction(fn_name, function_type).getCallee());
-  llvm::Value *fn_op0 = llvm::ConstantInt::get(
+      module->getOrInsertFunction(fnName, functionType).getCallee());
+  llvm::Value *fnOp0 = llvm::ConstantInt::get(
       llvm::Type::getInt32Ty(module->getContext()), parameter);
-  return builder.CreateCall(fn, ArrayRef(fn_op0));
+  return builder.CreateCall(fn, ArrayRef(fnOp0));
 }

 namespace {
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -242,10 +242,10 @@
   if (auto *arrayTy = dyn_cast(llvmType)) {
     elementType = arrayTy->getElementType();
     numElements = arrayTy->getNumElements();
-  } else if (auto fVectorTy = dyn_cast(llvmType)) {
+  } else if (auto *fVectorTy = dyn_cast(llvmType)) {
    elementType = fVectorTy->getElementType();
     numElements = fVectorTy->getNumElements();
-  } else if (auto sVectorTy = dyn_cast(llvmType)) {
+  } else if (auto *sVectorTy = dyn_cast(llvmType)) {
     elementType = sVectorTy->getElementType();
     numElements = sVectorTy->getMinNumElements();
   } else {
diff --git a/mlir/lib/Tools/PDLL/Parser/Parser.cpp b/mlir/lib/Tools/PDLL/Parser/Parser.cpp
--- a/mlir/lib/Tools/PDLL/Parser/Parser.cpp
+++ b/mlir/lib/Tools/PDLL/Parser/Parser.cpp
@@ -1525,7 +1525,7 @@
     // Handle named results.
     auto elementNames = tupleType.getElementNames();
-    auto it = llvm::find(elementNames, name);
+    const auto *it = llvm::find(elementNames, name);
     if (it != elementNames.end())
       return tupleType.getElementTypes()[it - elementNames.begin()];
   }
diff --git a/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp b/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
--- a/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
+++ b/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
@@ -133,7 +133,7 @@
   }

   // Check the uses.
-  auto useIt = llvm::find_if(def.uses, [&](const llvm::SMRange &range) {
+  const auto *useIt = llvm::find_if(def.uses, [&](const llvm::SMRange &range) {
     return contains(range, loc);
   });
   if (useIt != def.uses.end()) {
diff --git a/mlir/lib/Tools/mlir-reduce/MlirReduceMain.cpp b/mlir/lib/Tools/mlir-reduce/MlirReduceMain.cpp
--- a/mlir/lib/Tools/mlir-reduce/MlirReduceMain.cpp
+++ b/mlir/lib/Tools/mlir-reduce/MlirReduceMain.cpp
@@ -42,20 +42,20 @@
                               MLIRContext &context) {
   // Override the default '-h' and use the default PrintHelpMessage() which
   // won't print options in categories.
-  static llvm::cl::opt Help("h", llvm::cl::desc("Alias for -help"),
+  static llvm::cl::opt help("h", llvm::cl::desc("Alias for -help"),
                             llvm::cl::Hidden);

-  static llvm::cl::OptionCategory MLIRReduceCategory("mlir-reduce options");
+  static llvm::cl::OptionCategory mlirReduceCategory("mlir-reduce options");

   static llvm::cl::opt inputFilename(
       llvm::cl::Positional, llvm::cl::desc(""),
-      llvm::cl::cat(MLIRReduceCategory));
+      llvm::cl::cat(mlirReduceCategory));

   static llvm::cl::opt outputFilename(
       "o", llvm::cl::desc("Output filename for the reduced test case"),
-      llvm::cl::init("-"), llvm::cl::cat(MLIRReduceCategory));
+      llvm::cl::init("-"), llvm::cl::cat(mlirReduceCategory));

-  llvm::cl::HideUnrelatedOptions(MLIRReduceCategory);
+  llvm::cl::HideUnrelatedOptions(mlirReduceCategory);

   llvm::InitLLVM y(argc, argv);

@@ -65,7 +65,7 @@
   llvm::cl::ParseCommandLineOptions(argc, argv,
                                     "MLIR test case reduction tool.\n");

-  if (Help) {
+  if (help) {
     llvm::cl::PrintHelpMessage();
     return success();
   }
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -301,14 +301,15 @@
       memrefEdgeCount[value]--;
     }
     // Remove 'srcId' from 'inEdges[dstId]'.
-    for (auto it = inEdges[dstId].begin(); it != inEdges[dstId].end(); ++it) {
+    for (auto *it = inEdges[dstId].begin(); it != inEdges[dstId].end(); ++it) {
       if ((*it).id == srcId && (*it).value == value) {
         inEdges[dstId].erase(it);
         break;
       }
     }
     // Remove 'dstId' from 'outEdges[srcId]'.
-    for (auto it = outEdges[srcId].begin(); it != outEdges[srcId].end(); ++it) {
+    for (auto *it = outEdges[srcId].begin(); it != outEdges[srcId].end();
+         ++it) {
       if ((*it).id == dstId && (*it).value == value) {
         outEdges[srcId].erase(it);
         break;
diff --git a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
--- a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
@@ -85,7 +85,7 @@
   // Helper to check whether an operation is loop invariant wrt. SSA properties.
   auto isDefinedOutsideOfBody = [&](Value value) {
-    auto definingOp = value.getDefiningOp();
+    auto *definingOp = value.getDefiningOp();
     return (definingOp && !!willBeMovedSet.count(definingOp)) ||
            looplike.isDefinedOutsideOfLoop(value);
   };
diff --git a/mlir/lib/Transforms/NormalizeMemRefs.cpp b/mlir/lib/Transforms/NormalizeMemRefs.cpp
--- a/mlir/lib/Transforms/NormalizeMemRefs.cpp
+++ b/mlir/lib/Transforms/NormalizeMemRefs.cpp
@@ -517,6 +517,6 @@
       newRegion->takeBody(oldRegion);
     }
     return bb.createOperation(result);
-  } else
-    return oldOp;
+  }
+  return oldOp;
 }
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -191,7 +191,7 @@
   // Check for dependence with outgoing DMAs. Doing this conservatively.
   // TODO: use the dependence analysis to check for
   // dependences between an incoming and outgoing DMA in the same iteration.
-  auto it = outgoingDmaOps.begin();
+  auto *it = outgoingDmaOps.begin();
   for (; it != outgoingDmaOps.end(); ++it) {
     if (it->getDstMemRef() == dmaStartOp.getSrcMemRef())
       break;
diff --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp
--- a/mlir/lib/Transforms/Utils/FoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp
@@ -168,7 +168,7 @@
   if (op->getNumOperands() >= 2 && op->hasTrait()) {
     std::stable_partition(
         op->getOpOperands().begin(), op->getOpOperands().end(),
-        [&](OpOperand &O) { return !matchPattern(O.get(), m_Constant()); });
+        [&](OpOperand &o) { return !matchPattern(o.get(), m_Constant()); });
   }

   // Check to see if any operands to the operation is constant and whether
diff --git a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
--- a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
@@ -56,7 +56,8 @@
   if (auto loadOp = dyn_cast(op)) {
     return values.count(loadOp.getMemRef()) > 0 &&
            values[loadOp.getMemRef()] == true;
-  } else if (auto storeOp = dyn_cast(op)) {
+  }
+  if (auto storeOp = dyn_cast(op)) {
     return values.count(storeOp.getMemRef()) > 0;
   }
   return false;
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -3034,7 +3034,7 @@
   auto updateRegion =
       [&](const SmallMapVector, 4>
               &targetRegions) {
-        const auto it = targetRegions.find(region->memref);
+        const auto *const it = targetRegions.find(region->memref);
         if (it == targetRegions.end())
           return false;
diff --git a/mlir/test/lib/Analysis/TestAliasAnalysis.cpp b/mlir/test/lib/Analysis/TestAliasAnalysis.cpp
--- a/mlir/test/lib/Analysis/TestAliasAnalysis.cpp
+++ b/mlir/test/lib/Analysis/TestAliasAnalysis.cpp
@@ -67,7 +67,7 @@
     // Check for aliasing behavior between each of the values.
     for (auto it = valsToCheck.begin(), e = valsToCheck.end(); it != e; ++it)
-      for (auto innerIt = valsToCheck.begin(); innerIt != it; ++innerIt)
+      for (auto *innerIt = valsToCheck.begin(); innerIt != it; ++innerIt)
         printAliasResult(aliasAnalysis.alias(*innerIt, *it), *innerIt, *it);
   }
diff --git a/mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp b/mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp
--- a/mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp
+++ b/mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp
@@ -52,9 +52,9 @@
 void TestMathPolynomialApproximationPass::runOnFunction() {
   RewritePatternSet patterns(&getContext());
-  MathPolynomialApproximationOptions approx_options;
-  approx_options.enableAvx2 = enableAvx2;
-  populateMathPolynomialApproximationPatterns(patterns, approx_options);
+  MathPolynomialApproximationOptions approxOptions;
+  approxOptions.enableAvx2 = enableAvx2;
+  populateMathPolynomialApproximationPatterns(patterns, approxOptions);
   (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
 }
diff --git a/mlir/test/lib/Dialect/Test/TestDialect.cpp b/mlir/test/lib/Dialect/Test/TestDialect.cpp
--- a/mlir/test/lib/Dialect/Test/TestDialect.cpp
+++ b/mlir/test/lib/Dialect/Test/TestDialect.cpp
@@ -689,24 +689,24 @@
   Region &body = *result.addRegion();
   body.push_back(new Block);
   Block &block = body.back();
-  Operation *wrapped_op = parser.parseGenericOperation(&block, block.begin());
-  if (!wrapped_op)
+  Operation *wrappedOp = parser.parseGenericOperation(&block, block.begin());
+  if (!wrappedOp)
     return failure();

   // Create a return terminator in the inner region, pass as operand to the
   // terminator the returned values from the wrapped operation.
-  SmallVector return_operands(wrapped_op->getResults());
+  SmallVector returnOperands(wrappedOp->getResults());
   OpBuilder builder(parser.getContext());
   builder.setInsertionPointToEnd(&block);
-  builder.create(wrapped_op->getLoc(), return_operands);
+  builder.create(wrappedOp->getLoc(), returnOperands);

   // Get the results type for the wrapping op from the terminator operands.
-  Operation &return_op = body.back().back();
-  result.types.append(return_op.operand_type_begin(),
-                      return_op.operand_type_end());
+  Operation &returnOp = body.back().back();
+  result.types.append(returnOp.operand_type_begin(),
+                      returnOp.operand_type_end());

   // Use the location of the wrapped op for the "test.wrapping_region" op.
-  result.location = wrapped_op->getLoc();
+  result.location = wrappedOp->getLoc();

   return success();
 }
diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -808,7 +808,7 @@
 // That way, we will know if operations is called once or twice.
 def OpMGetNullAttr : NativeCodeCall<"Attribute()">;
 def OpMAttributeIsNull : Constraint, "Attribute is null">;
-def OpMVal : NativeCodeCall<"OpMTest($_builder, $0)">;
+def OpMVal : NativeCodeCall<"opMTest($_builder, $0)">;
 def : Pat<(OpM $attr, $optAttr), (OpM $attr, (OpMVal $attr) ),
           [(OpMAttributeIsNull:$optAttr)]>;
diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -56,7 +56,7 @@
 // This let us check the number of times OpM_Test was called by inspecting
 // the returned value in the MLIR output.
diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -56,7 +56,7 @@
 // This let us check the number of times OpM_Test was called by inspecting
 // the returned value in the MLIR output.
 static int64_t opMIncreasingValue = 314159265;
-static Attribute OpMTest(PatternRewriter &rewriter, Value val) {
+static Attribute opMTest(PatternRewriter &rewriter, Value val) {
   int64_t i = opMIncreasingValue++;
   return rewriter.getIntegerAttr(rewriter.getIntegerType(32), i);
 }
diff --git a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
--- a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
+++ b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
@@ -71,7 +71,7 @@
   double typeRangeMax = double(outputElementType.getStorageTypeMax() -
                                outputElementType.getZeroPoint()) *
                         outputElementType.getScale();
-  bool narrow_range = outputElementType.getStorageTypeMin() == 1 ? true : false;
+  bool narrowRange = outputElementType.getStorageTypeMin() == 1;

   auto dstQConstType = RankedTensorType::get(
       outputType.getShape(),
@@ -81,7 +81,7 @@
       rewriter.getI32IntegerAttr(
           outputElementType.getStorageTypeIntegralWidth()),
       0, true /* signed */,
-      rewriter.getBoolAttr(narrow_range)));
+      rewriter.getBoolAttr(narrowRange)));

   ElementsAttr inputElems;
   if (!matchPattern(tosaNegateOp.input1(), m_Constant(&inputElems)))
diff --git a/mlir/test/lib/IR/TestMatchers.cpp b/mlir/test/lib/IR/TestMatchers.cpp
--- a/mlir/test/lib/IR/TestMatchers.cpp
+++ b/mlir/test/lib/IR/TestMatchers.cpp
@@ -76,19 +76,19 @@
   llvm::outs() << "Pattern mul(mul(*), mul(*)) matched " << countMatches(f, p7)
                << " times\n";

-  auto mul_of_mulmul =
-      m_Op(m_Op(), m_Op());
-  auto p8 = m_Op(mul_of_mulmul, mul_of_mulmul);
+  auto mulOfMulmul =
+      m_Op(m_Op(), m_Op());
+  auto p8 = m_Op(mulOfMulmul, mulOfMulmul);
   llvm::outs()
       << "Pattern mul(mul(mul(*), mul(*)), mul(mul(*), mul(*))) matched "
       << countMatches(f, p8) << " times\n";

   // clang-format off
-  auto mul_of_muladd = m_Op(m_Op(), m_Op());
-  auto mul_of_anyadd = m_Op(m_Any(), m_Op());
+  auto mulOfMuladd = m_Op(m_Op(), m_Op());
+  auto mulOfAnyadd = m_Op(m_Any(), m_Op());
   auto p9 = m_Op(m_Op(
-                   mul_of_muladd, m_Op()),
-                 m_Op(mul_of_anyadd, mul_of_anyadd));
+                   mulOfMuladd, m_Op()),
+                 m_Op(mulOfAnyadd, mulOfAnyadd));
   // clang-format on
   llvm::outs() << "Pattern mul(mul(mul(mul(*), add(*)), mul(*)), mul(mul(*, "
                   "add(*)), mul(*, add(*)))) matched "
@@ -118,12 +118,12 @@
   llvm::outs() << "Pattern mul(a, add(b, c)) matched " << countMatches(f, p15)
                << " times\n";

-  auto mul_of_aany = m_Op(a, m_Any());
-  auto p16 = m_Op(mul_of_aany, m_Op(a, c));
+  auto mulOfAany = m_Op(a, m_Any());
+  auto p16 = m_Op(mulOfAany, m_Op(a, c));
   llvm::outs() << "Pattern mul(mul(a, *), add(a, c)) matched "
                << countMatches(f, p16) << " times\n";

-  auto p17 = m_Op(mul_of_aany, m_Op(c, b));
+  auto p17 = m_Op(mulOfAany, m_Op(c, b));
   llvm::outs() << "Pattern mul(mul(a, *), add(c, b)) matched "
                << countMatches(f, p17) << " times\n";
 }
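For readers unfamiliar with the matchers renamed above: m_Op, m_Any, and m_Constant come from mlir/IR/Matchers.h, and m_Op takes the operation type as a template argument. A small self-contained sketch of how such a pattern is built and applied; the op types and helper function here are illustrative, not part of this patch:

#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Matchers.h"

using namespace mlir;

// Count operations in `f` shaped like mul(*, add(*, *)).
static unsigned countMulOfAnyAdd(FuncOp f) {
  auto pattern = m_Op<arith::MulFOp>(m_Any(), m_Op<arith::AddFOp>());
  unsigned count = 0;
  f.walk([&](Operation *op) {
    if (pattern.match(op))
      ++count;
  });
  return count;
}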
diff --git a/mlir/test/lib/IR/TestOpaqueLoc.cpp b/mlir/test/lib/IR/TestOpaqueLoc.cpp
--- a/mlir/test/lib/IR/TestOpaqueLoc.cpp
+++ b/mlir/test/lib/IR/TestOpaqueLoc.cpp
@@ -35,10 +35,10 @@
   void runOnOperation() override {
     std::vector> myLocs;
-    int last_it = 0;
+    int lastIt = 0;

     getOperation().getBody()->walk([&](Operation *op) {
-      myLocs.push_back(std::make_unique(last_it++));
+      myLocs.push_back(std::make_unique(lastIt++));

       Location loc = op->getLoc();
@@ -54,14 +54,13 @@
       /// Add the same operation but with fallback location to test the
       /// corresponding get method and serialization.
-      Operation *op_cloned_1 = builder.clone(*op);
-      op_cloned_1->setLoc(
-          OpaqueLoc::get(myLocs.back().get(), loc));
+      Operation *opCloned1 = builder.clone(*op);
+      opCloned1->setLoc(OpaqueLoc::get(myLocs.back().get(), loc));

       /// Add the same operation but with void* instead of MyLocation* to test
       /// getUnderlyingLocationOrNull method.
-      Operation *op_cloned_2 = builder.clone(*op);
-      op_cloned_2->setLoc(OpaqueLoc::get(nullptr, loc));
+      Operation *opCloned2 = builder.clone(*op);
+      opCloned2->setLoc(OpaqueLoc::get(nullptr, loc));
     });

     ScopedDiagnosticHandler diagHandler(&getContext(), [](Diagnostic &diag) {
diff --git a/mlir/test/lib/Transforms/TestLoopFusion.cpp b/mlir/test/lib/Transforms/TestLoopFusion.cpp
--- a/mlir/test/lib/Transforms/TestLoopFusion.cpp
+++ b/mlir/test/lib/Transforms/TestLoopFusion.cpp
@@ -156,7 +156,7 @@
 // If 'return_on_change' is true, returns on first invocation of 'fn' which
 // returns true.
 static bool iterateLoops(ArrayRef> depthToLoops,
-                         LoopFunc fn, bool return_on_change = false) {
+                         LoopFunc fn, bool returnOnChange = false) {
   bool changed = false;
   for (unsigned loopDepth = 0, end = depthToLoops.size(); loopDepth < end;
        ++loopDepth) {
@@ -167,7 +167,7 @@
         if (j != k)
           changed |= fn(loops[j], loops[k], j, k, loopDepth,
                         depthToLoops.size());
-      if (changed && return_on_change)
+      if (changed && returnOnChange)
         return true;
     }
   }
diff --git a/mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp b/mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp
--- a/mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp
+++ b/mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp
@@ -12,6 +12,8 @@

 #include "mlir/ExecutionEngine/RunnerUtils.h"

+// NOLINTBEGIN(*-identifier-naming)
+
 extern "C" void
 _mlir_ciface_fillI32Buffer(StridedMemRefType *mem_ref,
                            int32_t value) {
@@ -36,3 +38,5 @@
   std::fill_n(mem_ref->basePtr,
               mem_ref->sizes[0] * mem_ref->sizes[1] * mem_ref->sizes[2],
               value);
 }
+
+// NOLINTEND(*-identifier-naming)
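The NOLINTBEGIN/NOLINTEND pair added above disables the matching clang-tidy checks for everything in between; it is needed here because the _mlir_ciface_* symbol names are part of the generated C interface ABI and must keep their exact spelling. The same mechanism works for any region that has to deviate from the naming rules; a sketch with a hypothetical wrapper, for illustration only:

// NOLINTBEGIN(*-identifier-naming)
extern "C" void _mlir_ciface_scale_buffer(float *data, int64_t size,
                                          float factor) {
  // C-callable symbol: the snake_case name must stay as-is.
  for (int64_t i = 0; i < size; ++i)
    data[i] *= factor;
}
// NOLINTEND(*-identifier-naming)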
diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
--- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
+++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
@@ -1009,9 +1009,8 @@
     return success(
         succeeded(generateNamedGenericOpOds(opConfig, genContext)) &&
         succeeded(generateNamedGenericOpDefns(opConfig, genContext)));
-  } else {
-    return emitError(genContext.getLoc()) << "unsupported operation type";
   }
+  return emitError(genContext.getLoc()) << "unsupported operation type";
 }

 //===----------------------------------------------------------------------===//
diff --git a/mlir/tools/mlir-tblgen/DialectGen.cpp b/mlir/tools/mlir-tblgen/DialectGen.cpp
--- a/mlir/tools/mlir-tblgen/DialectGen.cpp
+++ b/mlir/tools/mlir-tblgen/DialectGen.cpp
@@ -68,9 +68,10 @@
     return llvm::None;
   }

-  auto dialectIt = llvm::find_if(dialectDefs, [](const llvm::Record *def) {
-    return Dialect(def).getName() == selectedDialect;
-  });
+  const auto *dialectIt =
+      llvm::find_if(dialectDefs, [](const llvm::Record *def) {
+        return Dialect(def).getName() == selectedDialect;
+      });
   if (dialectIt == dialectDefs.end()) {
     llvm::errs() << "selected dialect with '-dialect' does not exist\n";
     return llvm::None;
diff --git a/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp b/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
--- a/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
+++ b/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
@@ -24,31 +24,31 @@
 #include "llvm/TableGen/Record.h"
 #include "llvm/TableGen/TableGenBackend.h"

-static llvm::cl::OptionCategory IntrinsicGenCat("Intrinsics Generator Options");
+static llvm::cl::OptionCategory intrinsicGenCat("Intrinsics Generator Options");

 static llvm::cl::opt
     nameFilter("llvmir-intrinsics-filter",
                llvm::cl::desc("Only keep the intrinsics with the specified "
                               "substring in their record name"),
-               llvm::cl::cat(IntrinsicGenCat));
+               llvm::cl::cat(intrinsicGenCat));

 static llvm::cl::opt
     opBaseClass("dialect-opclass-base",
                 llvm::cl::desc("The base class for the ops in the dialect we "
                                "are planning to emit"),
-                llvm::cl::init("LLVM_IntrOp"), llvm::cl::cat(IntrinsicGenCat));
+                llvm::cl::init("LLVM_IntrOp"), llvm::cl::cat(intrinsicGenCat));

 static llvm::cl::opt accessGroupRegexp(
     "llvmir-intrinsics-access-group-regexp",
     llvm::cl::desc("Mark intrinsics that match the specified "
                    "regexp as taking an access group metadata"),
-    llvm::cl::cat(IntrinsicGenCat));
+    llvm::cl::cat(intrinsicGenCat));

 static llvm::cl::opt aliasScopesRegexp(
     "llvmir-intrinsics-alias-scopes-regexp",
     llvm::cl::desc("Mark intrinsics that match the specified "
                    "regexp as taking alias.scopes and noalias metadata"),
-    llvm::cl::cat(IntrinsicGenCat));
+    llvm::cl::cat(intrinsicGenCat));

 // Used to represent the indices of overloadable operands/results.
 using IndicesTy = llvm::SmallBitVector;
@@ -104,7 +104,7 @@
   llvm::SmallVector chunks;
   llvm::StringRef targetPrefix = record.getValueAsString("TargetPrefix");
   name.split(chunks, '_');
-  auto chunksBegin = chunks.begin();
+  auto *chunksBegin = chunks.begin();
   // Remove the target prefix from target specific intrinsics.
   if (!targetPrefix.empty()) {
     assert(targetPrefix == *chunksBegin &&
diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
--- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
@@ -527,7 +527,7 @@
                                    emitHelper.isEmittingForOp());

   // Prefix with `tblgen_` to avoid hiding the attribute accessor.
-  Twine varName = tblgenNamePrefix + attrName;
+  std::string varName = (tblgenNamePrefix + attrName).str();

   // If the attribute is not required and we cannot emit the condition, then
   // there is nothing to be done.
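The Twine to std::string change above is a correctness fix, not just a rename: llvm::Twine does not own its operands, so a named Twine built from a concatenation dangles as soon as the temporaries die (clang-tidy's llvm-twine-local check flags exactly this). A minimal illustration; the function and parameter names are hypothetical:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include <string>

std::string makeVarName(const llvm::Twine &prefix, llvm::StringRef attrName) {
  // Wrong: `bad` would reference already-destroyed concatenation nodes.
  // llvm::Twine bad = prefix + attrName;

  // Right: materialize the concatenation into owned storage immediately.
  return (prefix + attrName).str();
}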
diff --git a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
--- a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
+++ b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
@@ -891,7 +891,7 @@
   unsigned operandNum = 0;
   for (unsigned i = 0, e = op.getNumArgs(); i < e; ++i) {
     auto argument = op.getArg(i);
-    if (auto valueArg = argument.dyn_cast()) {
+    if (auto *valueArg = argument.dyn_cast()) {
       if (valueArg->isVariableLength()) {
         if (i != e - 1) {
           PrintFatalError(loc, "SPIR-V ops can have Variadic<..> or "
@@ -921,7 +921,7 @@
     os << tabs << "}\n";
   } else {
     os << tabs << formatv("if ({0} < {1}.size()) {{\n", wordIndex, words);
-    auto attr = argument.get();
+    auto *attr = argument.get();
     auto newtabs = tabs.str() + "  ";
     emitAttributeDeserialization(
         (attr->attr.isOptional() ? attr->attr.getBaseAttr() : attr->attr),
diff --git a/mlir/tools/mlir-tblgen/mlir-tblgen.cpp b/mlir/tools/mlir-tblgen/mlir-tblgen.cpp
--- a/mlir/tools/mlir-tblgen/mlir-tblgen.cpp
+++ b/mlir/tools/mlir-tblgen/mlir-tblgen.cpp
@@ -40,16 +40,16 @@
   }
 }

-void GenNameParser::printOptionInfo(const llvm::cl::Option &O,
-                                    size_t GlobalWidth) const {
-  GenNameParser *TP = const_cast(this);
-  llvm::array_pod_sort(TP->Values.begin(), TP->Values.end(),
-                       [](const GenNameParser::OptionInfo *VT1,
-                          const GenNameParser::OptionInfo *VT2) {
-                         return VT1->Name.compare(VT2->Name);
+void GenNameParser::printOptionInfo(const llvm::cl::Option &o,
+                                    size_t globalWidth) const {
+  GenNameParser *tp = const_cast(this);
+  llvm::array_pod_sort(tp->Values.begin(), tp->Values.end(),
+                       [](const GenNameParser::OptionInfo *vT1,
+                          const GenNameParser::OptionInfo *vT2) {
+                         return vT1->Name.compare(vT2->Name);
                        });
   using llvm::cl::parser;
-  parser::printOptionInfo(O, GlobalWidth);
+  parser::printOptionInfo(o, globalWidth);
 }

 // Generator that prints records.
@@ -64,7 +64,7 @@
 // TableGenMain requires a function pointer so this function is passed in which
 // simply wraps the call to the generator.
-static bool MlirTableGenMain(raw_ostream &os, RecordKeeper &records) {
+static bool mlirTableGenMain(raw_ostream &os, RecordKeeper &records) {
   if (!generator) {
     os << records;
     return false;
@@ -79,5 +79,5 @@
   cl::ParseCommandLineOptions(argc, argv);
   ::generator = generator.getValue();

-  return TableGenMain(argv[0], &MlirTableGenMain);
+  return TableGenMain(argv[0], &mlirTableGenMain);
 }
diff --git a/mlir/unittests/ExecutionEngine/Invoke.cpp b/mlir/unittests/ExecutionEngine/Invoke.cpp
--- a/mlir/unittests/ExecutionEngine/Invoke.cpp
+++ b/mlir/unittests/ExecutionEngine/Invoke.cpp
@@ -103,10 +103,10 @@
 }

 TEST(NativeMemRefJit, ZeroRankMemref) {
-  OwningMemRef A({});
-  A[{}] = 42.;
-  ASSERT_EQ(*A->data, 42);
-  A[{}] = 0;
+  OwningMemRef a({});
+  a[{}] = 42.;
+  ASSERT_EQ(*a->data, 42);
+  a[{}] = 0;
   std::string moduleStr = R"mlir(
   func @zero_ranked(%arg0 : memref) attributes { llvm.emit_c_interface } {
     %cst42 = arith.constant 42.0 : f32
@@ -125,19 +125,19 @@
   ASSERT_TRUE(!!jitOrError);
   auto jit = std::move(jitOrError.get());

-  llvm::Error error = jit->invoke("zero_ranked", &*A);
+  llvm::Error error = jit->invoke("zero_ranked", &*a);
   ASSERT_TRUE(!error);
-  EXPECT_EQ((A[{}]), 42.);
-  for (float &elt : *A)
-    EXPECT_EQ(&elt, &(A[{}]));
+  EXPECT_EQ((a[{}]), 42.);
+  for (float &elt : *a)
+    EXPECT_EQ(&elt, &(a[{}]));
 }

 TEST(NativeMemRefJit, RankOneMemref) {
   int64_t shape[] = {9};
-  OwningMemRef A(shape);
+  OwningMemRef a(shape);
   int count = 1;
-  for (float &elt : *A) {
-    EXPECT_EQ(&elt, &(A[{count - 1}]));
+  for (float &elt : *a) {
+    EXPECT_EQ(&elt, &(a[{count - 1}]));
     elt = count++;
   }
@@ -160,10 +160,10 @@
   ASSERT_TRUE(!!jitOrError);
   auto jit = std::move(jitOrError.get());

-  llvm::Error error = jit->invoke("one_ranked", &*A);
+  llvm::Error error = jit->invoke("one_ranked", &*a);
   ASSERT_TRUE(!error);
   count = 1;
-  for (float &elt : *A) {
+  for (float &elt : *a) {
     if (count == 6)
       EXPECT_EQ(elt, 42.);
     else
@@ -173,24 +173,24 @@
 }

 TEST(NativeMemRefJit, BasicMemref) {
-  constexpr int K = 3;
-  constexpr int M = 7;
+  constexpr int k = 3;
+  constexpr int m = 7;
   // Prepare arguments beforehand.
   auto init = [=](float &elt, ArrayRef indices) {
     assert(indices.size() == 2);
-    elt = M * indices[0] + indices[1];
+    elt = m * indices[0] + indices[1];
   };
-  int64_t shape[] = {K, M};
-  int64_t shapeAlloc[] = {K + 1, M + 1};
-  OwningMemRef A(shape, shapeAlloc, init);
-  ASSERT_EQ(A->sizes[0], K);
-  ASSERT_EQ(A->sizes[1], M);
-  ASSERT_EQ(A->strides[0], M + 1);
-  ASSERT_EQ(A->strides[1], 1);
-  for (int i = 0; i < K; ++i) {
-    for (int j = 0; j < M; ++j) {
-      EXPECT_EQ((A[{i, j}]), i * M + j);
-      EXPECT_EQ(&(A[{i, j}]), &((*A)[i][j]));
+  int64_t shape[] = {k, m};
+  int64_t shapeAlloc[] = {k + 1, m + 1};
+  OwningMemRef a(shape, shapeAlloc, init);
+  ASSERT_EQ(a->sizes[0], k);
+  ASSERT_EQ(a->sizes[1], m);
+  ASSERT_EQ(a->strides[0], m + 1);
+  ASSERT_EQ(a->strides[1], 1);
+  for (int i = 0; i < k; ++i) {
+    for (int j = 0; j < m; ++j) {
+      EXPECT_EQ((a[{i, j}]), i * m + j);
+      EXPECT_EQ(&(a[{i, j}]), &((*a)[i][j]));
     }
   }
   std::string moduleStr = R"mlir(
@@ -214,27 +214,27 @@
   ASSERT_TRUE(!!jitOrError);
   std::unique_ptr jit = std::move(jitOrError.get());

-  llvm::Error error = jit->invoke("rank2_memref", &*A, &*A);
+  llvm::Error error = jit->invoke("rank2_memref", &*a, &*a);
   ASSERT_TRUE(!error);
-  EXPECT_EQ(((*A)[1][2]), 42.);
-  EXPECT_EQ((A[{2, 1}]), 42.);
+  EXPECT_EQ(((*a)[1][2]), 42.);
+  EXPECT_EQ((a[{2, 1}]), 42.);
 }

 // A helper function that will be called from the JIT
-static void memref_multiply(::StridedMemRefType *memref,
-                            int32_t coefficient) {
+static void memrefMultiply(::StridedMemRefType *memref,
+                           int32_t coefficient) {
   for (float &elt : *memref)
     elt *= coefficient;
 }

 TEST(NativeMemRefJit, JITCallback) {
-  constexpr int K = 2;
-  constexpr int M = 2;
-  int64_t shape[] = {K, M};
-  int64_t shapeAlloc[] = {K + 1, M + 1};
-  OwningMemRef A(shape, shapeAlloc);
+  constexpr int k = 2;
+  constexpr int m = 2;
+  int64_t shape[] = {k, m};
+  int64_t shapeAlloc[] = {k + 1, m + 1};
+  OwningMemRef a(shape, shapeAlloc);
   int count = 1;
-  for (float &elt : *A)
+  for (float &elt : *a)
     elt = count++;

   std::string moduleStr = R"mlir(
@@ -259,15 +259,15 @@
   jit->registerSymbols([&](llvm::orc::MangleAndInterner interner) {
     llvm::orc::SymbolMap symbolMap;
     symbolMap[interner("_mlir_ciface_callback")] =
-        llvm::JITEvaluatedSymbol::fromPointer(memref_multiply);
+        llvm::JITEvaluatedSymbol::fromPointer(memrefMultiply);
     return symbolMap;
   });

   int32_t coefficient = 3.;
-  llvm::Error error = jit->invoke("caller_for_callback", &*A, coefficient);
+  llvm::Error error = jit->invoke("caller_for_callback", &*a, coefficient);
   ASSERT_TRUE(!error);
   count = 1;
-  for (float elt : *A)
+  for (float elt : *a)
     ASSERT_EQ(elt, coefficient * count++);
 }
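For context on the OwningMemRef tests above: the class (from mlir/ExecutionEngine/MemRefUtils.h) is a template over element type and rank, and its template arguments do not survive in the patch text. A sketch of how the two-dimensional buffer in BasicMemref would be declared, assuming the MemRefUtils.h signatures of this vintage:

#include "mlir/ExecutionEngine/MemRefUtils.h"

void buildExample() {
  int64_t shape[] = {3, 7};      // Logical sizes.
  int64_t shapeAlloc[] = {4, 8}; // Padded allocation, hence non-unit strides.
  // Element type and rank are template arguments: float, rank 2.
  mlir::OwningMemRef<float, 2> a(
      shape, shapeAlloc, [](float &elt, llvm::ArrayRef<int64_t> indices) {
        elt = 7 * indices[0] + indices[1]; // Row-major init, as in the test.
      });
}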
diff --git a/mlir/unittests/IR/OperationSupportTest.cpp b/mlir/unittests/IR/OperationSupportTest.cpp
--- a/mlir/unittests/IR/OperationSupportTest.cpp
+++ b/mlir/unittests/IR/OperationSupportTest.cpp
@@ -236,7 +236,7 @@
   attrs.append("baz", b.getStringAttr("boo"));

   {
-    auto it = attrs.begin();
+    auto *it = attrs.begin();
     EXPECT_EQ(it->getName(), b.getStringAttr("foo"));
     EXPECT_EQ(it->getValue(), b.getStringAttr("bar"));
     ++it;
@@ -260,7 +260,7 @@
   ASSERT_FALSE(dup.hasValue());

   {
-    auto it = attrs.begin();
+    auto *it = attrs.begin();
     EXPECT_EQ(it->getName(), b.getStringAttr("foo"));
     EXPECT_EQ(it->getValue(), b.getStringAttr("f"));
     ++it;
diff --git a/mlir/unittests/TableGen/StructsGenTest.cpp b/mlir/unittests/TableGen/StructsGenTest.cpp
--- a/mlir/unittests/TableGen/StructsGenTest.cpp
+++ b/mlir/unittests/TableGen/StructsGenTest.cpp
@@ -18,7 +18,7 @@
 namespace mlir {

 /// Pull in generated enum utility declarations and definitions.
-#include "StructAttrGenTest.h.inc"
+#include "StructAttrGenTest.h.inc" // NOLINT
 #include "StructAttrGenTest.cpp.inc"

 /// Helper that returns an example test::TestStruct for testing its