diff --git a/mlir/examples/toy/Ch1/parser/AST.cpp b/mlir/examples/toy/Ch1/parser/AST.cpp
--- a/mlir/examples/toy/Ch1/parser/AST.cpp
+++ b/mlir/examples/toy/Ch1/parser/AST.cpp
@@ -118,7 +118,7 @@
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch1/toyc.cpp b/mlir/examples/toy/Ch1/toyc.cpp
--- a/mlir/examples/toy/Ch1/toyc.cpp
+++ b/mlir/examples/toy/Ch1/toyc.cpp
@@ -27,7 +27,7 @@
                                           cl::value_desc("filename"));
 namespace {
 enum Action { None, DumpAST };
-}
+} // namespace
 
 static cl::opt<enum Action>
     emitAction("emit", cl::desc("Select the kind of output desired"),
diff --git a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@
 
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -371,7 +371,7 @@
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -398,7 +398,7 @@
 
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch2/parser/AST.cpp b/mlir/examples/toy/Ch2/parser/AST.cpp
--- a/mlir/examples/toy/Ch2/parser/AST.cpp
+++ b/mlir/examples/toy/Ch2/parser/AST.cpp
@@ -118,7 +118,7 @@
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch2/toyc.cpp b/mlir/examples/toy/Ch2/toyc.cpp
--- a/mlir/examples/toy/Ch2/toyc.cpp
+++ b/mlir/examples/toy/Ch2/toyc.cpp
@@ -38,7 +38,7 @@
 
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -47,7 +47,7 @@
 
 namespace {
 enum Action { None, DumpAST, DumpMLIR };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -89,8 +89,8 @@
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
diff --git a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@
 
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -371,7 +371,7 @@
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -398,7 +398,7 @@
 
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch3/parser/AST.cpp b/mlir/examples/toy/Ch3/parser/AST.cpp
--- a/mlir/examples/toy/Ch3/parser/AST.cpp
+++ b/mlir/examples/toy/Ch3/parser/AST.cpp
@@ -118,7 +118,7 @@
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch3/toyc.cpp b/mlir/examples/toy/Ch3/toyc.cpp
--- a/mlir/examples/toy/Ch3/toyc.cpp
+++ b/mlir/examples/toy/Ch3/toyc.cpp
@@ -40,7 +40,7 @@
 
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -49,7 +49,7 @@
 
 namespace {
 enum Action { None, DumpAST, DumpMLIR };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -86,8 +86,8 @@
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
diff --git a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@
 
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -375,7 +375,7 @@
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -402,7 +402,7 @@
 
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch4/parser/AST.cpp b/mlir/examples/toy/Ch4/parser/AST.cpp
--- a/mlir/examples/toy/Ch4/parser/AST.cpp
+++ b/mlir/examples/toy/Ch4/parser/AST.cpp
@@ -118,7 +118,7 @@
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch4/toyc.cpp b/mlir/examples/toy/Ch4/toyc.cpp
--- a/mlir/examples/toy/Ch4/toyc.cpp
+++ b/mlir/examples/toy/Ch4/toyc.cpp
@@ -41,7 +41,7 @@
 
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -50,7 +50,7 @@
 
 namespace {
 enum Action { None, DumpAST, DumpMLIR };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -87,8 +87,8 @@
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
diff --git a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@
 
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -375,7 +375,7 @@
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -402,7 +402,7 @@
 
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch5/parser/AST.cpp b/mlir/examples/toy/Ch5/parser/AST.cpp
--- a/mlir/examples/toy/Ch5/parser/AST.cpp
+++ b/mlir/examples/toy/Ch5/parser/AST.cpp
@@ -118,7 +118,7 @@
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch5/toyc.cpp b/mlir/examples/toy/Ch5/toyc.cpp
--- a/mlir/examples/toy/Ch5/toyc.cpp
+++ b/mlir/examples/toy/Ch5/toyc.cpp
@@ -43,7 +43,7 @@
 
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -52,7 +52,7 @@
 
 namespace {
 enum Action { None, DumpAST, DumpMLIR, DumpMLIRAffine };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -91,8 +91,8 @@
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
diff --git a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@
 
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -375,7 +375,7 @@
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -402,7 +402,7 @@
 
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch6/parser/AST.cpp b/mlir/examples/toy/Ch6/parser/AST.cpp
--- a/mlir/examples/toy/Ch6/parser/AST.cpp
+++ b/mlir/examples/toy/Ch6/parser/AST.cpp
@@ -118,7 +118,7 @@
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch6/toyc.cpp b/mlir/examples/toy/Ch6/toyc.cpp
--- a/mlir/examples/toy/Ch6/toyc.cpp
+++ b/mlir/examples/toy/Ch6/toyc.cpp
@@ -49,7 +49,7 @@
 
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -66,7 +66,7 @@
   DumpLLVMIR,
   RunJIT
 };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -110,8 +110,8 @@
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
diff --git a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
@@ -169,14 +169,14 @@
         return nullptr;
       argTypes.push_back(type);
     }
-    auto func_type = builder.getFunctionType(argTypes, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    SymbolTableScopeT var_scope(symbolTable);
+    SymbolTableScopeT varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -286,7 +286,7 @@
       return llvm::None;
 
     auto structVars = structAST->getVariables();
-    auto it = llvm::find_if(structVars, [&](auto &var) {
+    const auto *it = llvm::find_if(structVars, [&](auto &var) {
       return var->getName() == name->getName();
     });
     if (it == structVars.end())
@@ -569,7 +569,7 @@
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -612,7 +612,7 @@
 
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    SymbolTableScopeT var_scope(symbolTable);
+    SymbolTableScopeT varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
diff --git a/mlir/examples/toy/Ch7/parser/AST.cpp b/mlir/examples/toy/Ch7/parser/AST.cpp
--- a/mlir/examples/toy/Ch7/parser/AST.cpp
+++ b/mlir/examples/toy/Ch7/parser/AST.cpp
@@ -121,7 +121,7 @@
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
diff --git a/mlir/examples/toy/Ch7/toyc.cpp b/mlir/examples/toy/Ch7/toyc.cpp
--- a/mlir/examples/toy/Ch7/toyc.cpp
+++ b/mlir/examples/toy/Ch7/toyc.cpp
@@ -49,7 +49,7 @@
 
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -66,7 +66,7 @@
   DumpLLVMIR,
   RunJIT
 };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -110,8 +110,8 @@
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -266,13 +266,14 @@
     unsigned size = kDefaultPointerSizeBits;
     unsigned abi = kDefaultPointerAlignment;
     auto newType = newEntry.getKey().get<Type>().cast<LLVMPointerType>();
-    auto it = llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
-      if (auto type = entry.getKey().dyn_cast<Type>()) {
-        return type.cast<LLVMPointerType>().getAddressSpace() ==
-               newType.getAddressSpace();
-      }
-      return false;
-    });
+    const auto *it =
+        llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
+          if (auto type = entry.getKey().dyn_cast<Type>()) {
+            return type.cast<LLVMPointerType>().getAddressSpace() ==
+                   newType.getAddressSpace();
+          }
+          return false;
+        });
     if (it == oldLayout.end()) {
       llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
         if (auto type = entry.getKey().dyn_cast<Type>()) {
@@ -440,14 +441,15 @@
 
 namespace {
 enum class StructDLEntryPos { Abi = 0, Preferred = 1 };
-}
+} // namespace
 
 static Optional<uint64_t>
 getStructDataLayoutEntry(DataLayoutEntryListRef params, LLVMStructType type,
                          StructDLEntryPos pos) {
-  auto currentEntry = llvm::find_if(params, [](DataLayoutEntryInterface entry) {
-    return entry.isTypeEntry();
-  });
+  const auto *currentEntry =
+      llvm::find_if(params, [](DataLayoutEntryInterface entry) {
+        return entry.isTypeEntry();
+      });
   if (currentEntry == params.end())
     return llvm::None;
@@ -509,7 +511,7 @@
     if (!newEntry.isTypeEntry())
       continue;
 
-    auto previousEntry =
+    const auto *previousEntry =
         llvm::find_if(oldLayout, [](DataLayoutEntryInterface entry) {
           return entry.isTypeEntry();
         });
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1829,12 +1829,12 @@
     return failure();
 
   // Parse input tensors.
-  SmallVector<OpAsmParser::OperandType, 4> inputs, input_region_args;
+  SmallVector<OpAsmParser::OperandType, 4> inputs, inputRegionArgs;
   SmallVector<Type, 4> inputTypes;
   if (succeeded(parser.parseOptionalKeyword("ins"))) {
     llvm::SMLoc inputsOperandsLoc = parser.getCurrentLocation();
 
-    if (parser.parseAssignmentListWithTypes(input_region_args, inputs,
+    if (parser.parseAssignmentListWithTypes(inputRegionArgs, inputs,
                                             inputTypes))
       return failure();
@@ -1844,12 +1844,12 @@
   }
 
   // Parse output tensors.
-  SmallVector<OpAsmParser::OperandType, 4> outputs, output_region_args;
+  SmallVector<OpAsmParser::OperandType, 4> outputs, outputRegionArgs;
   SmallVector<Type, 4> outputTypes;
   if (succeeded(parser.parseOptionalKeyword("outs"))) {
     llvm::SMLoc outputsOperandsLoc = parser.getCurrentLocation();
 
-    if (parser.parseAssignmentListWithTypes(output_region_args, outputs,
+    if (parser.parseAssignmentListWithTypes(outputRegionArgs, outputs,
                                             outputTypes))
       return failure();
@@ -1905,15 +1905,15 @@
   // Parse the body.
   Region *body = result.addRegion();
-  SmallVector<Type, 4> region_types(ivs.size(), builder.getIndexType());
-  region_types.append(inputTypes);
-  region_types.append(outputTypes);
+  SmallVector<Type, 4> regionTypes(ivs.size(), builder.getIndexType());
+  regionTypes.append(inputTypes);
+  regionTypes.append(outputTypes);
 
-  SmallVector<OpAsmParser::OperandType, 4> region_args(ivs);
-  region_args.append(input_region_args);
-  region_args.append(output_region_args);
+  SmallVector<OpAsmParser::OperandType, 4> regionArgs(ivs);
+  regionArgs.append(inputRegionArgs);
+  regionArgs.append(outputRegionArgs);
 
-  if (parser.parseRegion(*body, region_args, region_types))
+  if (parser.parseRegion(*body, regionArgs, regionTypes))
     return failure();
 
   // Parse optional attributes.
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1426,9 +1426,9 @@
 /// Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c}, {n, w, c}}
 /// ```
 /// kw is unrolled, w is unrolled iff dilationW > 1.
-struct Conv1D_NWC_Generator : public StructuredGenerator<LinalgOp> {
-  Conv1D_NWC_Generator(OpBuilder &builder, LinalgOp linalgOp, int strideW,
-                       int dilationW)
+struct Conv1DNwcGenerator : public StructuredGenerator<LinalgOp> {
+  Conv1DNwcGenerator(OpBuilder &builder, LinalgOp linalgOp, int strideW,
+                     int dilationW)
       : StructuredGenerator<LinalgOp>(builder, linalgOp), valid(false),
         strideW(strideW), dilationW(dilationW) {
     // Determine whether `linalgOp` can be generated with this generator
@@ -1594,7 +1594,7 @@
   /// ```
   /// kw is always unrolled.
   /// TODO: w (resp. kw) is unrolled when the strideW ( resp. dilationW) is > 1.
-  FailureOr<Operation *> dilated_conv() {
+  FailureOr<Operation *> dilatedConv() {
     if (!valid)
       return failure();
@@ -1730,7 +1730,7 @@
     if (layout({/*lhsIndex*/ {n, strideW * w + dilationW * kw, c},
                 /*rhsIndex*/ {kw, c},
                 /*resIndex*/ {n, w, c}}))
-      return dilated_conv();
+      return dilatedConv();
     return failure();
   }
 
@@ -1752,7 +1752,7 @@
   auto stride = strides ? *strides.getValues<uint64_t>().begin() : 1;
   auto dilation = dilations ? *dilations.getValues<uint64_t>().begin() : 1;
   LinalgOp linalgOp = cast<LinalgOp>(convOp.getOperation());
-  Conv1D_NWC_Generator e(b, linalgOp, stride, dilation);
+  Conv1DNwcGenerator e(b, linalgOp, stride, dilation);
   auto res = e.generateConv();
   if (succeeded(res))
     return res;
diff --git a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
--- a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
@@ -195,7 +195,7 @@
 // Decomposes given floating point value `arg` into a normalized fraction and
 // an integral power of two (see std::frexp). Returned values have float type.
 static std::pair<Value, Value> frexp(ImplicitLocOpBuilder &builder, Value arg,
-                                     bool is_positive = false) {
+                                     bool isPositive = false) {
   assert(getElementTypeOrSelf(arg).isF32() && "arg must be f32 type");
   ArrayRef<int64_t> shape = vectorShape(arg);
@@ -222,7 +222,7 @@
   Value normalizedFraction = builder.create<arith::BitcastOp>(f32Vec, tmp1);
 
   // Compute exponent.
-  Value arg0 = is_positive ? arg : builder.create<math::AbsOp>(arg);
+  Value arg0 = isPositive ? arg : builder.create<math::AbsOp>(arg);
   Value biasedExponentBits = builder.create<arith::ShRUIOp>(
       builder.create<arith::BitcastOp>(i32Vec, arg0),
       bcast(i32Cst(builder, 23)));
diff --git a/mlir/lib/Dialect/PDL/IR/PDL.cpp b/mlir/lib/Dialect/PDL/IR/PDL.cpp
--- a/mlir/lib/Dialect/PDL/IR/PDL.cpp
+++ b/mlir/lib/Dialect/PDL/IR/PDL.cpp
@@ -271,8 +271,8 @@
 static LogicalResult verify(PatternOp pattern) {
   Region &body = pattern.body();
   Operation *term = body.front().getTerminator();
-  auto rewrite_op = dyn_cast<RewriteOp>(term);
-  if (!rewrite_op) {
+  auto rewriteOp = dyn_cast<RewriteOp>(term);
+  if (!rewriteOp) {
     return pattern.emitOpError("expected body to terminate with `pdl.rewrite`")
         .attachNote(term->getLoc())
         .append("see terminator defined here");
diff --git a/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp b/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
--- a/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
+++ b/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
@@ -104,7 +104,8 @@
   if (candidateType == getStorageType()) {
     // i.e. i32 -> quant<"uniform[i8:f32]{1.0}">
     return *this;
-  } else if (candidateType.isa<RankedTensorType>()) {
+  }
+  if (candidateType.isa<RankedTensorType>()) {
     // i.e. tensor<4xi8> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
     return RankedTensorType::get(
         candidateType.cast<RankedTensorType>().getShape(), getStorageType());
@@ -124,7 +125,8 @@
   if (quantizedType.isa<QuantizedType>()) {
     // i.e. quant<"uniform[i8:f32]{1.0}"> -> i8
     return quantizedType.cast<QuantizedType>().getStorageType();
-  } else if (quantizedType.isa<ShapedType>()) {
+  }
+  if (quantizedType.isa<ShapedType>()) {
     // i.e. tensor<4xi8> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
     ShapedType sType = quantizedType.cast<ShapedType>();
     if (!sType.getElementType().isa<QuantizedType>()) {
@@ -148,7 +150,8 @@
   if (candidateType == getExpressedType()) {
     // i.e. f32 -> quant<"uniform[i8:f32]{1.0}">
     return *this;
-  } else if (candidateType.isa<ShapedType>()) {
+  }
+  if (candidateType.isa<ShapedType>()) {
     ShapedType candidateShapedType = candidateType.cast<ShapedType>();
     if (candidateShapedType.getElementType() != getExpressedType()) {
       return nullptr;
@@ -173,7 +176,8 @@
   if (quantizedType.isa<QuantizedType>()) {
     // i.e. quant<"uniform[i8:f32]{1.0}"> -> f32
     return quantizedType.cast<QuantizedType>().getExpressedType();
-  } else if (quantizedType.isa<ShapedType>()) {
+  }
+  if (quantizedType.isa<ShapedType>()) {
     // i.e. tensor<4xi8> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
     ShapedType sType = quantizedType.cast<ShapedType>();
     if (!sType.getElementType().isa<QuantizedType>()) {
diff --git a/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp b/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
--- a/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
+++ b/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
@@ -126,7 +126,7 @@
   bool hadFailure = false;
   auto func = getFunction();
   RewritePatternSet patterns(func.getContext());
-  auto ctx = func.getContext();
+  auto *ctx = func.getContext();
   patterns.add<ConstFakeQuantRewrite, ConstFakeQuantPerAxisRewrite>(
       ctx, &hadFailure);
   (void)applyPatternsAndFoldGreedily(func, std::move(patterns));
diff --git a/mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp b/mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp
--- a/mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp
+++ b/mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp
@@ -140,10 +140,10 @@
     Location loc, unsigned numBits, int32_t quantizedDimension,
     ArrayRef<double> rmins, ArrayRef<double> rmaxs, bool narrowRange,
     Type expressedType, bool isSigned) {
-  size_t axis_size = rmins.size();
-  if (axis_size != rmaxs.size()) {
+  size_t axisSize = rmins.size();
+  if (axisSize != rmaxs.size()) {
     return (emitError(loc, "mismatched per-axis min and max size: ")
-                << axis_size << " vs. " << rmaxs.size(),
+                << axisSize << " vs. " << rmaxs.size(),
             nullptr);
   }
@@ -159,9 +159,9 @@
 
   SmallVector<double, 4> scales;
   SmallVector<int64_t, 4> zeroPoints;
-  scales.reserve(axis_size);
-  zeroPoints.reserve(axis_size);
-  for (size_t axis = 0; axis != axis_size; ++axis) {
+  scales.reserve(axisSize);
+  zeroPoints.reserve(axisSize);
+  for (size_t axis = 0; axis != axisSize; ++axis) {
     double rmin = rmins[axis];
     double rmax = rmaxs[axis];
     if (std::fabs(rmax - rmin) < std::numeric_limits<double>::epsilon()) {
diff --git a/mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp b/mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp
--- a/mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp
+++ b/mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp
@@ -106,7 +106,8 @@
         realValue.cast<DenseFPElementsAttr>(), quantizedElementType, converter);
     outConvertedType = converted.getType();
     return converted;
-  } else if (realValue.isa<SparseElementsAttr>()) {
+  }
+  if (realValue.isa<SparseElementsAttr>()) {
     // Sparse tensor or vector constant.
     auto converted = convertSparseElementsAttr(
         realValue.cast<SparseElementsAttr>(), quantizedElementType, converter);
@@ -132,9 +133,9 @@
     UniformQuantizedValueConverter converter(uniformQuantized);
     return quantizeAttrUniform(realValue, uniformQuantized, converter,
                                outConvertedType);
-
-  } else if (auto uniformQuantizedPerAxis =
-                 quantizedElementType.dyn_cast<UniformQuantizedPerAxisType>()) {
+  }
+  if (auto uniformQuantizedPerAxis =
+          quantizedElementType.dyn_cast<UniformQuantizedPerAxisType>()) {
    UniformQuantizedPerAxisValueConverter converter(uniformQuantizedPerAxis);
    auto converted = converter.convert(realValue);
    // TODO: why we need this outConvertedType? remove it?
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
@@ -74,7 +74,7 @@
 
 namespace {
 #include "SPIRVCanonicalization.inc"
-}
+} // namespace
 
 //===----------------------------------------------------------------------===//
 // spv.AccessChainOp
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
@@ -3250,13 +3250,13 @@
   return success();
 }
 
-static void print(spirv::CooperativeMatrixLoadNVOp M, OpAsmPrinter &printer) {
-  printer << " " << M.pointer() << ", " << M.stride() << ", "
-          << M.columnmajor();
+static void print(spirv::CooperativeMatrixLoadNVOp m, OpAsmPrinter &printer) {
+  printer << " " << m.pointer() << ", " << m.stride() << ", "
+          << m.columnmajor();
   // Print optional memory access attribute.
-  if (auto memAccess = M.memory_access())
+  if (auto memAccess = m.memory_access())
     printer << " [\"" << stringifyMemoryAccess(*memAccess) << "\"]";
-  printer << " : " << M.pointer().getType() << " as " << M.getType();
+  printer << " : " << m.pointer().getType() << " as " << m.getType();
 }
 
 static LogicalResult verifyPointerAndCoopMatrixType(Operation *op, Type pointer,
diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -540,7 +540,8 @@
       rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.getTrueDest(),
                                             condbr.getTrueOperands());
      return success();
-    } else if (matchPattern(condbr.getCondition(), m_Zero())) {
+    }
+    if (matchPattern(condbr.getCondition(), m_Zero())) {
      // False branch taken.
      rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.getFalseDest(),
                                            condbr.getFalseOperands());
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -771,8 +771,8 @@
                                               OperationState &result,
                                               Type outputType, Value input,
                                               Value paddings,
-                                              Value pad_const) {
-  result.addOperands({input, paddings, pad_const});
+                                              Value padConst) {
+  result.addOperands({input, paddings, padConst});
   auto quantAttr = buildPadOpQuantizationAttr(builder, input);
   if (quantAttr)
     result.addAttribute("quantization_info", quantAttr);
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -33,9 +33,9 @@
 }
 
 template <typename TosaOp, typename... Args>
-TosaOp CreateOpAndInfer(PatternRewriter &rewriter, Location loc, Type result_ty,
+TosaOp createOpAndInfer(PatternRewriter &rewriter, Location loc, Type resultTy,
                         Args &&...args) {
-  auto op = rewriter.create<TosaOp>(loc, result_ty, args...);
+  auto op = rewriter.create<TosaOp>(loc, resultTy, args...);
 
   InferShapedTypeOpInterface shapeInterface =
       dyn_cast<InferShapedTypeOpInterface>(op.getOperation());
@@ -57,12 +57,12 @@
   auto result = op->getResult(0);
   auto predictedShape = returnedShapes[0];
   auto currentKnowledge =
-      mlir::tosa::ValueKnowledge::getKnowledgeFromType(result_ty);
+      mlir::tosa::ValueKnowledge::getKnowledgeFromType(resultTy);
 
   // Compute the knowledge based on the inferred type.
   auto inferredKnowledge =
       mlir::tosa::ValueKnowledge::getPessimisticValueState();
-  inferredKnowledge.dtype = result_ty.cast<ShapedType>().getElementType();
+  inferredKnowledge.dtype = resultTy.cast<ShapedType>().getElementType();
   inferredKnowledge.hasRank = predictedShape.hasRank();
   if (predictedShape.hasRank()) {
     for (auto dim : predictedShape.getDims()) {
@@ -73,8 +73,8 @@
   // Compute the new type based on the joined version.
   auto newKnowledge =
       mlir::tosa::ValueKnowledge::join(currentKnowledge, inferredKnowledge);
-  auto new_ty = newKnowledge.getType();
-  result.setType(new_ty);
+  auto newTy = newKnowledge.getType();
+  result.setType(newTy);
   return op;
 }
@@ -205,19 +205,19 @@
       weightWidth % stride[1] ? stride[1] - weightWidth % stride[1] : 0;
   DenseElementsAttr weightPaddingAttr = DenseIntElementsAttr::get(
       RankedTensorType::get({4, 2}, rewriter.getI32Type()), weightPadding);
-  Value weightPaddingVal = CreateOpAndInfer<tosa::ConstOp>(
+  Value weightPaddingVal = createOpAndInfer<tosa::ConstOp>(
       rewriter, loc, weightPaddingAttr.getType(), weightPaddingAttr);
 
   if (op.quantization_info().hasValue()) {
     auto quantInfo = op.quantization_info().getValue();
-    weight = CreateOpAndInfer<tosa::PadOp>(
+    weight = createOpAndInfer<tosa::PadOp>(
         rewriter, loc, UnrankedTensorType::get(weightETy), weight,
         weightPaddingVal, nullptr,
         PadOpQuantizationAttr::get(quantInfo.weight_zp(),
                                    rewriter.getContext()));
   } else {
-    weight = CreateOpAndInfer<tosa::PadOp>(rewriter, loc,
+    weight = createOpAndInfer<tosa::PadOp>(rewriter, loc,
                                            UnrankedTensorType::get(weightETy),
                                            weight, weightPaddingVal);
   }
@@ -231,7 +231,7 @@
       outputChannels, weightHeight / stride[0], stride[0],
       weightWidth / stride[1], stride[1], inputChannels};
 
-  weight = CreateOpAndInfer<tosa::ReshapeOp>(
+  weight = createOpAndInfer<tosa::ReshapeOp>(
       rewriter, loc, UnrankedTensorType::get(weightETy), weight,
      rewriter.getI64ArrayAttr(weightReshapeDims0));
@@ -240,7 +240,7 @@
       loc, RankedTensorType::get({6}, rewriter.getI32Type()),
       rewriter.getI32TensorAttr({2, 4, 0, 1, 3, 5}));
 
-  weight = CreateOpAndInfer<tosa::TransposeOp>(
+  weight = createOpAndInfer<tosa::TransposeOp>(
       rewriter, loc, UnrankedTensorType::get(weightETy), weight,
       transposeWeightVal);
@@ -248,15 +248,15 @@
   llvm::SmallVector<int64_t> weightReshapeDims1 = {
       outputChannels * stride[0] * stride[1], weightHeight / stride[0],
       weightWidth / stride[1], inputChannels};
-  weight = CreateOpAndInfer<tosa::ReshapeOp>(
+  weight = createOpAndInfer<tosa::ReshapeOp>(
       rewriter, loc, UnrankedTensorType::get(weightETy), weight,
       rewriter.getI64ArrayAttr(weightReshapeDims1));
   ShapedType restridedWeightTy = weight.getType().cast<ShapedType>();
 
-  weight = CreateOpAndInfer<tosa::ReverseOp>(
+  weight = createOpAndInfer<tosa::ReverseOp>(
       rewriter, loc, UnrankedTensorType::get(weightETy), weight,
       rewriter.getI64IntegerAttr(1));
-  weight = CreateOpAndInfer<tosa::ReverseOp>(
+  weight = createOpAndInfer<tosa::ReverseOp>(
       rewriter, loc, UnrankedTensorType::get(weightETy), weight,
       rewriter.getI64IntegerAttr(2));
@@ -270,18 +270,18 @@
   DenseElementsAttr inputPaddingAttr = DenseIntElementsAttr::get(
       RankedTensorType::get({4, 2}, rewriter.getI32Type()), inputPadding);
 
-  Value inputPaddingVal = CreateOpAndInfer<tosa::ConstOp>(
+  Value inputPaddingVal = createOpAndInfer<tosa::ConstOp>(
       rewriter, loc, inputPaddingAttr.getType(), inputPaddingAttr);
 
   if (op.quantization_info().hasValue()) {
     auto quantInfo = op.quantization_info().getValue();
-    input = CreateOpAndInfer<tosa::PadOp>(
+    input = createOpAndInfer<tosa::PadOp>(
         rewriter, loc, UnrankedTensorType::get(inputETy), input,
         inputPaddingVal, nullptr,
         PadOpQuantizationAttr::get(quantInfo.input_zp(),
                                    rewriter.getContext()));
   } else {
-    input = CreateOpAndInfer<tosa::PadOp>(rewriter, loc,
+    input = createOpAndInfer<tosa::PadOp>(rewriter, loc,
                                           UnrankedTensorType::get(inputETy),
                                           input, inputPaddingVal);
   }
@@ -299,7 +299,7 @@
   // Perform the convolution using the zero bias.
   Value conv2d;
   if (op.quantization_info().hasValue()) {
-    conv2d = CreateOpAndInfer<tosa::Conv2DOp>(
+    conv2d = createOpAndInfer<tosa::Conv2DOp>(
                  rewriter, loc, UnrankedTensorType::get(resultETy), input,
                  weight, zeroBias,
                  /*pad=*/rewriter.getI64ArrayAttr({0, 0, 0, 0}),
@@ -308,7 +308,7 @@
                  op.quantization_info().getValue())
                  .getResult();
   } else {
-    conv2d = CreateOpAndInfer<tosa::Conv2DOp>(
+    conv2d = createOpAndInfer<tosa::Conv2DOp>(
                  rewriter, loc, UnrankedTensorType::get(resultETy), input,
                  weight, zeroBias,
                  /*pad=*/rewriter.getI64ArrayAttr({0, 0, 0, 0}),
@@ -327,7 +327,7 @@
   // Factor striding out of the convolution result.
   llvm::SmallVector<int64_t> convReshapeDims0 = {
       batch, convHeight, convWidth, stride[0], stride[1], outputChannels};
-  conv2d = CreateOpAndInfer<tosa::ReshapeOp>(
+  conv2d = createOpAndInfer<tosa::ReshapeOp>(
       rewriter, loc, UnrankedTensorType::get(resultETy), conv2d,
       rewriter.getI64ArrayAttr(convReshapeDims0));
@@ -336,14 +336,14 @@
       loc, RankedTensorType::get({6}, rewriter.getI32Type()),
       rewriter.getI32TensorAttr({0, 1, 3, 2, 4, 5}));
 
-  conv2d = CreateOpAndInfer<tosa::TransposeOp>(
+  conv2d = createOpAndInfer<tosa::TransposeOp>(
       rewriter, loc, UnrankedTensorType::get(convETy), conv2d,
       transposeConvVal);
 
   // Fuse striding behavior back into width / height.
   llvm::SmallVector<int64_t> convReshapeDims1 = {
       batch, convHeight * stride[0], convWidth * stride[1], outputChannels};
-  conv2d = CreateOpAndInfer<tosa::ReshapeOp>(
+  conv2d = createOpAndInfer<tosa::ReshapeOp>(
       rewriter, loc, UnrankedTensorType::get(resultETy), conv2d,
       rewriter.getI64ArrayAttr(convReshapeDims1));
@@ -354,14 +354,14 @@
   sliceBegin[1] = pad[0];
   sliceBegin[2] = pad[1];
 
-  auto slice = CreateOpAndInfer<tosa::SliceOp>(
+  auto slice = createOpAndInfer<tosa::SliceOp>(
                    rewriter, loc, UnrankedTensorType::get(resultETy), conv2d,
                    rewriter.getI64ArrayAttr(sliceBegin),
                    rewriter.getI64ArrayAttr(resultTy.getShape()))
                    .getResult();
 
   auto addBias =
-      CreateOpAndInfer<tosa::AddOp>(rewriter, loc, op.getType(), slice, bias);
+      createOpAndInfer<tosa::AddOp>(rewriter, loc, op.getType(), slice, bias);
 
   rewriter.replaceOp(op, addBias.getResult());
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
@@ -223,7 +223,7 @@
       // Check whether this use case is replaceable. We define an op as
       // being replaceable if it is used by a ReturnOp or a TosaOp.
       bool replaceable = true;
-      for (auto user : result.getUsers()) {
+      for (auto *user : result.getUsers()) {
        if (isa<ReturnOp>(user))
          continue;
        if (user->getDialect()->getNamespace() ==
diff --git a/mlir/lib/Dialect/Vector/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
--- a/mlir/lib/Dialect/Vector/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
@@ -1179,7 +1179,7 @@
     return builder.create<vector::TransposeOp>(loc, v, perm);
   }
 
-  Value outer_prod(Value lhs, Value rhs, Value res, int reductionSize) {
+  Value outerProd(Value lhs, Value rhs, Value res, int reductionSize) {
     assert(reductionSize > 0);
     for (int64_t k = 0; k < reductionSize; ++k) {
       Value a = builder.create<vector::ExtractOp>(loc, lhs, k);
@@ -1199,31 +1199,31 @@
     bindDims(builder.getContext(), m, n, k);
     // Classical row-major matmul:  Just permute the lhs.
     if (layout({{m, k}, {k, n}, {m, n}}))
-      return outer_prod(t(lhs), rhs, res, lhsType.getDimSize(1));
+      return outerProd(t(lhs), rhs, res, lhsType.getDimSize(1));
     // TODO: may be better to fail and use some vector -> scalar reduction.
     if (layout({{m, k}, {n, k}, {m, n}})) {
       Value tlhs = t(lhs);
-      return outer_prod(tlhs, t(rhs), res, lhsType.getDimSize(1));
+      return outerProd(tlhs, t(rhs), res, lhsType.getDimSize(1));
     }
     // No need to permute anything.
     if (layout({{k, m}, {k, n}, {m, n}}))
-      return outer_prod(lhs, rhs, res, lhsType.getDimSize(0));
+      return outerProd(lhs, rhs, res, lhsType.getDimSize(0));
     // Just permute the rhs.
     if (layout({{k, m}, {n, k}, {m, n}}))
-      return outer_prod(lhs, t(rhs), res, lhsType.getDimSize(0));
+      return outerProd(lhs, t(rhs), res, lhsType.getDimSize(0));
     // Transposed output: swap RHS and LHS.
     // Classical row-major matmul: permute the lhs.
     if (layout({{m, k}, {k, n}, {n, m}}))
-      return outer_prod(rhs, t(lhs), res, lhsType.getDimSize(1));
+      return outerProd(rhs, t(lhs), res, lhsType.getDimSize(1));
     // TODO: may be better to fail and use some vector -> scalar reduction.
     if (layout({{m, k}, {n, k}, {n, m}})) {
       Value trhs = t(rhs);
-      return outer_prod(trhs, t(lhs), res, lhsType.getDimSize(1));
+      return outerProd(trhs, t(lhs), res, lhsType.getDimSize(1));
     }
     if (layout({{k, m}, {k, n}, {n, m}}))
-      return outer_prod(rhs, lhs, res, lhsType.getDimSize(0));
+      return outerProd(rhs, lhs, res, lhsType.getDimSize(0));
     if (layout({{k, m}, {n, k}, {n, m}}))
-      return outer_prod(t(rhs), lhs, res, lhsType.getDimSize(0));
+      return outerProd(t(rhs), lhs, res, lhsType.getDimSize(0));
     return failure();
   }
@@ -1236,16 +1236,16 @@
 
     // Case mat-vec: transpose.
     if (layout({{m, k}, {k}, {m}}))
-      return outer_prod(t(lhs), rhs, res, lhsType.getDimSize(1));
+      return outerProd(t(lhs), rhs, res, lhsType.getDimSize(1));
     // Case mat-trans-vec: ready to go.
     if (layout({{k, m}, {k}, {m}}))
-      return outer_prod(lhs, rhs, res, lhsType.getDimSize(0));
+      return outerProd(lhs, rhs, res, lhsType.getDimSize(0));
     // Case vec-mat: swap and transpose.
     if (layout({{k}, {m, k}, {m}}))
-      return outer_prod(t(rhs), lhs, res, lhsType.getDimSize(0));
+      return outerProd(t(rhs), lhs, res, lhsType.getDimSize(0));
     // Case vec-mat-trans: swap and ready to go.
     if (layout({{k}, {k, m}, {m}}))
-      return outer_prod(rhs, lhs, res, lhsType.getDimSize(0));
+      return outerProd(rhs, lhs, res, lhsType.getDimSize(0));
     return failure();
   }
@@ -1260,16 +1260,16 @@
 
     // Case mat-vec: transpose.
     if (layout({{m, k}, {k}, {m}}))
-      return outer_prod(t(lhs), rhs, res, lhsType.getDimSize(1));
+      return outerProd(t(lhs), rhs, res, lhsType.getDimSize(1));
     // Case mat-trans-vec: ready to go.
     if (layout({{k, m}, {k}, {m}}))
-      return outer_prod(lhs, rhs, res, lhsType.getDimSize(0));
+      return outerProd(lhs, rhs, res, lhsType.getDimSize(0));
     // Case vec-mat: swap and transpose.
     if (layout({{k}, {m, k}, {m}}))
-      return outer_prod(t(rhs), lhs, res, lhsType.getDimSize(0));
+      return outerProd(t(rhs), lhs, res, lhsType.getDimSize(0));
     // Case vec-mat-trans: swap and ready to go.
     if (layout({{k}, {k, m}, {m}}))
-      return outer_prod(rhs, lhs, res, lhsType.getDimSize(0));
+      return outerProd(rhs, lhs, res, lhsType.getDimSize(0));
     return failure();
   }
diff --git a/mlir/test/lib/Analysis/TestAliasAnalysis.cpp b/mlir/test/lib/Analysis/TestAliasAnalysis.cpp
--- a/mlir/test/lib/Analysis/TestAliasAnalysis.cpp
+++ b/mlir/test/lib/Analysis/TestAliasAnalysis.cpp
@@ -67,7 +67,7 @@
 
     // Check for aliasing behavior between each of the values.
     for (auto it = valsToCheck.begin(), e = valsToCheck.end(); it != e; ++it)
-      for (auto innerIt = valsToCheck.begin(); innerIt != it; ++innerIt)
+      for (auto *innerIt = valsToCheck.begin(); innerIt != it; ++innerIt)
         printAliasResult(aliasAnalysis.alias(*innerIt, *it), *innerIt, *it);
   }
diff --git a/mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp b/mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp
--- a/mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp
+++ b/mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp
@@ -52,9 +52,9 @@
 void TestMathPolynomialApproximationPass::runOnFunction() {
   RewritePatternSet patterns(&getContext());
-  MathPolynomialApproximationOptions approx_options;
-  approx_options.enableAvx2 = enableAvx2;
-  populateMathPolynomialApproximationPatterns(patterns, approx_options);
+  MathPolynomialApproximationOptions approxOptions;
+  approxOptions.enableAvx2 = enableAvx2;
+  populateMathPolynomialApproximationPatterns(patterns, approxOptions);
   (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
 }
diff --git a/mlir/test/lib/Dialect/Test/TestDialect.cpp b/mlir/test/lib/Dialect/Test/TestDialect.cpp
--- a/mlir/test/lib/Dialect/Test/TestDialect.cpp
+++ b/mlir/test/lib/Dialect/Test/TestDialect.cpp
@@ -108,7 +108,7 @@
 
   void getAsmBlockArgumentNames(Block *block,
                                 OpAsmSetValueNameFn setNameFn) const final {
-    auto op = block->getParentOp();
+    auto *op = block->getParentOp();
     auto arrayAttr = op->getAttrOfType<ArrayAttr>("arg_names");
     if (!arrayAttr)
       return;
@@ -703,24 +703,24 @@
   Region &body = *result.addRegion();
   body.push_back(new Block);
   Block &block = body.back();
-  Operation *wrapped_op = parser.parseGenericOperation(&block, block.begin());
-  if (!wrapped_op)
+  Operation *wrappedOp = parser.parseGenericOperation(&block, block.begin());
+  if (!wrappedOp)
     return failure();
 
   // Create a return terminator in the inner region, pass as operand to the
   // terminator the returned values from the wrapped operation.
-  SmallVector<Value, 8> return_operands(wrapped_op->getResults());
+  SmallVector<Value, 8> returnOperands(wrappedOp->getResults());
   OpBuilder builder(parser.getContext());
   builder.setInsertionPointToEnd(&block);
-  builder.create<TestReturnOp>(wrapped_op->getLoc(), return_operands);
+  builder.create<TestReturnOp>(wrappedOp->getLoc(), returnOperands);
 
   // Get the results type for the wrapping op from the terminator operands.
-  Operation &return_op = body.back().back();
-  result.types.append(return_op.operand_type_begin(),
-                      return_op.operand_type_end());
+  Operation &returnOp = body.back().back();
+  result.types.append(returnOp.operand_type_begin(),
+                      returnOp.operand_type_end());
 
   // Use the location of the wrapped op for the "test.wrapping_region" op.
-  result.location = wrapped_op->getLoc();
+  result.location = wrappedOp->getLoc();
 
   return success();
 }
diff --git a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
--- a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
+++ b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
@@ -71,7 +71,7 @@
   double typeRangeMax = double(outputElementType.getStorageTypeMax() -
                               outputElementType.getZeroPoint()) *
                        outputElementType.getScale();
-  bool narrow_range = outputElementType.getStorageTypeMin() == 1 ? true : false;
+  bool narrowRange = outputElementType.getStorageTypeMin() == 1 ? true : false;
 
   auto dstQConstType = RankedTensorType::get(
       outputType.getShape(),
@@ -81,7 +81,7 @@
           rewriter.getI32IntegerAttr(
               outputElementType.getStorageTypeIntegralWidth()),
           0, true /* signed */,
-          rewriter.getBoolAttr(narrow_range)));
+          rewriter.getBoolAttr(narrowRange)));
 
   ElementsAttr inputElems;
   if (!matchPattern(tosaNegateOp.input1(), m_Constant(&inputElems)))
diff --git a/mlir/test/lib/IR/TestMatchers.cpp b/mlir/test/lib/IR/TestMatchers.cpp
--- a/mlir/test/lib/IR/TestMatchers.cpp
+++ b/mlir/test/lib/IR/TestMatchers.cpp
@@ -76,19 +76,19 @@
   llvm::outs() << "Pattern mul(mul(*), mul(*)) matched " << countMatches(f, p7)
                << " times\n";
 
-  auto mul_of_mulmul =
-      m_Op<arith::MulFOp>(m_Op<arith::MulFOp>(), m_Op<arith::MulFOp>());
-  auto p8 = m_Op<arith::MulFOp>(mul_of_mulmul, mul_of_mulmul);
+  auto mulOfMulmul =
+      m_Op<arith::MulFOp>(m_Op<arith::MulFOp>(), m_Op<arith::MulFOp>());
+  auto p8 = m_Op<arith::MulFOp>(mulOfMulmul, mulOfMulmul);
   llvm::outs()
       << "Pattern mul(mul(mul(*), mul(*)), mul(mul(*), mul(*))) matched "
       << countMatches(f, p8) << " times\n";
 
   // clang-format off
-  auto mul_of_muladd = m_Op<arith::MulFOp>(m_Op<arith::MulFOp>(), m_Op<arith::AddFOp>());
-  auto mul_of_anyadd = m_Op<arith::MulFOp>(m_Any(), m_Op<arith::AddFOp>());
+  auto mulOfMuladd = m_Op<arith::MulFOp>(m_Op<arith::MulFOp>(), m_Op<arith::AddFOp>());
+  auto mulOfAnyadd = m_Op<arith::MulFOp>(m_Any(), m_Op<arith::AddFOp>());
   auto p9 = m_Op<arith::MulFOp>(m_Op<arith::MulFOp>(
-                     mul_of_muladd, m_Op<arith::MulFOp>()),
-                   m_Op<arith::MulFOp>(mul_of_anyadd, mul_of_anyadd));
+                     mulOfMuladd, m_Op<arith::MulFOp>()),
+                   m_Op<arith::MulFOp>(mulOfAnyadd, mulOfAnyadd));
   // clang-format on
   llvm::outs() << "Pattern mul(mul(mul(mul(*), add(*)), mul(*)), mul(mul(*, "
                   "add(*)), mul(*, add(*)))) matched "
               << countMatches(f, p9) << " times\n";
@@ -118,12 +118,12 @@
   llvm::outs() << "Pattern mul(a, add(b, c)) matched " << countMatches(f, p15)
                << " times\n";
 
-  auto mul_of_aany = m_Op<arith::MulFOp>(a, m_Any());
-  auto p16 = m_Op<arith::MulFOp>(mul_of_aany, m_Op<arith::AddFOp>(a, c));
+  auto mulOfAany = m_Op<arith::MulFOp>(a, m_Any());
+  auto p16 = m_Op<arith::MulFOp>(mulOfAany, m_Op<arith::AddFOp>(a, c));
   llvm::outs() << "Pattern mul(mul(a, *), add(a, c)) matched "
                << countMatches(f, p16) << " times\n";
 
-  auto p17 = m_Op<arith::MulFOp>(mul_of_aany, m_Op<arith::AddFOp>(c, b));
+  auto p17 = m_Op<arith::MulFOp>(mulOfAany, m_Op<arith::AddFOp>(c, b));
   llvm::outs() << "Pattern mul(mul(a, *), add(c, b)) matched "
                << countMatches(f, p17) << " times\n";
 }
diff --git a/mlir/test/lib/IR/TestOpaqueLoc.cpp b/mlir/test/lib/IR/TestOpaqueLoc.cpp
--- a/mlir/test/lib/IR/TestOpaqueLoc.cpp
+++ b/mlir/test/lib/IR/TestOpaqueLoc.cpp
@@ -35,10 +35,10 @@
   void runOnOperation() override {
     std::vector<std::unique_ptr<MyLocation>> myLocs;
-    int last_it = 0;
+    int lastIt = 0;
 
     getOperation().getBody()->walk([&](Operation *op) {
-      myLocs.push_back(std::make_unique<MyLocation>(last_it++));
+      myLocs.push_back(std::make_unique<MyLocation>(lastIt++));
 
       Location loc = op->getLoc();
@@ -54,14 +54,13 @@
 
       /// Add the same operation but with fallback location to test the
       /// corresponding get method and serialization.
-      Operation *op_cloned_1 = builder.clone(*op);
-      op_cloned_1->setLoc(
-          OpaqueLoc::get<MyLocation *>(myLocs.back().get(), loc));
+      Operation *opCloned1 = builder.clone(*op);
+      opCloned1->setLoc(OpaqueLoc::get<MyLocation *>(myLocs.back().get(), loc));
 
       /// Add the same operation but with void* instead of MyLocation* to test
       /// getUnderlyingLocationOrNull method.
-      Operation *op_cloned_2 = builder.clone(*op);
-      op_cloned_2->setLoc(OpaqueLoc::get<void *>(nullptr, loc));
+      Operation *opCloned2 = builder.clone(*op);
+      opCloned2->setLoc(OpaqueLoc::get<void *>(nullptr, loc));
     });
 
     ScopedDiagnosticHandler diagHandler(&getContext(), [](Diagnostic &diag) {
diff --git a/mlir/test/lib/Transforms/TestLoopFusion.cpp b/mlir/test/lib/Transforms/TestLoopFusion.cpp
--- a/mlir/test/lib/Transforms/TestLoopFusion.cpp
+++ b/mlir/test/lib/Transforms/TestLoopFusion.cpp
@@ -156,7 +156,7 @@
 // If 'return_on_change' is true, returns on first invocation of 'fn' which
 // returns true.
 static bool iterateLoops(ArrayRef<SmallVector<AffineForOp, 2>> depthToLoops,
-                         LoopFunc fn, bool return_on_change = false) {
+                         LoopFunc fn, bool returnOnChange = false) {
   bool changed = false;
   for (unsigned loopDepth = 0, end = depthToLoops.size(); loopDepth < end;
        ++loopDepth) {
@@ -167,7 +167,7 @@
         if (j != k)
           changed |= fn(loops[j], loops[k], j, k, loopDepth,
                         depthToLoops.size());
-        if (changed && return_on_change)
+        if (changed && returnOnChange)
           return true;
       }
     }
diff --git a/mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp b/mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp
--- a/mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp
+++ b/mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp
@@ -12,27 +12,23 @@
 
 #include "mlir/ExecutionEngine/RunnerUtils.h"
 
-extern "C" void
-_mlir_ciface_fillI32Buffer(StridedMemRefType<int32_t, 1> *mem_ref,
-                           int32_t value) {
-  std::fill_n(mem_ref->basePtr, mem_ref->sizes[0], value);
+extern "C" void mlirCifaceFillI32Buffer(StridedMemRefType<int32_t, 1> *memRef,
+                                        int32_t value) {
+  std::fill_n(memRef->basePtr, memRef->sizes[0], value);
 }
 
-extern "C" void
-_mlir_ciface_fillF32Buffer1D(StridedMemRefType<float, 1> *mem_ref,
-                             float value) {
-  std::fill_n(mem_ref->basePtr, mem_ref->sizes[0], value);
+extern "C" void mlirCifaceFillF32Buffer1D(StridedMemRefType<float, 1> *memRef,
+                                          float value) {
+  std::fill_n(memRef->basePtr, memRef->sizes[0], value);
 }
 
-extern "C" void
-_mlir_ciface_fillF32Buffer2D(StridedMemRefType<float, 2> *mem_ref,
-                             float value) {
-  std::fill_n(mem_ref->basePtr, mem_ref->sizes[0] * mem_ref->sizes[1], value);
+extern "C" void mlirCifaceFillF32Buffer2D(StridedMemRefType<float, 2> *memRef,
+                                          float value) {
+  std::fill_n(memRef->basePtr, memRef->sizes[0] * memRef->sizes[1], value);
 }
 
-extern "C" void
-_mlir_ciface_fillF32Buffer3D(StridedMemRefType<float, 3> *mem_ref,
-                             float value) {
-  std::fill_n(mem_ref->basePtr,
-              mem_ref->sizes[0] * mem_ref->sizes[1] * mem_ref->sizes[2], value);
+extern "C" void mlirCifaceFillF32Buffer3D(StridedMemRefType<float, 3> *memRef,
+                                          float value) {
+  std::fill_n(memRef->basePtr,
+              memRef->sizes[0] * memRef->sizes[1] * memRef->sizes[2], value);
 }
diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
--- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
+++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
@@ -1009,9 +1009,8 @@
     return success(
         succeeded(generateNamedGenericOpOds(opConfig, genContext)) &&
         succeeded(generateNamedGenericOpDefns(opConfig, genContext)));
-  } else {
-    return emitError(genContext.getLoc()) << "unsupported operation type";
   }
+  return emitError(genContext.getLoc()) << "unsupported operation type";
 }
diff --git a/mlir/tools/mlir-tblgen/DialectGen.cpp b/mlir/tools/mlir-tblgen/DialectGen.cpp
--- a/mlir/tools/mlir-tblgen/DialectGen.cpp
+++ b/mlir/tools/mlir-tblgen/DialectGen.cpp
@@ -68,9 +68,10 @@
     return llvm::None;
   }
 
-  auto dialectIt = llvm::find_if(dialectDefs, [](const llvm::Record *def) {
-    return Dialect(def).getName() == selectedDialect;
-  });
+  const auto *dialectIt =
+      llvm::find_if(dialectDefs, [](const llvm::Record *def) {
+        return Dialect(def).getName() == selectedDialect;
+      });
   if (dialectIt == dialectDefs.end()) {
     llvm::errs() << "selected dialect with '-dialect' does not exist\n";
     return llvm::None;
diff --git a/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp b/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
--- a/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
+++ b/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
@@ -24,31 +24,31 @@
 #include "llvm/TableGen/Record.h"
 #include "llvm/TableGen/TableGenBackend.h"
 
-static llvm::cl::OptionCategory IntrinsicGenCat("Intrinsics Generator Options");
+static llvm::cl::OptionCategory intrinsicGenCat("Intrinsics Generator Options");
 
 static llvm::cl::opt<std::string>
     nameFilter("llvmir-intrinsics-filter",
                llvm::cl::desc("Only keep the intrinsics with the specified "
                               "substring in their record name"),
-               llvm::cl::cat(IntrinsicGenCat));
+               llvm::cl::cat(intrinsicGenCat));
 
 static llvm::cl::opt<std::string>
     opBaseClass("dialect-opclass-base",
                 llvm::cl::desc("The base class for the ops in the dialect we "
                                "are planning to emit"),
-                llvm::cl::init("LLVM_IntrOp"), llvm::cl::cat(IntrinsicGenCat));
+                llvm::cl::init("LLVM_IntrOp"), llvm::cl::cat(intrinsicGenCat));
 
 static llvm::cl::opt<std::string> accessGroupRegexp(
     "llvmir-intrinsics-access-group-regexp",
     llvm::cl::desc("Mark intrinsics that match the specified "
                    "regexp as taking an access group metadata"),
-    llvm::cl::cat(IntrinsicGenCat));
+    llvm::cl::cat(intrinsicGenCat));
 
 static llvm::cl::opt<std::string> aliasScopesRegexp(
     "llvmir-intrinsics-alias-scopes-regexp",
     llvm::cl::desc("Mark intrinsics that match the specified "
                    "regexp as taking alias.scopes and noalias metadata"),
-    llvm::cl::cat(IntrinsicGenCat));
+    llvm::cl::cat(intrinsicGenCat));
 
 // Used to represent the indices of overloadable operands/results.
 using IndicesTy = llvm::SmallBitVector;
@@ -104,7 +104,7 @@
   llvm::SmallVector<llvm::StringRef, 8> chunks;
   llvm::StringRef targetPrefix = record.getValueAsString("TargetPrefix");
   name.split(chunks, '_');
-  auto chunksBegin = chunks.begin();
+  auto *chunksBegin = chunks.begin();
   // Remove the target prefix from target specific intrinsics.
   if (!targetPrefix.empty()) {
     assert(targetPrefix == *chunksBegin &&
diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
--- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
@@ -527,7 +527,7 @@
                             emitHelper.isEmittingForOp());
 
   // Prefix with `tblgen_` to avoid hiding the attribute accessor.
-  Twine varName = tblgenNamePrefix + attrName;
+  std::string varName = (tblgenNamePrefix + attrName).str();
 
   // If the attribute is not required and we cannot emit the condition, then
   // there is nothing to be done.
diff --git a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
--- a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
+++ b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
@@ -891,7 +891,7 @@
   unsigned operandNum = 0;
   for (unsigned i = 0, e = op.getNumArgs(); i < e; ++i) {
     auto argument = op.getArg(i);
-    if (auto valueArg = argument.dyn_cast<NamedTypeConstraint *>()) {
+    if (auto *valueArg = argument.dyn_cast<NamedTypeConstraint *>()) {
       if (valueArg->isVariableLength()) {
         if (i != e - 1) {
           PrintFatalError(loc, "SPIR-V ops can have Variadic<..> or "
@@ -921,7 +921,7 @@
       os << tabs << "  }\n";
     } else {
       os << tabs << formatv("if ({0} < {1}.size()) {{\n", wordIndex, words);
-      auto attr = argument.get<NamedAttribute *>();
+      auto *attr = argument.get<NamedAttribute *>();
       auto newtabs = tabs.str() + "  ";
       emitAttributeDeserialization(
           (attr->attr.isOptional() ? attr->attr.getBaseAttr() : attr->attr),
diff --git a/mlir/tools/mlir-tblgen/mlir-tblgen.cpp b/mlir/tools/mlir-tblgen/mlir-tblgen.cpp
--- a/mlir/tools/mlir-tblgen/mlir-tblgen.cpp
+++ b/mlir/tools/mlir-tblgen/mlir-tblgen.cpp
@@ -41,7 +41,7 @@
 }
 
 void GenNameParser::printOptionInfo(const llvm::cl::Option &O,
-                                    size_t GlobalWidth) const {
+                                    size_t globalWidth) const {
   GenNameParser *TP = const_cast<GenNameParser *>(this);
   llvm::array_pod_sort(TP->Values.begin(), TP->Values.end(),
                        [](const GenNameParser::OptionInfo *VT1,
@@ -49,7 +49,7 @@
                          return VT1->Name.compare(VT2->Name);
                        });
   using llvm::cl::parser;
-  parser<const GenInfo *>::printOptionInfo(O, GlobalWidth);
+  parser<const GenInfo *>::printOptionInfo(O, globalWidth);
 }
 
 // Generator that prints records.
@@ -64,7 +64,7 @@
 
 // TableGenMain requires a function pointer so this function is passed in which
 // simply wraps the call to the generator.
-static bool MlirTableGenMain(raw_ostream &os, RecordKeeper &records) {
+static bool mlirTableGenMain(raw_ostream &os, RecordKeeper &records) {
   if (!generator) {
     os << records;
     return false;
   }
@@ -79,5 +79,5 @@
   cl::ParseCommandLineOptions(argc, argv);
   ::generator = generator.getValue();
 
-  return TableGenMain(argv[0], &MlirTableGenMain);
+  return TableGenMain(argv[0], &mlirTableGenMain);
 }