diff --git a/mlir/examples/toy/Ch2/mlir/Dialect.cpp b/mlir/examples/toy/Ch2/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch2/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch2/mlir/Dialect.cpp
@@ -54,7 +54,7 @@
   // If the type is a function type, it contains the input and result types of
   // this operation.
-  if (FunctionType funcType = type.dyn_cast<FunctionType>()) {
+  if (FunctionType funcType = llvm::dyn_cast<FunctionType>(type)) {
     if (parser.resolveOperands(operands, funcType.getInputs(), operandsLoc,
                                result.operands))
       return mlir::failure();
@@ -133,13 +133,13 @@
 mlir::LogicalResult ConstantOp::verify() {
   // If the return type of the constant is not an unranked tensor, the shape
   // must match the shape of the attribute holding the data.
-  auto resultType = getResult().getType().dyn_cast<mlir::RankedTensorType>();
+  auto resultType = llvm::dyn_cast<mlir::RankedTensorType>(getResult().getType());
   if (!resultType)
     return success();

   // Check that the rank of the attribute type matches the rank of the constant
   // result type.
-  auto attrType = getValue().getType().cast<mlir::RankedTensorType>();
+  auto attrType = llvm::cast<mlir::RankedTensorType>(getValue().getType());
   if (attrType.getRank() != resultType.getRank()) {
     return emitOpError("return type must match the one of the attached value "
                        "attribute: ")
@@ -269,8 +269,8 @@
   auto resultType = results.front();

   // Check that the result type of the function matches the operand type.
-  if (inputType == resultType || inputType.isa<mlir::UnrankedTensorType>() ||
-      resultType.isa<mlir::UnrankedTensorType>())
+  if (inputType == resultType || llvm::isa<mlir::UnrankedTensorType>(inputType) ||
+      llvm::isa<mlir::UnrankedTensorType>(resultType))
     return mlir::success();

   return emitError() << "type of return operand (" << inputType
@@ -289,8 +289,8 @@
 }

 mlir::LogicalResult TransposeOp::verify() {
-  auto inputType = getOperand().getType().dyn_cast<RankedTensorType>();
-  auto resultType = getType().dyn_cast<RankedTensorType>();
+  auto inputType = llvm::dyn_cast<RankedTensorType>(getOperand().getType());
+  auto resultType = llvm::dyn_cast<RankedTensorType>(getType());
   if (!inputType || !resultType)
     return mlir::success();
diff --git a/mlir/examples/toy/Ch3/mlir/Dialect.cpp b/mlir/examples/toy/Ch3/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch3/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch3/mlir/Dialect.cpp
@@ -54,7 +54,7 @@
   // If the type is a function type, it contains the input and result types of
   // this operation.
-  if (FunctionType funcType = type.dyn_cast<FunctionType>()) {
+  if (FunctionType funcType = llvm::dyn_cast<FunctionType>(type)) {
     if (parser.resolveOperands(operands, funcType.getInputs(), operandsLoc,
                                result.operands))
       return mlir::failure();
@@ -133,13 +133,13 @@
 mlir::LogicalResult ConstantOp::verify() {
   // If the return type of the constant is not an unranked tensor, the shape
   // must match the shape of the attribute holding the data.
-  auto resultType = getResult().getType().dyn_cast<mlir::RankedTensorType>();
+  auto resultType = llvm::dyn_cast<mlir::RankedTensorType>(getResult().getType());
   if (!resultType)
     return success();

   // Check that the rank of the attribute type matches the rank of the constant
   // result type.
-  auto attrType = getValue().getType().cast<mlir::RankedTensorType>();
+  auto attrType = llvm::cast<mlir::RankedTensorType>(getValue().getType());
   if (attrType.getRank() != resultType.getRank()) {
     return emitOpError("return type must match the one of the attached value "
                        "attribute: ")
@@ -269,8 +269,8 @@
   auto resultType = results.front();

   // Check that the result type of the function matches the operand type.
-  if (inputType == resultType || inputType.isa<mlir::UnrankedTensorType>() ||
-      resultType.isa<mlir::UnrankedTensorType>())
+  if (inputType == resultType || llvm::isa<mlir::UnrankedTensorType>(inputType) ||
+      llvm::isa<mlir::UnrankedTensorType>(resultType))
     return mlir::success();

   return emitError() << "type of return operand (" << inputType
@@ -289,8 +289,8 @@
 }

 mlir::LogicalResult TransposeOp::verify() {
-  auto inputType = getOperand().getType().dyn_cast<RankedTensorType>();
-  auto resultType = getType().dyn_cast<RankedTensorType>();
+  auto inputType = llvm::dyn_cast<RankedTensorType>(getOperand().getType());
+  auto resultType = llvm::dyn_cast<RankedTensorType>(getType());
   if (!inputType || !resultType)
     return mlir::success();
diff --git a/mlir/examples/toy/Ch4/mlir/Dialect.cpp b/mlir/examples/toy/Ch4/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch4/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch4/mlir/Dialect.cpp
@@ -114,7 +114,7 @@
   // If the type is a function type, it contains the input and result types of
   // this operation.
-  if (FunctionType funcType = type.dyn_cast<FunctionType>()) {
+  if (FunctionType funcType = llvm::dyn_cast<FunctionType>(type)) {
     if (parser.resolveOperands(operands, funcType.getInputs(), operandsLoc,
                                result.operands))
       return mlir::failure();
@@ -193,13 +193,13 @@
 mlir::LogicalResult ConstantOp::verify() {
   // If the return type of the constant is not an unranked tensor, the shape
   // must match the shape of the attribute holding the data.
-  auto resultType = getResult().getType().dyn_cast<mlir::RankedTensorType>();
+  auto resultType = llvm::dyn_cast<mlir::RankedTensorType>(getResult().getType());
   if (!resultType)
     return success();

   // Check that the rank of the attribute type matches the rank of the constant
   // result type.
-  auto attrType = getValue().getType().cast<mlir::RankedTensorType>();
+  auto attrType = llvm::cast<mlir::RankedTensorType>(getValue().getType());
   if (attrType.getRank() != resultType.getRank()) {
     return emitOpError("return type must match the one of the attached value "
                        "attribute: ")
@@ -254,8 +254,8 @@
   if (inputs.size() != 1 || outputs.size() != 1)
     return false;
   // The inputs must be Tensors with the same element type.
-  TensorType input = inputs.front().dyn_cast<TensorType>();
-  TensorType output = outputs.front().dyn_cast<TensorType>();
+  TensorType input = llvm::dyn_cast<TensorType>(inputs.front());
+  TensorType output = llvm::dyn_cast<TensorType>(outputs.front());
   if (!input || !output || input.getElementType() != output.getElementType())
     return false;
   // The shape is required to match if both types are ranked.
@@ -397,8 +397,8 @@
   auto resultType = results.front();

   // Check that the result type of the function matches the operand type.
-  if (inputType == resultType || inputType.isa<mlir::UnrankedTensorType>() ||
-      resultType.isa<mlir::UnrankedTensorType>())
+  if (inputType == resultType || llvm::isa<mlir::UnrankedTensorType>(inputType) ||
+      llvm::isa<mlir::UnrankedTensorType>(resultType))
     return mlir::success();

   return emitError() << "type of return operand (" << inputType
@@ -417,14 +417,14 @@
 }

 void TransposeOp::inferShapes() {
-  auto arrayTy = getOperand().getType().cast<RankedTensorType>();
+  auto arrayTy = llvm::cast<RankedTensorType>(getOperand().getType());
   SmallVector<int64_t, 2> dims(llvm::reverse(arrayTy.getShape()));
   getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
 }

 mlir::LogicalResult TransposeOp::verify() {
-  auto inputType = getOperand().getType().dyn_cast<RankedTensorType>();
-  auto resultType = getType().dyn_cast<RankedTensorType>();
+  auto inputType = llvm::dyn_cast<RankedTensorType>(getOperand().getType());
+  auto resultType = llvm::dyn_cast<RankedTensorType>(getType());
   if (!inputType || !resultType)
     return mlir::success();
diff --git a/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
--- a/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
@@ -94,7 +94,7 @@
   /// operands inferred.
   static bool allOperandsInferred(Operation *op) {
     return llvm::all_of(op->getOperandTypes(), [](Type operandType) {
-      return operandType.isa<RankedTensorType>();
+      return llvm::isa<RankedTensorType>(operandType);
     });
   }

@@ -102,7 +102,7 @@
   /// shaped result.
   static bool returnsDynamicShape(Operation *op) {
     return llvm::any_of(op->getResultTypes(), [](Type resultType) {
-      return !resultType.isa<RankedTensorType>();
+      return !llvm::isa<RankedTensorType>(resultType);
     });
   }
 };
diff --git a/mlir/examples/toy/Ch5/mlir/Dialect.cpp b/mlir/examples/toy/Ch5/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch5/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch5/mlir/Dialect.cpp
@@ -114,7 +114,7 @@
   // If the type is a function type, it contains the input and result types of
   // this operation.
-  if (FunctionType funcType = type.dyn_cast<FunctionType>()) {
+  if (FunctionType funcType = llvm::dyn_cast<FunctionType>(type)) {
     if (parser.resolveOperands(operands, funcType.getInputs(), operandsLoc,
                                result.operands))
       return mlir::failure();
@@ -193,13 +193,13 @@
 mlir::LogicalResult ConstantOp::verify() {
   // If the return type of the constant is not an unranked tensor, the shape
   // must match the shape of the attribute holding the data.
-  auto resultType = getResult().getType().dyn_cast<mlir::RankedTensorType>();
+  auto resultType = llvm::dyn_cast<mlir::RankedTensorType>(getResult().getType());
   if (!resultType)
     return success();

   // Check that the rank of the attribute type matches the rank of the constant
   // result type.
-  auto attrType = getValue().getType().cast<mlir::RankedTensorType>();
+  auto attrType = llvm::cast<mlir::RankedTensorType>(getValue().getType());
   if (attrType.getRank() != resultType.getRank()) {
     return emitOpError("return type must match the one of the attached value "
                        "attribute: ")
@@ -254,8 +254,8 @@
   if (inputs.size() != 1 || outputs.size() != 1)
     return false;
   // The inputs must be Tensors with the same element type.
-  TensorType input = inputs.front().dyn_cast<TensorType>();
-  TensorType output = outputs.front().dyn_cast<TensorType>();
+  TensorType input = llvm::dyn_cast<TensorType>(inputs.front());
+  TensorType output = llvm::dyn_cast<TensorType>(outputs.front());
   if (!input || !output || input.getElementType() != output.getElementType())
     return false;
   // The shape is required to match if both types are ranked.
@@ -397,8 +397,8 @@
   auto resultType = results.front();

   // Check that the result type of the function matches the operand type.
-  if (inputType == resultType || inputType.isa<mlir::UnrankedTensorType>() ||
-      resultType.isa<mlir::UnrankedTensorType>())
+  if (inputType == resultType || llvm::isa<mlir::UnrankedTensorType>(inputType) ||
+      llvm::isa<mlir::UnrankedTensorType>(resultType))
     return mlir::success();

   return emitError() << "type of return operand (" << inputType
@@ -417,14 +417,14 @@
 }

 void TransposeOp::inferShapes() {
-  auto arrayTy = getOperand().getType().cast<RankedTensorType>();
+  auto arrayTy = llvm::cast<RankedTensorType>(getOperand().getType());
   SmallVector<int64_t, 2> dims(llvm::reverse(arrayTy.getShape()));
   getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
 }

 mlir::LogicalResult TransposeOp::verify() {
-  auto inputType = getOperand().getType().dyn_cast<RankedTensorType>();
-  auto resultType = getType().dyn_cast<RankedTensorType>();
+  auto inputType = llvm::dyn_cast<RankedTensorType>(getOperand().getType());
+  auto resultType = llvm::dyn_cast<RankedTensorType>(getType());
   if (!inputType || !resultType)
     return mlir::success();
diff --git a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
--- a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
@@ -62,7 +62,7 @@
 static void lowerOpToLoops(Operation *op, ValueRange operands,
                            PatternRewriter &rewriter,
                            LoopIterationFn processIteration) {
-  auto tensorType = (*op->result_type_begin()).cast<RankedTensorType>();
+  auto tensorType = llvm::cast<RankedTensorType>((*op->result_type_begin()));
   auto loc = op->getLoc();

   // Insert an allocation and deallocation for the result of this operation.
@@ -144,7 +144,7 @@
     // When lowering the constant operation, we allocate and assign the constant
     // values to a corresponding memref allocation.
-    auto tensorType = op.getType().cast<RankedTensorType>();
+    auto tensorType = llvm::cast<RankedTensorType>(op.getType());
     auto memRefType = convertTensorToMemRef(tensorType);
     auto alloc = insertAllocAndDealloc(memRefType, loc, rewriter);
@@ -342,7 +342,7 @@
   target.addIllegalDialect<toy::ToyDialect>();
   target.addDynamicallyLegalOp<toy::PrintOp>([](toy::PrintOp op) {
     return llvm::none_of(op->getOperandTypes(),
-                         [](Type type) { return type.isa<TensorType>(); });
+                         [](Type type) { return llvm::isa<TensorType>(type); });
   });

   // Now that the conversion target has been defined, we just need to provide
diff --git a/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
--- a/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
@@ -94,7 +94,7 @@
   /// operands inferred.
   static bool allOperandsInferred(Operation *op) {
     return llvm::all_of(op->getOperandTypes(), [](Type operandType) {
-      return operandType.isa<RankedTensorType>();
+      return llvm::isa<RankedTensorType>(operandType);
     });
   }

@@ -102,7 +102,7 @@
   /// shaped result.
   static bool returnsDynamicShape(Operation *op) {
     return llvm::any_of(op->getResultTypes(), [](Type resultType) {
-      return !resultType.isa<RankedTensorType>();
+      return !llvm::isa<RankedTensorType>(resultType);
     });
   }
 };
diff --git a/mlir/examples/toy/Ch6/mlir/Dialect.cpp b/mlir/examples/toy/Ch6/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch6/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch6/mlir/Dialect.cpp
@@ -114,7 +114,7 @@
   // If the type is a function type, it contains the input and result types of
   // this operation.
-  if (FunctionType funcType = type.dyn_cast<FunctionType>()) {
+  if (FunctionType funcType = llvm::dyn_cast<FunctionType>(type)) {
     if (parser.resolveOperands(operands, funcType.getInputs(), operandsLoc,
                                result.operands))
       return mlir::failure();
@@ -193,13 +193,13 @@
 mlir::LogicalResult ConstantOp::verify() {
   // If the return type of the constant is not an unranked tensor, the shape
   // must match the shape of the attribute holding the data.
-  auto resultType = getResult().getType().dyn_cast<mlir::RankedTensorType>();
+  auto resultType = llvm::dyn_cast<mlir::RankedTensorType>(getResult().getType());
   if (!resultType)
     return success();

   // Check that the rank of the attribute type matches the rank of the constant
   // result type.
-  auto attrType = getValue().getType().cast<mlir::RankedTensorType>();
+  auto attrType = llvm::cast<mlir::RankedTensorType>(getValue().getType());
   if (attrType.getRank() != resultType.getRank()) {
     return emitOpError("return type must match the one of the attached value "
                        "attribute: ")
@@ -254,8 +254,8 @@
   if (inputs.size() != 1 || outputs.size() != 1)
     return false;
   // The inputs must be Tensors with the same element type.
-  TensorType input = inputs.front().dyn_cast<TensorType>();
-  TensorType output = outputs.front().dyn_cast<TensorType>();
+  TensorType input = llvm::dyn_cast<TensorType>(inputs.front());
+  TensorType output = llvm::dyn_cast<TensorType>(outputs.front());
   if (!input || !output || input.getElementType() != output.getElementType())
     return false;
   // The shape is required to match if both types are ranked.
@@ -397,8 +397,8 @@
   auto resultType = results.front();

   // Check that the result type of the function matches the operand type.
-  if (inputType == resultType || inputType.isa<mlir::UnrankedTensorType>() ||
-      resultType.isa<mlir::UnrankedTensorType>())
+  if (inputType == resultType || llvm::isa<mlir::UnrankedTensorType>(inputType) ||
+      llvm::isa<mlir::UnrankedTensorType>(resultType))
     return mlir::success();

   return emitError() << "type of return operand (" << inputType
@@ -417,14 +417,14 @@
 }

 void TransposeOp::inferShapes() {
-  auto arrayTy = getOperand().getType().cast<RankedTensorType>();
+  auto arrayTy = llvm::cast<RankedTensorType>(getOperand().getType());
   SmallVector<int64_t, 2> dims(llvm::reverse(arrayTy.getShape()));
   getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
 }

 mlir::LogicalResult TransposeOp::verify() {
-  auto inputType = getOperand().getType().dyn_cast<RankedTensorType>();
-  auto resultType = getType().dyn_cast<RankedTensorType>();
+  auto inputType = llvm::dyn_cast<RankedTensorType>(getOperand().getType());
+  auto resultType = llvm::dyn_cast<RankedTensorType>(getType());
   if (!inputType || !resultType)
     return mlir::success();
diff --git a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
--- a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
@@ -62,7 +62,7 @@
 static void lowerOpToLoops(Operation *op, ValueRange operands,
                            PatternRewriter &rewriter,
                            LoopIterationFn processIteration) {
-  auto tensorType = (*op->result_type_begin()).cast<RankedTensorType>();
+  auto tensorType = llvm::cast<RankedTensorType>((*op->result_type_begin()));
   auto loc = op->getLoc();

   // Insert an allocation and deallocation for the result of this operation.
@@ -144,7 +144,7 @@
     // When lowering the constant operation, we allocate and assign the constant
     // values to a corresponding memref allocation.
-    auto tensorType = op.getType().cast<RankedTensorType>();
+    auto tensorType = llvm::cast<RankedTensorType>(op.getType());
     auto memRefType = convertTensorToMemRef(tensorType);
     auto alloc = insertAllocAndDealloc(memRefType, loc, rewriter);
@@ -342,7 +342,7 @@
   target.addIllegalDialect<toy::ToyDialect>();
   target.addDynamicallyLegalOp<toy::PrintOp>([](toy::PrintOp op) {
     return llvm::none_of(op->getOperandTypes(),
-                         [](Type type) { return type.isa<TensorType>(); });
+                         [](Type type) { return llvm::isa<TensorType>(type); });
   });

   // Now that the conversion target has been defined, we just need to provide
diff --git a/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp b/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
--- a/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
@@ -61,7 +61,7 @@
   LogicalResult
   matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
-    auto memRefType = (*op->operand_type_begin()).cast<MemRefType>();
+    auto memRefType = llvm::cast<MemRefType>((*op->operand_type_begin()));
     auto memRefShape = memRefType.getShape();
     auto loc = op->getLoc();
diff --git a/mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp
--- a/mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp
@@ -94,7 +94,7 @@
   /// operands inferred.
   static bool allOperandsInferred(Operation *op) {
     return llvm::all_of(op->getOperandTypes(), [](Type operandType) {
-      return operandType.isa<RankedTensorType>();
+      return llvm::isa<RankedTensorType>(operandType);
     });
   }

@@ -102,7 +102,7 @@
   /// shaped result.
   static bool returnsDynamicShape(Operation *op) {
     return llvm::any_of(op->getResultTypes(), [](Type resultType) {
-      return !resultType.isa<RankedTensorType>();
+      return !llvm::isa<RankedTensorType>(resultType);
     });
   }
 };
diff --git a/mlir/examples/toy/Ch7/mlir/Dialect.cpp b/mlir/examples/toy/Ch7/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch7/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch7/mlir/Dialect.cpp
@@ -101,7 +101,7 @@
   // If the type is a function type, it contains the input and result types of
   // this operation.
-  if (FunctionType funcType = type.dyn_cast<FunctionType>()) {
+  if (FunctionType funcType = llvm::dyn_cast<FunctionType>(type)) {
     if (parser.resolveOperands(operands, funcType.getInputs(), operandsLoc,
                                result.operands))
       return mlir::failure();
@@ -179,9 +179,9 @@
 static mlir::LogicalResult verifyConstantForType(mlir::Type type,
                                                  mlir::Attribute opaqueValue,
                                                  mlir::Operation *op) {
-  if (type.isa<mlir::TensorType>()) {
+  if (llvm::isa<mlir::TensorType>(type)) {
     // Check that the value is an elements attribute.
-    auto attrValue = opaqueValue.dyn_cast<mlir::DenseFPElementsAttr>();
+    auto attrValue = llvm::dyn_cast<mlir::DenseFPElementsAttr>(opaqueValue);
     if (!attrValue)
       return op->emitError("constant of TensorType must be initialized by "
                            "a DenseFPElementsAttr, got ")
@@ -189,13 +189,13 @@
     // If the return type of the constant is not an unranked tensor, the shape
     // must match the shape of the attribute holding the data.
-    auto resultType = type.dyn_cast<mlir::RankedTensorType>();
+    auto resultType = llvm::dyn_cast<mlir::RankedTensorType>(type);
     if (!resultType)
       return success();

     // Check that the rank of the attribute type matches the rank of the
     // constant result type.
-    auto attrType = attrValue.getType().cast<mlir::RankedTensorType>();
+    auto attrType = llvm::cast<mlir::RankedTensorType>(attrValue.getType());
     if (attrType.getRank() != resultType.getRank()) {
       return op->emitOpError("return type must match the one of the attached "
                              "value attribute: ")
@@ -213,11 +213,11 @@
     }
     return mlir::success();
   }
-  auto resultType = type.cast<StructType>();
+  auto resultType = llvm::cast<StructType>(type);
   llvm::ArrayRef<mlir::Type> resultElementTypes = resultType.getElementTypes();

   // Verify that the initializer is an Array.
-  auto attrValue = opaqueValue.dyn_cast<ArrayAttr>();
+  auto attrValue = llvm::dyn_cast<ArrayAttr>(opaqueValue);
   if (!attrValue || attrValue.getValue().size() != resultElementTypes.size())
     return op->emitError("constant of StructType must be initialized by an "
                          "ArrayAttr with the same number of elements, got ")
@@ -283,8 +283,8 @@
   if (inputs.size() != 1 || outputs.size() != 1)
     return false;
   // The inputs must be Tensors with the same element type.
-  TensorType input = inputs.front().dyn_cast<TensorType>();
-  TensorType output = outputs.front().dyn_cast<TensorType>();
+  TensorType input = llvm::dyn_cast<TensorType>(inputs.front());
+  TensorType output = llvm::dyn_cast<TensorType>(outputs.front());
   if (!input || !output || input.getElementType() != output.getElementType())
     return false;
   // The shape is required to match if both types are ranked.
@@ -426,8 +426,8 @@
   auto resultType = results.front();

   // Check that the result type of the function matches the operand type.
-  if (inputType == resultType || inputType.isa<mlir::UnrankedTensorType>() ||
-      resultType.isa<mlir::UnrankedTensorType>())
+  if (inputType == resultType || llvm::isa<mlir::UnrankedTensorType>(inputType) ||
+      llvm::isa<mlir::UnrankedTensorType>(resultType))
     return mlir::success();

   return emitError() << "type of return operand (" << inputType
@@ -442,7 +442,7 @@
 void StructAccessOp::build(mlir::OpBuilder &b, mlir::OperationState &state,
                            mlir::Value input, size_t index) {
   // Extract the result type from the input type.
-  StructType structTy = input.getType().cast<StructType>();
+  StructType structTy = llvm::cast<StructType>(input.getType());
   assert(index < structTy.getNumElementTypes());
   mlir::Type resultType = structTy.getElementTypes()[index];
@@ -451,7 +451,7 @@
 }

 mlir::LogicalResult StructAccessOp::verify() {
-  StructType structTy = getInput().getType().cast<StructType>();
+  StructType structTy = llvm::cast<StructType>(getInput().getType());
   size_t indexValue = getIndex();
   if (indexValue >= structTy.getNumElementTypes())
     return emitOpError()
@@ -474,14 +474,14 @@
 }

 void TransposeOp::inferShapes() {
-  auto arrayTy = getOperand().getType().cast<RankedTensorType>();
+  auto arrayTy = llvm::cast<RankedTensorType>(getOperand().getType());
   SmallVector<int64_t, 2> dims(llvm::reverse(arrayTy.getShape()));
   getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
 }

 mlir::LogicalResult TransposeOp::verify() {
-  auto inputType = getOperand().getType().dyn_cast<RankedTensorType>();
-  auto resultType = getType().dyn_cast<RankedTensorType>();
+  auto inputType = llvm::dyn_cast<RankedTensorType>(getOperand().getType());
+  auto resultType = llvm::dyn_cast<RankedTensorType>(getType());
   if (!inputType || !resultType)
     return mlir::success();
@@ -598,7 +598,7 @@
       return nullptr;

     // Check that the type is either a TensorType or another StructType.
-    if (!elementType.isa<mlir::TensorType, StructType>()) {
+    if (!llvm::isa<mlir::TensorType, StructType>(elementType)) {
       parser.emitError(typeLoc, "element type for a struct must either "
                                 "be a TensorType or a StructType, got: ")
           << elementType;
@@ -619,7 +619,7 @@
 void ToyDialect::printType(mlir::Type type,
                            mlir::DialectAsmPrinter &printer) const {
   // Currently the only toy type is a struct type.
-  StructType structType = type.cast<StructType>();
+  StructType structType = llvm::cast<StructType>(type);

   // Print the struct type according to the parser format.
printer << "struct<"; @@ -653,9 +653,9 @@ mlir::Attribute value, mlir::Type type, mlir::Location loc) { - if (type.isa()) + if (llvm::isa(type)) return builder.create(loc, type, - value.cast()); + llvm::cast(value)); return builder.create(loc, type, - value.cast()); + llvm::cast(value)); } diff --git a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp --- a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp +++ b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp @@ -62,7 +62,7 @@ static void lowerOpToLoops(Operation *op, ValueRange operands, PatternRewriter &rewriter, LoopIterationFn processIteration) { - auto tensorType = (*op->result_type_begin()).cast(); + auto tensorType = llvm::cast((*op->result_type_begin())); auto loc = op->getLoc(); // Insert an allocation and deallocation for the result of this operation. @@ -144,7 +144,7 @@ // When lowering the constant operation, we allocate and assign the constant // values to a corresponding memref allocation. - auto tensorType = op.getType().cast(); + auto tensorType = llvm::cast(op.getType()); auto memRefType = convertTensorToMemRef(tensorType); auto alloc = insertAllocAndDealloc(memRefType, loc, rewriter); @@ -342,7 +342,7 @@ target.addIllegalDialect(); target.addDynamicallyLegalOp([](toy::PrintOp op) { return llvm::none_of(op->getOperandTypes(), - [](Type type) { return type.isa(); }); + [](Type type) { return llvm::isa(type); }); }); // Now that the conversion target has been defined, we just need to provide diff --git a/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp b/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp --- a/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp +++ b/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp @@ -61,7 +61,7 @@ LogicalResult matchAndRewrite(Operation *op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { - auto memRefType = (*op->operand_type_begin()).cast(); + auto memRefType = llvm::cast((*op->operand_type_begin())); auto memRefShape = memRefType.getShape(); auto loc = op->getLoc(); diff --git a/mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp --- a/mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp +++ b/mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp @@ -94,7 +94,7 @@ /// operands inferred. static bool allOperandsInferred(Operation *op) { return llvm::all_of(op->getOperandTypes(), [](Type operandType) { - return operandType.isa(); + return llvm::isa(operandType); }); } @@ -102,7 +102,7 @@ /// shaped result. static bool returnsDynamicShape(Operation *op) { return llvm::any_of(op->getResultTypes(), [](Type resultType) { - return !resultType.isa(); + return !llvm::isa(resultType); }); } }; diff --git a/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp --- a/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp +++ b/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp @@ -31,7 +31,8 @@ /// Fold simple struct access operations that access into a constant. 
 OpFoldResult StructAccessOp::fold(FoldAdaptor adaptor) {
-  auto structAttr = adaptor.getInput().dyn_cast_or_null<mlir::ArrayAttr>();
+  auto structAttr =
+      llvm::dyn_cast_if_present<mlir::ArrayAttr>(adaptor.getInput());
   if (!structAttr)
     return nullptr;
diff --git a/mlir/include/mlir/Debug/BreakpointManagers/FileLineColLocBreakpointManager.h b/mlir/include/mlir/Debug/BreakpointManagers/FileLineColLocBreakpointManager.h
--- a/mlir/include/mlir/Debug/BreakpointManagers/FileLineColLocBreakpointManager.h
+++ b/mlir/include/mlir/Debug/BreakpointManagers/FileLineColLocBreakpointManager.h
@@ -62,19 +62,19 @@
 public:
   Breakpoint *match(const Action &action) const override {
     for (const IRUnit &unit : action.getContextIRUnits()) {
-      if (auto *op = unit.dyn_cast<Operation *>()) {
+      if (auto *op = llvm::dyn_cast_if_present<Operation *>(unit)) {
         if (auto match = matchFromLocation(op->getLoc()))
           return *match;
         continue;
       }
-      if (auto *block = unit.dyn_cast<Block *>()) {
+      if (auto *block = llvm::dyn_cast_if_present<Block *>(unit)) {
         for (auto &op : block->getOperations()) {
           if (auto match = matchFromLocation(op.getLoc()))
             return *match;
         }
         continue;
       }
-      if (Region *region = unit.dyn_cast<Region *>()) {
+      if (Region *region = llvm::dyn_cast_if_present<Region *>(unit)) {
         if (auto match = matchFromLocation(region->getLoc()))
           return *match;
         continue;
diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td b/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
@@ -110,27 +110,27 @@
   "gpu.mma_matrix", "::mlir::gpu::MMAMatrixType">;

 // Types for all sparse handles.
-def GPU_SparseEnvHandle :
-    DialectType<GPU_Dialect,
-      CPred<"$_self.isa<::mlir::gpu::SparseEnvHandleType>()">,
-      "sparse environment handle type">,
+def GPU_SparseEnvHandle :
+    DialectType<GPU_Dialect,
+      CPred<"llvm::isa<::mlir::gpu::SparseEnvHandleType>($_self)">,
+      "sparse environment handle type">,
     BuildableType<"mlir::gpu::SparseEnvHandleType::get($_builder.getContext())">;

-def GPU_SparseDnVecHandle :
-    DialectType<GPU_Dialect,
-      CPred<"$_self.isa<::mlir::gpu::SparseDnVecHandleType>()">,
+def GPU_SparseDnVecHandle :
+    DialectType<GPU_Dialect,
+      CPred<"llvm::isa<::mlir::gpu::SparseDnVecHandleType>($_self)">,
       "dense vector handle type">,
     BuildableType<"mlir::gpu::SparseDnVecHandleType::get($_builder.getContext())">;

-def GPU_SparseDnMatHandle :
-    DialectType<GPU_Dialect,
-      CPred<"$_self.isa<::mlir::gpu::SparseDnMatHandleType>()">,
+def GPU_SparseDnMatHandle :
+    DialectType<GPU_Dialect,
+      CPred<"llvm::isa<::mlir::gpu::SparseDnMatHandleType>($_self)">,
       "dense matrix handle type">,
     BuildableType<"mlir::gpu::SparseDnMatHandleType::get($_builder.getContext())">;

-def GPU_SparseSpMatHandle :
-    DialectType<GPU_Dialect,
-      CPred<"$_self.isa<::mlir::gpu::SparseSpMatHandleType>()">,
+def GPU_SparseSpMatHandle :
+    DialectType<GPU_Dialect,
+      CPred<"llvm::isa<::mlir::gpu::SparseSpMatHandleType>($_self)">,
       "sparse matrix handle type">,
     BuildableType<"mlir::gpu::SparseSpMatHandleType::get($_builder.getContext())">;
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
@@ -95,7 +95,7 @@
       /*methodName=*/"getDeclareTargetDeviceType",
       (ins), [{}], [{
         if (mlir::Attribute dTar = $_op->getAttr("omp.declare_target"))
-          if (auto dAttr = dTar.dyn_cast_or_null<mlir::omp::DeclareTargetAttr>())
+          if (auto dAttr = llvm::dyn_cast_or_null<mlir::omp::DeclareTargetAttr>(dTar))
             return dAttr.getDeviceType().getValue();
         return {};
       }]>,
@@ -108,7 +108,7 @@
       /*methodName=*/"getDeclareTargetCaptureClause",
       (ins), [{}], [{
         if (mlir::Attribute dTar = $_op->getAttr("omp.declare_target"))
-          if (auto dAttr = dTar.dyn_cast_or_null<mlir::omp::DeclareTargetAttr>())
+          if (auto dAttr = llvm::dyn_cast_or_null<mlir::omp::DeclareTargetAttr>(dTar))
             return dAttr.getCaptureClause().getValue();
         return {};
       }]>
diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h
--- a/mlir/include/mlir/IR/BuiltinTypes.h
+++ b/mlir/include/mlir/IR/BuiltinTypes.h
@@ -115,7 +115,7 @@
   static bool classof(Type type);

   /// Allow implicit conversion to ShapedType.
-  operator ShapedType() const { return cast<ShapedType>(); }
+  operator ShapedType() const { return llvm::cast<ShapedType>(*this); }
 };

 //===----------------------------------------------------------------------===//
@@ -169,7 +169,7 @@
   unsigned getMemorySpaceAsInt() const;

   /// Allow implicit conversion to ShapedType.
-  operator ShapedType() const { return cast<ShapedType>(); }
+  operator ShapedType() const { return llvm::cast<ShapedType>(*this); }
 };

 } // namespace mlir
diff --git a/mlir/include/mlir/IR/TypeRange.h b/mlir/include/mlir/IR/TypeRange.h
--- a/mlir/include/mlir/IR/TypeRange.h
+++ b/mlir/include/mlir/IR/TypeRange.h
@@ -217,13 +217,15 @@
   }

   static bool isEmptyKey(mlir::TypeRange range) {
-    if (const auto *type = range.getBase().dyn_cast<const Type *>())
+    if (const auto *type =
+            llvm::dyn_cast_if_present<const Type *>(range.getBase()))
       return type == getEmptyKeyPointer();
     return false;
   }

   static bool isTombstoneKey(mlir::TypeRange range) {
-    if (const auto *type = range.getBase().dyn_cast<const Type *>())
+    if (const auto *type =
+            llvm::dyn_cast_if_present<const Type *>(range.getBase()))
       return type == getTombstoneKeyPointer();
     return false;
   }
diff --git a/mlir/include/mlir/Interfaces/SideEffectInterfaces.h b/mlir/include/mlir/Interfaces/SideEffectInterfaces.h
--- a/mlir/include/mlir/Interfaces/SideEffectInterfaces.h
+++ b/mlir/include/mlir/Interfaces/SideEffectInterfaces.h
@@ -163,12 +163,12 @@
   /// Return the value the effect is applied on, or nullptr if there isn't a
   /// known value being affected.
-  Value getValue() const { return value ? value.dyn_cast<Value>() : Value(); }
+  Value getValue() const { return value ? llvm::dyn_cast_if_present<Value>(value) : Value(); }

   /// Return the symbol reference the effect is applied on, or nullptr if there
   /// isn't a known smbol being affected.
   SymbolRefAttr getSymbolRef() const {
-    return value ? value.dyn_cast<SymbolRefAttr>() : SymbolRefAttr();
+    return value ? llvm::dyn_cast_if_present<SymbolRefAttr>(value) : SymbolRefAttr();
   }

   /// Return the resource that the effect applies to.
diff --git a/mlir/include/mlir/Pass/AnalysisManager.h b/mlir/include/mlir/Pass/AnalysisManager.h
--- a/mlir/include/mlir/Pass/AnalysisManager.h
+++ b/mlir/include/mlir/Pass/AnalysisManager.h
@@ -254,7 +254,7 @@
   /// Returns the parent analysis map for this analysis map, or null if this is
   /// the top-level map.
   const NestedAnalysisMap *getParent() const {
-    return parentOrInstrumentor.dyn_cast<const NestedAnalysisMap *>();
+    return llvm::dyn_cast_if_present<const NestedAnalysisMap *>(parentOrInstrumentor);
   }

   /// Returns a pass instrumentation object for the current operation. This
diff --git a/mlir/lib/Analysis/DataFlow/ConstantPropagationAnalysis.cpp b/mlir/lib/Analysis/DataFlow/ConstantPropagationAnalysis.cpp
--- a/mlir/lib/Analysis/DataFlow/ConstantPropagationAnalysis.cpp
+++ b/mlir/lib/Analysis/DataFlow/ConstantPropagationAnalysis.cpp
@@ -89,7 +89,7 @@
     // Merge in the result of the fold, either a constant or a value.
     OpFoldResult foldResult = std::get<1>(it);
-    if (Attribute attr = foldResult.dyn_cast<Attribute>()) {
+    if (Attribute attr = llvm::dyn_cast_if_present<Attribute>(foldResult)) {
       LLVM_DEBUG(llvm::dbgs() << "Folded to constant: " << attr << "\n");
       propagateIfChanged(lattice,
                          lattice->join(ConstantValue(attr, op->getDialect())));
diff --git a/mlir/lib/Analysis/DataFlow/DeadCodeAnalysis.cpp b/mlir/lib/Analysis/DataFlow/DeadCodeAnalysis.cpp
--- a/mlir/lib/Analysis/DataFlow/DeadCodeAnalysis.cpp
+++ b/mlir/lib/Analysis/DataFlow/DeadCodeAnalysis.cpp
@@ -31,7 +31,7 @@
 }

 void Executable::onUpdate(DataFlowSolver *solver) const {
-  if (auto *block = point.dyn_cast<Block *>()) {
+  if (auto *block = llvm::dyn_cast_if_present<Block *>(point)) {
     // Re-invoke the analyses on the block itself.
     for (DataFlowAnalysis *analysis : subscribers)
       solver->enqueue({block, analysis});
@@ -39,7 +39,7 @@
     for (DataFlowAnalysis *analysis : subscribers)
       for (Operation &op : *block)
         solver->enqueue({&op, analysis});
-  } else if (auto *programPoint = point.dyn_cast<GenericProgramPoint *>()) {
+  } else if (auto *programPoint = llvm::dyn_cast_if_present<GenericProgramPoint *>(point)) {
     // Re-invoke the analysis on the successor block.
    if (auto *edge = dyn_cast<CFGEdge>(programPoint)) {
       for (DataFlowAnalysis *analysis : subscribers)
@@ -219,7 +219,7 @@
 LogicalResult DeadCodeAnalysis::visit(ProgramPoint point) {
   if (point.is<Block *>())
     return success();
-  auto *op = point.dyn_cast<Operation *>();
+  auto *op = llvm::dyn_cast_if_present<Operation *>(point);
   if (!op)
     return emitError(point.getLoc(), "unknown program point kind");
diff --git a/mlir/lib/Analysis/DataFlow/DenseAnalysis.cpp b/mlir/lib/Analysis/DataFlow/DenseAnalysis.cpp
--- a/mlir/lib/Analysis/DataFlow/DenseAnalysis.cpp
+++ b/mlir/lib/Analysis/DataFlow/DenseAnalysis.cpp
@@ -33,9 +33,9 @@
 }

 LogicalResult AbstractDenseDataFlowAnalysis::visit(ProgramPoint point) {
-  if (auto *op = point.dyn_cast<Operation *>())
+  if (auto *op = llvm::dyn_cast_if_present<Operation *>(point))
     processOperation(op);
-  else if (auto *block = point.dyn_cast<Block *>())
+  else if (auto *block = llvm::dyn_cast_if_present<Block *>(point))
     visitBlock(block);
   else
     return failure();
diff --git a/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp b/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp
--- a/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp
+++ b/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp
@@ -181,7 +181,7 @@
       if (auto bound =
              dyn_cast_or_null<IntegerAttr>(loopBound->get<Attribute>()))
         return bound.getValue();
-    } else if (auto value = loopBound->dyn_cast<Value>()) {
+    } else if (auto value = llvm::dyn_cast_if_present<Value>(*loopBound)) {
       const IntegerValueRangeLattice *lattice =
           getLatticeElementFor(op, value);
       if (lattice != nullptr)
diff --git a/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp b/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp
--- a/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp
+++ b/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp
@@ -66,9 +66,9 @@
 }

 LogicalResult AbstractSparseDataFlowAnalysis::visit(ProgramPoint point) {
-  if (Operation *op = point.dyn_cast<Operation *>())
+  if (Operation *op = llvm::dyn_cast_if_present<Operation *>(point))
     visitOperation(op);
-  else if (Block *block = point.dyn_cast<Block *>())
+  else if (Block *block = llvm::dyn_cast_if_present<Block *>(point))
     visitBlock(block);
   else
     return failure();
@@ -238,7 +238,7 @@
   unsigned firstIndex = 0;
   if (inputs.size() != lattices.size()) {
-    if (point.dyn_cast<Operation *>()) {
+    if (llvm::dyn_cast_if_present<Operation *>(point)) {
       if (!inputs.empty())
         firstIndex = cast<OpResult>(inputs.front()).getResultNumber();
       visitNonControlFlowArgumentsImpl(
@@ -316,9 +316,9 @@
 LogicalResult
 AbstractSparseBackwardDataFlowAnalysis::visit(ProgramPoint point) {
-  if (Operation *op = point.dyn_cast<Operation *>())
+  if (Operation *op = llvm::dyn_cast_if_present<Operation *>(point))
     visitOperation(op);
-  else if (point.dyn_cast<Block *>())
+  else if (llvm::dyn_cast_if_present<Block *>(point))
     // For backward dataflow, we don't have to do any work for the blocks
     // themselves. CFG edges between blocks are processed by the BranchOp
     // logic in `visitOperation`, and entry blocks for functions are tied
diff --git a/mlir/lib/Analysis/DataFlowFramework.cpp b/mlir/lib/Analysis/DataFlowFramework.cpp
--- a/mlir/lib/Analysis/DataFlowFramework.cpp
+++ b/mlir/lib/Analysis/DataFlowFramework.cpp
@@ -39,21 +39,21 @@
     os << "<NULL POINT>";
     return;
   }
-  if (auto *programPoint = dyn_cast<GenericProgramPoint *>())
+  if (auto *programPoint = llvm::dyn_cast<GenericProgramPoint *>(*this))
     return programPoint->print(os);
-  if (auto *op = dyn_cast<Operation *>())
+  if (auto *op = llvm::dyn_cast<Operation *>(*this))
     return op->print(os);
-  if (auto value = dyn_cast<Value>())
+  if (auto value = llvm::dyn_cast<Value>(*this))
     return value.print(os);
   return get<Block *>()->print(os);
 }

 Location ProgramPoint::getLoc() const {
-  if (auto *programPoint = dyn_cast<GenericProgramPoint *>())
+  if (auto *programPoint = llvm::dyn_cast<GenericProgramPoint *>(*this))
     return programPoint->getLoc();
-  if (auto *op = dyn_cast<Operation *>())
+  if (auto *op = llvm::dyn_cast<Operation *>(*this))
     return op->getLoc();
-  if (auto value = dyn_cast<Value>())
+  if (auto value = llvm::dyn_cast<Value>(*this))
     return value.getLoc();
   return get<Block *>()->getParent()->getLoc();
 }
diff --git a/mlir/lib/AsmParser/Parser.cpp b/mlir/lib/AsmParser/Parser.cpp
--- a/mlir/lib/AsmParser/Parser.cpp
+++ b/mlir/lib/AsmParser/Parser.cpp
@@ -2060,7 +2060,7 @@
   if (parseToken(Token::r_paren, "expected ')' in location"))
     return failure();

-  if (auto *op = opOrArgument.dyn_cast<Operation *>())
+  if (auto *op = llvm::dyn_cast_if_present<Operation *>(opOrArgument))
     op->setLoc(directLoc);
   else
     opOrArgument.get<BlockArgument>().setLoc(directLoc);
diff --git a/mlir/lib/CAPI/Interfaces/Interfaces.cpp b/mlir/lib/CAPI/Interfaces/Interfaces.cpp
--- a/mlir/lib/CAPI/Interfaces/Interfaces.cpp
+++ b/mlir/lib/CAPI/Interfaces/Interfaces.cpp
@@ -47,7 +47,7 @@
 DictionaryAttr unwrapAttributes(MlirAttribute attributes) {
   DictionaryAttr attributeDict;
   if (!mlirAttributeIsNull(attributes))
-    attributeDict = unwrap(attributes).cast<DictionaryAttr>();
+    attributeDict = llvm::cast<DictionaryAttr>(unwrap(attributes));
   return attributeDict;
 }
diff --git a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
--- a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
@@ -1190,9 +1190,9 @@
 // TODO: safer and more flexible to store data type in actual op instead?
 static Type getSpMatElemType(Value spMat) {
   if (auto op = spMat.getDefiningOp<gpu::CreateCooOp>())
-    return op.getValues().getType().cast<MemRefType>().getElementType();
+    return llvm::cast<MemRefType>(op.getValues().getType()).getElementType();
   if (auto op = spMat.getDefiningOp<gpu::CreateCsrOp>())
-    return op.getValues().getType().cast<MemRefType>().getElementType();
+    return llvm::cast<MemRefType>(op.getValues().getType()).getElementType();
   llvm_unreachable("cannot find spmat def");
 }
@@ -1235,7 +1235,7 @@
         MemRefDescriptor(adaptor.getMemref()).allocatedPtr(rewriter, loc);
     if (!getTypeConverter()->useOpaquePointers())
       pVec = rewriter.create<LLVM::BitcastOp>(loc, llvmPointerType, pVec);
-    Type dType = op.getMemref().getType().cast<MemRefType>().getElementType();
+    Type dType = llvm::cast<MemRefType>(op.getMemref().getType()).getElementType();
    auto dw = rewriter.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                                 dType.getIntOrFloatBitWidth());
     auto handle =
@@ -1271,7 +1271,7 @@
         MemRefDescriptor(adaptor.getMemref()).allocatedPtr(rewriter, loc);
     if (!getTypeConverter()->useOpaquePointers())
       pMat = rewriter.create<LLVM::BitcastOp>(loc, llvmPointerType, pMat);
-    Type dType = op.getMemref().getType().cast<MemRefType>().getElementType();
+    Type dType = llvm::cast<MemRefType>(op.getMemref().getType()).getElementType();
     auto dw = rewriter.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                                 dType.getIntOrFloatBitWidth());
     auto handle =
@@ -1315,8 +1315,8 @@
       pColIdxs = rewriter.create<LLVM::BitcastOp>(loc, llvmPointerType, pColIdxs);
       pValues = rewriter.create<LLVM::BitcastOp>(loc, llvmPointerType, pValues);
     }
-    Type iType = op.getColIdxs().getType().cast<MemRefType>().getElementType();
-    Type dType = op.getValues().getType().cast<MemRefType>().getElementType();
+    Type iType = llvm::cast<MemRefType>(op.getColIdxs().getType()).getElementType();
+    Type dType = llvm::cast<MemRefType>(op.getValues().getType()).getElementType();
     auto iw = rewriter.create<LLVM::ConstantOp>(
         loc, llvmInt32Type, iType.isIndex() ? 64 : iType.getIntOrFloatBitWidth());
     auto dw = rewriter.create<LLVM::ConstantOp>(loc, llvmInt32Type,
@@ -1350,9 +1350,9 @@
       pColIdxs = rewriter.create<LLVM::BitcastOp>(loc, llvmPointerType, pColIdxs);
       pValues = rewriter.create<LLVM::BitcastOp>(loc, llvmPointerType, pValues);
     }
-    Type pType = op.getRowPos().getType().cast<MemRefType>().getElementType();
-    Type iType = op.getColIdxs().getType().cast<MemRefType>().getElementType();
-    Type dType = op.getValues().getType().cast<MemRefType>().getElementType();
+    Type pType = llvm::cast<MemRefType>(op.getRowPos().getType()).getElementType();
+    Type iType = llvm::cast<MemRefType>(op.getColIdxs().getType()).getElementType();
+    Type dType = llvm::cast<MemRefType>(op.getValues().getType()).getElementType();
     auto pw = rewriter.create<LLVM::ConstantOp>(
         loc, llvmInt32Type, pType.isIndex() ? 64 : pType.getIntOrFloatBitWidth());
     auto iw = rewriter.create<LLVM::ConstantOp>(
diff --git a/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp b/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp
--- a/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp
@@ -405,7 +405,7 @@
     return failure();
   if (!(*converted)) // Conversion to default is 0.
     return 0;
-  if (auto explicitSpace = converted->dyn_cast_or_null<IntegerAttr>())
+  if (auto explicitSpace = llvm::dyn_cast_if_present<IntegerAttr>(*converted))
     return explicitSpace.getInt();
   return failure();
 }
diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
--- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
+++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
@@ -671,7 +671,7 @@
     Attribute initialValue = nullptr;
     if (!global.isExternal() && !global.isUninitialized()) {
-      auto elementsAttr = global.getInitialValue()->cast<ElementsAttr>();
+      auto elementsAttr = llvm::cast<ElementsAttr>(*global.getInitialValue());
       initialValue = elementsAttr;

       // For scalar memrefs, the global variable created is of the element type,
diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
--- a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
+++ b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
@@ -412,10 +412,10 @@
     auto *ans = cast<TypeAnswer>(answer);
     if (isa<pdl::TypeType>(val.getType()))
       builder.create<pdl_interp::CheckTypeOp>(
-          loc, val, ans->getValue().cast<TypeAttr>(), success, failure);
+          loc, val, llvm::cast<TypeAttr>(ans->getValue()), success, failure);
     else
       builder.create<pdl_interp::CheckTypesOp>(
-          loc, val, ans->getValue().cast<ArrayAttr>(), success, failure);
+          loc, val, llvm::cast<ArrayAttr>(ans->getValue()), success, failure);
     break;
   }
   case Predicates::AttributeQuestion: {
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -300,7 +300,7 @@
     return rewriter.create<mlir::math::TanhOp>(loc, resultTypes, args);

   // tosa::ErfOp
-  if (isa<tosa::ErfOp>(op) && elementTy.isa<FloatType>())
+  if (isa<tosa::ErfOp>(op) && llvm::isa<FloatType>(elementTy))
     return rewriter.create<mlir::math::ErfOp>(loc, resultTypes, args);

   // tosa::GreaterOp
@@ -1885,7 +1885,7 @@
   auto addDynamicDimension = [&](Value source, int64_t dim) {
     auto dynamicDim = tensor::createDimValue(builder, loc, source, dim);
-    if (auto dimValue = dynamicDim.value().dyn_cast<Value>())
+    if (auto dimValue = llvm::dyn_cast_if_present<Value>(dynamicDim.value()))
       results.push_back(dimValue);
   };
diff --git a/mlir/lib/Debug/DebuggerExecutionContextHook.cpp b/mlir/lib/Debug/DebuggerExecutionContextHook.cpp
--- a/mlir/lib/Debug/DebuggerExecutionContextHook.cpp
+++ b/mlir/lib/Debug/DebuggerExecutionContextHook.cpp
@@ -121,11 +121,11 @@
     return;
   }
   IRUnit *unit = &state.cursor;
-  if (auto *op = unit->dyn_cast<Operation *>()) {
+  if (auto *op = llvm::dyn_cast_if_present<Operation *>(*unit)) {
     state.cursor = op->getBlock();
-  } else if (auto *region = unit->dyn_cast<Region *>()) {
+  } else if (auto *region = llvm::dyn_cast_if_present<Region *>(*unit)) {
     state.cursor = region->getParentOp();
-  } else if (auto *block = unit->dyn_cast<Block *>()) {
+  } else if (auto *block = llvm::dyn_cast_if_present<Block *>(*unit)) {
     state.cursor = block->getParent();
   } else {
     llvm::outs() << "Current cursor is not a valid IRUnit";
@@ -142,14 +142,14 @@
     return;
   }
   IRUnit *unit = &state.cursor;
-  if (auto *op = unit->dyn_cast<Operation *>()) {
+  if (auto *op = llvm::dyn_cast_if_present<Operation *>(*unit)) {
     if (index < 0 || index >= static_cast<int>(op->getNumRegions())) {
       llvm::outs() << "Index invalid, op has " << op->getNumRegions()
                    << " but got " << index << "\n";
       return;
     }
     state.cursor = &op->getRegion(index);
-  } else if (auto *region = unit->dyn_cast<Region *>()) {
+  } else if (auto *region = llvm::dyn_cast_if_present<Region *>(*unit)) {
     auto block = region->begin();
     int count = 0;
     while (block != region->end() && count != index) {
@@ -163,7 +163,7 @@
       return;
     }
     state.cursor = &*block;
-  } else if (auto *block = unit->dyn_cast<Block *>()) {
+  } else if (auto *block = llvm::dyn_cast_if_present<Block *>(*unit)) {
     auto op = block->begin();
     int count = 0;
     while (op != block->end() && count != index) {
@@ -192,14 +192,14 @@
     return;
   }
   IRUnit *unit = &state.cursor;
-  if (auto *op = unit->dyn_cast<Operation *>()) {
+  if (auto *op = llvm::dyn_cast_if_present<Operation *>(*unit)) {
     Operation *previous = op->getPrevNode();
     if (!previous) {
       llvm::outs() << "No previous operation in the current block\n";
       return;
     }
     state.cursor = previous;
-  } else if (auto *region = unit->dyn_cast<Region *>()) {
+  } else if (auto *region = llvm::dyn_cast_if_present<Region *>(*unit)) {
     llvm::outs() << "Has region\n";
     Operation *parent = region->getParentOp();
     if (!parent) {
@@ -212,7 +212,7 @@
     }
     state.cursor =
         &region->getParentOp()->getRegion(region->getRegionNumber() - 1);
-  } else if (auto *block = unit->dyn_cast<Block *>()) {
+  } else if (auto *block = llvm::dyn_cast_if_present<Block *>(*unit)) {
     Block *previous = block->getPrevNode();
     if (!previous) {
       llvm::outs() << "No previous block in the current region\n";
@@ -234,14 +234,14 @@
     return;
   }
   IRUnit *unit = &state.cursor;
-  if (auto *op = unit->dyn_cast<Operation *>()) {
+  if (auto *op = llvm::dyn_cast_if_present<Operation *>(*unit)) {
     Operation *next = op->getNextNode();
     if (!next) {
       llvm::outs() << "No next operation in the current block\n";
       return;
     }
     state.cursor = next;
-  } else if (auto *region = unit->dyn_cast<Region *>()) {
+  } else if (auto *region = llvm::dyn_cast_if_present<Region *>(*unit)) {
     Operation *parent = region->getParentOp();
     if (!parent) {
       llvm::outs() << "No parent operation for the current region\n";
@@ -253,7 +253,7 @@
     }
     state.cursor =
         &region->getParentOp()->getRegion(region->getRegionNumber() + 1);
-  } else if (auto *block = unit->dyn_cast<Block *>()) {
+  } else if (auto *block = llvm::dyn_cast_if_present<Block *>(*unit)) {
     Block *next = block->getNextNode();
     if (!next) {
       llvm::outs() << "No next block in the current region\n";
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -1212,7 +1212,7 @@
   actualValues.reserve(values.size());
   auto *dialect = b.getContext()->getLoadedDialect<AffineDialect>();
   for (OpFoldResult ofr : values) {
-    if (auto value = ofr.dyn_cast<Value>()) {
+    if (auto value = llvm::dyn_cast_if_present<Value>(ofr)) {
       actualValues.push_back(value);
       continue;
     }
@@ -4599,7 +4599,7 @@
         if (staticDim.has_value())
           return builder.create<arith::ConstantIndexOp>(result.location,
                                                         *staticDim);
-        return ofr.dyn_cast<Value>();
+        return llvm::dyn_cast_if_present<Value>(ofr);
       });
   result.addOperands(basisValues);
 }
diff --git a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp
--- a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp
+++ b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp
@@ -808,7 +808,7 @@
   if (matchPattern(getRhs(), m_Zero()))
     return getLhs();
   /// or(x, <all ones>) -> <all ones>
-  if (auto rhsAttr = adaptor.getRhs().dyn_cast_or_null<IntegerAttr>())
+  if (auto rhsAttr = llvm::dyn_cast_if_present<IntegerAttr>(adaptor.getRhs()))
     if (rhsAttr.getValue().isAllOnes())
       return rhsAttr;
@@ -1249,7 +1249,7 @@
 /// Always fold extension of FP constants.
 OpFoldResult arith::ExtFOp::fold(FoldAdaptor adaptor) {
-  auto constOperand = adaptor.getIn().dyn_cast_or_null<FloatAttr>();
+  auto constOperand = llvm::dyn_cast_if_present<FloatAttr>(adaptor.getIn());
   if (!constOperand)
     return {};
@@ -1702,7 +1702,7 @@
   // We are moving constants to the right side; So if lhs is constant rhs is
   // guaranteed to be a constant.
-  if (auto lhs = adaptor.getLhs().dyn_cast_or_null<TypedAttr>()) {
+  if (auto lhs = llvm::dyn_cast_if_present<TypedAttr>(adaptor.getLhs())) {
     return constFoldBinaryOp<IntegerAttr>(
         adaptor.getOperands(), getI1SameShape(lhs.getType()),
         [pred = getPredicate()](const APInt &lhs, const APInt &rhs) {
@@ -1772,8 +1772,8 @@
 }

 OpFoldResult arith::CmpFOp::fold(FoldAdaptor adaptor) {
-  auto lhs = adaptor.getLhs().dyn_cast_or_null<FloatAttr>();
-  auto rhs = adaptor.getRhs().dyn_cast_or_null<FloatAttr>();
+  auto lhs = llvm::dyn_cast_if_present<FloatAttr>(adaptor.getLhs());
+  auto rhs = llvm::dyn_cast_if_present<FloatAttr>(adaptor.getRhs());

   // If one operand is NaN, making them both NaN does not change the result.
   if (lhs && lhs.getValue().isNaN())
@@ -2193,11 +2193,11 @@
   // Constant-fold constant operands over non-splat constant condition.
   // select %cst_vec, %cst0, %cst1 => %cst2
   if (auto cond =
-          adaptor.getCondition().dyn_cast_or_null<DenseElementsAttr>()) {
+          llvm::dyn_cast_if_present<DenseElementsAttr>(adaptor.getCondition())) {
     if (auto lhs =
-            adaptor.getTrueValue().dyn_cast_or_null<DenseElementsAttr>()) {
+            llvm::dyn_cast_if_present<DenseElementsAttr>(adaptor.getTrueValue())) {
       if (auto rhs =
-              adaptor.getFalseValue().dyn_cast_or_null<DenseElementsAttr>()) {
+              llvm::dyn_cast_if_present<DenseElementsAttr>(adaptor.getFalseValue())) {
         SmallVector<Attribute> results;
         results.reserve(static_cast<unsigned>(cond.getNumElements()));
         auto condVals = llvm::make_range(cond.value_begin<BoolAttr>(),
diff --git a/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -184,7 +184,7 @@
     // If the buffers have different types, they differ only in their layout
     // map.
-    auto memrefType = trueType->cast<MemRefType>();
+    auto memrefType = llvm::cast<MemRefType>(*trueType);
     return getMemRefTypeWithFullyDynamicLayout(
         RankedTensorType::get(memrefType.getShape(),
                               memrefType.getElementType()),
diff --git a/mlir/lib/Dialect/Arith/Utils/Utils.cpp b/mlir/lib/Dialect/Arith/Utils/Utils.cpp
--- a/mlir/lib/Dialect/Arith/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Arith/Utils/Utils.cpp
@@ -33,8 +33,8 @@
     if (ofr.is<Attribute>())
       continue;
     // Newly static, move from Value to constant.
-    if (auto cstOp =
-            ofr.dyn_cast<Value>().getDefiningOp<arith::ConstantIndexOp>()) {
+    if (auto cstOp = llvm::dyn_cast_if_present<Value>(ofr)
+                         .getDefiningOp<arith::ConstantIndexOp>()) {
       ofr = b.getIndexAttr(cstOp.value());
       valuesChanged = true;
     }
@@ -56,9 +56,9 @@
 Value mlir::getValueOrCreateConstantIndexOp(OpBuilder &b, Location loc,
                                             OpFoldResult ofr) {
-  if (auto value = ofr.dyn_cast<Value>())
+  if (auto value = llvm::dyn_cast_if_present<Value>(ofr))
     return value;
-  auto attr = dyn_cast<IntegerAttr>(ofr.dyn_cast<Attribute>());
+  auto attr = dyn_cast<IntegerAttr>(llvm::dyn_cast_if_present<Attribute>(ofr));
   assert(attr && "expect the op fold result casts to an integer attribute");
   return b.create<arith::ConstantIndexOp>(loc, attr.getValue().getSExtValue());
 }
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -179,7 +179,7 @@
     populateDynamicDimSizes(rewriter, loc, copyBuffer, dynamicDims);
   }
   FailureOr<Value> alloc = options.createAlloc(
-      rewriter, loc, allocType->cast<MemRefType>(), dynamicDims);
+      rewriter, loc, llvm::cast<MemRefType>(*allocType), dynamicDims);
   if (failed(alloc))
     return failure();
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp b/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
--- a/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
@@ -59,7 +59,8 @@
 /// Return the func::FuncOp called by `callOp`.
 static func::FuncOp getCalledFunction(CallOpInterface callOp) {
-  SymbolRefAttr sym = callOp.getCallableForCallee().dyn_cast<SymbolRefAttr>();
+  SymbolRefAttr sym =
+      llvm::dyn_cast_if_present<SymbolRefAttr>(callOp.getCallableForCallee());
   if (!sym)
     return nullptr;
   return dyn_cast_or_null<func::FuncOp>(
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
@@ -80,7 +80,7 @@
 /// Return the FuncOp called by `callOp`.
 static FuncOp getCalledFunction(CallOpInterface callOp) {
-  SymbolRefAttr sym = callOp.getCallableForCallee().dyn_cast<SymbolRefAttr>();
+  SymbolRefAttr sym = llvm::dyn_cast_if_present<SymbolRefAttr>(callOp.getCallableForCallee());
   if (!sym)
     return nullptr;
   return dyn_cast_or_null<FuncOp>(
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
@@ -995,7 +995,7 @@
   op->walk([&](Operation *op) {
     SmallVector<Attribute> aliasSets;
     for (OpResult opResult : op->getOpResults()) {
-      if (opResult.getType().isa<TensorType>()) {
+      if (llvm::isa<TensorType>(opResult.getType())) {
        SmallVector<Attribute> aliases;
         state.applyOnAliases(opResult, [&](Value alias) {
           std::string buffer;
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
@@ -238,7 +238,7 @@
 /// Return the func::FuncOp called by `callOp`.
 static func::FuncOp getCalledFunction(func::CallOp callOp) {
-  SymbolRefAttr sym = callOp.getCallableForCallee().dyn_cast<SymbolRefAttr>();
+  SymbolRefAttr sym = llvm::dyn_cast_if_present<SymbolRefAttr>(callOp.getCallableForCallee());
   if (!sym)
     return nullptr;
   return dyn_cast_or_null<func::FuncOp>(
diff --git a/mlir/lib/Dialect/Complex/IR/ComplexOps.cpp b/mlir/lib/Dialect/Complex/IR/ComplexOps.cpp
--- a/mlir/lib/Dialect/Complex/IR/ComplexOps.cpp
+++ b/mlir/lib/Dialect/Complex/IR/ComplexOps.cpp
@@ -90,7 +90,8 @@
 //===----------------------------------------------------------------------===//

 OpFoldResult ImOp::fold(FoldAdaptor adaptor) {
-  ArrayAttr arrayAttr = adaptor.getComplex().dyn_cast_or_null<ArrayAttr>();
+  ArrayAttr arrayAttr =
+      llvm::dyn_cast_if_present<ArrayAttr>(adaptor.getComplex());
   if (arrayAttr && arrayAttr.size() == 2)
     return arrayAttr[1];
   if (auto createOp = getOperand().getDefiningOp<CreateOp>())
@@ -103,7 +104,8 @@
 //===----------------------------------------------------------------------===//

 OpFoldResult ReOp::fold(FoldAdaptor adaptor) {
-  ArrayAttr arrayAttr = adaptor.getComplex().dyn_cast_or_null<ArrayAttr>();
+  ArrayAttr arrayAttr =
+      llvm::dyn_cast_if_present<ArrayAttr>(adaptor.getComplex());
   if (arrayAttr && arrayAttr.size() == 2)
     return arrayAttr[0];
   if (auto createOp = getOperand().getDefiningOp<CreateOp>())
diff --git a/mlir/lib/Dialect/DLTI/DLTI.cpp b/mlir/lib/Dialect/DLTI/DLTI.cpp
--- a/mlir/lib/Dialect/DLTI/DLTI.cpp
+++ b/mlir/lib/Dialect/DLTI/DLTI.cpp
@@ -94,7 +94,7 @@
 void DataLayoutEntryAttr::print(AsmPrinter &os) const {
   os << DataLayoutEntryAttr::kAttrKeyword << "<";
-  if (auto type = getKey().dyn_cast<Type>())
+  if (auto type = llvm::dyn_cast_if_present<Type>(getKey()))
     os << type;
   else
     os << "\"" << getKey().get<StringAttr>().strref() << "\"";
@@ -151,7 +151,7 @@
   DenseSet<Type> types;
   DenseSet<StringAttr> ids;
   for (DataLayoutEntryInterface entry : entries) {
-    if (auto type = entry.getKey().dyn_cast<Type>()) {
+    if (auto type = llvm::dyn_cast_if_present<Type>(entry.getKey())) {
       if (!types.insert(type).second)
         return emitError() << "repeated layout entry key: " << type;
     } else {
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -493,7 +493,7 @@
     // error. All other canonicalization is done in the fold method.
     bool requiresConst = !rawConstantIndices.empty() &&
                          currType.isa_and_nonnull<LLVMStructType>();
-    if (Value val = iter.dyn_cast<Value>()) {
+    if (Value val = llvm::dyn_cast_if_present<Value>(iter)) {
       APInt intC;
       if (requiresConst && matchPattern(val, m_ConstantInt(&intC)) &&
           intC.isSignedIntN(kGEPConstantBitWidth)) {
@@ -598,7 +598,7 @@
   llvm::interleaveComma(
       GEPIndicesAdaptor<OperandRange>(rawConstantIndices, indices), printer,
       [&](PointerUnion<IntegerAttr, Value> cst) {
-        if (Value val = cst.dyn_cast<Value>())
+        if (Value val = llvm::dyn_cast_if_present<Value>(cst))
           printer.printOperand(val);
         else
           printer << cst.get<IntegerAttr>().getInt();
@@ -2495,7 +2495,7 @@
         !integer.getValue().isSignedIntN(kGEPConstantBitWidth)) {
       PointerUnion<IntegerAttr, Value> existing = getIndices()[iter.index()];
-      if (Value val = existing.dyn_cast<Value>())
+      if (Value val = llvm::dyn_cast_if_present<Value>(existing))
         gepArgs.emplace_back(val);
       else
         gepArgs.emplace_back(existing.get<IntegerAttr>().getInt());
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
@@ -261,7 +261,7 @@
 static bool hasAllZeroIndices(LLVM::GEPOp gepOp) {
   return llvm::all_of(gepOp.getIndices(), [](auto index) {
-    auto indexAttr = index.template dyn_cast<IntegerAttr>();
+    auto indexAttr = llvm::dyn_cast_if_present<IntegerAttr>(index);
     return indexAttr && indexAttr.getValue() == 0;
   });
 }
@@ -289,7 +289,7 @@
   // Ensures all indices are static and fetches them.
   SmallVector<IntegerAttr> indices;
   for (auto index : gep.getIndices()) {
-    IntegerAttr indexInt = index.dyn_cast<IntegerAttr>();
+    IntegerAttr indexInt = llvm::dyn_cast_if_present<IntegerAttr>(index);
     if (!indexInt)
       return {};
     indices.push_back(indexInt);
@@ -310,7 +310,7 @@
   for (IntegerAttr index : llvm::drop_begin(indices)) {
     // Ensure the structure of the type being indexed can be reasoned about.
     // This includes rejecting any potential typed pointer.
-    auto destructurable = selectedType.dyn_cast<DestructurableTypeInterface>();
+    auto destructurable = llvm::dyn_cast<DestructurableTypeInterface>(selectedType);
     if (!destructurable)
       return {};
@@ -343,7 +343,7 @@
 bool LLVM::GEPOp::canRewire(const DestructurableMemorySlot &slot,
                             SmallPtrSetImpl<Attribute> &usedIndices,
                             SmallVectorImpl<MemorySlot> &mustBeSafelyUsed) {
-  auto basePtrType = getBase().getType().dyn_cast<LLVM::LLVMPointerType>();
+  auto basePtrType = llvm::dyn_cast<LLVM::LLVMPointerType>(getBase().getType());
   if (!basePtrType)
     return false;
@@ -359,7 +359,7 @@
     return false;
   auto firstLevelIndex = cast<IntegerAttr>(getIndices()[1]);
   assert(slot.elementPtrs.contains(firstLevelIndex));
-  if (!slot.elementPtrs.at(firstLevelIndex).isa<LLVM::LLVMPointerType>())
+  if (!llvm::isa<LLVM::LLVMPointerType>(slot.elementPtrs.at(firstLevelIndex)))
     return false;
   mustBeSafelyUsed.emplace_back<MemorySlot>({getResult(), reachedType});
   usedIndices.insert(firstLevelIndex);
@@ -369,7 +369,7 @@
 DeletionKind LLVM::GEPOp::rewire(const DestructurableMemorySlot &slot,
                                  DenseMap<Attribute, MemorySlot> &subslots,
                                  RewriterBase &rewriter) {
-  IntegerAttr firstLevelIndex = getIndices()[1].dyn_cast<IntegerAttr>();
+  IntegerAttr firstLevelIndex = llvm::dyn_cast_if_present<IntegerAttr>(getIndices()[1]);
   const MemorySlot &newSlot = subslots.at(firstLevelIndex);

   ArrayRef<int32_t> remainingIndices = getRawConstantIndices().slice(2);
@@ -414,7 +414,7 @@
 }

 Type LLVM::LLVMStructType::getTypeAtIndex(Attribute index) {
-  auto indexAttr = index.dyn_cast<IntegerAttr>();
+  auto indexAttr = llvm::dyn_cast<IntegerAttr>(index);
   if (!indexAttr || !indexAttr.getType().isInteger(32))
     return {};
   int32_t indexInt = indexAttr.getInt();
@@ -439,7 +439,7 @@
 }

 Type LLVM::LLVMArrayType::getTypeAtIndex(Attribute index) const {
-  auto indexAttr = index.dyn_cast<IntegerAttr>();
+  auto indexAttr = llvm::dyn_cast<IntegerAttr>(index);
   if (!indexAttr || !indexAttr.getType().isInteger(32))
     return {};
   int32_t indexInt = indexAttr.getInt();
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -354,7 +354,7 @@
   auto newType = llvm::cast<LLVMPointerType>(newEntry.getKey().get<Type>());
   const auto *it =
       llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
-        if (auto type = entry.getKey().dyn_cast<Type>()) {
+        if (auto type = llvm::dyn_cast_if_present<Type>(entry.getKey())) {
           return llvm::cast<LLVMPointerType>(type).getAddressSpace() ==
                  newType.getAddressSpace();
         }
         return false;
       });
   if (it == oldLayout.end()) {
     llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
-      if (auto type = entry.getKey().dyn_cast<Type>()) {
+      if (auto type = llvm::dyn_cast_if_present<Type>(entry.getKey())) {
         return llvm::cast<LLVMPointerType>(type).getAddressSpace() == 0;
       }
       return false;
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -2368,7 +2368,7 @@
   sizes.reserve(tileSizes.size());
   unsigned dynamicIdx = 0;
   for (OpFoldResult ofr : getMixedSizes()) {
-    if (auto attr = ofr.dyn_cast<Attribute>()) {
+    if (auto attr = llvm::dyn_cast_if_present<Attribute>(ofr)) {
       sizes.push_back(b.create<arith::ConstantIndexOp>(
           getLoc(), cast<IntegerAttr>(attr).getInt()));
       continue;
@@ -2794,7 +2794,7 @@
   sizes.reserve(tileSizes.size());
   unsigned dynamicIdx = 0;
   for (OpFoldResult ofr : getMixedSizes()) {
-    if (auto attr = ofr.dyn_cast<Attribute>()) {
+    if (auto attr = llvm::dyn_cast_if_present<Attribute>(ofr)) {
       sizes.push_back(b.create<arith::ConstantIndexOp>(
           getLoc(), cast<IntegerAttr>(attr).getInt()));
     } else {
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -1447,7 +1447,7 @@
       cast<LinalgOp>(genericOp.getOperation())
           .createLoopRanges(rewriter, genericOp.getLoc());
   auto opFoldIsConstantValue = [](OpFoldResult ofr, int64_t value) {
-    if (auto attr = ofr.dyn_cast<Attribute>())
+    if (auto attr = llvm::dyn_cast_if_present<Attribute>(ofr))
       return cast<IntegerAttr>(attr).getInt() == value;
     llvm::APInt actual;
     return matchPattern(ofr.get<Value>(), m_ConstantInt(&actual)) &&
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -229,7 +229,7 @@
   // to look for the bound.
   LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");
   Value size;
-  if (auto attr = rangeValue.size.dyn_cast<Attribute>()) {
+  if (auto attr = llvm::dyn_cast_if_present<Attribute>(rangeValue.size)) {
     size = getValueOrCreateConstantIndexOp(b, loc, rangeValue.size);
   } else {
     Value materializedSize =
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Split.cpp b/mlir/lib/Dialect/Linalg/Transforms/Split.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Split.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Split.cpp
@@ -92,7 +92,7 @@
       rewriter, op.getLoc(), d0 + d1 - d2,
       {iterationSpace[dimension].offset, iterationSpace[dimension].size,
        minSplitPoint});
-  if (auto attr = remainingSize.dyn_cast<Attribute>()) {
+  if (auto attr = llvm::dyn_cast_if_present<Attribute>(remainingSize)) {
    if (cast<IntegerAttr>(attr).getValue().isZero())
       return {op, TilingInterface()};
   }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -48,7 +48,7 @@
 static bool isZero(OpFoldResult v) {
   if (!v)
     return false;
-  if (auto attr = v.dyn_cast<Attribute>()) {
+  if (auto attr = llvm::dyn_cast_if_present<Attribute>(v)) {
     IntegerAttr intAttr = dyn_cast<IntegerAttr>(attr);
     return intAttr && intAttr.getValue().isZero();
   }
@@ -104,7 +104,7 @@
 /// checked at runtime.
 static void emitIsPositiveIndexAssertion(ImplicitLocOpBuilder &b,
                                          OpFoldResult value) {
-  if (auto attr = value.dyn_cast<Attribute>()) {
+  if (auto attr = llvm::dyn_cast_if_present<Attribute>(value)) {
     assert(cast<IntegerAttr>(attr).getValue().isStrictlyPositive() &&
            "expected strictly positive tile size and divisor");
     return;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -1135,7 +1135,7 @@
                 PatternRewriter &rewriter) const {
   // Given an OpFoldResult, return an index-typed value.
   auto getIdxValue = [&](OpFoldResult ofr) {
-    if (auto val = ofr.dyn_cast<Value>())
+    if (auto val = llvm::dyn_cast_if_present<Value>(ofr))
       return val;
     return rewriter
         .create<arith::ConstantIndexOp>(
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1646,7 +1646,7 @@
                                     ArrayRef<OpFoldResult> ofrs) {
   SmallVector<Value> result;
   for (auto o : ofrs) {
-    if (auto val = o.template dyn_cast<Value>()) {
+    if (auto val = llvm::dyn_cast_if_present<Value>(o)) {
       result.push_back(val);
     } else {
       result.push_back(rewriter.create<arith::ConstantIndexOp>(
@@ -1954,8 +1954,8 @@
     continue;

   // Other cases: Take a deeper look at defining ops of values.
- auto v1 = size1.dyn_cast(); - auto v2 = size2.dyn_cast(); + auto v1 = llvm::dyn_cast_if_present(size1); + auto v2 = llvm::dyn_cast_if_present(size2); if (!v1 || !v2) return false; diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp --- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp +++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp @@ -970,7 +970,7 @@ auto dim = it.index(); auto size = it.value(); curr.push_back(dim); - auto attr = size.dyn_cast(); + auto attr = llvm::dyn_cast_if_present(size); if (attr && cast(attr).getInt() == 1) continue; reassociation.emplace_back(ReassociationIndices{}); diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefMemorySlot.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefMemorySlot.cpp --- a/mlir/lib/Dialect/MemRef/IR/MemRefMemorySlot.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefMemorySlot.cpp @@ -64,7 +64,7 @@ //===----------------------------------------------------------------------===// static bool isSupportedElementType(Type type) { - return type.isa() || + return llvm::isa(type) || OpBuilder(type.getContext()).getZeroAttr(type); } @@ -110,7 +110,7 @@ SmallVector memref::AllocaOp::getDestructurableSlots() { MemRefType memrefType = getType(); - auto destructurable = memrefType.dyn_cast(); + auto destructurable = llvm::dyn_cast(memrefType); if (!destructurable) return {}; @@ -134,7 +134,7 @@ DenseMap slotMap; - auto memrefType = getType().cast(); + auto memrefType = llvm::cast(getType()); for (Attribute usedIndex : usedIndices) { Type elemType = memrefType.getTypeAtIndex(usedIndex); MemRefType elemPtr = MemRefType::get({}, elemType); @@ -281,7 +281,7 @@ MemRefDestructurableTypeExternalModel, MemRefType> { std::optional> getSubelementIndexMap(Type type) const { - auto memrefType = type.cast(); + auto memrefType = llvm::cast(type); constexpr int64_t maxMemrefSizeForDestructuring = 16; if (!memrefType.hasStaticShape() || memrefType.getNumElements() > maxMemrefSizeForDestructuring || @@ -298,15 +298,15 @@ } Type getTypeAtIndex(Type type, Attribute index) const { - auto memrefType = type.cast(); - auto coordArrAttr = index.dyn_cast(); + auto memrefType = llvm::cast(type); + auto coordArrAttr = llvm::dyn_cast(index); if (!coordArrAttr || coordArrAttr.size() != memrefType.getShape().size()) return {}; Type indexType = IndexType::get(memrefType.getContext()); for (const auto &[coordAttr, dimSize] : llvm::zip(coordArrAttr, memrefType.getShape())) { - auto coord = coordAttr.dyn_cast(); + auto coord = llvm::dyn_cast(coordAttr); if (!coord || coord.getType() != indexType || coord.getInt() < 0 || coord.getInt() >= dimSize) return {}; diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -970,7 +970,7 @@ return unusedDims; for (const auto &dim : llvm::enumerate(sizes)) - if (auto attr = dim.value().dyn_cast()) + if (auto attr = llvm::dyn_cast_if_present(dim.value())) if (llvm::cast(attr).getInt() == 1) unusedDims.set(dim.index()); @@ -1042,7 +1042,7 @@ OpFoldResult DimOp::fold(FoldAdaptor adaptor) { // All forms of folding require a known index. 
- auto index = adaptor.getIndex().dyn_cast_or_null(); + auto index = llvm::dyn_cast_if_present(adaptor.getIndex()); if (!index) return {}; diff --git a/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp b/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp --- a/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp @@ -56,7 +56,7 @@ // Because we only support input strides of 1, the output stride is also // always 1. if (llvm::all_of(strides, [](OpFoldResult &valueOrAttr) { - Attribute attr = valueOrAttr.dyn_cast(); + Attribute attr = llvm::dyn_cast_if_present(valueOrAttr); return attr && cast(attr).getInt() == 1; })) { strides = SmallVector(sourceOp.getMixedStrides().size(), @@ -86,8 +86,9 @@ } sizes.push_back(opSize); - Attribute opOffsetAttr = opOffset.dyn_cast(), - sourceOffsetAttr = sourceOffset.dyn_cast(); + Attribute opOffsetAttr = llvm::dyn_cast_if_present(opOffset), + sourceOffsetAttr = + llvm::dyn_cast_if_present(sourceOffset); if (opOffsetAttr && sourceOffsetAttr) { // If both offsets are static we can simply calculate the combined @@ -101,7 +102,7 @@ AffineExpr expr = rewriter.getAffineConstantExpr(0); SmallVector affineApplyOperands; for (auto valueOrAttr : {opOffset, sourceOffset}) { - if (auto attr = valueOrAttr.dyn_cast()) { + if (auto attr = llvm::dyn_cast_if_present(valueOrAttr)) { expr = expr + cast(attr).getInt(); } else { expr = diff --git a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp --- a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp +++ b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp @@ -520,7 +520,7 @@ << operandName << " operand appears more than once"; mlir::Type varType = operand.getType(); - auto symbolRef = std::get<1>(args).cast(); + auto symbolRef = llvm::cast(std::get<1>(args)); auto decl = SymbolTable::lookupNearestSymbolFrom(op, symbolRef); if (!decl) return op->emitOpError() diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -802,10 +802,10 @@ for (const auto &mapTypeOp : *map_types) { int64_t mapTypeBits = 0x00; - if (!mapTypeOp.isa()) + if (!llvm::isa(mapTypeOp)) return failure(); - mapTypeBits = mapTypeOp.cast().getInt(); + mapTypeBits = llvm::cast(mapTypeOp).getInt(); bool to = bitAnd(mapTypeBits, llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO); diff --git a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp --- a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp @@ -381,7 +381,7 @@ // map. auto yieldedRanked = cast(yieldedValueBufferType); #ifndef NDEBUG - auto iterRanked = initArgBufferType->cast(); + auto iterRanked = llvm::cast(*initArgBufferType); assert(llvm::equal(yieldedRanked.getShape(), iterRanked.getShape()) && "expected same shape"); assert(yieldedRanked.getMemorySpace() == iterRanked.getMemorySpace() && @@ -802,7 +802,7 @@ if (!isa(bbArg.getType())) return bbArg.getType(); // TODO: error handling - return bufferization::getBufferType(bbArg, options)->cast(); + return llvm::cast(*bufferization::getBufferType(bbArg, options)); })); // Construct a new scf.while op with memref instead of tensor values. 
diff --git a/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp b/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp --- a/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp +++ b/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp @@ -88,10 +88,10 @@ return failure(); unsigned dimIv = cstr.appendDimVar(iv); - auto lbv = lb.dyn_cast(); + auto lbv = llvm::dyn_cast_if_present(lb); unsigned symLb = lbv ? cstr.appendSymbolVar(lbv) : cstr.appendSymbolVar(/*num=*/1); - auto ubv = ub.dyn_cast(); + auto ubv = llvm::dyn_cast_if_present(ub); unsigned symUb = ubv ? cstr.appendSymbolVar(ubv) : cstr.appendSymbolVar(/*num=*/1); diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp @@ -152,7 +152,7 @@ auto type = llvm::cast(constructOp.getType()); if (getIndices().size() == 1 && constructOp.getConstituents().size() == type.getNumElements()) { - auto i = getIndices().begin()->cast(); + auto i = llvm::cast(*getIndices().begin()); return constructOp.getConstituents()[i.getValue().getSExtValue()]; } } diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp @@ -1562,8 +1562,8 @@ //===----------------------------------------------------------------------===// LogicalResult spirv::ConvertPtrToUOp::verify() { - auto operandType = getPointer().getType().cast(); - auto resultType = getResult().getType().cast(); + auto operandType = llvm::cast(getPointer().getType()); + auto resultType = llvm::cast(getResult().getType()); if (!resultType || !resultType.isSignlessInteger()) return emitError("result must be a scalar type of unsigned integer"); auto spirvModule = (*this)->getParentOfType(); @@ -1583,8 +1583,8 @@ //===----------------------------------------------------------------------===// LogicalResult spirv::ConvertUToPtrOp::verify() { - auto operandType = getOperand().getType().cast(); - auto resultType = getResult().getType().cast(); + auto operandType = llvm::cast(getOperand().getType()); + auto resultType = llvm::cast(getResult().getType()); if (!operandType || !operandType.isSignlessInteger()) return emitError("result must be a scalar type of unsigned integer"); auto spirvModule = (*this)->getParentOfType(); diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp @@ -125,23 +125,23 @@ } unsigned CompositeType::getNumElements() const { - if (auto arrayType = dyn_cast()) + if (auto arrayType = llvm::dyn_cast(*this)) return arrayType.getNumElements(); - if (auto matrixType = dyn_cast()) + if (auto matrixType = llvm::dyn_cast(*this)) return matrixType.getNumColumns(); - if (auto structType = dyn_cast()) + if (auto structType = llvm::dyn_cast(*this)) return structType.getNumElements(); - if (auto vectorType = dyn_cast()) + if (auto vectorType = llvm::dyn_cast(*this)) return vectorType.getNumElements(); - if (isa()) { + if (llvm::isa(*this)) { llvm_unreachable( "invalid to query number of elements of spirv::CooperativeMatrix type"); } - if (isa()) { + if (llvm::isa(*this)) { llvm_unreachable( "invalid to query number of elements of spirv::JointMatrix type"); } - if (isa()) { + if (llvm::isa(*this)) { llvm_unreachable( "invalid to query number of 
elements of spirv::RuntimeArray type"); } @@ -149,8 +149,8 @@ } bool CompositeType::hasCompileTimeKnownNumElements() const { - return !isa(); + return !llvm::isa(*this); } void CompositeType::getExtensions( @@ -188,11 +188,11 @@ } std::optional CompositeType::getSizeInBytes() { - if (auto arrayType = dyn_cast()) + if (auto arrayType = llvm::dyn_cast(*this)) return arrayType.getSizeInBytes(); - if (auto structType = dyn_cast()) + if (auto structType = llvm::dyn_cast(*this)) return structType.getSizeInBytes(); - if (auto vectorType = dyn_cast()) { + if (auto vectorType = llvm::dyn_cast(*this)) { std::optional elementSize = llvm::cast(vectorType.getElementType()).getSizeInBytes(); if (!elementSize) @@ -680,7 +680,7 @@ capabilities.push_back(ref); \ } break - if (auto intType = dyn_cast()) { + if (auto intType = llvm::dyn_cast(*this)) { switch (bitwidth) { WIDTH_CASE(Int, 8); WIDTH_CASE(Int, 16); @@ -692,7 +692,7 @@ llvm_unreachable("invalid bitwidth to getCapabilities"); } } else { - assert(isa()); + assert(llvm::isa(*this)); switch (bitwidth) { WIDTH_CASE(Float, 16); WIDTH_CASE(Float, 64); @@ -735,22 +735,22 @@ } bool SPIRVType::isScalarOrVector() { - return isIntOrFloat() || isa(); + return isIntOrFloat() || llvm::isa(*this); } void SPIRVType::getExtensions(SPIRVType::ExtensionArrayRefVector &extensions, std::optional storage) { - if (auto scalarType = dyn_cast()) { + if (auto scalarType = llvm::dyn_cast(*this)) { scalarType.getExtensions(extensions, storage); - } else if (auto compositeType = dyn_cast()) { + } else if (auto compositeType = llvm::dyn_cast(*this)) { compositeType.getExtensions(extensions, storage); - } else if (auto imageType = dyn_cast()) { + } else if (auto imageType = llvm::dyn_cast(*this)) { imageType.getExtensions(extensions, storage); - } else if (auto sampledImageType = dyn_cast()) { + } else if (auto sampledImageType = llvm::dyn_cast(*this)) { sampledImageType.getExtensions(extensions, storage); - } else if (auto matrixType = dyn_cast()) { + } else if (auto matrixType = llvm::dyn_cast(*this)) { matrixType.getExtensions(extensions, storage); - } else if (auto ptrType = dyn_cast()) { + } else if (auto ptrType = llvm::dyn_cast(*this)) { ptrType.getExtensions(extensions, storage); } else { llvm_unreachable("invalid SPIR-V Type to getExtensions"); @@ -760,17 +760,17 @@ void SPIRVType::getCapabilities( SPIRVType::CapabilityArrayRefVector &capabilities, std::optional storage) { - if (auto scalarType = dyn_cast()) { + if (auto scalarType = llvm::dyn_cast(*this)) { scalarType.getCapabilities(capabilities, storage); - } else if (auto compositeType = dyn_cast()) { + } else if (auto compositeType = llvm::dyn_cast(*this)) { compositeType.getCapabilities(capabilities, storage); - } else if (auto imageType = dyn_cast()) { + } else if (auto imageType = llvm::dyn_cast(*this)) { imageType.getCapabilities(capabilities, storage); - } else if (auto sampledImageType = dyn_cast()) { + } else if (auto sampledImageType = llvm::dyn_cast(*this)) { sampledImageType.getCapabilities(capabilities, storage); - } else if (auto matrixType = dyn_cast()) { + } else if (auto matrixType = llvm::dyn_cast(*this)) { matrixType.getCapabilities(capabilities, storage); - } else if (auto ptrType = dyn_cast()) { + } else if (auto ptrType = llvm::dyn_cast(*this)) { ptrType.getCapabilities(capabilities, storage); } else { llvm_unreachable("invalid SPIR-V Type to getCapabilities"); @@ -778,9 +778,9 @@ } std::optional SPIRVType::getSizeInBytes() { - if (auto scalarType = dyn_cast()) + if (auto scalarType = 
llvm::dyn_cast(*this)) return scalarType.getSizeInBytes(); - if (auto compositeType = dyn_cast()) + if (auto compositeType = llvm::dyn_cast(*this)) return compositeType.getSizeInBytes(); return std::nullopt; } diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp --- a/mlir/lib/Dialect/Shape/IR/Shape.cpp +++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp @@ -856,9 +856,9 @@ if (!adaptor.getLhs() || !adaptor.getRhs()) return nullptr; auto lhsShape = llvm::to_vector<6>( - adaptor.getLhs().cast().getValues()); + llvm::cast(adaptor.getLhs()).getValues()); auto rhsShape = llvm::to_vector<6>( - adaptor.getRhs().cast().getValues()); + llvm::cast(adaptor.getRhs()).getValues()); SmallVector resultShape; resultShape.append(lhsShape.begin(), lhsShape.end()); resultShape.append(rhsShape.begin(), rhsShape.end()); @@ -989,7 +989,7 @@ if (!operand) return false; extents.push_back(llvm::to_vector<6>( - operand.cast().getValues())); + llvm::cast(operand).getValues())); } return OpTrait::util::staticallyKnownBroadcastable(extents); }()) @@ -1132,10 +1132,10 @@ //===----------------------------------------------------------------------===// OpFoldResult DivOp::fold(FoldAdaptor adaptor) { - auto lhs = adaptor.getLhs().dyn_cast_or_null(); + auto lhs = llvm::dyn_cast_if_present(adaptor.getLhs()); if (!lhs) return nullptr; - auto rhs = adaptor.getRhs().dyn_cast_or_null(); + auto rhs = llvm::dyn_cast_if_present(adaptor.getRhs()); if (!rhs) return nullptr; @@ -1346,7 +1346,7 @@ } OpFoldResult GetExtentOp::fold(FoldAdaptor adaptor) { - auto elements = adaptor.getShape().dyn_cast_or_null(); + auto elements = llvm::dyn_cast_if_present(adaptor.getShape()); if (!elements) return nullptr; std::optional dim = getConstantDim(); @@ -1490,7 +1490,7 @@ //===----------------------------------------------------------------------===// OpFoldResult shape::RankOp::fold(FoldAdaptor adaptor) { - auto shape = adaptor.getShape().dyn_cast_or_null(); + auto shape = llvm::dyn_cast_if_present(adaptor.getShape()); if (!shape) return {}; int64_t rank = shape.getNumElements(); @@ -1671,10 +1671,10 @@ //===----------------------------------------------------------------------===// OpFoldResult MulOp::fold(FoldAdaptor adaptor) { - auto lhs = adaptor.getLhs().dyn_cast_or_null(); + auto lhs = llvm::dyn_cast_if_present(adaptor.getLhs()); if (!lhs) return nullptr; - auto rhs = adaptor.getRhs().dyn_cast_or_null(); + auto rhs = llvm::dyn_cast_if_present(adaptor.getRhs()); if (!rhs) return nullptr; APInt folded = lhs.getValue() * rhs.getValue(); @@ -1864,9 +1864,9 @@ if (!adaptor.getOperand() || !adaptor.getIndex()) return failure(); auto shapeVec = llvm::to_vector<6>( - adaptor.getOperand().cast().getValues()); + llvm::cast(adaptor.getOperand()).getValues()); auto shape = llvm::ArrayRef(shapeVec); - auto splitPoint = adaptor.getIndex().cast().getInt(); + auto splitPoint = llvm::cast(adaptor.getIndex()).getInt(); // Verify that the split point is in the correct range. // TODO: Constant fold to an "error". 
int64_t rank = shape.size(); @@ -1889,7 +1889,7 @@ return OpFoldResult(); Builder builder(getContext()); auto shape = llvm::to_vector<6>( - adaptor.getInput().cast().getValues()); + llvm::cast(adaptor.getInput()).getValues()); auto type = RankedTensorType::get({static_cast(shape.size())}, builder.getIndexType()); return DenseIntElementsAttr::get(type, shape); diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp --- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp +++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp @@ -815,7 +815,7 @@ Level cooStartLvl = getCOOStart(stt.getEncoding()); if (cooStartLvl < stt.getLvlRank()) { // We only support trailing COO for now; it must be the last input. - auto cooTp = lvlTps.back().cast(); + auto cooTp = llvm::cast(lvlTps.back()); // The coordinates should be in shape of unsigned expCOORank = stt.getLvlRank() - cooStartLvl; if (cooTp.getRank() != 2 || expCOORank != cooTp.getShape().back()) { @@ -844,7 +844,7 @@ inputTp = lvlTps[idx++]; } // The input element type and expected element type should match. - Type inpElemTp = inputTp.cast().getElementType(); + Type inpElemTp = llvm::cast(inputTp).getElementType(); Type expElemTp = getFieldElemType(stt, fKind); if (inpElemTp != expElemTp) { misMatch = true; diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp @@ -188,7 +188,7 @@ /// Generates a memref from a tensor operation. static Value genTensorToMemref(PatternRewriter &rewriter, Location loc, Value tensor) { - auto tensorType = tensor.getType().cast(); + auto tensorType = llvm::cast(tensor.getType()); auto memrefType = MemRefType::get(tensorType.getShape(), tensorType.getElementType()); return rewriter.create(loc, memrefType, tensor); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp @@ -414,7 +414,7 @@ /// TODO: better unord/not-unique; also generalize, optimize, specialize! SmallVector genImplementation(TypeRange retTypes, ValueRange args, OpBuilder &builder, Location loc) { - const SparseTensorType stt(rtp.cast()); + const SparseTensorType stt(llvm::cast(rtp)); const Level lvlRank = stt.getLvlRank(); // Extract fields and coordinates from args.
SmallVector fields = llvm::to_vector(args.drop_back(lvlRank + 1)); @@ -466,7 +466,7 @@ // The mangled name of the function has this format: // ______ constexpr const char kInsertFuncNamePrefix[] = "_insert_"; - const SparseTensorType stt(rtp.cast()); + const SparseTensorType stt(llvm::cast(rtp)); SmallString<32> nameBuffer; llvm::raw_svector_ostream nameOstream(nameBuffer); @@ -541,14 +541,14 @@ static TypedValue genToMemref(OpBuilder &builder, Location loc, Value tensor) { - auto tTp = tensor.getType().cast(); + auto tTp = llvm::cast(tensor.getType()); auto mTp = MemRefType::get(tTp.getShape(), tTp.getElementType()); return builder.create(loc, mTp, tensor) .getResult(); } Value genSliceToSize(OpBuilder &builder, Location loc, Value mem, Value sz) { - auto elemTp = mem.getType().cast().getElementType(); + auto elemTp = llvm::cast(mem.getType()).getElementType(); return builder .create( loc, MemRefType::get({ShapedType::kDynamic}, elemTp), mem, diff --git a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp --- a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp @@ -180,7 +180,7 @@ AffineExpr expr = b.getAffineDimExpr(0); unsigned numSymbols = 0; auto addOpFoldResult = [&](OpFoldResult valueOrAttr) { - if (Value v = valueOrAttr.dyn_cast()) { + if (Value v = llvm::dyn_cast_if_present(valueOrAttr)) { expr = expr + b.getAffineSymbolExpr(numSymbols++); mapOperands.push_back(v); return; diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp --- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp @@ -501,7 +501,7 @@ OpFoldResult DimOp::fold(FoldAdaptor adaptor) { // All forms of folding require a known index. - auto index = adaptor.getIndex().dyn_cast_or_null(); + auto index = llvm::dyn_cast_if_present(adaptor.getIndex()); if (!index) return {}; @@ -764,7 +764,7 @@ OpFoldResult currDim = std::get<1>(it); // Case 1: The empty tensor dim is static. Check that the tensor cast // result dim matches. - if (auto attr = currDim.dyn_cast()) { + if (auto attr = llvm::dyn_cast_if_present(currDim)) { if (ShapedType::isDynamic(newDim) || newDim != llvm::cast(attr).getInt()) { // Something is off, the cast result shape cannot be more dynamic @@ -2106,7 +2106,7 @@ } OpFoldResult ExtractSliceOp::fold(FoldAdaptor adaptor) { - if (auto splat = adaptor.getSource().dyn_cast_or_null()) { + if (auto splat = llvm::dyn_cast_if_present(adaptor.getSource())) { auto resultType = llvm::cast(getResult().getType()); if (resultType.hasStaticShape()) return splat.resizeSplat(resultType); @@ -3558,7 +3558,7 @@ SmallVector result; for (auto o : ofrs) { // Have to do this first, as getConstantIntValue special-cases constants. 
- if (o.dyn_cast()) + if (llvm::dyn_cast_if_present(o)) result.push_back(ShapedType::kDynamic); else result.push_back(getConstantIntValue(o).value_or(ShapedType::kDynamic)); diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp --- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp @@ -76,7 +76,7 @@ auto rankedResultType = cast(castOp.getType()); return MemRefType::get( rankedResultType.getShape(), rankedResultType.getElementType(), - maybeSrcBufferType->cast().getLayout(), memorySpace); + llvm::cast(*maybeSrcBufferType).getLayout(), memorySpace); } LogicalResult bufferize(Operation *op, RewriterBase &rewriter, @@ -139,7 +139,7 @@ collapseShapeOp.getSrc(), options, fixedTypes); if (failed(maybeSrcBufferType)) return failure(); - auto srcBufferType = maybeSrcBufferType->cast(); + auto srcBufferType = llvm::cast(*maybeSrcBufferType); bool canBeCollapsed = memref::CollapseShapeOp::isGuaranteedCollapsible( srcBufferType, collapseShapeOp.getReassociationIndices()); @@ -303,7 +303,7 @@ expandShapeOp.getSrc(), options, fixedTypes); if (failed(maybeSrcBufferType)) return failure(); - auto srcBufferType = maybeSrcBufferType->cast(); + auto srcBufferType = llvm::cast(*maybeSrcBufferType); auto maybeResultType = memref::ExpandShapeOp::computeExpandedType( srcBufferType, expandShapeOp.getResultType().getShape(), expandShapeOp.getReassociationIndices()); @@ -369,7 +369,7 @@ if (failed(resultMemrefType)) return failure(); Value subView = rewriter.create( - loc, resultMemrefType->cast(), *srcMemref, mixedOffsets, + loc, llvm::cast(*resultMemrefType), *srcMemref, mixedOffsets, mixedSizes, mixedStrides); replaceOpWithBufferizedValues(rewriter, op, subView); @@ -389,7 +389,7 @@ SmallVector mixedSizes = extractSliceOp.getMixedSizes(); SmallVector mixedStrides = extractSliceOp.getMixedStrides(); return cast(memref::SubViewOp::inferRankReducedResultType( - extractSliceOp.getType().getShape(), srcMemrefType->cast(), + extractSliceOp.getType().getShape(), llvm::cast(*srcMemrefType), mixedOffsets, mixedSizes, mixedStrides)); } }; diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp --- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp @@ -548,8 +548,8 @@ return {}; auto resultETy = resultTy.getElementType(); - auto lhsAttr = adaptor.getInput1().dyn_cast_or_null(); - auto rhsAttr = adaptor.getInput2().dyn_cast_or_null(); + auto lhsAttr = llvm::dyn_cast_if_present(adaptor.getInput1()); + auto rhsAttr = llvm::dyn_cast_if_present(adaptor.getInput2()); if (lhsTy == resultTy && isSplatZero(resultETy, rhsAttr)) return getInput1(); @@ -573,8 +573,8 @@ return {}; auto resultETy = resultTy.getElementType(); - auto lhsAttr = adaptor.getInput1().dyn_cast_or_null(); - auto rhsAttr = adaptor.getInput2().dyn_cast_or_null(); + auto lhsAttr = llvm::dyn_cast_if_present(adaptor.getInput1()); + auto rhsAttr = llvm::dyn_cast_if_present(adaptor.getInput2()); if (lhsAttr && lhsAttr.isSplat()) { if (llvm::isa(resultETy) && lhsAttr.getSplatValue().isZero()) @@ -642,8 +642,8 @@ return {}; auto resultETy = resultTy.getElementType(); - auto lhsAttr = adaptor.getInput1().dyn_cast_or_null(); - auto rhsAttr = adaptor.getInput2().dyn_cast_or_null(); + auto lhsAttr = llvm::dyn_cast_if_present(adaptor.getInput1()); + auto rhsAttr = 
llvm::dyn_cast_if_present(adaptor.getInput2()); const int64_t shift = llvm::isa(resultETy) ? getShift() : 0; if (rhsTy == resultTy) { @@ -670,8 +670,8 @@ return {}; auto resultETy = resultTy.getElementType(); - auto lhsAttr = adaptor.getInput1().dyn_cast_or_null(); - auto rhsAttr = adaptor.getInput2().dyn_cast_or_null(); + auto lhsAttr = llvm::dyn_cast_if_present(adaptor.getInput1()); + auto rhsAttr = llvm::dyn_cast_if_present(adaptor.getInput2()); if (lhsTy == resultTy && isSplatZero(resultETy, rhsAttr)) return getInput1(); @@ -713,8 +713,8 @@ OpFoldResult GreaterOp::fold(FoldAdaptor adaptor) { auto resultTy = llvm::dyn_cast(getType()); - auto lhsAttr = adaptor.getInput1().dyn_cast_or_null(); - auto rhsAttr = adaptor.getInput2().dyn_cast_or_null(); + auto lhsAttr = llvm::dyn_cast_if_present(adaptor.getInput1()); + auto rhsAttr = llvm::dyn_cast_if_present(adaptor.getInput2()); if (!lhsAttr || !rhsAttr) return {}; @@ -725,8 +725,8 @@ OpFoldResult GreaterEqualOp::fold(FoldAdaptor adaptor) { auto resultTy = llvm::dyn_cast(getType()); - auto lhsAttr = adaptor.getInput1().dyn_cast_or_null(); - auto rhsAttr = adaptor.getInput2().dyn_cast_or_null(); + auto lhsAttr = llvm::dyn_cast_if_present(adaptor.getInput1()); + auto rhsAttr = llvm::dyn_cast_if_present(adaptor.getInput2()); if (!lhsAttr || !rhsAttr) return {}; @@ -738,8 +738,8 @@ OpFoldResult EqualOp::fold(FoldAdaptor adaptor) { auto resultTy = llvm::dyn_cast(getType()); - auto lhsAttr = adaptor.getInput1().dyn_cast_or_null(); - auto rhsAttr = adaptor.getInput2().dyn_cast_or_null(); + auto lhsAttr = llvm::dyn_cast_if_present(adaptor.getInput1()); + auto rhsAttr = llvm::dyn_cast_if_present(adaptor.getInput2()); Value lhs = getInput1(); Value rhs = getInput2(); auto lhsTy = llvm::cast(lhs.getType()); @@ -763,7 +763,7 @@ if (getInput().getType() == getType()) return getInput(); - auto operand = adaptor.getInput().dyn_cast_or_null(); + auto operand = llvm::dyn_cast_if_present(adaptor.getInput()); if (!operand) return {}; @@ -852,7 +852,7 @@ if (inputTy == outputTy) return getInput1(); - auto operand = adaptor.getInput1().dyn_cast_or_null(); + auto operand = llvm::dyn_cast_if_present(adaptor.getInput1()); if (operand && outputTy.hasStaticShape() && operand.isSplat()) { return SplatElementsAttr::get(outputTy, operand.getSplatValue()); } @@ -863,7 +863,7 @@ OpFoldResult PadOp::fold(FoldAdaptor adaptor) { // If the pad is all zeros we can fold this operation away. 
if (adaptor.getPadding()) { - auto densePad = adaptor.getPadding().cast(); + auto densePad = llvm::cast(adaptor.getPadding()); if (densePad.isSplat() && densePad.getSplatValue().isZero()) { return getInput1(); } @@ -907,7 +907,7 @@ auto operand = getInput(); auto operandTy = llvm::cast(operand.getType()); auto axis = getAxis(); - auto operandAttr = adaptor.getInput().dyn_cast_or_null(); + auto operandAttr = llvm::dyn_cast_if_present(adaptor.getInput()); if (operandAttr) return operandAttr; @@ -936,7 +936,7 @@ !outputTy.getElementType().isIntOrIndexOrFloat()) return {}; - auto operand = adaptor.getInput().cast(); + auto operand = llvm::cast(adaptor.getInput()); if (operand.isSplat() && outputTy.hasStaticShape()) { return SplatElementsAttr::get(outputTy, operand.getSplatValue()); } @@ -955,7 +955,7 @@ if (getOnTrue() == getOnFalse()) return getOnTrue(); - auto predicate = adaptor.getPred().dyn_cast_or_null(); + auto predicate = llvm::dyn_cast_if_present(adaptor.getPred()); if (!predicate) return {}; @@ -977,7 +977,7 @@ auto resultTy = llvm::cast(getType()); // Transposing splat values just means reshaping. - if (auto input = adaptor.getInput1().dyn_cast_or_null()) { + if (auto input = llvm::dyn_cast_if_present(adaptor.getInput1())) { if (input.isSplat() && resultTy.hasStaticShape() && inputTy.getElementType() == resultTy.getElementType()) return input.reshape(resultTy); diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp --- a/mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp @@ -63,9 +63,9 @@ // Verify the rank agrees with the output type if the output type is ranked. if (outputType) { if (outputType.getRank() != - input1_copy.getType().cast().getRank() || + llvm::cast(input1_copy.getType()).getRank() || outputType.getRank() != - input2_copy.getType().cast().getRank()) + llvm::cast(input2_copy.getType()).getRank()) return rewriter.notifyMatchFailure( loc, "the reshaped type doesn't agree with the ranked output type"); } diff --git a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp --- a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp +++ b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp @@ -103,8 +103,8 @@ LogicalResult mlir::tosa::EqualizeRanks(PatternRewriter &rewriter, Location loc, Value &input1, Value &input2) { - auto input1Ty = input1.getType().dyn_cast(); - auto input2Ty = input2.getType().dyn_cast(); + auto input1Ty = llvm::dyn_cast(input1.getType()); + auto input2Ty = llvm::dyn_cast(input2.getType()); if (!input1Ty || !input2Ty) { return failure(); @@ -126,9 +126,9 @@ } ArrayRef higherRankShape = - higherTensorValue.getType().cast().getShape(); + llvm::cast(higherTensorValue.getType()).getShape(); ArrayRef lowerRankShape = - lowerTensorValue.getType().cast().getShape(); + llvm::cast(lowerTensorValue.getType()).getShape(); SmallVector reshapeOutputShape; @@ -136,7 +136,8 @@ .failed()) return failure(); - auto reshapeInputType = lowerTensorValue.getType().cast(); + auto reshapeInputType = + llvm::cast(lowerTensorValue.getType()); auto reshapeOutputType = RankedTensorType::get( ArrayRef(reshapeOutputShape), reshapeInputType.getElementType()); diff --git a/mlir/lib/Dialect/Transform/IR/TransformInterfaces.cpp b/mlir/lib/Dialect/Transform/IR/TransformInterfaces.cpp --- a/mlir/lib/Dialect/Transform/IR/TransformInterfaces.cpp +++ b/mlir/lib/Dialect/Transform/IR/TransformInterfaces.cpp
@@ -118,7 +118,7 @@ SmallVector operations; operations.reserve(values.size()); for (transform::MappedValue value : values) { - if (auto *op = value.dyn_cast()) { + if (auto *op = llvm::dyn_cast_if_present(value)) { operations.push_back(op); continue; } @@ -135,7 +135,7 @@ SmallVector payloadValues; payloadValues.reserve(values.size()); for (transform::MappedValue value : values) { - if (auto v = value.dyn_cast()) { + if (auto v = llvm::dyn_cast_if_present(value)) { payloadValues.push_back(v); continue; } @@ -152,7 +152,7 @@ SmallVector parameters; parameters.reserve(values.size()); for (transform::MappedValue value : values) { - if (auto attr = value.dyn_cast()) { + if (auto attr = llvm::dyn_cast_if_present(value)) { parameters.push_back(attr); continue; } diff --git a/mlir/lib/Dialect/Utils/StaticValueUtils.cpp b/mlir/lib/Dialect/Utils/StaticValueUtils.cpp --- a/mlir/lib/Dialect/Utils/StaticValueUtils.cpp +++ b/mlir/lib/Dialect/Utils/StaticValueUtils.cpp @@ -18,7 +18,7 @@ bool isZeroIndex(OpFoldResult v) { if (!v) return false; - if (auto attr = v.dyn_cast()) { + if (auto attr = llvm::dyn_cast_if_present(v)) { IntegerAttr intAttr = dyn_cast(attr); return intAttr && intAttr.getValue().isZero(); } @@ -51,7 +51,7 @@ void dispatchIndexOpFoldResult(OpFoldResult ofr, SmallVectorImpl &dynamicVec, SmallVectorImpl &staticVec) { - auto v = ofr.dyn_cast(); + auto v = llvm::dyn_cast_if_present(ofr); if (!v) { APInt apInt = cast(ofr.get()).getValue(); staticVec.push_back(apInt.getSExtValue()); @@ -116,14 +116,14 @@ /// If ofr is a constant integer or an IntegerAttr, return the integer. std::optional getConstantIntValue(OpFoldResult ofr) { // Case 1: Check for Constant integer. - if (auto val = ofr.dyn_cast()) { + if (auto val = llvm::dyn_cast_if_present(ofr)) { APSInt intVal; if (matchPattern(val, m_ConstantInt(&intVal))) return intVal.getSExtValue(); return std::nullopt; } // Case 2: Check for IntegerAttr. 
- Attribute attr = ofr.dyn_cast(); + Attribute attr = llvm::dyn_cast_if_present(ofr); if (auto intAttr = dyn_cast_or_null(attr)) return intAttr.getValue().getSExtValue(); return std::nullopt; @@ -143,7 +143,8 @@ auto cst1 = getConstantIntValue(ofr1), cst2 = getConstantIntValue(ofr2); if (cst1 && cst2 && *cst1 == *cst2) return true; - auto v1 = ofr1.dyn_cast(), v2 = ofr2.dyn_cast(); + auto v1 = llvm::dyn_cast_if_present(ofr1), + v2 = llvm::dyn_cast_if_present(ofr2); return v1 && v1 == v2; } diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -1154,7 +1154,7 @@ OpaqueProperties properties, RegionRange, SmallVectorImpl &inferredReturnTypes) { ExtractOp::Adaptor op(operands, attributes, properties); - auto vectorType = op.getVector().getType().cast(); + auto vectorType = llvm::cast(op.getVector().getType()); if (static_cast(op.getPosition().size()) == vectorType.getRank()) { inferredReturnTypes.push_back(vectorType.getElementType()); } else { @@ -2003,9 +2003,9 @@ if (!adaptor.getSource()) return {}; auto vectorType = getResultVectorType(); - if (adaptor.getSource().isa()) + if (llvm::isa(adaptor.getSource())) return DenseElementsAttr::get(vectorType, adaptor.getSource()); - if (auto attr = adaptor.getSource().dyn_cast()) + if (auto attr = llvm::dyn_cast(adaptor.getSource())) return DenseElementsAttr::get(vectorType, attr.getSplatValue()); return {}; } @@ -2090,7 +2090,7 @@ OpaqueProperties properties, RegionRange, SmallVectorImpl &inferredReturnTypes) { ShuffleOp::Adaptor op(operands, attributes, properties); - auto v1Type = op.getV1().getType().cast(); + auto v1Type = llvm::cast(op.getV1().getType()); auto v1Rank = v1Type.getRank(); // Construct resulting type: leading dimension matches mask // length, all trailing dimensions match the operands. @@ -4951,7 +4951,7 @@ OpFoldResult vector::TransposeOp::fold(FoldAdaptor adaptor) { // Eliminate splat constant transpose ops. - if (auto attr = adaptor.getVector().dyn_cast_or_null()) + if (auto attr = llvm::dyn_cast_if_present(adaptor.getVector())) if (attr.isSplat()) return attr.reshape(getResultVectorType()); diff --git a/mlir/lib/IR/AsmPrinter.cpp b/mlir/lib/IR/AsmPrinter.cpp --- a/mlir/lib/IR/AsmPrinter.cpp +++ b/mlir/lib/IR/AsmPrinter.cpp @@ -3642,7 +3642,7 @@ if (auto *op = getDefiningOp()) return op->print(os, flags); // TODO: Improve BlockArgument print'ing. - BlockArgument arg = this->cast(); + BlockArgument arg = llvm::cast(*this); os << " of type '" << arg.getType() << "' at index: " << arg.getArgNumber(); } @@ -3656,7 +3656,7 @@ return op->print(os, state); // TODO: Improve BlockArgument print'ing. - BlockArgument arg = this->cast(); + BlockArgument arg = llvm::cast(*this); os << " of type '" << arg.getType() << "' at index: " << arg.getArgNumber(); } @@ -3693,10 +3693,10 @@ void Value::printAsOperand(raw_ostream &os, const OpPrintingFlags &flags) { Operation *op; - if (auto result = dyn_cast()) { + if (auto result = llvm::dyn_cast(*this)) { op = result.getOwner(); } else { - op = cast().getOwner()->getParentOp(); + op = llvm::cast(*this).getOwner()->getParentOp(); if (!op) { os << "<>"; return; diff --git a/mlir/lib/IR/Block.cpp b/mlir/lib/IR/Block.cpp --- a/mlir/lib/IR/Block.cpp +++ b/mlir/lib/IR/Block.cpp @@ -347,14 +347,14 @@ /// See `llvm::detail::indexed_accessor_range_base` for details. 
BlockRange::OwnerT BlockRange::offset_base(OwnerT object, ptrdiff_t index) { - if (auto *operand = object.dyn_cast()) + if (auto *operand = llvm::dyn_cast_if_present(object)) return {operand + index}; - return {object.dyn_cast() + index}; + return {llvm::dyn_cast_if_present(object) + index}; } /// See `llvm::detail::indexed_accessor_range_base` for details. Block *BlockRange::dereference_iterator(OwnerT object, ptrdiff_t index) { - if (const auto *operand = object.dyn_cast()) + if (const auto *operand = llvm::dyn_cast_if_present(object)) return operand[index].get(); - return object.dyn_cast()[index]; + return llvm::dyn_cast_if_present(object)[index]; } diff --git a/mlir/lib/IR/Builders.cpp b/mlir/lib/IR/Builders.cpp --- a/mlir/lib/IR/Builders.cpp +++ b/mlir/lib/IR/Builders.cpp @@ -483,7 +483,7 @@ Type expectedType = std::get<1>(it); // Normal values get pushed back directly. - if (auto value = std::get<0>(it).dyn_cast()) { + if (auto value = llvm::dyn_cast_if_present(std::get<0>(it))) { if (value.getType() != expectedType) return cleanupFailure(); diff --git a/mlir/lib/IR/BuiltinAttributes.cpp b/mlir/lib/IR/BuiltinAttributes.cpp --- a/mlir/lib/IR/BuiltinAttributes.cpp +++ b/mlir/lib/IR/BuiltinAttributes.cpp @@ -1247,12 +1247,12 @@ DenseElementsAttr DenseElementsAttr::mapValues(Type newElementType, function_ref mapping) const { - return cast().mapValues(newElementType, mapping); + return llvm::cast(*this).mapValues(newElementType, mapping); } DenseElementsAttr DenseElementsAttr::mapValues( Type newElementType, function_ref mapping) const { - return cast().mapValues(newElementType, mapping); + return llvm::cast(*this).mapValues(newElementType, mapping); } ShapedType DenseElementsAttr::getType() const { diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp --- a/mlir/lib/IR/BuiltinTypes.cpp +++ b/mlir/lib/IR/BuiltinTypes.cpp @@ -88,45 +88,45 @@ //===----------------------------------------------------------------------===// unsigned FloatType::getWidth() { - if (isa()) + if (llvm::isa(*this)) return 8; - if (isa()) + if (llvm::isa(*this)) return 16; - if (isa()) + if (llvm::isa(*this)) return 32; - if (isa()) + if (llvm::isa(*this)) return 64; - if (isa()) + if (llvm::isa(*this)) return 80; - if (isa()) + if (llvm::isa(*this)) return 128; llvm_unreachable("unexpected float type"); } /// Returns the floating semantics for the given type. 
const llvm::fltSemantics &FloatType::getFloatSemantics() { - if (isa()) + if (llvm::isa(*this)) return APFloat::Float8E5M2(); - if (isa()) + if (llvm::isa(*this)) return APFloat::Float8E4M3FN(); - if (isa()) + if (llvm::isa(*this)) return APFloat::Float8E5M2FNUZ(); - if (isa()) + if (llvm::isa(*this)) return APFloat::Float8E4M3FNUZ(); - if (isa()) + if (llvm::isa(*this)) return APFloat::Float8E4M3B11FNUZ(); - if (isa()) + if (llvm::isa(*this)) return APFloat::BFloat(); - if (isa()) + if (llvm::isa(*this)) return APFloat::IEEEhalf(); - if (isa()) + if (llvm::isa(*this)) return APFloat::IEEEsingle(); - if (isa()) + if (llvm::isa(*this)) return APFloat::IEEEdouble(); - if (isa()) + if (llvm::isa(*this)) return APFloat::x87DoubleExtended(); - if (isa()) + if (llvm::isa(*this)) return APFloat::IEEEquad(); llvm_unreachable("non-floating point type used"); } @@ -269,21 +269,21 @@ [](auto type) { return type.getElementType(); }); } -bool TensorType::hasRank() const { return !isa(); } +bool TensorType::hasRank() const { return !llvm::isa(*this); } ArrayRef TensorType::getShape() const { - return cast().getShape(); + return llvm::cast(*this).getShape(); } TensorType TensorType::cloneWith(std::optional> shape, Type elementType) const { - if (auto unrankedTy = dyn_cast()) { + if (auto unrankedTy = llvm::dyn_cast(*this)) { if (shape) return RankedTensorType::get(*shape, elementType); return UnrankedTensorType::get(elementType); } - auto rankedTy = cast(); + auto rankedTy = llvm::cast(*this); if (!shape) return RankedTensorType::get(rankedTy.getShape(), elementType, rankedTy.getEncoding()); @@ -356,15 +356,15 @@ [](auto type) { return type.getElementType(); }); } -bool BaseMemRefType::hasRank() const { return !isa(); } +bool BaseMemRefType::hasRank() const { return !llvm::isa(*this); } ArrayRef BaseMemRefType::getShape() const { - return cast().getShape(); + return llvm::cast(*this).getShape(); } BaseMemRefType BaseMemRefType::cloneWith(std::optional> shape, Type elementType) const { - if (auto unrankedTy = dyn_cast()) { + if (auto unrankedTy = llvm::dyn_cast(*this)) { if (!shape) return UnrankedMemRefType::get(elementType, getMemorySpace()); MemRefType::Builder builder(*shape, elementType); @@ -372,7 +372,7 @@ return builder; } - MemRefType::Builder builder(cast()); + MemRefType::Builder builder(llvm::cast(*this)); if (shape) builder.setShape(*shape); builder.setElementType(elementType); @@ -389,15 +389,15 @@ } Attribute BaseMemRefType::getMemorySpace() const { - if (auto rankedMemRefTy = dyn_cast()) + if (auto rankedMemRefTy = llvm::dyn_cast(*this)) return rankedMemRefTy.getMemorySpace(); - return cast().getMemorySpace(); + return llvm::cast(*this).getMemorySpace(); } unsigned BaseMemRefType::getMemorySpaceAsInt() const { - if (auto rankedMemRefTy = dyn_cast()) + if (auto rankedMemRefTy = llvm::dyn_cast(*this)) return rankedMemRefTy.getMemorySpaceAsInt(); - return cast().getMemorySpaceAsInt(); + return llvm::cast(*this).getMemorySpaceAsInt(); } //===----------------------------------------------------------------------===// diff --git a/mlir/lib/IR/OperationSupport.cpp b/mlir/lib/IR/OperationSupport.cpp --- a/mlir/lib/IR/OperationSupport.cpp +++ b/mlir/lib/IR/OperationSupport.cpp @@ -626,17 +626,17 @@ /// See `llvm::detail::indexed_accessor_range_base` for details. 
ValueRange::OwnerT ValueRange::offset_base(const OwnerT &owner, ptrdiff_t index) { - if (const auto *value = owner.dyn_cast()) + if (const auto *value = llvm::dyn_cast_if_present(owner)) return {value + index}; - if (auto *operand = owner.dyn_cast()) + if (auto *operand = llvm::dyn_cast_if_present(owner)) return {operand + index}; return owner.get()->getNextResultAtOffset(index); } /// See `llvm::detail::indexed_accessor_range_base` for details. Value ValueRange::dereference_iterator(const OwnerT &owner, ptrdiff_t index) { - if (const auto *value = owner.dyn_cast()) + if (const auto *value = llvm::dyn_cast_if_present(owner)) return value[index]; - if (auto *operand = owner.dyn_cast()) + if (auto *operand = llvm::dyn_cast_if_present(owner)) return operand[index].get(); return owner.get()->getNextResultAtOffset(index); } diff --git a/mlir/lib/IR/Region.cpp b/mlir/lib/IR/Region.cpp --- a/mlir/lib/IR/Region.cpp +++ b/mlir/lib/IR/Region.cpp @@ -267,18 +267,18 @@ /// See `llvm::detail::indexed_accessor_range_base` for details. RegionRange::OwnerT RegionRange::offset_base(const OwnerT &owner, ptrdiff_t index) { - if (auto *region = owner.dyn_cast *>()) + if (auto *region = llvm::dyn_cast_if_present *>(owner)) return region + index; - if (auto **region = owner.dyn_cast()) + if (auto **region = llvm::dyn_cast_if_present(owner)) return region + index; return &owner.get()[index]; } /// See `llvm::detail::indexed_accessor_range_base` for details. Region *RegionRange::dereference_iterator(const OwnerT &owner, ptrdiff_t index) { - if (auto *region = owner.dyn_cast *>()) + if (auto *region = llvm::dyn_cast_if_present *>(owner)) return region[index].get(); - if (auto **region = owner.dyn_cast()) + if (auto **region = llvm::dyn_cast_if_present(owner)) return region[index]; return &owner.get()[index]; } diff --git a/mlir/lib/IR/SymbolTable.cpp b/mlir/lib/IR/SymbolTable.cpp --- a/mlir/lib/IR/SymbolTable.cpp +++ b/mlir/lib/IR/SymbolTable.cpp @@ -551,7 +551,7 @@ typename llvm::function_traits::result_t, void>::value> * = nullptr> std::optional walk(CallbackT cback) { - if (Region *region = limit.dyn_cast()) + if (Region *region = llvm::dyn_cast_if_present(limit)) return walkSymbolUses(*region, cback); return walkSymbolUses(limit.get(), cback); } @@ -571,7 +571,7 @@ /// traversing into any nested symbol tables. template std::optional walkSymbolTable(CallbackT &&cback) { - if (Region *region = limit.dyn_cast()) + if (Region *region = llvm::dyn_cast_if_present(limit)) return ::walkSymbolTable(*region, cback); return ::walkSymbolTable(limit.get(), cback); } diff --git a/mlir/lib/IR/TypeRange.cpp b/mlir/lib/IR/TypeRange.cpp --- a/mlir/lib/IR/TypeRange.cpp +++ b/mlir/lib/IR/TypeRange.cpp @@ -27,9 +27,9 @@ if (count == 0) return; ValueRange::OwnerT owner = values.begin().getBase(); - if (auto *result = owner.dyn_cast()) + if (auto *result = llvm::dyn_cast_if_present(owner)) this->base = result; - else if (auto *operand = owner.dyn_cast()) + else if (auto *operand = llvm::dyn_cast_if_present(owner)) this->base = operand; else this->base = owner.get(); @@ -37,22 +37,22 @@ /// See `llvm::detail::indexed_accessor_range_base` for details. 
TypeRange::OwnerT TypeRange::offset_base(OwnerT object, ptrdiff_t index) { - if (const auto *value = object.dyn_cast()) + if (const auto *value = llvm::dyn_cast_if_present(object)) return {value + index}; - if (auto *operand = object.dyn_cast()) + if (auto *operand = llvm::dyn_cast_if_present(object)) return {operand + index}; - if (auto *result = object.dyn_cast()) + if (auto *result = llvm::dyn_cast_if_present(object)) return {result->getNextResultAtOffset(index)}; - return {object.dyn_cast() + index}; + return {llvm::dyn_cast_if_present(object) + index}; } /// See `llvm::detail::indexed_accessor_range_base` for details. Type TypeRange::dereference_iterator(OwnerT object, ptrdiff_t index) { - if (const auto *value = object.dyn_cast()) + if (const auto *value = llvm::dyn_cast_if_present(object)) return (value + index)->getType(); - if (auto *operand = object.dyn_cast()) + if (auto *operand = llvm::dyn_cast_if_present(object)) return (operand + index)->get().getType(); - if (auto *result = object.dyn_cast()) + if (auto *result = llvm::dyn_cast_if_present(object)) return result->getNextResultAtOffset(index)->getType(); - return object.dyn_cast()[index]; + return llvm::dyn_cast_if_present(object)[index]; } diff --git a/mlir/lib/IR/Types.cpp b/mlir/lib/IR/Types.cpp --- a/mlir/lib/IR/Types.cpp +++ b/mlir/lib/IR/Types.cpp @@ -34,84 +34,94 @@ MLIRContext *Type::getContext() const { return getDialect().getContext(); } -bool Type::isFloat8E5M2() const { return isa(); } -bool Type::isFloat8E4M3FN() const { return isa(); } -bool Type::isFloat8E5M2FNUZ() const { return isa(); } -bool Type::isFloat8E4M3FNUZ() const { return isa(); } -bool Type::isFloat8E4M3B11FNUZ() const { return isa(); } -bool Type::isBF16() const { return isa(); } -bool Type::isF16() const { return isa(); } -bool Type::isF32() const { return isa(); } -bool Type::isF64() const { return isa(); } -bool Type::isF80() const { return isa(); } -bool Type::isF128() const { return isa(); } - -bool Type::isIndex() const { return isa(); } +bool Type::isFloat8E5M2() const { return llvm::isa(*this); } +bool Type::isFloat8E4M3FN() const { return llvm::isa(*this); } +bool Type::isFloat8E5M2FNUZ() const { + return llvm::isa(*this); +} +bool Type::isFloat8E4M3FNUZ() const { + return llvm::isa(*this); +} +bool Type::isFloat8E4M3B11FNUZ() const { + return llvm::isa(*this); +} +bool Type::isBF16() const { return llvm::isa(*this); } +bool Type::isF16() const { return llvm::isa(*this); } +bool Type::isF32() const { return llvm::isa(*this); } +bool Type::isF64() const { return llvm::isa(*this); } +bool Type::isF80() const { return llvm::isa(*this); } +bool Type::isF128() const { return llvm::isa(*this); } + +bool Type::isIndex() const { return llvm::isa(*this); } /// Return true if this is an integer type with the specified width. 
bool Type::isInteger(unsigned width) const { - if (auto intTy = dyn_cast()) + if (auto intTy = llvm::dyn_cast(*this)) return intTy.getWidth() == width; return false; } bool Type::isSignlessInteger() const { - if (auto intTy = dyn_cast()) + if (auto intTy = llvm::dyn_cast(*this)) return intTy.isSignless(); return false; } bool Type::isSignlessInteger(unsigned width) const { - if (auto intTy = dyn_cast()) + if (auto intTy = llvm::dyn_cast(*this)) return intTy.isSignless() && intTy.getWidth() == width; return false; } bool Type::isSignedInteger() const { - if (auto intTy = dyn_cast()) + if (auto intTy = llvm::dyn_cast(*this)) return intTy.isSigned(); return false; } bool Type::isSignedInteger(unsigned width) const { - if (auto intTy = dyn_cast()) + if (auto intTy = llvm::dyn_cast(*this)) return intTy.isSigned() && intTy.getWidth() == width; return false; } bool Type::isUnsignedInteger() const { - if (auto intTy = dyn_cast()) + if (auto intTy = llvm::dyn_cast(*this)) return intTy.isUnsigned(); return false; } bool Type::isUnsignedInteger(unsigned width) const { - if (auto intTy = dyn_cast()) + if (auto intTy = llvm::dyn_cast(*this)) return intTy.isUnsigned() && intTy.getWidth() == width; return false; } bool Type::isSignlessIntOrIndex() const { - return isSignlessInteger() || isa(); + return isSignlessInteger() || llvm::isa(*this); } bool Type::isSignlessIntOrIndexOrFloat() const { - return isSignlessInteger() || isa(); + return isSignlessInteger() || llvm::isa(*this); } bool Type::isSignlessIntOrFloat() const { - return isSignlessInteger() || isa(); + return isSignlessInteger() || llvm::isa(*this); } -bool Type::isIntOrIndex() const { return isa() || isIndex(); } +bool Type::isIntOrIndex() const { + return llvm::isa(*this) || isIndex(); +} -bool Type::isIntOrFloat() const { return isa(); } +bool Type::isIntOrFloat() const { + return llvm::isa(*this); +} bool Type::isIntOrIndexOrFloat() const { return isIntOrFloat() || isIndex(); } unsigned Type::getIntOrFloatBitWidth() const { assert(isIntOrFloat() && "only integers and floats have a bitwidth"); - if (auto intType = dyn_cast()) + if (auto intType = llvm::dyn_cast(*this)) return intType.getWidth(); - return cast().getWidth(); + return llvm::cast(*this).getWidth(); } diff --git a/mlir/lib/IR/Unit.cpp b/mlir/lib/IR/Unit.cpp --- a/mlir/lib/IR/Unit.cpp +++ b/mlir/lib/IR/Unit.cpp @@ -48,11 +48,11 @@ } void mlir::IRUnit::print(llvm::raw_ostream &os, OpPrintingFlags flags) const { - if (auto *op = this->dyn_cast()) + if (auto *op = llvm::dyn_cast_if_present(*this)) return printOp(os, op, flags); - if (auto *region = this->dyn_cast()) + if (auto *region = llvm::dyn_cast_if_present(*this)) return printRegion(os, region, flags); - if (auto *block = this->dyn_cast()) + if (auto *block = llvm::dyn_cast_if_present(*this)) return printBlock(os, block, flags); llvm_unreachable("unknown IRUnit"); } diff --git a/mlir/lib/IR/Value.cpp b/mlir/lib/IR/Value.cpp --- a/mlir/lib/IR/Value.cpp +++ b/mlir/lib/IR/Value.cpp @@ -18,7 +18,7 @@ /// If this value is the result of an Operation, return the operation that /// defines it. 
Operation *Value::getDefiningOp() const { - if (auto result = dyn_cast()) + if (auto result = llvm::dyn_cast(*this)) return result.getOwner(); return nullptr; } @@ -27,28 +27,28 @@ if (auto *op = getDefiningOp()) return op->getLoc(); - return cast().getLoc(); + return llvm::cast(*this).getLoc(); } void Value::setLoc(Location loc) { if (auto *op = getDefiningOp()) return op->setLoc(loc); - return cast().setLoc(loc); + return llvm::cast(*this).setLoc(loc); } /// Return the Region in which this Value is defined. Region *Value::getParentRegion() { if (auto *op = getDefiningOp()) return op->getParentRegion(); - return cast().getOwner()->getParent(); + return llvm::cast(*this).getOwner()->getParent(); } /// Return the Block in which this Value is defined. Block *Value::getParentBlock() { if (Operation *op = getDefiningOp()) return op->getBlock(); - return cast().getOwner(); + return llvm::cast(*this).getOwner(); } //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Interfaces/DataLayoutInterfaces.cpp b/mlir/lib/Interfaces/DataLayoutInterfaces.cpp --- a/mlir/lib/Interfaces/DataLayoutInterfaces.cpp +++ b/mlir/lib/Interfaces/DataLayoutInterfaces.cpp @@ -241,7 +241,7 @@ TypeID typeID) { return llvm::to_vector<4>(llvm::make_filter_range( entries, [typeID](DataLayoutEntryInterface entry) { - auto type = entry.getKey().dyn_cast(); + auto type = llvm::dyn_cast_if_present(entry.getKey()); return type && type.getTypeID() == typeID; })); } @@ -521,7 +521,7 @@ DenseMap &types, DenseMap &ids) { for (DataLayoutEntryInterface entry : getEntries()) { - if (auto type = entry.getKey().dyn_cast()) + if (auto type = llvm::dyn_cast_if_present(entry.getKey())) types[type.getTypeID()].push_back(entry); else ids[entry.getKey().get()] = entry; diff --git a/mlir/lib/Interfaces/InferTypeOpInterface.cpp b/mlir/lib/Interfaces/InferTypeOpInterface.cpp --- a/mlir/lib/Interfaces/InferTypeOpInterface.cpp +++ b/mlir/lib/Interfaces/InferTypeOpInterface.cpp @@ -68,7 +68,7 @@ bool ShapeAdaptor::hasRank() const { if (val.isNull()) return false; - if (auto t = val.dyn_cast()) + if (auto t = llvm::dyn_cast_if_present(val)) return cast(t).hasRank(); if (val.is()) return true; @@ -78,7 +78,7 @@ Type ShapeAdaptor::getElementType() const { if (val.isNull()) return nullptr; - if (auto t = val.dyn_cast()) + if (auto t = llvm::dyn_cast_if_present(val)) return cast(t).getElementType(); if (val.is()) return nullptr; @@ -87,10 +87,10 @@ void ShapeAdaptor::getDims(SmallVectorImpl &res) const { assert(hasRank()); - if (auto t = val.dyn_cast()) { + if (auto t = llvm::dyn_cast_if_present(val)) { ArrayRef vals = cast(t).getShape(); res.assign(vals.begin(), vals.end()); - } else if (auto attr = val.dyn_cast()) { + } else if (auto attr = llvm::dyn_cast_if_present(val)) { auto dattr = cast(attr); res.clear(); res.reserve(dattr.size()); @@ -110,9 +110,9 @@ int64_t ShapeAdaptor::getDimSize(int index) const { assert(hasRank()); - if (auto t = val.dyn_cast()) + if (auto t = llvm::dyn_cast_if_present(val)) return cast(t).getDimSize(index); - if (auto attr = val.dyn_cast()) + if (auto attr = llvm::dyn_cast_if_present(val)) return cast(attr) .getValues()[index] .getSExtValue(); @@ -122,9 +122,9 @@ int64_t ShapeAdaptor::getRank() const { assert(hasRank()); - if (auto t = val.dyn_cast()) + if (auto t = llvm::dyn_cast_if_present(val)) return cast(t).getRank(); - if (auto attr = val.dyn_cast()) + if (auto attr = llvm::dyn_cast_if_present(val)) return cast(attr).size(); return val.get()->getDims().size(); } @@ 
@@ -133,9 +133,9 @@
   if (!hasRank())
     return false;
-  if (auto t = val.dyn_cast<Type>())
+  if (auto t = llvm::dyn_cast_if_present<Type>(val))
     return cast<ShapedType>(t).hasStaticShape();
-  if (auto attr = val.dyn_cast<Attribute>()) {
+  if (auto attr = llvm::dyn_cast_if_present<Attribute>(val)) {
     auto dattr = cast<DenseIntElementsAttr>(attr);
     for (auto index : dattr.getValues<APInt>())
       if (ShapedType::isDynamic(index.getSExtValue()))
@@ -149,10 +149,10 @@
 int64_t ShapeAdaptor::getNumElements() const {
   assert(hasStaticShape() && "cannot get element count of dynamic shaped type");
-  if (auto t = val.dyn_cast<Type>())
+  if (auto t = llvm::dyn_cast_if_present<Type>(val))
     return cast<ShapedType>(t).getNumElements();
-  if (auto attr = val.dyn_cast<Attribute>()) {
+  if (auto attr = llvm::dyn_cast_if_present<Attribute>(val)) {
     auto dattr = cast<DenseIntElementsAttr>(attr);
     int64_t num = 1;
     for (auto index : dattr.getValues<APInt>()) {
diff --git a/mlir/lib/Interfaces/ValueBoundsOpInterface.cpp b/mlir/lib/Interfaces/ValueBoundsOpInterface.cpp
--- a/mlir/lib/Interfaces/ValueBoundsOpInterface.cpp
+++ b/mlir/lib/Interfaces/ValueBoundsOpInterface.cpp
@@ -26,14 +26,14 @@
 /// If ofr is a constant integer or an IntegerAttr, return the integer.
 static std::optional<int64_t> getConstantIntValue(OpFoldResult ofr) {
   // Case 1: Check for Constant integer.
-  if (auto val = ofr.dyn_cast<Value>()) {
+  if (auto val = llvm::dyn_cast_if_present<Value>(ofr)) {
     APSInt intVal;
     if (matchPattern(val, m_ConstantInt(&intVal)))
       return intVal.getSExtValue();
     return std::nullopt;
   }
   // Case 2: Check for IntegerAttr.
-  Attribute attr = ofr.dyn_cast<Attribute>();
+  Attribute attr = llvm::dyn_cast_if_present<Attribute>(ofr);
   if (auto intAttr = dyn_cast_or_null<IntegerAttr>(attr))
     return intAttr.getValue().getSExtValue();
   return std::nullopt;
@@ -99,7 +99,7 @@
 }
 
 AffineExpr ValueBoundsConstraintSet::getExpr(OpFoldResult ofr) {
-  if (Value value = ofr.dyn_cast<Value>())
+  if (Value value = llvm::dyn_cast_if_present<Value>(ofr))
     return getExpr(value, /*dim=*/std::nullopt);
   auto constInt = getConstantIntValue(ofr);
   assert(constInt.has_value() && "expected Integer constant");
diff --git a/mlir/lib/Pass/PassDetail.h b/mlir/lib/Pass/PassDetail.h
--- a/mlir/lib/Pass/PassDetail.h
+++ b/mlir/lib/Pass/PassDetail.h
@@ -26,7 +26,8 @@
   const Pass &getPass() const { return pass; }
   Operation *getOp() const {
     ArrayRef<IRUnit> irUnits = getContextIRUnits();
-    return irUnits.empty() ? nullptr : irUnits[0].dyn_cast<Operation *>();
+    return irUnits.empty() ? nullptr
+                           : llvm::dyn_cast_if_present<Operation *>(irUnits[0]);
   }
 
 public:
diff --git a/mlir/lib/TableGen/Operator.cpp b/mlir/lib/TableGen/Operator.cpp
--- a/mlir/lib/TableGen/Operator.cpp
+++ b/mlir/lib/TableGen/Operator.cpp
@@ -384,7 +384,7 @@
   if (getTrait("::mlir::OpTrait::SameOperandsAndResultType")) {
     // Check for a non-variable length operand to use as the type anchor.
     auto *operandI = llvm::find_if(arguments, [](const Argument &arg) {
-      NamedTypeConstraint *operand = arg.dyn_cast<NamedTypeConstraint *>();
+      NamedTypeConstraint *operand = llvm::dyn_cast_if_present<NamedTypeConstraint *>(arg);
       return operand && !operand->isVariableLength();
     });
     if (operandI == arguments.end())
@@ -824,7 +824,7 @@
 void Operator::print(llvm::raw_ostream &os) const {
   os << "op '" << getOperationName() << "'\n";
   for (Argument arg : arguments) {
-    if (auto *attr = arg.dyn_cast<NamedAttribute *>())
+    if (auto *attr = llvm::dyn_cast_if_present<NamedAttribute *>(arg))
       os << "[attribute] " << attr->name << '\n';
     else
       os << "[operand] " << arg.get<NamedTypeConstraint *>()->name << '\n';
diff --git a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
--- a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
@@ -131,7 +131,7 @@
     return nullptr;
   SmallVector<uint32_t> weightValues;
   weightValues.reserve(weights->size());
-  for (APInt weight : weights->cast<DenseIntElementsAttr>())
+  for (APInt weight : llvm::cast<DenseIntElementsAttr>(*weights))
     weightValues.push_back(weight.getLimitedValue());
   return llvm::MDBuilder(moduleTranslation.getLLVMContext())
       .createBranchWeights(weightValues);
@@ -330,7 +330,7 @@
   auto *ty = llvm::cast<llvm::IntegerType>(
       moduleTranslation.convertType(switchOp.getValue().getType()));
   for (auto i :
-       llvm::zip(switchOp.getCaseValues()->cast<DenseIntElementsAttr>(),
+       llvm::zip(llvm::cast<DenseIntElementsAttr>(*switchOp.getCaseValues()),
                  switchOp.getCaseDestinations()))
     switchInst->addCase(
         llvm::ConstantInt::get(ty, std::get<0>(i).getLimitedValue()),
diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
--- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
@@ -730,8 +730,8 @@
   // Returns the static shape of the provided type if possible.
   auto getConstantShape = [&](llvm::Type *type) {
-    return getBuiltinTypeForAttr(convertType(type))
-        .dyn_cast_or_null<ShapedType>();
+    return llvm::dyn_cast_if_present<ShapedType>(
+        getBuiltinTypeForAttr(convertType(type)));
   };
 
   // Convert one-dimensional constant arrays or vectors that store 1/2/4/8-byte
@@ -798,8 +798,8 @@
   // Convert zero aggregates.
   if (auto *constZero = dyn_cast<llvm::ConstantAggregateZero>(constant)) {
-    auto shape = getBuiltinTypeForAttr(convertType(constZero->getType()))
-                     .dyn_cast_or_null<ShapedType>();
+    auto shape = llvm::dyn_cast_if_present<ShapedType>(
+        getBuiltinTypeForAttr(convertType(constZero->getType())));
     if (!shape)
       return {};
     // Convert zero aggregates with a static shape to splat elements attributes.
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -69,7 +69,7 @@
   std::string llvmDataLayout;
   llvm::raw_string_ostream layoutStream(llvmDataLayout);
   for (DataLayoutEntryInterface entry : attribute.getEntries()) {
-    auto key = entry.getKey().dyn_cast<StringAttr>();
+    auto key = llvm::dyn_cast_if_present<StringAttr>(entry.getKey());
     if (!key)
       continue;
     if (key.getValue() == DLTIDialect::kDataLayoutEndiannessKey) {
@@ -108,7 +108,7 @@
   // specified in entries. Where possible, data layout queries are used instead
   // of directly inspecting the entries.
   for (DataLayoutEntryInterface entry : attribute.getEntries()) {
-    auto type = entry.getKey().dyn_cast<Type>();
+    auto type = llvm::dyn_cast_if_present<Type>(entry.getKey());
     if (!type)
       continue;
     // Data layout for the index type is irrelevant at this point.
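(Illustrative sketch, not part of the patch.) Where the cast source may be null or empty — a possibly-absent Attribute key, an OpFoldResult, a PointerUnion — the hunks above use llvm::dyn_cast_if_present rather than llvm::dyn_cast: the plain free function asserts on a null input, while the _if_present variant simply returns null. A hypothetical helper showing the distinction, assuming only mlir/IR/BuiltinAttributes.h:

  #include "mlir/IR/BuiltinAttributes.h"

  // Returns the attribute as a StringAttr, or null. Safe even when `key` is
  // a default-constructed (null) Attribute, which llvm::dyn_cast would
  // assert on.
  mlir::StringAttr getKeyIfString(mlir::Attribute key) {
    return llvm::dyn_cast_if_present<mlir::StringAttr>(key);
  }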
diff --git a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
--- a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
+++ b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
@@ -285,7 +285,7 @@
         static_cast<::mlir::spirv::LinkageType>(words[wordIndex++]));
     auto linkageAttr = opBuilder.getAttr<::mlir::spirv::LinkageAttributesAttr>(
         linkageName, linkageTypeAttr);
-    decorations[words[0]].set(symbol, linkageAttr.dyn_cast<Attribute>());
+    decorations[words[0]].set(symbol, llvm::dyn_cast<Attribute>(linkageAttr));
     break;
   }
   case spirv::Decoration::Aliased:
diff --git a/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp b/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
--- a/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
+++ b/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
@@ -639,7 +639,7 @@
   if (values) {
     for (auto &intVal : values.getValue()) {
       operands.push_back(static_cast<uint32_t>(
-          intVal.cast<IntegerAttr>().getValue().getZExtValue()));
+          llvm::cast<IntegerAttr>(intVal).getValue().getZExtValue()));
     }
   }
   encodeInstructionInto(executionModes, spirv::Opcode::OpExecutionMode,
diff --git a/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp b/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
--- a/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
+++ b/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
@@ -222,7 +222,7 @@
   case spirv::Decoration::LinkageAttributes: {
     // Get the value of the Linkage Attributes
     // e.g., LinkageAttributes=["linkageName", linkageType].
-    auto linkageAttr = attr.getValue().dyn_cast<spirv::LinkageAttributesAttr>();
+    auto linkageAttr = llvm::dyn_cast<spirv::LinkageAttributesAttr>(attr.getValue());
     auto linkageName = linkageAttr.getLinkageName();
     auto linkageType = linkageAttr.getLinkageType().getValue();
     // Encode the Linkage Name (string literal to uint32_t).
diff --git a/mlir/lib/Tools/mlir-pdll-lsp-server/PDLLServer.cpp b/mlir/lib/Tools/mlir-pdll-lsp-server/PDLLServer.cpp
--- a/mlir/lib/Tools/mlir-pdll-lsp-server/PDLLServer.cpp
+++ b/mlir/lib/Tools/mlir-pdll-lsp-server/PDLLServer.cpp
@@ -136,7 +136,7 @@
   /// Return the location of the definition of this symbol.
   SMRange getDefLoc() const {
-    if (const ast::Decl *decl = definition.dyn_cast<const ast::Decl *>()) {
+    if (const ast::Decl *decl = llvm::dyn_cast_if_present<const ast::Decl *>(definition)) {
       const ast::Name *declName = decl->getName();
       return declName ? declName->getLoc() : decl->getLoc();
     }
@@ -465,7 +465,7 @@
     return std::nullopt;
 
   // Add hover for operation names.
-  if (const auto *op = symbol->definition.dyn_cast<const ods::Operation *>())
+  if (const auto *op = llvm::dyn_cast_if_present<const ods::Operation *>(symbol->definition))
     return buildHoverForOpName(op, hoverRange);
   const auto *decl = symbol->definition.get<const ast::Decl *>();
   return findHover(decl, hoverRange);
diff --git a/mlir/lib/Transforms/Inliner.cpp b/mlir/lib/Transforms/Inliner.cpp
--- a/mlir/lib/Transforms/Inliner.cpp
+++ b/mlir/lib/Transforms/Inliner.cpp
@@ -373,7 +373,7 @@
 
 #ifndef NDEBUG
 static std::string getNodeName(CallOpInterface op) {
-  if (auto sym = op.getCallableForCallee().dyn_cast<SymbolRefAttr>())
+  if (auto sym = llvm::dyn_cast_if_present<SymbolRefAttr>(op.getCallableForCallee()))
     return debugString(op);
   return "_unnamed_callee_";
 }
diff --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp
--- a/mlir/lib/Transforms/Utils/FoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp
@@ -272,7 +272,7 @@
     assert(!foldResults[i].isNull() && "expected valid OpFoldResult");
 
     // Check if the result was an SSA value.
-    if (auto repl = foldResults[i].dyn_cast<Value>()) {
+    if (auto repl = llvm::dyn_cast_if_present<Value>(foldResults[i])) {
       if (repl.getType() != op->getResult(i).getType()) {
         results.clear();
         return failure();
diff --git a/mlir/lib/Transforms/Utils/InliningUtils.cpp b/mlir/lib/Transforms/Utils/InliningUtils.cpp
--- a/mlir/lib/Transforms/Utils/InliningUtils.cpp
+++ b/mlir/lib/Transforms/Utils/InliningUtils.cpp
@@ -266,7 +266,7 @@
   // Remap the locations of the inlined operations if a valid source location
   // was provided.
-  if (inlineLoc && !inlineLoc->isa<UnknownLoc>())
+  if (inlineLoc && !llvm::isa<UnknownLoc>(*inlineLoc))
     remapInlinedLocations(newBlocks, *inlineLoc);
 
   // If the blocks were moved in-place, make sure to remap any necessary
diff --git a/mlir/test/lib/Analysis/TestDataFlowFramework.cpp b/mlir/test/lib/Analysis/TestDataFlowFramework.cpp
--- a/mlir/test/lib/Analysis/TestDataFlowFramework.cpp
+++ b/mlir/test/lib/Analysis/TestDataFlowFramework.cpp
@@ -115,11 +115,11 @@
 }
 
 LogicalResult FooAnalysis::visit(ProgramPoint point) {
-  if (auto *op = point.dyn_cast<Operation *>()) {
+  if (auto *op = llvm::dyn_cast_if_present<Operation *>(point)) {
     visitOperation(op);
     return success();
   }
-  if (auto *block = point.dyn_cast<Block *>()) {
+  if (auto *block = llvm::dyn_cast_if_present<Block *>(point)) {
     visitBlock(block);
     return success();
   }
diff --git a/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp b/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
--- a/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
@@ -161,7 +161,7 @@
         }
 
         // Replace the op with the reified bound.
-        if (auto val = reified->dyn_cast<Value>()) {
+        if (auto val = llvm::dyn_cast_if_present<Value>(*reified)) {
           rewriter.replaceOp(op, val);
           return WalkResult::skip();
         }
diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
--- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
@@ -1134,7 +1134,7 @@
 )decl";
   for (const auto &attrOrProp : attrOrProperties) {
     if (const auto *namedProperty =
-            attrOrProp.dyn_cast<const NamedProperty *>()) {
+            llvm::dyn_cast_if_present<const NamedProperty *>(attrOrProp)) {
       StringRef name = namedProperty->name;
       auto &prop = namedProperty->prop;
       FmtContext fctx;
@@ -1145,7 +1145,7 @@
                       .addSubst("_diag", propertyDiag)),
           name);
     } else {
-      const auto *namedAttr = attrOrProp.dyn_cast<const AttributeMetadata *>();
+      const auto *namedAttr = llvm::dyn_cast_if_present<const AttributeMetadata *>(attrOrProp);
       StringRef name = namedAttr->attrName;
       setPropMethod << formatv(R"decl(
    {{
@@ -1187,7 +1187,7 @@
 )decl";
   for (const auto &attrOrProp : attrOrProperties) {
     if (const auto *namedProperty =
-            attrOrProp.dyn_cast<const NamedProperty *>()) {
+            llvm::dyn_cast_if_present<const NamedProperty *>(attrOrProp)) {
       StringRef name = namedProperty->name;
       auto &prop = namedProperty->prop;
       FmtContext fctx;
@@ -1198,7 +1198,7 @@
                      .addSubst("_storage", propertyStorage)));
       continue;
     }
-    const auto *namedAttr = attrOrProp.dyn_cast<const AttributeMetadata *>();
+    const auto *namedAttr = llvm::dyn_cast_if_present<const AttributeMetadata *>(attrOrProp);
     StringRef name = namedAttr->attrName;
     getPropMethod << formatv(R"decl(
    {{
@@ -1225,7 +1225,7 @@
 )decl";
   for (const auto &attrOrProp : attrOrProperties) {
     if (const auto *namedProperty =
-            attrOrProp.dyn_cast<const NamedProperty *>()) {
+            llvm::dyn_cast_if_present<const NamedProperty *>(attrOrProp)) {
       StringRef name = namedProperty->name;
       auto &prop = namedProperty->prop;
       FmtContext fctx;
@@ -1238,13 +1238,13 @@
   llvm::interleaveComma(
       attrOrProperties, hashMethod, [&](const ConstArgument &attrOrProp) {
         if (const auto *namedProperty =
-                attrOrProp.dyn_cast<const NamedProperty *>()) {
+                llvm::dyn_cast_if_present<const NamedProperty *>(attrOrProp)) {
           hashMethod << "\n hash_" << namedProperty->name
"(prop." << namedProperty->name << ")"; return; } const auto *namedAttr = - attrOrProp.dyn_cast(); + llvm::dyn_cast_if_present(attrOrProp); StringRef name = namedAttr->attrName; hashMethod << "\n llvm::hash_value(prop." << name << ".getAsOpaquePointer())"; @@ -1266,7 +1266,7 @@ )decl"; for (const auto &attrOrProp : attrOrProperties) { if (const auto *namedAttr = - attrOrProp.dyn_cast()) { + llvm::dyn_cast_if_present(attrOrProp)) { StringRef name = namedAttr->attrName; getInherentAttrMethod << formatv(getInherentAttrMethodFmt, name); setInherentAttrMethod << formatv(setInherentAttrMethodFmt, name); @@ -1281,7 +1281,7 @@ // syntax. This method verifies the constraint on the properties attributes // before they are set, since dyn_cast<> will silently omit failures. for (const auto &attrOrProp : attrOrProperties) { - const auto *namedAttr = attrOrProp.dyn_cast(); + const auto *namedAttr = llvm::dyn_cast_if_present(attrOrProp); if (!namedAttr || !namedAttr->constraint) continue; Attribute attr = *namedAttr->constraint; @@ -2472,7 +2472,7 @@ // Calculate the start index from which we can attach default values in the // builder declaration. for (int i = op.getNumArgs() - 1; i >= 0; --i) { - auto *namedAttr = op.getArg(i).dyn_cast(); + auto *namedAttr = llvm::dyn_cast_if_present(op.getArg(i)); if (!namedAttr || !namedAttr->attr.hasDefaultValue()) break; @@ -2502,7 +2502,7 @@ for (int i = 0, e = op.getNumArgs(), numOperands = 0; i < e; ++i) { Argument arg = op.getArg(i); - if (const auto *operand = arg.dyn_cast()) { + if (const auto *operand = llvm::dyn_cast_if_present(arg)) { StringRef type; if (operand->isVariadicOfVariadic()) type = "::llvm::ArrayRef<::mlir::ValueRange>"; @@ -2515,7 +2515,7 @@ operand->isOptional()); continue; } - if (const auto *operand = arg.dyn_cast()) { + if (const auto *operand = llvm::dyn_cast_if_present(arg)) { // TODO continue; } @@ -3442,7 +3442,7 @@ llvm::raw_string_ostream comparatorOs(comparator); for (const auto &attrOrProp : attrOrProperties) { if (const auto *namedProperty = - attrOrProp.dyn_cast()) { + llvm::dyn_cast_if_present(attrOrProp)) { StringRef name = namedProperty->name; if (name.empty()) report_fatal_error("missing name for property"); @@ -3476,7 +3476,7 @@ .addSubst("_storage", propertyStorage))); continue; } - const auto *namedAttr = attrOrProp.dyn_cast(); + const auto *namedAttr = llvm::dyn_cast_if_present(attrOrProp); const Attribute *attr = nullptr; if (namedAttr->constraint) attr = &*namedAttr->constraint; diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp --- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp +++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp @@ -265,11 +265,11 @@ /// Get the variable this type is resolved to, or nullptr. const NamedTypeConstraint *getVariable() const { - return resolver.dyn_cast(); + return llvm::dyn_cast_if_present(resolver); } /// Get the attribute this type is resolved to, or nullptr. const NamedAttribute *getAttribute() const { - return resolver.dyn_cast(); + return llvm::dyn_cast_if_present(resolver); } /// Get the transformer for the type of the variable, or std::nullopt. 
diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
--- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
@@ -265,11 +265,11 @@
   /// Get the variable this type is resolved to, or nullptr.
   const NamedTypeConstraint *getVariable() const {
-    return resolver.dyn_cast<const NamedTypeConstraint *>();
+    return llvm::dyn_cast_if_present<const NamedTypeConstraint *>(resolver);
   }
   /// Get the attribute this type is resolved to, or nullptr.
   const NamedAttribute *getAttribute() const {
-    return resolver.dyn_cast<const NamedAttribute *>();
+    return llvm::dyn_cast_if_present<const NamedAttribute *>(resolver);
   }
   /// Get the transformer for the type of the variable, or std::nullopt.
   std::optional<StringRef> getVarTransformer() const {
diff --git a/mlir/tools/mlir-tblgen/OpPythonBindingGen.cpp b/mlir/tools/mlir-tblgen/OpPythonBindingGen.cpp
--- a/mlir/tools/mlir-tblgen/OpPythonBindingGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpPythonBindingGen.cpp
@@ -674,7 +674,7 @@
   builderLines.push_back("_ods_context = _ods_get_default_loc_context(loc)");
   for (int i = 0, e = op.getNumArgs(); i < e; ++i) {
     Argument arg = op.getArg(i);
-    auto *attribute = arg.dyn_cast<NamedAttribute *>();
+    auto *attribute = llvm::dyn_cast_if_present<NamedAttribute *>(arg);
     if (!attribute)
       continue;
@@ -914,9 +914,9 @@
     // - default-valued named attributes
     // - optional operands
     Argument a = op.getArg(builderArgIndex - numResultArgs);
-    if (auto *nattr = a.dyn_cast<NamedAttribute *>())
+    if (auto *nattr = llvm::dyn_cast_if_present<NamedAttribute *>(a))
       return (nattr->attr.isOptional() || nattr->attr.hasDefaultValue());
-    if (auto *ntype = a.dyn_cast<NamedTypeConstraint *>())
+    if (auto *ntype = llvm::dyn_cast_if_present<NamedTypeConstraint *>(a))
       return ntype->isOptional();
     return false;
   };
diff --git a/mlir/tools/mlir-tblgen/RewriterGen.cpp b/mlir/tools/mlir-tblgen/RewriterGen.cpp
--- a/mlir/tools/mlir-tblgen/RewriterGen.cpp
+++ b/mlir/tools/mlir-tblgen/RewriterGen.cpp
@@ -595,7 +595,7 @@
       ++opArgIdx;
       continue;
     }
-    if (auto *operand = opArg.dyn_cast<NamedTypeConstraint *>()) {
+    if (auto *operand = llvm::dyn_cast_if_present<NamedTypeConstraint *>(opArg)) {
      if (operand->isVariableLength()) {
        auto error = formatv("use nested DAG construct to match op {0}'s "
                             "variadic operand #{1} unsupported now",
@@ -1524,7 +1524,7 @@
   int valueIndex = 0; // An index for uniquing local variable names.
   for (int argIndex = 0, e = resultOp.getNumArgs(); argIndex < e; ++argIndex) {
     const auto *operand =
-        resultOp.getArg(argIndex).dyn_cast<NamedTypeConstraint *>();
+        llvm::dyn_cast_if_present<NamedTypeConstraint *>(resultOp.getArg(argIndex));
     // We do not need special handling for attributes.
     if (!operand)
       continue;
@@ -1579,7 +1579,7 @@
     Argument opArg = resultOp.getArg(argIndex);
 
     // Handle the case of operand first.
-    if (auto *operand = opArg.dyn_cast<NamedTypeConstraint *>()) {
+    if (auto *operand = llvm::dyn_cast_if_present<NamedTypeConstraint *>(opArg)) {
       if (!operand->name.empty())
         os << "/*" << operand->name << "=*/";
       os << childNodeNames.lookup(argIndex);
diff --git a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
--- a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
+++ b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
@@ -926,7 +926,7 @@
   // Process operands/attributes
   for (unsigned i = 0, e = op.getNumArgs(); i < e; ++i) {
     auto argument = op.getArg(i);
-    if (auto *valueArg = argument.dyn_cast<NamedTypeConstraint *>()) {
+    if (auto *valueArg = llvm::dyn_cast_if_present<NamedTypeConstraint *>(argument)) {
       if (valueArg->isVariableLength()) {
         if (i != e - 1) {
           PrintFatalError(loc, "SPIR-V ops can have Variadic<..> or "
diff --git a/mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp b/mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp
--- a/mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp
+++ b/mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp
@@ -159,7 +159,7 @@
     // Handle built-in types that are not handled by the default process.
     if (auto iType = dyn_cast<IntegerType>(type)) {
      for (DataLayoutEntryInterface entry : params)
-        if (entry.getKey().dyn_cast<Type>() == type)
+        if (llvm::dyn_cast_if_present<Type>(entry.getKey()) == type)
          return 8 *
                 cast<IntegerAttr>(entry.getValue()).getValue().getZExtValue();
      return 8 * iType.getIntOrFloatBitWidth();