diff --git a/mlir/include/mlir-c/BuiltinTypes.h b/mlir/include/mlir-c/BuiltinTypes.h
--- a/mlir/include/mlir-c/BuiltinTypes.h
+++ b/mlir/include/mlir-c/BuiltinTypes.h
@@ -302,6 +302,9 @@
 /// attribute if none.
 MLIR_CAPI_EXPORTED MlirAttribute
 mlirRankedTensorTypeGetEncoding(MlirType type);
+/// Gets the element type from the ranked tensor type.
+MLIR_CAPI_EXPORTED MlirType mlirRankedTensorTypeGetElementType(MlirType type);
+
 /// Creates an unranked tensor type with the given element type in the same
 /// context as the element type. The type is owned by the context.
 MLIR_CAPI_EXPORTED MlirType mlirUnrankedTensorTypeGet(MlirType elementType);
@@ -311,6 +314,9 @@
 MLIR_CAPI_EXPORTED MlirType
 mlirUnrankedTensorTypeGetChecked(MlirLocation loc, MlirType elementType);
+/// Gets the element type from the unranked tensor type.
+MLIR_CAPI_EXPORTED MlirType mlirUnrankedTensorTypeGetElementType(MlirType type);
+
 //===----------------------------------------------------------------------===//
 // Ranked / Unranked MemRef type.
 //===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir-c/Dialect/Transform.h b/mlir/include/mlir-c/Dialect/Transform.h
--- a/mlir/include/mlir-c/Dialect/Transform.h
+++ b/mlir/include/mlir-c/Dialect/Transform.h
@@ -33,6 +33,8 @@
 MLIR_CAPI_EXPORTED bool mlirTypeIsATransformOperationType(MlirType type);
+MLIR_CAPI_EXPORTED MlirTypeID mlirTransformOperationTypeGetTypeID(void);
+
 MLIR_CAPI_EXPORTED MlirType
 mlirTransformOperationTypeGet(MlirContext ctx, MlirStringRef operationName);
diff --git a/mlir/include/mlir-c/Support.h b/mlir/include/mlir-c/Support.h
--- a/mlir/include/mlir-c/Support.h
+++ b/mlir/include/mlir-c/Support.h
@@ -142,6 +142,9 @@
 // TypeID API.
 //===----------------------------------------------------------------------===//
+struct MlirContext;
+struct MlirDialect;
+
 /// `ptr` must be 8 byte aligned and unique to a type valid for the duration of
 /// the returned type id's usage
 MLIR_CAPI_EXPORTED MlirTypeID mlirTypeIDCreate(const void *ptr);
@@ -149,6 +152,10 @@
 /// Checks whether a type id is null.
 static inline bool mlirTypeIDIsNull(MlirTypeID typeID) { return !typeID.ptr; }
+/// Get the dialect that the TypeID belongs to.
+MLIR_CAPI_EXPORTED struct MlirDialect
+mlirTypeIDGetDialect(MlirTypeID typeID, struct MlirContext context);
+
 /// Checks if two type ids are equal.
 MLIR_CAPI_EXPORTED bool mlirTypeIDEqual(MlirTypeID typeID1, MlirTypeID typeID2);
diff --git a/mlir/include/mlir/Bindings/Python/PybindAdaptors.h b/mlir/include/mlir/Bindings/Python/PybindAdaptors.h
--- a/mlir/include/mlir/Bindings/Python/PybindAdaptors.h
+++ b/mlir/include/mlir/Bindings/Python/PybindAdaptors.h
@@ -21,6 +21,8 @@
 #include
 #include
 #include
+#include <regex>
+#include <string>
 #include "mlir-c/Bindings/Python/Interop.h"
 #include "mlir-c/IR.h"
@@ -28,6 +30,7 @@
 #include "llvm/ADT/Twine.h"
 namespace py = pybind11;
+using namespace py::literals;
 // Raw CAPI type casters need to be declared before use, so always include them
 // first.
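The next hunk is where the downcasting actually happens: whenever a binding returns a raw MlirType, the caster consults the TypeCaster registry and, if a caster is registered for the type's MlirTypeID in the current context, hands back the concrete Python class instead of a generic ir.Type. A minimal sketch of the intended effect, assuming bindings built from this patch (the expected repr mirrors the testConcreteTypesRoundTrip checks added further down):

from mlir.ir import Attribute, Context, F32Type

with Context():
    # Attribute.type used to come back as a plain Type; with the TypeCaster
    # lookup in place it is expected to come back as the concrete F32Type.
    t = Attribute.parse("42.0 : f32").type
    assert isinstance(t, F32Type)
    print(repr(t))  # expected: F32Type(f32)
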
@@ -269,10 +272,29 @@ static handle cast(MlirType t, return_value_policy, handle) { py::object capsule = py::reinterpret_steal(mlirPythonTypeToCapsule(t)); - return py::module::import(MAKE_MLIR_PYTHON_QUALNAME("ir")) - .attr("Type") - .attr(MLIR_PYTHON_CAPI_FACTORY_ATTR)(capsule) - .release(); + + object pyType = py::module::import(MAKE_MLIR_PYTHON_QUALNAME("ir")) + .attr("Type") + .attr(MLIR_PYTHON_CAPI_FACTORY_ATTR)(capsule); + + MlirTypeID mlirTypeID = mlirTypeGetTypeID(t); + MlirContext mlirContext = mlirTypeGetContext(t); + if (mlirContextIsNull(mlirContext)) + throw std::runtime_error("No current Context"); + py::object contextCapsule = py::reinterpret_steal( + mlirPythonContextToCapsule(mlirContext)); + object context = py::module::import(MAKE_MLIR_PYTHON_QUALNAME("ir")) + .attr("Context") + .attr(MLIR_PYTHON_CAPI_FACTORY_ATTR)(contextCapsule); + auto typeCaster = + py::module::import(MAKE_MLIR_PYTHON_QUALNAME("ir")).attr("TypeCaster"); + if (py::bool_(typeCaster.attr("contains")(mlirTypeID, context)) && + !typeCaster.attr("get")(mlirTypeID, context).is_none()) + return typeCaster.attr("get")(mlirTypeID, context) + .cast()(pyType) + .release(); + + return pyType.release(); } }; @@ -471,6 +493,12 @@ "isinstance", [isaFunction](MlirType other) { return isaFunction(other); }, py::arg("other_type")); + def("__repr__", [superCls, captureTypeName](py::object self) { + return std::regex_replace( + py::repr(superCls(self)).cast(), + std::regex(superCls.attr("__name__").cast()), + captureTypeName); + }); } }; diff --git a/mlir/include/mlir/CAPI/Support.h b/mlir/include/mlir/CAPI/Support.h --- a/mlir/include/mlir/CAPI/Support.h +++ b/mlir/include/mlir/CAPI/Support.h @@ -44,4 +44,25 @@ DEFINE_C_API_METHODS(MlirTypeID, mlir::TypeID) DEFINE_C_API_PTR_METHODS(MlirTypeIDAllocator, mlir::TypeIDAllocator) +namespace llvm { + +template <> +struct DenseMapInfo { + static inline MlirTypeID getEmptyKey() { + auto *pointer = llvm::DenseMapInfo::getEmptyKey(); + return mlirTypeIDCreate(pointer); + } + static inline MlirTypeID getTombstoneKey() { + auto *pointer = llvm::DenseMapInfo::getTombstoneKey(); + return mlirTypeIDCreate(pointer); + } + static inline unsigned getHashValue(const MlirTypeID &val) { + return mlirTypeIDHashValue(val); + } + static inline bool isEqual(const MlirTypeID &lhs, const MlirTypeID &rhs) { + return mlirTypeIDEqual(lhs, rhs); + } +}; +} // namespace llvm + #endif // MLIR_CAPI_SUPPORT_H diff --git a/mlir/lib/Bindings/Python/DialectTransform.cpp b/mlir/lib/Bindings/Python/DialectTransform.cpp --- a/mlir/lib/Bindings/Python/DialectTransform.cpp +++ b/mlir/lib/Bindings/Python/DialectTransform.cpp @@ -56,6 +56,14 @@ return py::str(operationName.data, operationName.length); }, "Get the name of the payload operation accepted by the handle."); + py::module::import(MAKE_MLIR_PYTHON_QUALNAME("ir")) + .attr("TypeCaster") + .attr("insert")( + mlirTransformOperationTypeGetTypeID(), + pybind11::cpp_function([operationType](const py::object &mlirType) { + return operationType.get_class()(mlirType); + }), + "OperationType", /*replace*/ false); } PYBIND11_MODULE(_mlirDialectsTransform, m) { diff --git a/mlir/lib/Bindings/Python/Globals.h b/mlir/lib/Bindings/Python/Globals.h --- a/mlir/lib/Bindings/Python/Globals.h +++ b/mlir/lib/Bindings/Python/Globals.h @@ -9,12 +9,15 @@ #ifndef MLIR_BINDINGS_PYTHON_GLOBALS_H #define MLIR_BINDINGS_PYTHON_GLOBALS_H +#include #include #include -#include #include "PybindUtils.h" +#include "mlir-c/IR.h" +#include "mlir/CAPI/Support.h" +#include "llvm/ADT/DenseMap.h" 
#include "llvm/ADT/StringRef.h" #include "llvm/ADT/StringSet.h" @@ -54,16 +57,20 @@ /// entities. void loadDialectModule(llvm::StringRef dialectNamespace); - /// Decorator for registering a custom Dialect class. The class object must - /// have a DIALECT_NAMESPACE attribute. - pybind11::object registerDialectDecorator(pybind11::object pyClass); - /// Adds a user-friendly Attribute builder. /// Raises an exception if the mapping already exists. /// This is intended to be called by implementation code. void registerAttributeBuilder(const std::string &attributeKind, pybind11::function pyFunc); + /// Adds a user-friendly Type caster. + /// Raises an exception (with provided typeName) if the mapping already exists + /// and replace == false. This is intended to be called by implementation + /// code. + void registerTypeCaster(MlirTypeID mlirTypeID, pybind11::function typeCaster, + std::optional typeName, + bool replace = false); + /// Adds a concrete implementation dialect class. /// Raises an exception if the mapping already exists. /// This is intended to be called by implementation code. @@ -80,6 +87,10 @@ std::optional lookupAttributeBuilder(const std::string &attributeKind); + /// Returns the custom Type caster for MlirTypeID mlirTypeID. + std::optional lookupTypeCaster(MlirTypeID mlirTypeID, + MlirContext context); + /// Looks up a registered dialect class by namespace. Note that this may /// trigger loading of the defining module and can arbitrarily re-enter. std::optional @@ -101,6 +112,8 @@ llvm::StringMap operationClassMap; /// Map of attribute ODS name to custom builder. llvm::StringMap attributeBuilderMap; + /// Map of MlirTypeID to custom type caster. + llvm::DenseMap typeCasterMap; /// Set of dialect namespaces that we have attempted to import implementation /// modules for. 
diff --git a/mlir/lib/Bindings/Python/IRAttributes.cpp b/mlir/lib/Bindings/Python/IRAttributes.cpp
--- a/mlir/lib/Bindings/Python/IRAttributes.cpp
+++ b/mlir/lib/Bindings/Python/IRAttributes.cpp
@@ -15,6 +15,7 @@
 #include "mlir-c/BuiltinAttributes.h"
 #include "mlir-c/BuiltinTypes.h"
+#include "mlir/Bindings/Python/PybindAdaptors.h"
 namespace py = pybind11;
 using namespace mlir;
@@ -1023,8 +1024,7 @@
         py::arg("value"), py::arg("context") = py::none(),
         "Gets a uniqued Type attribute");
     c.def_property_readonly("value", [](PyTypeAttribute &self) {
-      return PyType(self.getContext()->getRef(),
-                    mlirTypeAttrGetValue(self.get()));
+      return mlirTypeAttrGetValue(self.get());
     });
   }
 };
diff --git a/mlir/lib/Bindings/Python/IRCore.cpp b/mlir/lib/Bindings/Python/IRCore.cpp
--- a/mlir/lib/Bindings/Python/IRCore.cpp
+++ b/mlir/lib/Bindings/Python/IRCore.cpp
@@ -25,6 +25,7 @@
 #include
 namespace py = pybind11;
+using namespace py::literals;
 using namespace mlir;
 using namespace mlir::python;
@@ -231,6 +232,48 @@
   }
 };
+struct PyTypeCasterMap {
+  static bool dunderContains(MlirTypeID mlirTypeID, MlirContext context) {
+    return PyGlobals::get().lookupTypeCaster(mlirTypeID, context).has_value();
+  }
+  static py::function dunderGetItemNamed(MlirTypeID mlirTypeID,
+                                         MlirContext context) {
+    auto builder = PyGlobals::get().lookupTypeCaster(mlirTypeID, context);
+    if (!builder)
+      throw py::key_error();
+    return *builder;
+  }
+  static void dunderSetItemNamed(MlirTypeID mlirTypeID, py::function func,
+                                 std::optional<std::string> typeName,
+                                 std::optional<bool> replace) {
+    PyGlobals::get().registerTypeCaster(mlirTypeID, std::move(func),
+                                        std::move(typeName),
+                                        replace.value_or(false));
+  }
+
+  static void bind(py::module &m) {
+    py::class_<PyTypeCasterMap>(m, "TypeCaster", py::module_local())
+        .def_static(
+            "contains",
+            [](MlirTypeID mlirTypeID, DefaultingPyMlirContext context) {
+              return PyTypeCasterMap::dunderContains(mlirTypeID,
+                                                     context->get());
+            },
+            "typeid"_a, "context"_a = py::none())
+        .def_static(
+            "get",
+            [](MlirTypeID mlirTypeID, DefaultingPyMlirContext context) {
+              return PyTypeCasterMap::dunderGetItemNamed(mlirTypeID,
+                                                         context->get());
+            },
+            "typeid"_a, "context"_a = py::none())
+        .def_static("insert", &PyTypeCasterMap::dunderSetItemNamed,
+                    py::arg("typeid"), py::arg("type_caster"),
+                    py::arg("type_name") = py::none(),
+                    py::arg("replace") = false);
+  }
+};
+
 //------------------------------------------------------------------------------
 // Collections.
 //------------------------------------------------------------------------------
@@ -2100,13 +2143,12 @@
 /// Returns the list of types of the values held by container.
template -static std::vector getValueTypes(Container &container, - PyMlirContextRef &context) { - std::vector result; +static std::vector getValueTypes(Container &container, + PyMlirContextRef &context) { + std::vector result; result.reserve(container.size()); for (int i = 0, e = container.size(); i < e; ++i) { - result.push_back( - PyType(context, mlirValueGetType(container.getElement(i).get()))); + result.push_back(mlirValueGetType(container.getElement(i).get())); } return result; } @@ -3157,11 +3199,8 @@ "context", [](PyAttribute &self) { return self.getContext().getObject(); }, "Context that owns the Attribute") - .def_property_readonly("type", - [](PyAttribute &self) { - return PyType(self.getContext()->getRef(), - mlirAttributeGetType(self)); - }) + .def_property_readonly( + "type", [](PyAttribute &self) { return mlirAttributeGetType(self); }) .def( "get_named", [](PyAttribute &self, std::string name) { @@ -3256,7 +3295,7 @@ mlirTypeParseGet(context->get(), toMlirStringRef(typeSpec)); if (mlirTypeIsNull(type)) throw MLIRError("Unable to parse type", errors.take()); - return PyType(context->getRef(), type); + return type; }, py::arg("asm"), py::arg("context") = py::none(), kContextParseTypeDocstring) @@ -3396,12 +3435,8 @@ return printAccum.join(); }, py::arg("use_local_scope") = false, kGetNameAsOperand) - .def_property_readonly("type", - [](PyValue &self) { - return PyType( - self.getParentOperation()->getContext(), - mlirValueGetType(self.get())); - }) + .def_property_readonly( + "type", [](PyValue &self) { return mlirValueGetType(self.get()); }) .def( "replace_all_uses_with", [](PyValue &self, PyValue &with) { @@ -3460,6 +3495,8 @@ // Attribute builder getter. PyAttrBuilderMap::bind(m); + // Type caster builder getter. + PyTypeCasterMap::bind(m); py::register_local_exception_translator([](std::exception_ptr p) { // We can't define exceptions with custom fields through pybind, so instead diff --git a/mlir/lib/Bindings/Python/IRInterfaces.cpp b/mlir/lib/Bindings/Python/IRInterfaces.cpp --- a/mlir/lib/Bindings/Python/IRInterfaces.cpp +++ b/mlir/lib/Bindings/Python/IRInterfaces.cpp @@ -321,11 +321,7 @@ py::module_local()) .def_property_readonly( "element_type", - [](PyShapedTypeComponents &self) { - return PyType(PyMlirContext::forContext( - mlirTypeGetContext(self.elementType)), - self.elementType); - }, + [](PyShapedTypeComponents &self) { return self.elementType; }, "Returns the element type of the shaped type components.") .def_static( "get", diff --git a/mlir/lib/Bindings/Python/IRModule.h b/mlir/lib/Bindings/Python/IRModule.h --- a/mlir/lib/Bindings/Python/IRModule.h +++ b/mlir/lib/Bindings/Python/IRModule.h @@ -13,6 +13,7 @@ #include #include +#include "Globals.h" #include "PybindUtils.h" #include "mlir-c/AffineExpr.h" @@ -868,9 +869,7 @@ PyConcreteType() = default; PyConcreteType(PyMlirContextRef contextRef, MlirType t) - : BaseTy(std::move(contextRef), t) { - pybind11::implicitly_convertible(); - } + : BaseTy(std::move(contextRef), t) {} PyConcreteType(PyType &orig) : PyConcreteType(orig.getContext(), castFrom(orig)) {} @@ -914,6 +913,16 @@ return printAccum.join(); }); + if (DerivedTy::getTypeIdFunction) { + PyGlobals::get().registerTypeCaster( + DerivedTy::getTypeIdFunction(), + pybind11::cpp_function( + [](PyType pyType) -> DerivedTy { return pyType; }), + DerivedTy::pyClassName); + } + + pybind11::implicitly_convertible(); + DerivedTy::bindDerived(cls); } @@ -1009,9 +1018,8 @@ return DerivedTy::isaFunction(otherAttr); }, pybind11::arg("other")); - 
cls.def_property_readonly("type", [](PyAttribute &attr) { - return PyType(attr.getContext(), mlirAttributeGetType(attr)); - }); + cls.def_property_readonly( + "type", [](PyAttribute &attr) { return mlirAttributeGetType(attr); }); DerivedTy::bindDerived(cls); } diff --git a/mlir/lib/Bindings/Python/IRModule.cpp b/mlir/lib/Bindings/Python/IRModule.cpp --- a/mlir/lib/Bindings/Python/IRModule.cpp +++ b/mlir/lib/Bindings/Python/IRModule.cpp @@ -14,6 +14,7 @@ #include #include "mlir-c/Bindings/Python/Interop.h" +#include "mlir-c/Support.h" namespace py = pybind11; using namespace mlir; @@ -72,6 +73,20 @@ found = std::move(pyFunc); } +void PyGlobals::registerTypeCaster(MlirTypeID mlirTypeID, + pybind11::function typeCaster, + std::optional typeName, + bool replace) { + pybind11::object &found = typeCasterMap[mlirTypeID]; + if (found && !found.is_none() && !replace) { + throw std::runtime_error((llvm::Twine("Type caster for '") + + typeName.value_or("UNKNOWN TYPE") + + "' is already registered") + .str()); + } + found = std::move(typeCaster); +} + void PyGlobals::registerDialectImpl(const std::string &dialectNamespace, py::object pyClass) { py::object &found = dialectClassMap[dialectNamespace]; @@ -110,6 +125,42 @@ return std::nullopt; } +std::optional PyGlobals::lookupTypeCaster(MlirTypeID mlirTypeID, + MlirContext context) { + { + // Fast match against the class map first (common case). + const auto foundIt = typeCasterMap.find(mlirTypeID); + if (foundIt != typeCasterMap.end()) { + if (foundIt->second.is_none()) + return std::nullopt; + assert(foundIt->second && "py::function is defined"); + return foundIt->second; + } + } + + // Not found. Load the dialect namespace. + MlirDialect dialect = mlirTypeIDGetDialect(mlirTypeID, context); + MlirStringRef dialectNamespace = mlirDialectGetNamespace(dialect); + loadDialectModule( + std::string(dialectNamespace.data, dialectNamespace.length)); + + // Attempt to find from the canonical map and cache. + { + const auto foundIt = typeCasterMap.find(mlirTypeID); + if (foundIt != typeCasterMap.end()) { + if (foundIt->second.is_none()) + return std::nullopt; + assert(foundIt->second && "py::object is defined"); + // Positive cache. + typeCasterMap[mlirTypeID] = foundIt->second; + return foundIt->second; + } + // Negative cache. 
+ typeCasterMap[mlirTypeID] = py::none(); + return std::nullopt; + } +} + std::optional PyGlobals::lookupDialectClass(const std::string &dialectNamespace) { loadDialectModule(dialectNamespace); diff --git a/mlir/lib/Bindings/Python/IRTypes.cpp b/mlir/lib/Bindings/Python/IRTypes.cpp --- a/mlir/lib/Bindings/Python/IRTypes.cpp +++ b/mlir/lib/Bindings/Python/IRTypes.cpp @@ -334,10 +334,7 @@ "Create a complex type"); c.def_property_readonly( "element_type", - [](PyComplexType &self) -> PyType { - MlirType t = mlirComplexTypeGetElementType(self); - return PyType(self.getContext(), t); - }, + [](PyComplexType &self) { return mlirComplexTypeGetElementType(self); }, "Returns element type."); } }; @@ -351,10 +348,7 @@ static void bindDerived(ClassTy &c) { c.def_property_readonly( "element_type", - [](PyShapedType &self) { - MlirType t = mlirShapedTypeGetElementType(self); - return PyType(self.getContext(), t); - }, + [](PyShapedType &self) { return mlirShapedTypeGetElementType(self); }, "Returns the element type of the shaped type."); c.def_property_readonly( "has_rank", @@ -641,9 +635,8 @@ "Create a tuple type"); c.def( "get_type", - [](PyTupleType &self, intptr_t pos) -> PyType { - MlirType t = mlirTupleTypeGetType(self, pos); - return PyType(self.getContext(), t); + [](PyTupleType &self, intptr_t pos) { + return mlirTupleTypeGetType(self, pos); }, py::arg("pos"), "Returns the pos-th type in the tuple type."); c.def_property_readonly( @@ -686,7 +679,7 @@ py::list types; for (intptr_t i = 0, e = mlirFunctionTypeGetNumInputs(self); i < e; ++i) { - types.append(PyType(contextRef, mlirFunctionTypeGetInput(t, i))); + types.append(mlirFunctionTypeGetInput(t, i)); } return types; }, @@ -698,8 +691,7 @@ py::list types; for (intptr_t i = 0, e = mlirFunctionTypeGetNumResults(self); i < e; ++i) { - types.append( - PyType(contextRef, mlirFunctionTypeGetResult(self, i))); + types.append(mlirFunctionTypeGetResult(self, i)); } return types; }, diff --git a/mlir/lib/CAPI/Dialect/Transform.cpp b/mlir/lib/CAPI/Dialect/Transform.cpp --- a/mlir/lib/CAPI/Dialect/Transform.cpp +++ b/mlir/lib/CAPI/Dialect/Transform.cpp @@ -37,6 +37,10 @@ return isa(unwrap(type)); } +MlirTypeID mlirTransformOperationTypeGetTypeID(void) { + return wrap(transform::OperationType::getTypeID()); +} + MlirType mlirTransformOperationTypeGet(MlirContext ctx, MlirStringRef operationName) { return wrap( diff --git a/mlir/lib/CAPI/IR/BuiltinTypes.cpp b/mlir/lib/CAPI/IR/BuiltinTypes.cpp --- a/mlir/lib/CAPI/IR/BuiltinTypes.cpp +++ b/mlir/lib/CAPI/IR/BuiltinTypes.cpp @@ -315,6 +315,10 @@ return wrap(llvm::cast(unwrap(type)).getEncoding()); } +MlirType mlirRankedTensorTypeGetElementType(MlirType type) { + return wrap(llvm::cast(unwrap(type)).getElementType()); +} + MlirType mlirUnrankedTensorTypeGet(MlirType elementType) { return wrap(UnrankedTensorType::get(unwrap(elementType))); } @@ -324,6 +328,10 @@ return wrap(UnrankedTensorType::getChecked(unwrap(loc), unwrap(elementType))); } +MlirType mlirUnrankedTensorTypeGetElementType(MlirType type) { + return wrap(llvm::cast(unwrap(type)).getElementType()); +} + //===----------------------------------------------------------------------===// // Ranked / Unranked MemRef type. 
//===----------------------------------------------------------------------===// diff --git a/mlir/lib/CAPI/IR/Support.cpp b/mlir/lib/CAPI/IR/Support.cpp --- a/mlir/lib/CAPI/IR/Support.cpp +++ b/mlir/lib/CAPI/IR/Support.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "mlir/CAPI/Support.h" +#include "mlir/CAPI/IR.h" #include "llvm/ADT/StringRef.h" #include @@ -23,7 +24,6 @@ //===----------------------------------------------------------------------===// // TypeID API. //===----------------------------------------------------------------------===// - MlirTypeID mlirTypeIDCreate(const void *ptr) { assert(reinterpret_cast(ptr) % 8 == 0 && "ptr must be 8 byte aligned"); @@ -33,6 +33,13 @@ return wrap(mlir::TypeID::getFromOpaquePointer(ptr)); } +MlirDialect mlirTypeIDGetDialect(MlirTypeID typeID, MlirContext context) { + const mlir::AbstractType &abstractType = + mlir::AbstractType::lookup(unwrap(typeID), unwrap(context)); + abstractType.getDialect().getNamespace(); + return wrap(&abstractType.getDialect()); +} + bool mlirTypeIDEqual(MlirTypeID typeID1, MlirTypeID typeID2) { return unwrap(typeID1) == unwrap(typeID2); } diff --git a/mlir/python/mlir/dialects/python_test.py b/mlir/python/mlir/dialects/python_test.py --- a/mlir/python/mlir/dialects/python_test.py +++ b/mlir/python/mlir/dialects/python_test.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception from ._python_test_ops_gen import * -from .._mlir_libs._mlirPythonTest import TestAttr, TestType, TestTensorValue, TestTensorType +from .._mlir_libs._mlirPythonTest import TestAttr, TestType, TestTensorValue, TestIntegerRankedTensorType def register_python_test_dialect(context, load=True): from .._mlir_libs import _mlirPythonTest diff --git a/mlir/python/mlir/ir.py b/mlir/python/mlir/ir.py --- a/mlir/python/mlir/ir.py +++ b/mlir/python/mlir/ir.py @@ -129,3 +129,12 @@ except ImportError: pass + + +def register_type_caster(mlir_typeid, type_name=None, replace=False): + + def decorator_builder(func): + TypeCaster.insert(mlir_typeid, func, type_name, replace) + return func + + return decorator_builder diff --git a/mlir/test/python/dialects/python_test.py b/mlir/test/python/dialects/python_test.py --- a/mlir/test/python/dialects/python_test.py +++ b/mlir/test/python/dialects/python_test.py @@ -5,11 +5,13 @@ import mlir.dialects.python_test as test import mlir.dialects.tensor as tensor + def run(f): print("\nTEST:", f.__name__) f() return f + # CHECK-LABEL: TEST: testAttributes @run def testAttributes(): @@ -131,6 +133,7 @@ del op.unit print(f"Unit: {op.unit}") + # CHECK-LABEL: TEST: attrBuilder @run def attrBuilder(): @@ -216,8 +219,8 @@ print(first_type_attr.one.type) print(first_type_attr.two.type) - first_attr = test.FirstAttrDeriveAttrOp( - FloatAttr.get(F32Type.get(), 3.14)) + first_attr = test.FirstAttrDeriveAttrOp(FloatAttr.get( + F32Type.get(), 3.14)) # CHECK-COUNT-3: f32 print(first_attr.one.type) print(first_attr.two.type) @@ -344,6 +347,7 @@ i8 = IntegerType.get_signless(8) class Tensor(test.TestTensorValue): + def __str__(self): return super().__str__().replace("Value", "Tensor") @@ -361,60 +365,99 @@ # CHECK: False print(tt.is_null()) - # Classes of custom types that inherit from concrete types should have - # static_typeid - assert isinstance(test.TestTensorType.static_typeid, TypeID) - # And it should be equal to the in-tree concrete type - assert test.TestTensorType.static_typeid == t.type.typeid - # CHECK-LABEL: TEST: inferReturnTypeComponents @run 
def inferReturnTypeComponents(): - with Context() as ctx, Location.unknown(ctx): - test.register_python_test_dialect(ctx) - module = Module.create() - i32 = IntegerType.get_signless(32) - with InsertionPoint(module.body): - resultType = UnrankedTensorType.get(i32) - operandTypes = [ - RankedTensorType.get([1, 3, 10, 10], i32), - UnrankedTensorType.get(i32), - ] - f = func.FuncOp( - "test_inferReturnTypeComponents", (operandTypes, [resultType]) - ) - entry_block = Block.create_at_start(f.operation.regions[0], operandTypes) - with InsertionPoint(entry_block): - ranked_op = test.InferShapedTypeComponentsOp( - resultType, entry_block.arguments[0] - ) - unranked_op = test.InferShapedTypeComponentsOp( - resultType, entry_block.arguments[1] - ) - - # CHECK: has rank: True - # CHECK: rank: 4 - # CHECK: element type: i32 - # CHECK: shape: [1, 3, 10, 10] - iface = InferShapedTypeOpInterface(ranked_op) - shaped_type_components = iface.inferReturnTypeComponents( - operands=[ranked_op.operand] - )[0] - print("has rank:", shaped_type_components.has_rank) - print("rank:", shaped_type_components.rank) - print("element type:", shaped_type_components.element_type) - print("shape:", shaped_type_components.shape) - - # CHECK: has rank: False - # CHECK: rank: None - # CHECK: element type: i32 - # CHECK: shape: None - iface = InferShapedTypeOpInterface(unranked_op) - shaped_type_components = iface.inferReturnTypeComponents( - operands=[unranked_op.operand] - )[0] - print("has rank:", shaped_type_components.has_rank) - print("rank:", shaped_type_components.rank) - print("element type:", shaped_type_components.element_type) - print("shape:", shaped_type_components.shape) + with Context() as ctx, Location.unknown(ctx): + test.register_python_test_dialect(ctx) + module = Module.create() + i32 = IntegerType.get_signless(32) + with InsertionPoint(module.body): + resultType = UnrankedTensorType.get(i32) + operandTypes = [ + RankedTensorType.get([1, 3, 10, 10], i32), + UnrankedTensorType.get(i32), + ] + f = func.FuncOp("test_inferReturnTypeComponents", + (operandTypes, [resultType])) + entry_block = Block.create_at_start(f.operation.regions[0], operandTypes) + with InsertionPoint(entry_block): + ranked_op = test.InferShapedTypeComponentsOp(resultType, + entry_block.arguments[0]) + unranked_op = test.InferShapedTypeComponentsOp(resultType, + entry_block.arguments[1]) + + # CHECK: has rank: True + # CHECK: rank: 4 + # CHECK: element type: i32 + # CHECK: shape: [1, 3, 10, 10] + iface = InferShapedTypeOpInterface(ranked_op) + shaped_type_components = iface.inferReturnTypeComponents( + operands=[ranked_op.operand])[0] + print("has rank:", shaped_type_components.has_rank) + print("rank:", shaped_type_components.rank) + print("element type:", shaped_type_components.element_type) + print("shape:", shaped_type_components.shape) + + # CHECK: has rank: False + # CHECK: rank: None + # CHECK: element type: i32 + # CHECK: shape: None + iface = InferShapedTypeOpInterface(unranked_op) + shaped_type_components = iface.inferReturnTypeComponents( + operands=[unranked_op.operand])[0] + print("has rank:", shaped_type_components.has_rank) + print("rank:", shaped_type_components.rank) + print("element type:", shaped_type_components.element_type) + print("shape:", shaped_type_components.shape) + + +# CHECK-LABEL: TEST: testCustomTypeTypeCaster +@run +def testCustomTypeTypeCaster(): + with Context() as ctx, Location.unknown(): + test.register_python_test_dialect(ctx) + + a = test.TestType.get() + assert a.typeid is not None + + def 
type_caster(pytype): + return test.TestType(pytype) + + TypeCaster.insert(a.typeid, type_caster) + + b = Type.parse("!python_test.test_type") + # CHECK: !python_test.test_type + print(b) + + c = test.TestIntegerRankedTensorType.get([10, 10], 5) + # CHECK: tensor<10x10xi5> + print(c) + # CHECK: TestIntegerRankedTensorType(tensor<10x10xi5>) + print(repr(c)) + + # CHECK: Type caster for 'MySpecialTensorType' is already registered + try: + + @register_type_caster(c.typeid, "MySpecialTensorType") + def type_caster(pytype): + return test.TestIntegerRankedTensorType(pytype) + except RuntimeError as e: + print(e) + + @register_type_caster(c.typeid, "MySpecialTensorType", replace=True) + def type_caster(pytype): + return test.TestIntegerRankedTensorType(pytype) + + d = tensor.EmptyOp([10, 10], IntegerType.get_signless(5)).result + # CHECK: tensor<10x10xi5> + print(d.type) + # CHECK: TestIntegerRankedTensorType(tensor<10x10xi5>) + print(repr(d.type)) + + t = Type.parse('!transform.op<"foo.bar">', Context()) + # CHECK: !transform.op<"foo.bar"> + print(t) + # CHECK: OperationType(!transform.op<"foo.bar">) + print(repr(t)) \ No newline at end of file diff --git a/mlir/test/python/ir/attributes.py b/mlir/test/python/ir/attributes.py --- a/mlir/test/python/ir/attributes.py +++ b/mlir/test/python/ir/attributes.py @@ -255,7 +255,7 @@ # CHECK: default_get: #foobar<123> print( "default_get:", - OpaqueAttr.get("foobar", bytes("123", "utf-8"), NoneType.get())) + OpaqueAttr.get("foobar", bytes("123", "utf-8"), NoneType.get())) # CHECK-LABEL: TEST: testStringAttr @@ -418,7 +418,7 @@ def testDictAttr(): with Context(): dict_attr = { - 'stringattr': StringAttr.get('string'), + 'stringattr': StringAttr.get('string'), 'integerattr' : IntegerAttr.get( IntegerType.get_signless(32), 42) } @@ -557,3 +557,58 @@ print(f"rank: {len(attr.strides)}") # CHECK: strides are dynamic: [True, True, True] print(f"strides are dynamic: {[s == dynamic for s in attr.strides]}") + + +# CHECK-LABEL: TEST: testConcreteTypesRoundTrip +@run +def testConcreteTypesRoundTrip(): + with Context(), Location.unknown(): + + def print_item(attr): + print(repr(attr.type)) + + # CHECK: F32Type(f32) + print_item(Attribute.parse("42.0 : f32")) + # CHECK: F32Type(f32) + print_item(FloatAttr.get_f32(42.0)) + # CHECK: F64Type(f64) + print_item(FloatAttr.get_f64(42.0)) + # CHECK: IntegerType(i32) + print_item(IntegerAttr.get(IntegerType.get_signless(32), 42)) + # CHECK: IntegerType(i64) + print_item(IntegerAttr.get(IntegerType.get_signless(64), 42)) + + def print_container_item(attr_asm): + attr = DenseElementsAttr(Attribute.parse(attr_asm)) + print(repr(attr.type)) + print(repr(attr.type.element_type)) + + # CHECK: RankedTensorType(tensor) + # CHECK: IntegerType(i16) + print_container_item("dense<123> : tensor") + # CHECK: RankedTensorType(tensor) + # CHECK: IntegerType(i32) + print_container_item("dense<123> : tensor") + # CHECK: RankedTensorType(tensor) + # CHECK: IntegerType(i64) + print_container_item("dense<123> : tensor") + + # CHECK: RankedTensorType(tensor) + # CHECK: F16Type(f16) + print_container_item("dense<1.0> : tensor") + # CHECK: RankedTensorType(tensor) + # CHECK: F32Type(f32) + print_container_item("dense<1.0> : tensor") + # CHECK: RankedTensorType(tensor) + # CHECK: F64Type(f64) + print_container_item("dense<1.0> : tensor") + + raw = Attribute.parse("vector<4xf32>") + # CHECK: attr: vector<4xf32> + print("attr:", raw) + type_attr = TypeAttr(raw) + + # CHECK: VectorType(vector<4xf32>) + print(repr(type_attr.value)) + # CHECK: F32Type(f32) 
+ print(repr(type_attr.value.element_type)) diff --git a/mlir/test/python/ir/builtin_types.py b/mlir/test/python/ir/builtin_types.py --- a/mlir/test/python/ir/builtin_types.py +++ b/mlir/test/python/ir/builtin_types.py @@ -2,6 +2,7 @@ import gc from mlir.ir import * +from mlir.dialects import arith, tensor, func, memref def run(f): @@ -383,15 +384,15 @@ f32 = F32Type.get() shape = [2, 3] loc = Location.unknown() - memref = MemRefType.get(shape, f32, memory_space=Attribute.parse("2")) + memref_f32 = MemRefType.get(shape, f32, memory_space=Attribute.parse("2")) # CHECK: memref type: memref<2x3xf32, 2> - print("memref type:", memref) + print("memref type:", memref_f32) # CHECK: memref layout: affine_map<(d0, d1) -> (d0, d1)> - print("memref layout:", memref.layout) + print("memref layout:", memref_f32.layout) # CHECK: memref affine map: (d0, d1) -> (d0, d1) - print("memref affine map:", memref.affine_map) + print("memref affine map:", memref_f32.affine_map) # CHECK: memory space: 2 - print("memory space:", memref.memory_space) + print("memory space:", memref_f32.memory_space) layout = AffineMapAttr.get(AffineMap.get_permutation([1, 0])) memref_layout = MemRefType.get(shape, f32, layout=layout) @@ -414,7 +415,7 @@ else: print("Exception not produced") - assert memref.shape == shape + assert memref_f32.shape == shape # CHECK-LABEL: TEST: testUnrankedMemRefType @@ -483,9 +484,9 @@ input_types = [IntegerType.get_signless(32), IntegerType.get_signless(16)] result_types = [IndexType.get()] func = FunctionType.get(input_types, result_types) - # CHECK: INPUTS: [Type(i32), Type(i16)] + # CHECK: INPUTS: [IntegerType(i32), IntegerType(i16)] print("INPUTS:", func.inputs) - # CHECK: RESULTS: [Type(index)] + # CHECK: RESULTS: [IndexType(index)] print("RESULTS:", func.results) @@ -520,27 +521,27 @@ f32 = F32Type.get() types = [ - (IntegerType, IntegerType.get_signless(16)), - (IndexType, IndexType.get()), - (Float8E4M3FNType, Float8E4M3FNType.get()), - (Float8E5M2Type, Float8E5M2Type.get()), - (Float8E4M3FNUZType, Float8E4M3FNUZType.get()), - (Float8E4M3B11FNUZType, Float8E4M3B11FNUZType.get()), - (Float8E5M2FNUZType, Float8E5M2FNUZType.get()), - (BF16Type, BF16Type.get()), - (F16Type, F16Type.get()), - (F32Type, F32Type.get()), - (F64Type, F64Type.get()), - (NoneType, NoneType.get()), - (ComplexType, ComplexType.get(f32)), - (VectorType, VectorType.get([2, 3], f32)), - (RankedTensorType, RankedTensorType.get([2, 3], f32)), - (UnrankedTensorType, UnrankedTensorType.get(f32)), - (MemRefType, MemRefType.get([2, 3], f32)), - (UnrankedMemRefType, UnrankedMemRefType.get(f32, Attribute.parse("2"))), - (TupleType, TupleType.get_tuple([f32])), - (FunctionType, FunctionType.get([], [])), - (OpaqueType, OpaqueType.get("tensor", "bob")), + (IntegerType, IntegerType.get_signless(16)), + (IndexType, IndexType.get()), + (Float8E4M3FNType, Float8E4M3FNType.get()), + (Float8E5M2Type, Float8E5M2Type.get()), + (Float8E4M3FNUZType, Float8E4M3FNUZType.get()), + (Float8E4M3B11FNUZType, Float8E4M3B11FNUZType.get()), + (Float8E5M2FNUZType, Float8E5M2FNUZType.get()), + (BF16Type, BF16Type.get()), + (F16Type, F16Type.get()), + (F32Type, F32Type.get()), + (F64Type, F64Type.get()), + (NoneType, NoneType.get()), + (ComplexType, ComplexType.get(f32)), + (VectorType, VectorType.get([2, 3], f32)), + (RankedTensorType, RankedTensorType.get([2, 3], f32)), + (UnrankedTensorType, UnrankedTensorType.get(f32)), + (MemRefType, MemRefType.get([2, 3], f32)), + (UnrankedMemRefType, UnrankedMemRefType.get(f32, Attribute.parse("2"))), + (TupleType, 
TupleType.get_tuple([f32])), + (FunctionType, FunctionType.get([], [])), + (OpaqueType, OpaqueType.get("tensor", "bob")), ] # CHECK: IntegerType(i16) @@ -584,9 +585,8 @@ # CHECK: all equal for t1, t2 in typeid_dict.items(): - assert t1.static_typeid == t2.typeid and hash( - t1.static_typeid) == hash( - t2.typeid), f"expected hash and value equality {t1} {t2}" + assert t1.static_typeid == t2.typeid and hash(t1.static_typeid) == hash( + t2.typeid), f"expected hash and value equality {t1} {t2}" else: print("all equal") @@ -599,3 +599,132 @@ vector_type = Type.parse("vector<2x3xf32>") # CHECK: True print(ShapedType(vector_type).typeid == vector_type.typeid) + + +# CHECK-LABEL: TEST: testConcreteTypesRoundTrip +@run +def testConcreteTypesRoundTrip(): + with Context() as ctx, Location.unknown(): + ctx.allow_unregistered_dialects = True + + def print_item(typ, v): + cst = arith.ConstantOp(typ, v).result + print(type(cst.type).__name__) + print(repr(cst.type)) + + # CHECK: F16Type + # CHECK: F16Type(f16) + print_item(F16Type.get(), 0.0) + # CHECK: F32Type + # CHECK: F32Type(f32) + print_item(F32Type.get(), 0.0) + # CHECK: F64Type + # CHECK: F64Type(f64) + print_item(F64Type.get(), 0.0) + # CHECK: Float8E4M3B11FNUZType + # CHECK: Float8E4M3B11FNUZType(f8E4M3B11FNUZ) + print_item(Float8E4M3B11FNUZType.get(), 0.0) + # CHECK: Float8E4M3FNType + # CHECK: Float8E4M3FNType(f8E4M3FN) + print_item(Float8E4M3FNType.get(), 0.0) + # CHECK: Float8E4M3FNUZType + # CHECK: Float8E4M3FNUZType(f8E4M3FNUZ) + print_item(Float8E4M3FNUZType.get(), 0.0) + # CHECK: Float8E5M2Type + # CHECK: Float8E5M2Type(f8E5M2) + print_item(Float8E5M2Type.get(), 0.0) + # CHECK: Float8E5M2FNUZType + # CHECK: Float8E5M2FNUZType(f8E5M2FNUZ) + print_item(Float8E5M2FNUZType.get(), 0.0) + # CHECK: BF16Type + # CHECK: BF16Type(bf16) + print_item(BF16Type.get(), 0.0) + # CHECK: IndexType + # CHECK: IndexType(index) + print_item(IndexType.get(), 0) + # CHECK: IntegerType + # CHECK: IntegerType(i32) + print_item(IntegerType.get_signless(32), 0) + + f32 = F32Type.get() + ranked_tensor = tensor.EmptyOp([10, 10], f32).result + # CHECK: RankedTensorType + print(type(ranked_tensor.type).__name__) + # CHECK: RankedTensorType(tensor<10x10xf32>) + print(repr(ranked_tensor.type)) + + cf32 = ComplexType.get(f32) + # CHECK: ComplexType + print(type(cf32).__name__) + # CHECK: ComplexType(complex) + print(repr(cf32)) + + ranked_tensor = tensor.EmptyOp([10, 10], f32).result + # CHECK: RankedTensorType + print(type(ranked_tensor.type).__name__) + # CHECK: RankedTensorType(tensor<10x10xf32>) + print(repr(ranked_tensor.type)) + + vector = VectorType.get([10, 10], f32) + tuple_type = TupleType.get_tuple([f32, vector]) + # CHECK: TupleType + print(type(tuple_type).__name__) + # CHECK: TupleType(tuple>) + print(repr(tuple_type)) + # CHECK: F32Type(f32) + print(repr(tuple_type.get_type(0))) + # CHECK: VectorType(vector<10x10xf32>) + print(repr(tuple_type.get_type(1))) + + index_type = IndexType.get() + + @func.FuncOp.from_py_func() + def default_builder(): + c0 = arith.ConstantOp(f32, 0.0) + unranked_tensor_type = UnrankedTensorType.get(f32) + unranked_tensor = tensor.FromElementsOp(unranked_tensor_type, [c0]).result + # CHECK: UnrankedTensorType + print(type(unranked_tensor.type).__name__) + # CHECK: UnrankedTensorType(tensor<*xf32>) + print(repr(unranked_tensor.type)) + + c10 = arith.ConstantOp(index_type, 10) + memref_f32_t = MemRefType.get([10, 10], f32) + memref_f32 = memref.AllocOp(memref_f32_t, [c10, c10], []).result + # CHECK: MemRefType + 
print(type(memref_f32.type).__name__) + # CHECK: MemRefType(memref<10x10xf32>) + print(repr(memref_f32.type)) + + unranked_memref_t = UnrankedMemRefType.get(f32, Attribute.parse("2")) + memref_f32 = memref.AllocOp(unranked_memref_t, [c10, c10], []).result + # CHECK: UnrankedMemRefType + print(type(memref_f32.type).__name__) + # CHECK: UnrankedMemRefType(memref<*xf32, 2>) + print(repr(memref_f32.type)) + + tuple_type = Operation.parse( + f'"test.make_tuple"() : () -> tuple').result + # CHECK: TupleType + print(type(tuple_type.type).__name__) + # CHECK: TupleType(tuple) + print(repr(tuple_type.type)) + + return c0, c10 + + func_op = default_builder.func_op + # CHECK: FunctionType + print(type(func_op.type).__name__) + # CHECK: FunctionType(() -> (f32, index)) + print(repr(func_op.type)) + # CHECK: [] + print(func_op.type.inputs) + # CHECK: [F32Type(f32), IndexType(index)] + print(func_op.type.results) + + t = Type.parse("!pdl.type", Context()) + print(repr(t)) + + @register_type_caster(t.typeid, "MySpecialTensorType", replace=True) + def type_caster(pytype): + return test.TestIntegerRankedTensorType(pytype) \ No newline at end of file diff --git a/mlir/test/python/lib/PythonTestModule.cpp b/mlir/test/python/lib/PythonTestModule.cpp --- a/mlir/test/python/lib/PythonTestModule.cpp +++ b/mlir/test/python/lib/PythonTestModule.cpp @@ -7,11 +7,19 @@ //===----------------------------------------------------------------------===// #include "PythonTestCAPI.h" +#include "mlir-c/BuiltinAttributes.h" #include "mlir-c/BuiltinTypes.h" +#include "mlir-c/IR.h" #include "mlir/Bindings/Python/PybindAdaptors.h" namespace py = pybind11; using namespace mlir::python::adaptors; +using namespace pybind11::literals; + +static bool mlirTypeIsARankedIntegerTensor(MlirType t) { + return mlirTypeIsARankedTensor(t) && + mlirTypeIsAInteger(mlirRankedTensorTypeGetElementType(t)); +} PYBIND11_MODULE(_mlirPythonTest, m) { m.def( @@ -41,9 +49,32 @@ return cls(mlirPythonTestTestTypeGet(ctx)); }, py::arg("cls"), py::arg("context") = py::none()); - mlir_type_subclass(m, "TestTensorType", mlirTypeIsARankedTensor, - py::module::import(MAKE_MLIR_PYTHON_QUALNAME("ir")) - .attr("RankedTensorType")); + auto cls = + mlir_type_subclass(m, "TestIntegerRankedTensorType", + mlirTypeIsARankedIntegerTensor, + py::module::import(MAKE_MLIR_PYTHON_QUALNAME("ir")) + .attr("RankedTensorType")) + .def_classmethod( + "get", + [](const py::object &cls, std::vector shape, + unsigned width, MlirContext ctx) { + MlirAttribute encoding = mlirAttributeGetNull(); + return cls(mlirRankedTensorTypeGet( + shape.size(), shape.data(), mlirIntegerTypeGet(ctx, width), + encoding)); + }, + "cls"_a, "shape"_a, "width"_a, "context"_a = py::none()); + assert(py::hasattr(cls.get_class(), "static_typeid") && + "TestIntegerRankedTensorType has no static_typeid"); + MlirTypeID mlirTypeID = + py::getattr(cls.get_class(), "static_typeid").cast(); + py::module::import(MAKE_MLIR_PYTHON_QUALNAME("ir")) + .attr("TypeCaster") + .attr("insert")(mlirTypeID, + pybind11::cpp_function([cls](const py::object &mlirType) { + return cls.get_class()(mlirType); + }), + "TestIntegerRankedTensorType", /*replace*/ true); mlir_value_subclass(m, "TestTensorValue", mlirTypeIsAPythonTestTestTensorValue) .def("is_null", [](MlirValue &self) { return mlirValueIsNull(self); });
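
A short usage sketch of the TypeCaster static methods bound in IRCore.cpp above, tying the pieces together. This assumes bindings built from this patch and an active Context on the stack; the commented outputs are what the registrations in this patch are expected to produce.

from mlir.ir import Context, F32Type, TypeCaster

with Context():
    f32 = F32Type.get()
    tid = f32.typeid
    # In-tree concrete types register their casters when the bindings load,
    # so a lookup for f32's MlirTypeID should succeed in this context.
    print(TypeCaster.contains(tid))  # expected: True
    caster = TypeCaster.get(tid)
    print(repr(caster(f32)))         # expected: F32Type(f32)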