diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td
@@ -19,6 +19,7 @@
   let hasRegionArgAttrVerify = 1;
   let hasRegionResultAttrVerify = 1;
   let hasOperationAttrVerify = 1;
+  let usePropertiesForAttributes = 1;
 
   let extraClassDeclaration = [{
     /// Name of the data layout attributes.
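Note on the change above: with usePropertiesForAttributes = 1, ops in the LLVM dialect store their inherent attributes as properties, and the generic printer emits them in a <{...}> block between the operand list and the regions instead of in the trailing attribute dictionary. The test updates below are the fallout of that printing change. A minimal sketch of the before/after printed form, using placeholder SSA names %acc and %v:

  Before: %r = "llvm.intr.vector.reduce.fadd"(%acc, %v) {reassoc = false} : (f32, vector<8xf32>) -> f32
  After:  %r = "llvm.intr.vector.reduce.fadd"(%acc, %v) <{reassoc = false}> : (f32, vector<8xf32>) -> f32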
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
--- a/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
@@ -5,14 +5,14 @@
 // CHECK-SAME: %[[A:.*]]: vector<16xf32>)
 //      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (f32, vector<16xf32>) -> f32
+// CHECK-SAME: <{reassoc = false}> : (f32, vector<16xf32>) -> f32
 //      CHECK: return %[[V]] : f32
 //
 // REASSOC-LABEL: @reduce_add_f32(
 // REASSOC-SAME: %[[A:.*]]: vector<16xf32>)
 //      REASSOC: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32
 //      REASSOC: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// REASSOC-SAME: {reassoc = true} : (f32, vector<16xf32>) -> f32
+// REASSOC-SAME: <{reassoc = true}> : (f32, vector<16xf32>) -> f32
 //      REASSOC: return %[[V]] : f32
 //
 func.func @reduce_add_f32(%arg0: vector<16xf32>) -> f32 {
@@ -26,14 +26,14 @@
 // CHECK-SAME: %[[A:.*]]: vector<16xf32>)
 //      CHECK: %[[C:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fmul"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (f32, vector<16xf32>) -> f32
+// CHECK-SAME: <{reassoc = false}> : (f32, vector<16xf32>) -> f32
 //      CHECK: return %[[V]] : f32
 //
 // REASSOC-LABEL: @reduce_mul_f32(
 // REASSOC-SAME: %[[A:.*]]: vector<16xf32>)
 //      REASSOC: %[[C:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
 //      REASSOC: %[[V:.*]] = "llvm.intr.vector.reduce.fmul"(%[[C]], %[[A]])
-// REASSOC-SAME: {reassoc = true} : (f32, vector<16xf32>) -> f32
+// REASSOC-SAME: <{reassoc = true}> : (f32, vector<16xf32>) -> f32
 //      REASSOC: return %[[V]] : f32
 //
 func.func @reduce_mul_f32(%arg0: vector<16xf32>) -> f32 {
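Note: the updates in this file and in vector-to-llvm.mlir below are mechanical. The vector.reduction lowering itself is unchanged; only the generic-form rendering of the reassoc flag moves from the attribute dictionary into the <{...}> properties block, so each FileCheck line is adjusted accordingly.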
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -1267,7 +1267,7 @@
 //      CHECK: %[[CA:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<f32> to vector<1xf32>
 //      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[CA]])
-// CHECK-SAME: {reassoc = false} : (f32, vector<1xf32>) -> f32
+// CHECK-SAME: <{reassoc = false}> : (f32, vector<1xf32>) -> f32
 //      CHECK: return %[[V]] : f32
 
 // -----
@@ -1280,7 +1280,7 @@
 // CHECK-SAME: %[[A:.*]]: vector<16xf16>)
 //      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f16) : f16
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (f16, vector<16xf16>) -> f16
+// CHECK-SAME: <{reassoc = false}> : (f16, vector<16xf16>) -> f16
 //      CHECK: return %[[V]] : f16
 
 // -----
@@ -1293,7 +1293,7 @@
 // CHECK-SAME: %[[A:.*]]: vector<16xf32>)
 //      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (f32, vector<16xf32>) -> f32
+// CHECK-SAME: <{reassoc = false}> : (f32, vector<16xf32>) -> f32
 //      CHECK: return %[[V]] : f32
 
 // -----
@@ -1306,7 +1306,7 @@
 // CHECK-SAME: %[[A:.*]]: vector<16xf64>)
 //      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f64) : f64
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (f64, vector<16xf64>) -> f64
+// CHECK-SAME: <{reassoc = false}> : (f64, vector<16xf64>) -> f64
 //      CHECK: return %[[V]] : f64
 
 // -----
diff --git a/mlir/test/Dialect/LLVMIR/func.mlir b/mlir/test/Dialect/LLVMIR/func.mlir
--- a/mlir/test/Dialect/LLVMIR/func.mlir
+++ b/mlir/test/Dialect/LLVMIR/func.mlir
@@ -5,33 +5,33 @@
 
 module {
   // GENERIC: "llvm.func"
-  // GENERIC: function_type = !llvm.func<void ()>
+  // GENERIC-SAME: function_type = !llvm.func<void ()>
   // GENERIC-SAME: sym_name = "foo"
-  // GENERIC-SAME: () -> ()
+  // GENERIC: () -> ()
   // CHECK: llvm.func @foo()
   "llvm.func" () ({
   }) {sym_name = "foo", function_type = !llvm.func<void ()>} : () -> ()
 
   // GENERIC: "llvm.func"
-  // GENERIC: function_type = !llvm.func<i64 (i64, i64)>
+  // GENERIC-SAME: function_type = !llvm.func<i64 (i64, i64)>
   // GENERIC-SAME: sym_name = "bar"
-  // GENERIC-SAME: () -> ()
+  // GENERIC: () -> ()
   // CHECK: llvm.func @bar(i64, i64) -> i64
   "llvm.func"() ({
   }) {sym_name = "bar", function_type = !llvm.func<i64 (i64, i64)>} : () -> ()
 
   // GENERIC: "llvm.func"
+  // GENERIC-SAME: function_type = !llvm.func<i64 (i64)>
+  // GENERIC-SAME: sym_name = "baz"
   // CHECK: llvm.func @baz(%{{.*}}: i64) -> i64
-  "llvm.func"() ({
+  "llvm.func"() <{sym_name = "baz", function_type = !llvm.func<i64 (i64)>}> ({
   // GENERIC: ^bb0
   ^bb0(%arg0: i64):
     // GENERIC: llvm.return
     llvm.return %arg0 : i64
 
-  // GENERIC: function_type = !llvm.func<i64 (i64)>
-  // GENERIC-SAME: sym_name = "baz"
-  // GENERIC-SAME: () -> ()
-  }) {sym_name = "baz", function_type = !llvm.func<i64 (i64)>} : () -> ()
+  // GENERIC: () -> ()
+  }) : () -> ()
 
   // CHECK: llvm.func @qux(!llvm.ptr {llvm.noalias}, i64)
   // CHECK: attributes {xxx = {yyy = 42 : i64}}
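Note on the func.mlir changes: in the generic form with properties, the layout is "op-name"(operands) <{properties}> (regions) {discardable-attrs} : functional-type. Properties therefore print on the op line before the region, which is why the function_type and sym_name checks for @baz move above the body and become GENERIC-SAME, while the trailing () -> () functional type now follows the region's closing brace on its own check line. For @baz the round-tripped output looks like:

  "llvm.func"() <{function_type = !llvm.func<i64 (i64)>, sym_name = "baz"}> ({
  ^bb0(%arg0: i64):
    llvm.return %arg0 : i64
  }) : () -> ()

The @foo and @bar inputs are deliberately left in the old trailing-dictionary form: the generic parser still accepts inherent attributes there and promotes them into properties, which the updated GENERIC-SAME checks confirm.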
diff --git a/mlir/test/Dialect/LLVMIR/global.mlir b/mlir/test/Dialect/LLVMIR/global.mlir
--- a/mlir/test/Dialect/LLVMIR/global.mlir
+++ b/mlir/test/Dialect/LLVMIR/global.mlir
@@ -100,7 +100,7 @@
 // -----
 
 // expected-error @+1 {{op requires attribute 'sym_name'}}
-"llvm.mlir.global"() ({}) {linkage = "private", type = i64, constant, global_type = i64, value = 42 : i64} : () -> ()
+"llvm.mlir.global"() ({}) {linkage = #llvm.linkage<private>, type = i64, constant, global_type = i64, value = 42 : i64} : () -> ()
 
 // -----
 
diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir
--- a/mlir/test/Dialect/Tosa/invalid.mlir
+++ b/mlir/test/Dialect/Tosa/invalid.mlir
@@ -138,6 +138,7 @@
 // -----
 
 func.func @test_reshape_type_mismatch(%arg0 : tensor<13x21x3xf32>) -> () {
+  // expected-error@+2 {{failed to infer returned types}}
   // expected-error@+1 {{'tosa.reshape' op inferred type(s) 'tensor<13x21x3x1xf32>' are incompatible with return type(s) of operation 'tensor<13x21x3x1xi32>'}}
   %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 13, 21, 3, 1>} : (tensor<13x21x3xf32>) -> tensor<13x21x3x1xi32>
   return
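Note on the Tosa change: both expected-error directives resolve to the same tosa.reshape line (@+2 and @+1 respectively). Type-inference verification now reports a generic "failed to infer returned types" error in addition to the detailed incompatibility note, so the test must expect both diagnostics on that op.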
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
--- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
@@ -273,13 +273,13 @@
   %12 = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %2)
   ; CHECK: "llvm.intr.vector.reduce.umin"(%{{.*}}) : (vector<8xi32>) -> i32
   %13 = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %2)
-  ; CHECK: "llvm.intr.vector.reduce.fadd"(%{{.*}}, %{{.*}}) {reassoc = false} : (f32, vector<8xf32>) -> f32
+  ; CHECK: "llvm.intr.vector.reduce.fadd"(%{{.*}}, %{{.*}}) <{reassoc = false}> : (f32, vector<8xf32>) -> f32
   %14 = call float @llvm.vector.reduce.fadd.v8f32(float %0, <8 x float> %1)
-  ; CHECK: "llvm.intr.vector.reduce.fmul"(%{{.*}}, %{{.*}}) {reassoc = false} : (f32, vector<8xf32>) -> f32
+  ; CHECK: "llvm.intr.vector.reduce.fmul"(%{{.*}}, %{{.*}}) <{reassoc = false}> : (f32, vector<8xf32>) -> f32
   %15 = call float @llvm.vector.reduce.fmul.v8f32(float %0, <8 x float> %1)
-  ; CHECK: "llvm.intr.vector.reduce.fadd"(%{{.*}}, %{{.*}}) {reassoc = true} : (f32, vector<8xf32>) -> f32
+  ; CHECK: "llvm.intr.vector.reduce.fadd"(%{{.*}}, %{{.*}}) <{reassoc = true}> : (f32, vector<8xf32>) -> f32
   %16 = call reassoc float @llvm.vector.reduce.fadd.v8f32(float %0, <8 x float> %1)
-  ; CHECK: "llvm.intr.vector.reduce.fmul"(%{{.*}}, %{{.*}}) {reassoc = true} : (f32, vector<8xf32>) -> f32
+  ; CHECK: "llvm.intr.vector.reduce.fmul"(%{{.*}}, %{{.*}}) <{reassoc = true}> : (f32, vector<8xf32>) -> f32
   %17 = call reassoc float @llvm.vector.reduce.fmul.v8f32(float %0, <8 x float> %1)
   ; CHECK:  "llvm.intr.vector.reduce.xor"(%{{.*}}) : (vector<8xi32>) -> i32
   %18 = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %2)