diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td --- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td @@ -835,20 +835,35 @@ }]; } -def LLVM_ShuffleVectorOp : LLVM_Op<"shufflevector", [NoSideEffect]> { - let arguments = (ins LLVM_AnyVector:$v1, LLVM_AnyVector:$v2, ArrayAttr:$mask); +//===----------------------------------------------------------------------===// +// ShuffleVectorOp +//===----------------------------------------------------------------------===// + +def LLVM_ShuffleVectorOp : LLVM_Op<"shufflevector", + [NoSideEffect, AllTypesMatch<["v1", "v2"]>]> { + let summary = "Construct a permutation of two vectors."; + + let arguments = (ins LLVM_AnyVector:$v1, LLVM_AnyVector:$v2, + DenseI64ArrayAttr:$mask); let results = (outs LLVM_AnyVector:$res); - string llvmBuilder = [{ - SmallVector<int64_t> position = - LLVM::convertArrayToIndices($mask); - SmallVector<int> mask(position.begin(), position.end()); - $res = builder.CreateShuffleVector($v1, $v2, mask); - }]; + let builders = [ - OpBuilder<(ins "Value":$v1, "Value":$v2, "ArrayAttr":$mask, - CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>]; - let hasCustomAssemblyFormat = 1; + OpBuilder<(ins "Value":$v1, "Value":$v2, "DenseI64ArrayAttr":$mask, + CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>, + OpBuilder<(ins "Value":$v1, "Value":$v2, "ArrayRef<int64_t>":$mask)> + ]; + + let assemblyFormat = [{ + $v1 `,` $v2 $mask attr-dict `:` type($v1) + custom<ShuffleType>(ref(type($v1)), type($res), ref($mask)) + }]; + let hasVerifier = 1; + + string llvmBuilder = [{ + $res = builder.CreateShuffleVector( + $v1, $v2, SmallVector<int>($mask.begin(), $mask.end())); + }]; } // Misc operations. 
diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp --- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp +++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp @@ -1380,8 +1380,8 @@ int vector1Size = vector1.getType().cast<VectorType>().getNumElements(); int vector2Size = vector2.getType().cast<VectorType>().getNumElements(); if (vector1Size == vector2Size) { - rewriter.replaceOpWithNewOp<LLVM::ShuffleVectorOp>(op, vector1, vector2, - components); + rewriter.replaceOpWithNewOp<LLVM::ShuffleVectorOp>( + op, vector1, vector2, LLVM::convertArrayToIndices(components)); return success(); } diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp --- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp +++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp @@ -573,7 +573,8 @@ // there is direct shuffle support in LLVM. Use it! if (rank == 1 && v1Type == v2Type) { Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>( - loc, adaptor.getV1(), adaptor.getV2(), maskArrayAttr); + loc, adaptor.getV1(), adaptor.getV2(), + LLVM::convertArrayToIndices(maskArrayAttr)); rewriter.replaceOp(shuffleOp, llvmShuffleOp); return success(); } @@ -1210,12 +1211,11 @@ splatOp.getLoc(), vectorType, undef, adaptor.getInput(), zero); int64_t width = splatOp.getType().cast<VectorType>().getDimSize(0); - SmallVector<int32_t> zeroValues(width, 0); + SmallVector<int64_t> zeroValues(width, 0); // Shuffle the value across the desired number of elements. - ArrayAttr zeroAttrs = rewriter.getI32ArrayAttr(zeroValues); rewriter.replaceOpWithNewOp<LLVM::ShuffleVectorOp>(splatOp, v, undef, - zeroAttrs); + zeroValues); return success(); } }; @@ -1256,9 +1256,8 @@ // Shuffle the value across the desired number of elements. 
int64_t width = resultType.getDimSize(resultType.getRank() - 1); - SmallVector<int32_t> zeroValues(width, 0); - ArrayAttr zeroAttrs = rewriter.getI32ArrayAttr(zeroValues); - v = rewriter.create<LLVM::ShuffleVectorOp>(loc, v, v, zeroAttrs); + SmallVector<int64_t> zeroValues(width, 0); + v = rewriter.create<LLVM::ShuffleVectorOp>(loc, v, v, zeroValues); // Iterate of linear index, convert to coords space and insert splatted 1-D // vector in each position. diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -2014,65 +2014,44 @@ } //===----------------------------------------------------------------------===// -// Printing/parsing for LLVM::ShuffleVectorOp. +// ShuffleVectorOp //===----------------------------------------------------------------------===// -// Expects vector to be of wrapped LLVM vector type and position to be of -// wrapped LLVM i32 type. -void LLVM::ShuffleVectorOp::build(OpBuilder &b, OperationState &result, - Value v1, Value v2, ArrayAttr mask, - ArrayRef<NamedAttribute> attrs) { + +void ShuffleVectorOp::build(OpBuilder &builder, OperationState &state, Value v1, + Value v2, DenseI64ArrayAttr mask, + ArrayRef<NamedAttribute> attrs) { auto containerType = v1.getType(); auto vType = LLVM::getVectorType(LLVM::getVectorElementType(containerType), mask.size(), LLVM::isScalableVectorType(containerType)); - build(b, result, vType, v1, v2, mask); - result.addAttributes(attrs); + build(builder, state, vType, v1, v2, mask); + state.addAttributes(attrs); } -void ShuffleVectorOp::print(OpAsmPrinter &p) { - p << ' ' << getV1() << ", " << getV2() << " " << getMask(); - p.printOptionalAttrDict((*this)->getAttrs(), {"mask"}); - p << " : " << getV1().getType() << ", " << getV2().getType(); +void ShuffleVectorOp::build(OpBuilder &builder, OperationState &state, Value v1, + Value v2, ArrayRef<int64_t> mask) { + build(builder, state, v1, v2, builder.getDenseI64ArrayAttr(mask)); } -// ::= `llvm.shufflevector` ssa-use `, ` 
ssa-use -// `[` integer-literal (`,` integer-literal)* `]` -// attribute-dict? `:` type -ParseResult ShuffleVectorOp::parse(OpAsmParser &parser, - OperationState &result) { - SMLoc loc; - OpAsmParser::UnresolvedOperand v1, v2; - ArrayAttr maskAttr; - Type typeV1, typeV2; - if (parser.getCurrentLocation(&loc) || parser.parseOperand(v1) || - parser.parseComma() || parser.parseOperand(v2) || - parser.parseAttribute(maskAttr, "mask", result.attributes) || - parser.parseOptionalAttrDict(result.attributes) || - parser.parseColonType(typeV1) || parser.parseComma() || - parser.parseType(typeV2) || - parser.resolveOperand(v1, typeV1, result.operands) || - parser.resolveOperand(v2, typeV2, result.operands)) - return failure(); - if (!LLVM::isCompatibleVectorType(typeV1)) - return parser.emitError( - loc, "expected LLVM IR dialect vector type for operand #1"); - auto vType = - LLVM::getVectorType(LLVM::getVectorElementType(typeV1), maskAttr.size(), - LLVM::isScalableVectorType(typeV1)); - result.addTypes(vType); +/// Build the result type of a shuffle vector operation. +static ParseResult parseShuffleType(AsmParser &parser, Type v1Type, + Type &resType, DenseI64ArrayAttr mask) { + if (!LLVM::isCompatibleVectorType(v1Type)) + return parser.emitError(parser.getCurrentLocation(), + "expected an LLVM compatible vector type"); + resType = LLVM::getVectorType(LLVM::getVectorElementType(v1Type), mask.size(), + LLVM::isScalableVectorType(v1Type)); return success(); } +/// Nothing to do when the result type is inferred. 
+static void printShuffleType(AsmPrinter &printer, Operation *op, Type v1Type, + Type resType, DenseI64ArrayAttr mask) {} + LogicalResult ShuffleVectorOp::verify() { - Type type1 = getV1().getType(); - Type type2 = getV2().getType(); - if (LLVM::getVectorElementType(type1) != LLVM::getVectorElementType(type2)) - return emitOpError("expected matching LLVM IR Dialect element types"); - if (LLVM::isScalableVectorType(type1)) - if (llvm::any_of(getMask(), [](Attribute attr) { - return attr.cast<IntegerAttr>().getInt() != 0; - })) - return emitOpError("expected a splat operation for scalable vectors"); + if (LLVM::isScalableVectorType(getV1().getType()) && + llvm::any_of(getMask(), [](int64_t v) { return v != 0; })) + return emitOpError("expected a splat operation for scalable vectors"); return success(); } diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp --- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp +++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp @@ -1128,8 +1128,7 @@ if (!vec2) return failure(); - ArrayAttr mask = b.getI32ArrayAttr(svInst->getShuffleMask()); - + SmallVector<int64_t> mask(svInst->getShuffleMask()); instMap[inst] = b.create<ShuffleVectorOp>(loc, vec1, vec2, mask); return success(); } diff --git a/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir @@ -63,7 +63,7 @@ //===----------------------------------------------------------------------===// spv.func @vector_shuffle_same_size(%vector1: vector<2xf32>, %vector2: vector<2xf32>) -> vector<3xf32> "None" { - // CHECK: %[[res:.*]] = llvm.shufflevector {{.*}} [0 : i32, 2 : i32, -1 : i32] : vector<2xf32>, vector<2xf32> + // CHECK: %[[res:.*]] = llvm.shufflevector {{.*}} [0, 2, -1] : vector<2xf32> // CHECK-NEXT: return %[[res]] : vector<3xf32> %0 = spv.VectorShuffle [0: i32, 2: i32, 0xffffffff: i32] 
%vector1: vector<2xf32>, %vector2: vector<2xf32> -> vector<3xf32> spv.ReturnValue %0: vector<3xf32> diff --git a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir --- a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir +++ b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir @@ -6,7 +6,7 @@ // CMP32: %[[T0:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]> : vector<11xi32> // CMP32: %[[T1:.*]] = arith.index_cast %[[ARG]] : index to i32 // CMP32: %[[T2:.*]] = llvm.insertelement %[[T1]], %{{.*}}[%{{.*}} : i32] : vector<11xi32> -// CMP32: %[[T3:.*]] = llvm.shufflevector %[[T2]], %{{.*}} [0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32] : vector<11xi32>, vector<11xi32> +// CMP32: %[[T3:.*]] = llvm.shufflevector %[[T2]], %{{.*}} [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<11xi32> // CMP32: %[[T4:.*]] = arith.cmpi slt, %[[T0]], %[[T3]] : vector<11xi32> // CMP32: return %[[T4]] : vector<11xi1> @@ -15,7 +15,7 @@ // CMP64: %[[T0:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]> : vector<11xi64> // CMP64: %[[T1:.*]] = arith.index_cast %[[ARG]] : index to i64 // CMP64: %[[T2:.*]] = llvm.insertelement %[[T1]], %{{.*}}[%{{.*}} : i32] : vector<11xi64> -// CMP64: %[[T3:.*]] = llvm.shufflevector %[[T2]], %{{.*}} [0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32] : vector<11xi64>, vector<11xi64> +// CMP64: %[[T3:.*]] = llvm.shufflevector %[[T2]], %{{.*}} [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<11xi64> // CMP64: %[[T4:.*]] = arith.cmpi slt, %[[T0]], %[[T3]] : vector<11xi64> // CMP64: return %[[T4]] : vector<11xi1> @@ -29,7 +29,7 @@ // CMP32: %[[T0:.*]] = llvm.intr.experimental.stepvector : vector<[11]xi32> // CMP32: %[[T1:.*]] = arith.index_cast %[[ARG]] : index to i32 // CMP32: %[[T2:.*]] = llvm.insertelement %[[T1]], %{{.*}}[%{{.*}} : i32] : vector<[11]xi32> -// CMP32: 
%[[T3:.*]] = llvm.shufflevector %[[T2]], %{{.*}} [0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32] : vector<[11]xi32>, vector<[11]xi32> +// CMP32: %[[T3:.*]] = llvm.shufflevector %[[T2]], %{{.*}} [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<[11]xi32> // CMP32: %[[T4:.*]] = arith.cmpi slt, %[[T0]], %[[T3]] : vector<[11]xi32> // CMP32: return %[[T4]] : vector<[11]xi1> @@ -38,7 +38,7 @@ // CMP64: %[[T0:.*]] = llvm.intr.experimental.stepvector : vector<[11]xi64> // CMP64: %[[T1:.*]] = arith.index_cast %[[ARG]] : index to i64 // CMP64: %[[T2:.*]] = llvm.insertelement %[[T1]], %{{.*}}[%{{.*}} : i32] : vector<[11]xi64> -// CMP64: %[[T3:.*]] = llvm.shufflevector %[[T2]], %{{.*}} [0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32] : vector<[11]xi64>, vector<[11]xi64> +// CMP64: %[[T3:.*]] = llvm.shufflevector %[[T2]], %{{.*}} [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<[11]xi64> // CMP64: %[[T4:.*]] = arith.cmpi slt, %[[T0]], %[[T3]] : vector<[11]xi64> // CMP64: return %[[T4]] : vector<[11]xi1> @@ -54,7 +54,7 @@ // CMP32: %[[C:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]> : vector<16xi32> // CMP32: %[[B:.*]] = arith.index_cast %[[S]] : index to i32 // CMP32: %[[B0:.*]] = llvm.insertelement %[[B]], %{{.*}} : vector<16xi32> -// CMP32: %[[BV:.*]] = llvm.shufflevector %[[B0]], {{.*}} : vector<16xi32>, vector<16xi32> +// CMP32: %[[BV:.*]] = llvm.shufflevector %[[B0]], {{.*}} : vector<16xi32> // CMP32: %[[M:.*]] = arith.cmpi slt, %[[C]], %[[BV]] : vector<16xi32> // CMP32: %[[L:.*]] = llvm.intr.masked.load %{{.*}}, %[[M]], %{{.*}} // CMP32: return %[[L]] : vector<16xf32> @@ -66,7 +66,7 @@ // CMP64: %[[C:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]> : vector<16xi64> // CMP64: %[[B:.*]] = arith.index_cast %[[S]] : index to i64 // CMP64: %[[B0:.*]] = llvm.insertelement %[[B]], %{{.*}} : vector<16xi64> -// CMP64: 
%[[BV:.*]] = llvm.shufflevector %[[B0]], {{.*}} : vector<16xi64>, vector<16xi64> +// CMP64: %[[BV:.*]] = llvm.shufflevector %[[B0]], {{.*}} : vector<16xi64> // CMP64: %[[M:.*]] = arith.cmpi slt, %[[C]], %[[BV]] : vector<16xi64> // CMP64: %[[L:.*]] = llvm.intr.masked.load %{{.*}}, %[[M]], %{{.*}} // CMP64: return %[[L]] : vector<16xf32> diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir --- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir +++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir @@ -105,8 +105,8 @@ // CHECK-SAME: %[[A:.*]]: f32) // CHECK: %[[T0:.*]] = llvm.insertelement %[[A]] // CHECK: %[[T1:.*]] = llvm.shufflevector %[[T0]] -// CHECK: %[[T2:.*]] = llvm.insertvalue %[[T1]], %{{.*}}[0] : !llvm.array<2 x vector<3xf32>> -// CHECK: %[[T3:.*]] = llvm.insertvalue %[[T1]], %{{.*}}[1] : !llvm.array<2 x vector<3xf32>> +// CHECK: %[[T2:.*]] = llvm.insertvalue %[[T1]], %{{.*}}[0] : !llvm.array<2 x vector<3xf32>> +// CHECK: %[[T3:.*]] = llvm.insertvalue %[[T1]], %{{.*}}[1] : !llvm.array<2 x vector<3xf32>> // CHECK: %[[T4:.*]] = builtin.unrealized_conversion_cast %[[T3]] : !llvm.array<2 x vector<3xf32>> to vector<2x3xf32> // CHECK: return %[[T4]] : vector<2x3xf32> @@ -964,7 +964,7 @@ } // CHECK-LABEL: @extract_strided_slice1( // CHECK-SAME: %[[A:.*]]: vector<4xf32>) -// CHECK: %[[T0:.*]] = llvm.shufflevector %[[A]], %[[A]] [2, 3] : vector<4xf32>, vector<4xf32> +// CHECK: %[[T0:.*]] = llvm.shufflevector %[[A]], %[[A]] [2, 3] : vector<4xf32> // CHECK: return %[[T0]] : vector<2xf32> // ----- @@ -976,7 +976,7 @@ // CHECK-LABEL: @extract_strided_index_slice1( // CHECK-SAME: %[[A:.*]]: vector<4xindex>) // CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<4xindex> to vector<4xi64> -// CHECK: %[[T2:.*]] = llvm.shufflevector %[[T0]], %[[T0]] [2, 3] : vector<4xi64>, vector<4xi64> +// CHECK: %[[T2:.*]] = llvm.shufflevector %[[T0]], %[[T0]] [2, 3] : vector<4xi64> // CHECK: 
%[[T3:.*]] = builtin.unrealized_conversion_cast %[[T2]] : vector<2xi64> to vector<2xindex> // CHECK: return %[[T3]] : vector<2xindex> @@ -1009,10 +1009,10 @@ // CHECK: %[[VAL_2:.*]] = arith.constant dense<0.000000e+00> : vector<2x2xf32> // CHECK: %[[VAL_6:.*]] = builtin.unrealized_conversion_cast %[[VAL_2]] : vector<2x2xf32> to !llvm.array<2 x vector<2xf32>> // CHECK: %[[T2:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vector<8xf32>> -// CHECK: %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T2]] [2, 3] : vector<8xf32>, vector<8xf32> +// CHECK: %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T2]] [2, 3] : vector<8xf32> // CHECK: %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[VAL_6]][0] : !llvm.array<2 x vector<2xf32>> // CHECK: %[[T5:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vector<8xf32>> -// CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T5]] [2, 3] : vector<8xf32>, vector<8xf32> +// CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T5]] [2, 3] : vector<8xf32> // CHECK: %[[T7:.*]] = llvm.insertvalue %[[T6]], %[[T4]][1] : !llvm.array<2 x vector<2xf32>> // CHECK: %[[VAL_12:.*]] = builtin.unrealized_conversion_cast %[[T7]] : !llvm.array<2 x vector<2xf32>> to vector<2x2xf32> // CHECK: return %[[VAL_12]] : vector<2x2xf32> @@ -1050,16 +1050,16 @@ // CHECK: %[[V2_0:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.array<2 x vector<2xf32>> // CHECK: %[[V4_0:.*]] = llvm.extractvalue {{.*}}[2] : !llvm.array<4 x vector<4xf32>> // Element @0 -> element @2 -// CHECK: %[[R4_0:.*]] = llvm.shufflevector %[[V2_0]], %[[V2_0]] [0, 1, 0, 0] : vector<2xf32>, vector<2xf32> -// CHECK: %[[R4_1:.*]] = llvm.shufflevector %[[R4_0]], %[[V4_0]] [4, 5, 0, 1] : vector<4xf32>, vector<4xf32> +// CHECK: %[[R4_0:.*]] = llvm.shufflevector %[[V2_0]], %[[V2_0]] [0, 1, 0, 0] : vector<2xf32> +// CHECK: %[[R4_1:.*]] = llvm.shufflevector %[[R4_0]], %[[V4_0]] [4, 5, 0, 1] : vector<4xf32> // CHECK: llvm.insertvalue %[[R4_1]], {{.*}}[2] : !llvm.array<4 x vector<4xf32>> // // Subvector vector<2xf32> @1 
into vector<4xf32> @3 // CHECK: %[[V2_1:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.array<2 x vector<2xf32>> // CHECK: %[[V4_3:.*]] = llvm.extractvalue {{.*}}[3] : !llvm.array<4 x vector<4xf32>> // Element @0 -> element @2 -// CHECK: %[[R4_2:.*]] = llvm.shufflevector %[[V2_1]], %[[V2_1]] [0, 1, 0, 0] : vector<2xf32>, vector<2xf32> -// CHECK: %[[R4_3:.*]] = llvm.shufflevector %[[R4_2]], %[[V4_3]] [4, 5, 0, 1] : vector<4xf32>, vector<4xf32> +// CHECK: %[[R4_2:.*]] = llvm.shufflevector %[[V2_1]], %[[V2_1]] [0, 1, 0, 0] : vector<2xf32> +// CHECK: %[[R4_3:.*]] = llvm.shufflevector %[[R4_2]], %[[V4_3]] [4, 5, 0, 1] : vector<4xf32> // CHECK: llvm.insertvalue %[[R4_3]], {{.*}}[3] : !llvm.array<4 x vector<4xf32>> // ----- @@ -1072,14 +1072,14 @@ // CHECK-LABEL: func @insert_strided_slice3 // CHECK: %[[V4_0:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.array<2 x vector<4xf32>> // CHECK: %[[V4_0_0:.*]] = llvm.extractvalue {{.*}}[0, 0] : !llvm.array<16 x array<4 x vector<8xf32>>> -// CHECK: %[[R8_0:.*]] = llvm.shufflevector %[[V4_0]], %[[V4_0]] [0, 1, 2, 3, 0, 0, 0, 0] : vector<4xf32>, vector<4xf32> -// CHECK: %[[R8_1:.*]] = llvm.shufflevector %[[R8_0:.*]], %[[V4_0_0]] [8, 9, 0, 1, 2, 3, 14, 15] : vector<8xf32>, vector<8xf32> +// CHECK: %[[R8_0:.*]] = llvm.shufflevector %[[V4_0]], %[[V4_0]] [0, 1, 2, 3, 0, 0, 0, 0] : vector<4xf32> +// CHECK: %[[R8_1:.*]] = llvm.shufflevector %[[R8_0:.*]], %[[V4_0_0]] [8, 9, 0, 1, 2, 3, 14, 15] : vector<8xf32> // CHECK: llvm.insertvalue %[[R8_1]], {{.*}}[0] : !llvm.array<4 x vector<8xf32>> // CHECK: %[[V4_1:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.array<2 x vector<4xf32>> // CHECK: %[[V4_0_1:.*]] = llvm.extractvalue {{.*}}[0, 1] : !llvm.array<16 x array<4 x vector<8xf32>>> -// CHECK: %[[R8_2:.*]] = llvm.shufflevector %[[V4_1]], %[[V4_1]] [0, 1, 2, 3, 0, 0, 0, 0] : vector<4xf32>, vector<4xf32> -// CHECK: %[[R8_3:.*]] = llvm.shufflevector %[[R8_2]], %[[V4_0_1]] [8, 9, 0, 1, 2, 3, 14, 15] : vector<8xf32>, vector<8xf32> +// CHECK: %[[R8_2:.*]] = 
llvm.shufflevector %[[V4_1]], %[[V4_1]] [0, 1, 2, 3, 0, 0, 0, 0] : vector<4xf32> +// CHECK: %[[R8_3:.*]] = llvm.shufflevector %[[R8_2]], %[[V4_0_1]] [8, 9, 0, 1, 2, 3, 14, 15] : vector<8xf32> // CHECK: llvm.insertvalue %[[R8_3]], {{.*}}[1] : !llvm.array<4 x vector<8xf32>> // ----- @@ -1752,7 +1752,7 @@ // CHECK: %[[indices:.*]] = llvm.intr.experimental.stepvector : vector<[4]xi32> // CHECK: %[[arg_i32:.*]] = arith.index_cast %[[arg]] : index to i32 // CHECK: %[[boundsInsert:.*]] = llvm.insertelement %[[arg_i32]], {{.*}} : vector<[4]xi32> -// CHECK: %[[bounds:.*]] = llvm.shufflevector %[[boundsInsert]], {{.*}} : vector<[4]xi32>, vector<[4]xi32> +// CHECK: %[[bounds:.*]] = llvm.shufflevector %[[boundsInsert]], {{.*}} : vector<[4]xi32> // CHECK: %[[result:.*]] = arith.cmpi slt, %[[indices]], %[[bounds]] : vector<[4]xi32> // CHECK: return %[[result]] : vector<[4]xi1> @@ -2043,6 +2043,6 @@ // CHECK-NEXT: %[[UNDEF:[0-9]+]] = llvm.mlir.undef : vector<4xf32> // CHECK-NEXT: %[[ZERO:[0-9]+]] = llvm.mlir.constant(0 : i32) : i32 // CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[UNDEF]][%[[ZERO]] : i32] : vector<4xf32> -// CHECK-NEXT: %[[SPLAT:[0-9]+]] = llvm.shufflevector %[[V]], %[[UNDEF]] [0 : i32, 0 : i32, 0 : i32, 0 : i32] +// CHECK-NEXT: %[[SPLAT:[0-9]+]] = llvm.shufflevector %[[V]], %[[UNDEF]] [0, 0, 0, 0] // CHECK-NEXT: %[[SCALE:[0-9]+]] = arith.mulf %[[A]], %[[SPLAT]] : vector<4xf32> // CHECK-NEXT: return %[[SCALE]] : vector<4xf32> diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir --- a/mlir/test/Dialect/LLVMIR/invalid.mlir +++ b/mlir/test/Dialect/LLVMIR/invalid.mlir @@ -472,8 +472,8 @@ // ----- func.func @invalid_vector_type_3(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32) { - // expected-error@+1 {{expected LLVM IR dialect vector type for operand #1}} - %0 = llvm.shufflevector %arg2, %arg2 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : f32, f32 + // expected-error@+2 {{expected an LLVM compatible vector type}} + 
%0 = llvm.shufflevector %arg2, %arg2 [0, 0, 0, 0, 7] : f32 } // ----- @@ -1298,7 +1298,7 @@ func.func @non_splat_shuffle_on_scalable_vector(%arg0: vector<[4]xf32>) { // expected-error@below {{expected a splat operation for scalable vectors}} - %0 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 1 : i32] : vector<[4]xf32>, vector<[4]xf32> + %0 = llvm.shufflevector %arg0, %arg0 [0, 0, 0, 1] : vector<[4]xf32> return } diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir --- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir +++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir @@ -283,10 +283,10 @@ %0 = llvm.extractelement %arg0[%arg1 : i32] : vector<4xf32> // CHECK: = llvm.insertelement {{.*}} : vector<4xf32> %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : vector<4xf32> -// CHECK: = llvm.shufflevector {{.*}} [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : vector<4xf32>, vector<4xf32> - %2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : vector<4xf32>, vector<4xf32> -// CHECK: = llvm.shufflevector %{{.+}}, %{{.+}} [1 : i32, 0 : i32] : !llvm.vec<2 x ptr>, !llvm.vec<2 x ptr> - %3 = llvm.shufflevector %arg3, %arg3 [1 : i32, 0 : i32] : !llvm.vec<2 x ptr>, !llvm.vec<2 x ptr> +// CHECK: = llvm.shufflevector {{.*}} [0, 0, 0, 0, 7] : vector<4xf32> + %2 = llvm.shufflevector %arg0, %arg0 [0, 0, 0, 0, 7] : vector<4xf32> +// CHECK: = llvm.shufflevector %{{.+}}, %{{.+}} [1, 0] : !llvm.vec<2 x ptr> + %3 = llvm.shufflevector %arg3, %arg3 [1, 0] : !llvm.vec<2 x ptr> // CHECK: = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : vector<4xf32> %4 = llvm.mlir.constant(dense<1.0> : vector<4xf32>) : vector<4xf32> return @@ -298,8 +298,8 @@ %0 = llvm.extractelement %arg0[%arg1 : i32] : vector<[4]xf32> // CHECK: = llvm.insertelement {{.*}} : vector<[4]xf32> %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : vector<[4]xf32> -// CHECK: = llvm.shufflevector {{.*}} [0 : i32, 0 : i32, 0 : i32, 0 : i32] : 
vector<[4]xf32>, vector<[4]xf32> - %2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32] : vector<[4]xf32>, vector<[4]xf32> +// CHECK: = llvm.shufflevector {{.*}} [0, 0, 0, 0] : vector<[4]xf32> + %2 = llvm.shufflevector %arg0, %arg0 [0, 0, 0, 0] : vector<[4]xf32> // CHECK: = llvm.mlir.constant(dense<1.000000e+00> : vector<[4]xf32>) : vector<[4]xf32> %3 = llvm.mlir.constant(dense<1.0> : vector<[4]xf32>) : vector<[4]xf32> return diff --git a/mlir/test/Integration/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir b/mlir/test/Integration/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir --- a/mlir/test/Integration/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir +++ b/mlir/test/Integration/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir @@ -15,7 +15,7 @@ %4 = llvm.mlir.undef : vector<4xf32> %5 = llvm.mlir.constant(0 : index) : i64 %6 = llvm.insertelement %0, %4[%5 : i64] : vector<4xf32> - %7 = llvm.shufflevector %6, %4 [0 : i32, 0 : i32, 0 : i32, 0 : i32] + %7 = llvm.shufflevector %6, %4 [0, 0, 0, 0] : vector<4xf32>, vector<4xf32> %8 = llvm.mlir.constant(1 : i64) : i64 %9 = llvm.insertelement %1, %7[%8 : i64] : vector<4xf32> diff --git a/mlir/test/Integration/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir b/mlir/test/Integration/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir --- a/mlir/test/Integration/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir +++ b/mlir/test/Integration/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir @@ -15,7 +15,7 @@ %4 = llvm.mlir.undef : vector<4xi64> %5 = llvm.mlir.constant(0 : index) : i64 %6 = llvm.insertelement %0, %4[%5 : i64] : vector<4xi64> - %7 = llvm.shufflevector %6, %4 [0 : i64, 0 : i64, 0 : i64, 0 : i64] + %7 = llvm.shufflevector %6, %4 [0, 0, 0, 0] : vector<4xi64>, vector<4xi64> %8 = llvm.mlir.constant(1 : i64) : i64 %9 = llvm.insertelement %1, %7[%8 : i64] : vector<4xi64> diff --git a/mlir/test/Target/LLVMIR/Import/basic.ll b/mlir/test/Target/LLVMIR/Import/basic.ll --- 
a/mlir/test/Target/LLVMIR/Import/basic.ll +++ b/mlir/test/Target/LLVMIR/Import/basic.ll @@ -588,7 +588,7 @@ %val0 = load <4 x half>, <4 x half>* %arg0 ; CHECK: %[[V1:.+]] = llvm.load %{{.+}} : !llvm.ptr<vector<4xf16>> %val1 = load <4 x half>, <4 x half>* %arg1 - ; CHECK: llvm.shufflevector %[[V0]], %[[V1]] [2 : i32, 3 : i32, -1 : i32, -1 : i32] : vector<4xf16>, vector<4xf16> + ; CHECK: llvm.shufflevector %[[V0]], %[[V1]] [2, 3, -1, -1] : vector<4xf16> %shuffle = shufflevector <4 x half> %val0, <4 x half> %val1, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef> ret <4 x half> %shuffle } diff --git a/mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll b/mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll --- a/mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll +++ b/mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll @@ -2,7 +2,7 @@ ; CHECK: llvm.func @shufflevector_crash define void @shufflevector_crash(<2 x i32*> %arg0) { - ; CHECK: llvm.shufflevector %{{.+}}, %{{.+}} [1 : i32, 0 : i32] : !llvm.vec<2 x ptr>, !llvm.vec<2 x ptr> + ; CHECK: llvm.shufflevector %{{.+}}, %{{.+}} [1, 0] : !llvm.vec<2 x ptr> %1 = shufflevector <2 x i32*> %arg0, <2 x i32*> undef, <2 x i32> <i32 1, i32 0> ret void } diff --git a/mlir/test/Target/LLVMIR/arm-neon.mlir b/mlir/test/Target/LLVMIR/arm-neon.mlir --- a/mlir/test/Target/LLVMIR/arm-neon.mlir +++ b/mlir/test/Target/LLVMIR/arm-neon.mlir @@ -5,12 +5,12 @@ // CHECK: %[[V0:.*]] = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %{{.*}}, <8 x i8> %{{.*}}) // CHECK-NEXT: %[[V00:.*]] = shufflevector <8 x i16> %3, <8 x i16> %[[V0]], <4 x i32> <i32 3, i32 4, i32 5, i32 6> %0 = arm_neon.intr.smull %arg0, %arg1 : vector<8xi8> to vector<8xi16> - %1 = llvm.shufflevector %0, %0 [3, 4, 5, 6] : vector<8xi16>, vector<8xi16> + %1 = llvm.shufflevector %0, %0 [3, 4, 5, 6] : vector<8xi16> // CHECK-NEXT: %[[V1:.*]] = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %[[V00]], <4 x i16> %[[V00]]) // CHECK-NEXT: %[[V11:.*]] = shufflevector <4 x i32> %[[V1]], <4 x i32> %[[V1]], <2 x i32> <i32 1, i32 2> 
%2 = arm_neon.intr.smull %1, %1 : vector<4xi16> to vector<4xi32> - %3 = llvm.shufflevector %2, %2 [1, 2] : vector<4xi32>, vector<4xi32> + %3 = llvm.shufflevector %2, %2 [1, 2] : vector<4xi32> // CHECK-NEXT: %[[V1:.*]] = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %[[V11]], <2 x i32> %[[V11]]) %4 = arm_neon.intr.smull %3, %3 : vector<2xi32> to vector<2xi64> diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir --- a/mlir/test/Target/LLVMIR/llvmir.mlir +++ b/mlir/test/Target/LLVMIR/llvmir.mlir @@ -1174,7 +1174,7 @@ // CHECK-NEXT: shufflevector <4 x float> {{.*}}, <4 x float> {{.*}}, <5 x i32> <i32 0, i32 0, i32 0, i32 0, i32 7> %0 = llvm.extractelement %arg0[%arg1 : i32] : vector<4xf32> %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : vector<4xf32> - %2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : vector<4xf32>, vector<4xf32> + %2 = llvm.shufflevector %arg0, %arg0 [0, 0, 0, 0, 7] : vector<4xf32> llvm.return } @@ -1194,7 +1194,7 @@ // CHECK-NEXT: shufflevector <vscale x 4 x float> %0, <vscale x 4 x float> %0, <vscale x 4 x i32> zeroinitializer %0 = llvm.extractelement %arg0[%arg1 : i32] : vector<[4]xf32> %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : vector<[4]xf32> - %2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32] : vector<[4]xf32>, vector<[4]xf32> + %2 = llvm.shufflevector %arg0, %arg0 [0, 0, 0, 0] : vector<[4]xf32> llvm.return }