diff --git a/mlir/include/mlir/Dialect/ArmSVE/ArmSVE.td b/mlir/include/mlir/Dialect/ArmSVE/ArmSVE.td
--- a/mlir/include/mlir/Dialect/ArmSVE/ArmSVE.td
+++ b/mlir/include/mlir/Dialect/ArmSVE/ArmSVE.td
@@ -47,52 +47,6 @@
                  /*list<Trait> traits=*/traits,
                  /*int numResults=*/1>;
 
-class ScalableMaskedFOp<string mnemonic, string op_description,
-                        list<Trait> traits = []> :
-  ArmSVE_Op<mnemonic, !listconcat(traits,
-               [AllTypesMatch<["src1", "src2", "res"]>,
-                TypesMatchWith<
-                  "mask has i1 element type and same shape as operands",
-                  "src1", "mask", "getI1SameShape($_self)">])> {
-  let summary = "masked " # op_description # " for scalable vectors of floats";
-  let description = [{
-    The `arm_sve.}] # mnemonic # [{` operation takes one scalable vector mask
-    and two scalable vector operands, and perform floating point }] #
-    op_description # [{ on active lanes. Inactive lanes will keep the value of
-    the first operand.}];
-  let arguments = (ins
-    ScalableVectorOf<[I1]>:$mask,
-    ScalableVectorOf<[AnyFloat]>:$src1,
-    ScalableVectorOf<[AnyFloat]>:$src2
-  );
-  let results = (outs ScalableVectorOf<[AnyFloat]>:$res);
-  let assemblyFormat =
-    "$mask `,` $src1 `,` $src2 attr-dict `:` type($mask) `,` type($res)";
-}
-
-class ScalableMaskedIOp<string mnemonic, string op_description,
-                        list<Trait> traits = []> :
-  ArmSVE_Op<mnemonic, !listconcat(traits,
-               [AllTypesMatch<["src1", "src2", "res"]>,
-                TypesMatchWith<
-                  "mask has i1 element type and same shape as operands",
-                  "src1", "mask", "getI1SameShape($_self)">])> {
-  let summary = "masked " # op_description # " for scalable vectors of integers";
-  let description = [{
-    The `arm_sve.}] # mnemonic # [{` operation takes one scalable vector mask
-    and two scalable vector operands, and perform integer }] #
-    op_description # [{ on active lanes. Inactive lanes will keep the value of
-    the first operand.}];
-  let arguments = (ins
-    ScalableVectorOf<[I1]>:$mask,
-    ScalableVectorOf<[I8, I16, I32, I64]>:$src1,
-    ScalableVectorOf<[I8, I16, I32, I64]>:$src2
-  );
-  let results = (outs ScalableVectorOf<[I8, I16, I32, I64]>:$res);
-  let assemblyFormat =
-    "$mask `,` $src1 `,` $src2 attr-dict `:` type($mask) `,` type($res)";
-}
-
 def SdotOp : ArmSVE_Op<"sdot",
                [NoSideEffect,
                 AllTypesMatch<["src1", "src2"]>,
@@ -219,30 +173,6 @@
     "$acc `,` $src1 `,` $src2 attr-dict `:` type($src1) `to` type($dst)";
 }
 
-def ScalableMaskedAddIOp : ScalableMaskedIOp<"masked.addi", "addition",
-                                             [Commutative]>;
-
-def ScalableMaskedAddFOp : ScalableMaskedFOp<"masked.addf", "addition",
-                                             [Commutative]>;
-
-def ScalableMaskedSubIOp : ScalableMaskedIOp<"masked.subi", "subtraction">;
-
-def ScalableMaskedSubFOp : ScalableMaskedFOp<"masked.subf", "subtraction">;
-
-def ScalableMaskedMulIOp : ScalableMaskedIOp<"masked.muli", "multiplication",
-                                             [Commutative]>;
-
-def ScalableMaskedMulFOp : ScalableMaskedFOp<"masked.mulf", "multiplication",
-                                             [Commutative]>;
-
-def ScalableMaskedSDivIOp : ScalableMaskedIOp<"masked.divi_signed",
-                                              "signed division">;
-
-def ScalableMaskedUDivIOp : ScalableMaskedIOp<"masked.divi_unsigned",
-                                              "unsigned division">;
-
-def ScalableMaskedDivFOp : ScalableMaskedFOp<"masked.divf", "division">;
-
 def UmmlaIntrOp :
   ArmSVE_IntrBinaryOverloadedOp<"ummla">,
   Arguments<(ins AnyScalableVector, AnyScalableVector, AnyScalableVector)>;
@@ -259,40 +189,4 @@
   ArmSVE_IntrBinaryOverloadedOp<"udot">,
   Arguments<(ins AnyScalableVector, AnyScalableVector, AnyScalableVector)>;
 
-def ScalableMaskedAddIIntrOp :
-  ArmSVE_IntrBinaryOverloadedOp<"add">,
-  Arguments<(ins AnyScalableVector, AnyScalableVector, AnyScalableVector)>;
-
-def ScalableMaskedAddFIntrOp :
-  ArmSVE_IntrBinaryOverloadedOp<"fadd">,
-  Arguments<(ins AnyScalableVector, AnyScalableVector, AnyScalableVector)>;
-
-def ScalableMaskedMulIIntrOp :
-  ArmSVE_IntrBinaryOverloadedOp<"mul">,
-  Arguments<(ins AnyScalableVector, AnyScalableVector, AnyScalableVector)>;
-
-def ScalableMaskedMulFIntrOp :
-  ArmSVE_IntrBinaryOverloadedOp<"fmul">,
-  Arguments<(ins AnyScalableVector, AnyScalableVector, AnyScalableVector)>;
-
-def ScalableMaskedSubIIntrOp :
-  ArmSVE_IntrBinaryOverloadedOp<"sub">,
-  Arguments<(ins AnyScalableVector, AnyScalableVector, AnyScalableVector)>;
-
-def ScalableMaskedSubFIntrOp :
-  ArmSVE_IntrBinaryOverloadedOp<"fsub">,
-  Arguments<(ins AnyScalableVector, AnyScalableVector, AnyScalableVector)>;
-
-def ScalableMaskedSDivIIntrOp :
-  ArmSVE_IntrBinaryOverloadedOp<"sdiv">,
-  Arguments<(ins AnyScalableVector, AnyScalableVector, AnyScalableVector)>;
-
-def ScalableMaskedUDivIIntrOp :
-  ArmSVE_IntrBinaryOverloadedOp<"udiv">,
-  Arguments<(ins AnyScalableVector, AnyScalableVector, AnyScalableVector)>;
-
-def ScalableMaskedDivFIntrOp :
-  ArmSVE_IntrBinaryOverloadedOp<"fdiv">,
-  Arguments<(ins AnyScalableVector, AnyScalableVector, AnyScalableVector)>;
-
 #endif // ARMSVE_OPS
diff --git a/mlir/lib/Dialect/ArmSVE/IR/ArmSVEDialect.cpp b/mlir/lib/Dialect/ArmSVE/IR/ArmSVEDialect.cpp
--- a/mlir/lib/Dialect/ArmSVE/IR/ArmSVEDialect.cpp
+++ b/mlir/lib/Dialect/ArmSVE/IR/ArmSVEDialect.cpp
@@ -21,19 +21,6 @@
 using namespace mlir;
 using namespace mlir::arm_sve;
 
-//===----------------------------------------------------------------------===//
-// ScalableVector versions of general helpers for comparison ops
-//===----------------------------------------------------------------------===//
-
-/// Return the scalable vector of the same shape and containing i1.
-static Type getI1SameShape(Type type) {
-  auto i1Type = IntegerType::get(type.getContext(), 1);
-  if (auto sVectorType = type.dyn_cast<VectorType>())
-    return VectorType::get(sVectorType.getShape(), i1Type,
-                           sVectorType.getNumScalableDims());
-  return nullptr;
-}
-
 //===----------------------------------------------------------------------===//
 // Tablegen Definitions
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/ArmSVE/Transforms/LegalizeForLLVMExport.cpp b/mlir/lib/Dialect/ArmSVE/Transforms/LegalizeForLLVMExport.cpp
--- a/mlir/lib/Dialect/ArmSVE/Transforms/LegalizeForLLVMExport.cpp
+++ b/mlir/lib/Dialect/ArmSVE/Transforms/LegalizeForLLVMExport.cpp
@@ -38,33 +38,6 @@
 using SmmlaOpLowering = OneToOneConvertToLLVMPattern<SmmlaOp, SmmlaIntrOp>;
 using UdotOpLowering = OneToOneConvertToLLVMPattern<UdotOp, UdotIntrOp>;
 using UmmlaOpLowering = OneToOneConvertToLLVMPattern<UmmlaOp, UmmlaIntrOp>;
-using ScalableMaskedAddIOpLowering =
-    OneToOneConvertToLLVMPattern<ScalableMaskedAddIOp,
-                                 ScalableMaskedAddIIntrOp>;
-using ScalableMaskedAddFOpLowering =
-    OneToOneConvertToLLVMPattern<ScalableMaskedAddFOp,
-                                 ScalableMaskedAddFIntrOp>;
-using ScalableMaskedSubIOpLowering =
-    OneToOneConvertToLLVMPattern<ScalableMaskedSubIOp,
-                                 ScalableMaskedSubIIntrOp>;
-using ScalableMaskedSubFOpLowering =
-    OneToOneConvertToLLVMPattern<ScalableMaskedSubFOp,
-                                 ScalableMaskedSubFIntrOp>;
-using ScalableMaskedMulIOpLowering =
-    OneToOneConvertToLLVMPattern<ScalableMaskedMulIOp,
-                                 ScalableMaskedMulIIntrOp>;
-using ScalableMaskedMulFOpLowering =
-    OneToOneConvertToLLVMPattern<ScalableMaskedMulFOp,
-                                 ScalableMaskedMulFIntrOp>;
-using ScalableMaskedSDivIOpLowering =
-    OneToOneConvertToLLVMPattern<ScalableMaskedSDivIOp,
-                                 ScalableMaskedSDivIIntrOp>;
-using ScalableMaskedUDivIOpLowering =
-    OneToOneConvertToLLVMPattern<ScalableMaskedUDivIOp,
-                                 ScalableMaskedUDivIIntrOp>;
-using ScalableMaskedDivFOpLowering =
-    OneToOneConvertToLLVMPattern<ScalableMaskedDivFOp,
-                                 ScalableMaskedDivFIntrOp>;
 
 /// Populate the given list with patterns that convert from ArmSVE to LLVM.
 void mlir::populateArmSVELegalizeForLLVMExportPatterns(
@@ -79,16 +52,7 @@
   patterns.add<SdotOpLowering,
                SmmlaOpLowering,
                UdotOpLowering,
-               UmmlaOpLowering,
-               ScalableMaskedAddIOpLowering,
-               ScalableMaskedAddFOpLowering,
-               ScalableMaskedSubIOpLowering,
-               ScalableMaskedSubFOpLowering,
-               ScalableMaskedMulIOpLowering,
-               ScalableMaskedMulFOpLowering,
-               ScalableMaskedSDivIOpLowering,
-               ScalableMaskedUDivIOpLowering,
-               ScalableMaskedDivFOpLowering>(converter);
+               UmmlaOpLowering>(converter);
   // clang-format on
 }
 
@@ -98,28 +62,10 @@
   target.addLegalOp<SdotIntrOp,
                     SmmlaIntrOp,
                     UdotIntrOp,
-                    UmmlaIntrOp,
-                    ScalableMaskedAddIIntrOp,
-                    ScalableMaskedAddFIntrOp,
-                    ScalableMaskedSubIIntrOp,
-                    ScalableMaskedSubFIntrOp,
-                    ScalableMaskedMulIIntrOp,
-                    ScalableMaskedMulFIntrOp,
-                    ScalableMaskedSDivIIntrOp,
-                    ScalableMaskedUDivIIntrOp,
-                    ScalableMaskedDivFIntrOp>();
+                    UmmlaIntrOp>();
   target.addIllegalOp<SdotOp,
                       SmmlaOp,
                       UdotOp,
-                      UmmlaOp,
-                      ScalableMaskedAddIOp,
-                      ScalableMaskedAddFOp,
-                      ScalableMaskedSubIOp,
-                      ScalableMaskedSubFOp,
-                      ScalableMaskedMulIOp,
-                      ScalableMaskedMulFOp,
-                      ScalableMaskedSDivIOp,
-                      ScalableMaskedUDivIOp,
-                      ScalableMaskedDivFOp>();
+                      UmmlaOp>();
   // clang-format on
 }
diff --git a/mlir/test/Dialect/ArmSVE/legalize-for-llvm.mlir b/mlir/test/Dialect/ArmSVE/legalize-for-llvm.mlir
--- a/mlir/test/Dialect/ArmSVE/legalize-for-llvm.mlir
+++ b/mlir/test/Dialect/ArmSVE/legalize-for-llvm.mlir
@@ -40,77 +40,6 @@
   return %0 : vector<[4]xi32>
 }
 
-func.func @arm_sve_arithi_masked(%a: vector<[4]xi32>,
-                                 %b: vector<[4]xi32>,
-                                 %c: vector<[4]xi32>,
-                                 %d: vector<[4]xi32>,
-                                 %e: vector<[4]xi32>,
-                                 %mask: vector<[4]xi1>
-                                 ) -> vector<[4]xi32> {
-  // CHECK: arm_sve.intr.add{{.*}}: (vector<[4]xi1>, vector<[4]xi32>, vector<[4]xi32>) -> vector<[4]xi32>
-  %0 = arm_sve.masked.addi %mask, %a, %b : vector<[4]xi1>,
-                                           vector<[4]xi32>
-  // CHECK: arm_sve.intr.sub{{.*}}: (vector<[4]xi1>, vector<[4]xi32>, vector<[4]xi32>) -> vector<[4]xi32>
-  %1 = arm_sve.masked.subi %mask, %0, %c : vector<[4]xi1>,
-                                           vector<[4]xi32>
-  // CHECK: arm_sve.intr.mul{{.*}}: (vector<[4]xi1>, vector<[4]xi32>, vector<[4]xi32>) -> vector<[4]xi32>
-  %2 = arm_sve.masked.muli %mask, %1, %d : vector<[4]xi1>,
-                                           vector<[4]xi32>
-  // CHECK: arm_sve.intr.sdiv{{.*}}: (vector<[4]xi1>, vector<[4]xi32>, vector<[4]xi32>) -> vector<[4]xi32>
-  %3 = arm_sve.masked.divi_signed %mask, %2, %e : vector<[4]xi1>,
-                                                  vector<[4]xi32>
-  // CHECK: arm_sve.intr.udiv{{.*}}: (vector<[4]xi1>, vector<[4]xi32>, vector<[4]xi32>) -> vector<[4]xi32>
-  %4 = arm_sve.masked.divi_unsigned %mask, %3, %e : vector<[4]xi1>,
-                                                    vector<[4]xi32>
-  return %4 : vector<[4]xi32>
-}
-
-func.func @arm_sve_arithf_masked(%a: vector<[4]xf32>,
-                                 %b: vector<[4]xf32>,
-                                 %c: vector<[4]xf32>,
-                                 %d: vector<[4]xf32>,
-                                 %e: vector<[4]xf32>,
-                                 %mask: vector<[4]xi1>
-                                 ) -> vector<[4]xf32> {
-  // CHECK: arm_sve.intr.fadd{{.*}}: (vector<[4]xi1>, vector<[4]xf32>, vector<[4]xf32>) -> vector<[4]xf32>
-  %0 = arm_sve.masked.addf %mask, %a, %b : vector<[4]xi1>,
-                                           vector<[4]xf32>
-  // CHECK: arm_sve.intr.fsub{{.*}}: (vector<[4]xi1>, vector<[4]xf32>, vector<[4]xf32>) -> vector<[4]xf32>
-  %1 = arm_sve.masked.subf %mask, %0, %c : vector<[4]xi1>,
-                                           vector<[4]xf32>
-  // CHECK: arm_sve.intr.fmul{{.*}}: (vector<[4]xi1>, vector<[4]xf32>, vector<[4]xf32>) -> vector<[4]xf32>
-  %2 = arm_sve.masked.mulf %mask, %1, %d : vector<[4]xi1>,
-                                           vector<[4]xf32>
-  // CHECK: arm_sve.intr.fdiv{{.*}}: (vector<[4]xi1>, vector<[4]xf32>, vector<[4]xf32>) -> vector<[4]xf32>
-  %3 = arm_sve.masked.divf %mask, %2, %e : vector<[4]xi1>,
-                                           vector<[4]xf32>
-  return %3 : vector<[4]xf32>
-}
-
-func.func @arm_sve_abs_diff(%a: vector<[4]xi32>,
-                            %b: vector<[4]xi32>)
-                            -> vector<[4]xi32> {
-  // CHECK: llvm.mlir.constant(dense<0> : vector<[4]xi32>) : vector<[4]xi32>
-  %z = arith.subi %a, %a : vector<[4]xi32>
-  // CHECK: llvm.icmp "sge" {{.*}}: vector<[4]xi32>
-  %agb = arith.cmpi sge, %a, %b : vector<[4]xi32>
-  // CHECK: llvm.icmp "slt" {{.*}}: vector<[4]xi32>
-  %bga = arith.cmpi slt, %a, %b : vector<[4]xi32>
-  // CHECK: "arm_sve.intr.sub"{{.*}}: (vector<[4]xi1>, vector<[4]xi32>, vector<[4]xi32>) -> vector<[4]xi32>
-  %0 = arm_sve.masked.subi %agb, %a, %b : vector<[4]xi1>,
-                                          vector<[4]xi32>
-  // CHECK: "arm_sve.intr.sub"{{.*}}: (vector<[4]xi1>, vector<[4]xi32>, vector<[4]xi32>) -> vector<[4]xi32>
-  %1 = arm_sve.masked.subi %bga, %b, %a : vector<[4]xi1>,
-                                          vector<[4]xi32>
-  // CHECK: "arm_sve.intr.add"{{.*}}: (vector<[4]xi1>, vector<[4]xi32>, vector<[4]xi32>) -> vector<[4]xi32>
-  %2 = arm_sve.masked.addi %agb, %z, %0 : vector<[4]xi1>,
-                                          vector<[4]xi32>
-  // CHECK: "arm_sve.intr.add"{{.*}}: (vector<[4]xi1>, vector<[4]xi32>, vector<[4]xi32>) -> vector<[4]xi32>
-  %3 = arm_sve.masked.addi %bga, %2, %1 : vector<[4]xi1>,
-                                          vector<[4]xi32>
-  return %3 : vector<[4]xi32>
-}
-
 func.func @get_vector_scale() -> index {
   // CHECK: llvm.intr.vscale
   %0 = vector.vscale
diff --git a/mlir/test/Dialect/ArmSVE/roundtrip.mlir b/mlir/test/Dialect/ArmSVE/roundtrip.mlir
--- a/mlir/test/Dialect/ArmSVE/roundtrip.mlir
+++ b/mlir/test/Dialect/ArmSVE/roundtrip.mlir
@@ -35,50 +35,3 @@
                                vector<[16]xi8> to vector<[4]xi32>
   return %0 : vector<[4]xi32>
 }
-
-func.func @arm_sve_masked_arithi(%a: vector<[4]xi32>,
-                                 %b: vector<[4]xi32>,
-                                 %c: vector<[4]xi32>,
-                                 %d: vector<[4]xi32>,
-                                 %e: vector<[4]xi32>,
-                                 %mask: vector<[4]xi1>)
-                                 -> vector<[4]xi32> {
-  // CHECK: arm_sve.masked.muli {{.*}}: vector<[4]xi1>, vector<
-  %0 = arm_sve.masked.muli %mask, %a, %b : vector<[4]xi1>,
-                                           vector<[4]xi32>
-  // CHECK: arm_sve.masked.addi {{.*}}: vector<[4]xi1>, vector<
-  %1 = arm_sve.masked.addi %mask, %0, %c : vector<[4]xi1>,
-                                           vector<[4]xi32>
-  // CHECK: arm_sve.masked.subi {{.*}}: vector<[4]xi1>, vector<
-  %2 = arm_sve.masked.subi %mask, %1, %d : vector<[4]xi1>,
-                                           vector<[4]xi32>
-  // CHECK: arm_sve.masked.divi_signed
-  %3 = arm_sve.masked.divi_signed %mask, %2, %e : vector<[4]xi1>,
-                                                  vector<[4]xi32>
-  // CHECK: arm_sve.masked.divi_unsigned
-  %4 = arm_sve.masked.divi_unsigned %mask, %3, %e : vector<[4]xi1>,
-                                                    vector<[4]xi32>
-  return %2 : vector<[4]xi32>
-}
-
-func.func @arm_sve_masked_arithf(%a: vector<[4]xf32>,
-                                 %b: vector<[4]xf32>,
-                                 %c: vector<[4]xf32>,
-                                 %d: vector<[4]xf32>,
-                                 %e: vector<[4]xf32>,
-                                 %mask: vector<[4]xi1>)
-                                 -> vector<[4]xf32> {
-  // CHECK: arm_sve.masked.mulf {{.*}}: vector<[4]xi1>, vector<
-  %0 = arm_sve.masked.mulf %mask, %a, %b : vector<[4]xi1>,
-                                           vector<[4]xf32>
-  // CHECK: arm_sve.masked.addf {{.*}}: vector<[4]xi1>, vector<
-  %1 = arm_sve.masked.addf %mask, %0, %c : vector<[4]xi1>,
-                                           vector<[4]xf32>
-  // CHECK: arm_sve.masked.subf {{.*}}: vector<[4]xi1>, vector<
-  %2 = arm_sve.masked.subf %mask, %1, %d : vector<[4]xi1>,
-                                           vector<[4]xf32>
-  // CHECK: arm_sve.masked.divf {{.*}}: vector<[4]xi1>, vector<
-  %3 = arm_sve.masked.divf %mask, %2, %e : vector<[4]xi1>,
-                                           vector<[4]xf32>
-  return %3 : vector<[4]xf32>
-}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-sve.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-sve.mlir
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-sve.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-sve.mlir
@@ -53,10 +53,8 @@
     %1 = vector.load %b[%i0] : memref<?xi64>, vector<[2]xi64>
     %agb = arith.cmpi sge, %0, %1 : vector<[2]xi64>
     %bga = arith.cmpi slt, %0, %1 : vector<[2]xi64>
-    %10 = arm_sve.masked.subi %agb, %0, %1 : vector<[2]xi1>,
-                                             vector<[2]xi64>
-    %01 = arm_sve.masked.subi %bga, %1, %0 : vector<[2]xi1>,
-                                             vector<[2]xi64>
+    %10 = arith.subi %0, %1 : vector<[2]xi64>
+    %01 = arith.subi %1, %0 : vector<[2]xi64>
     vector.maskedstore %c[%i0], %agb, %10 : memref<?xi64>,
                                             vector<[2]xi1>,
                                             vector<[2]xi64>
diff --git a/mlir/test/Target/LLVMIR/arm-sve.mlir b/mlir/test/Target/LLVMIR/arm-sve.mlir
--- a/mlir/test/Target/LLVMIR/arm-sve.mlir
+++ b/mlir/test/Target/LLVMIR/arm-sve.mlir
@@ -48,97 +48,6 @@
   llvm.return %0 : vector<[4]xi32>
 }
 
-// CHECK-LABEL: define <vscale x 4 x i32> @arm_sve_arithi
-llvm.func @arm_sve_arithi(%arg0: vector<[4]xi32>,
-                          %arg1: vector<[4]xi32>,
-                          %arg2: vector<[4]xi32>)
-                          -> vector<[4]xi32> {
-  // CHECK: mul
-  %0 = llvm.mul %arg0, %arg1 : vector<[4]xi32>
-  // CHECK: add
-  %1 = llvm.add %0, %arg2 : vector<[4]xi32>
-  llvm.return %1 : vector<[4]xi32>
-}
-
-// CHECK-LABEL: define <vscale x 4 x float> @arm_sve_arithf
-llvm.func @arm_sve_arithf(%arg0: vector<[4]xf32>,
-                          %arg1: vector<[4]xf32>,
-                          %arg2: vector<[4]xf32>)
-                          -> vector<[4]xf32> {
-  // CHECK: fmul
-  %0 = llvm.fmul %arg0, %arg1 : vector<[4]xf32>
-  // CHECK: fadd
-  %1 = llvm.fadd %0, %arg2 : vector<[4]xf32>
-  llvm.return %1 : vector<[4]xf32>
-}
-
-// CHECK-LABEL: define <vscale x 4 x i32> @arm_sve_arithi_masked
-llvm.func @arm_sve_arithi_masked(%arg0: vector<[4]xi32>,
-                                 %arg1: vector<[4]xi32>,
-                                 %arg2: vector<[4]xi32>,
-                                 %arg3: vector<[4]xi32>,
-                                 %arg4: vector<[4]xi32>,
-                                 %arg5: vector<[4]xi1>)
-                                 -> vector<[4]xi32> {
-  // CHECK: call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32
-  %0 = "arm_sve.intr.add"(%arg5, %arg0, %arg1) : (vector<[4]xi1>,
-                                                  vector<[4]xi32>,
-                                                  vector<[4]xi32>)
-      -> vector<[4]xi32>
-  // CHECK: call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32
-  %1 = "arm_sve.intr.sub"(%arg5, %0, %arg1) : (vector<[4]xi1>,
-                                               vector<[4]xi32>,
-                                               vector<[4]xi32>)
-      -> vector<[4]xi32>
-  // CHECK: call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32
-  %2 = "arm_sve.intr.mul"(%arg5, %1, %arg3) : (vector<[4]xi1>,
-                                               vector<[4]xi32>,
-                                               vector<[4]xi32>)
-      -> vector<[4]xi32>
-  // CHECK: call <vscale x 4 x i32> @llvm.aarch64.sve.sdiv.nxv4i32
-  %3 = "arm_sve.intr.sdiv"(%arg5, %2, %arg4) : (vector<[4]xi1>,
-                                                vector<[4]xi32>,
-                                                vector<[4]xi32>)
-      -> vector<[4]xi32>
-  // CHECK: call <vscale x 4 x i32> @llvm.aarch64.sve.udiv.nxv4i32
-  %4 = "arm_sve.intr.udiv"(%arg5, %3, %arg4) : (vector<[4]xi1>,
-                                                vector<[4]xi32>,
-                                                vector<[4]xi32>)
-      -> vector<[4]xi32>
-  llvm.return %4 : vector<[4]xi32>
-}
-
-// CHECK-LABEL: define <vscale x 4 x float> @arm_sve_arithf_masked
-llvm.func @arm_sve_arithf_masked(%arg0: vector<[4]xf32>,
-                                 %arg1: vector<[4]xf32>,
-                                 %arg2: vector<[4]xf32>,
-                                 %arg3: vector<[4]xf32>,
-                                 %arg4: vector<[4]xf32>,
-                                 %arg5: vector<[4]xi1>)
-                                 -> vector<[4]xf32> {
-  // CHECK: call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32
-  %0 = "arm_sve.intr.fadd"(%arg5, %arg0, %arg1) : (vector<[4]xi1>,
-                                                   vector<[4]xf32>,
-                                                   vector<[4]xf32>)
-      -> vector<[4]xf32>
-  // CHECK: call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32
-  %1 = "arm_sve.intr.fsub"(%arg5, %0, %arg2) : (vector<[4]xi1>,
-                                                vector<[4]xf32>,
-                                                vector<[4]xf32>)
-      -> vector<[4]xf32>
-  // CHECK: call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32
-  %2 = "arm_sve.intr.fmul"(%arg5, %1, %arg3) : (vector<[4]xi1>,
-                                                vector<[4]xf32>,
-                                                vector<[4]xf32>)
-      -> vector<[4]xf32>
-  // CHECK: call <vscale x 4 x float> @llvm.aarch64.sve.fdiv.nxv4f32
-  %3 = "arm_sve.intr.fdiv"(%arg5, %2, %arg4) : (vector<[4]xi1>,
-                                                vector<[4]xf32>,
-                                                vector<[4]xf32>)
-      -> vector<[4]xf32>
-  llvm.return %3 : vector<[4]xf32>
-}
-
 // CHECK-LABEL: define <vscale x 4 x i1> @arm_sve_mask_genf
 llvm.func @arm_sve_mask_genf(%arg0: vector<[4]xf32>,
                              %arg1: vector<[4]xf32>)
@@ -157,39 +66,6 @@
   llvm.return %0 : vector<[4]xi1>
 }
 
-// CHECK-LABEL: define <vscale x 4 x i32> @arm_sve_abs_diff
-llvm.func @arm_sve_abs_diff(%arg0: vector<[4]xi32>,
-                            %arg1: vector<[4]xi32>)
-                            -> vector<[4]xi32> {
-  // CHECK: sub
-  %0 = llvm.sub %arg0, %arg0 : vector<[4]xi32>
-  // CHECK: icmp sge
-  %1 = llvm.icmp "sge" %arg0, %arg1 : vector<[4]xi32>
-  // CHECK: icmp slt
-  %2 = llvm.icmp "slt" %arg0, %arg1 : vector<[4]xi32>
-  // CHECK: call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32
-  %3 = "arm_sve.intr.sub"(%1, %arg0, %arg1) : (vector<[4]xi1>,
-                                               vector<[4]xi32>,
-                                               vector<[4]xi32>)
-      -> vector<[4]xi32>
-  // CHECK: call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32
-  %4 = "arm_sve.intr.sub"(%2, %arg1, %arg0) : (vector<[4]xi1>,
-                                               vector<[4]xi32>,
-                                               vector<[4]xi32>)
-      -> vector<[4]xi32>
-  // CHECK: call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32
-  %5 = "arm_sve.intr.add"(%1, %0, %3) : (vector<[4]xi1>,
-                                         vector<[4]xi32>,
-                                         vector<[4]xi32>)
-      -> vector<[4]xi32>
-  // CHECK: call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32
-  %6 = "arm_sve.intr.add"(%2, %5, %4) : (vector<[4]xi1>,
-                                         vector<[4]xi32>,
-                                         vector<[4]xi32>)
-      -> vector<[4]xi32>
-  llvm.return %6 : vector<[4]xi32>
-}
-
 // CHECK-LABEL: define void @memcopy
 llvm.func @memcopy(%arg0: !llvm.ptr, %arg1: !llvm.ptr,
                    %arg2: i64, %arg3: i64, %arg4: i64,