Index: clang/include/clang/Basic/arm_mve.td
===================================================================
--- clang/include/clang/Basic/arm_mve.td
+++ clang/include/clang/Basic/arm_mve.td
@@ -623,16 +623,16 @@
      (xval $pair, 0))>;
 }
 
-multiclass VectorComplexAddPred<dag halving, dag angle> {
+multiclass VectorComplexAddPred<dag not_halving, dag angle> {
   def "" : Intrinsic<Vector, (args Vector:$a, Vector:$b),
-     (IRInt<"vcaddq", [Vector]> halving, angle, $a, $b)>;
+     (IRInt<"vcaddq", [Vector]> not_halving, angle, $a, $b)>;
   def _m : Intrinsic<
      Vector, (args Vector:$inactive, Vector:$a, Vector:$b, Predicate:$pred),
      (IRInt<"vcaddq_predicated", [Vector, Predicate]>
-      halving, angle, $inactive, $a, $b, $pred)>;
+      not_halving, angle, $inactive, $a, $b, $pred)>;
   def _x : Intrinsic<
      Vector, (args Vector:$a, Vector:$b, Predicate:$pred),
      (IRInt<"vcaddq_predicated", [Vector, Predicate]>
-      halving, angle, (undef Vector), $a, $b, $pred)>;
+      not_halving, angle, (undef Vector), $a, $b, $pred)>;
 }
 
 multiclass VectorComplexMulPred<dag angle> {
@@ -655,9 +655,9 @@
    (IRInt<"vcmlaq_predicated", [Vector, Predicate]>
     angle, $a, $b, $c, $pred)>;
 }
 
-multiclass VectorComplexAddAngle<dag halving> {
-  defm _rot90 : VectorComplexAddPred<halving, (u32 0)>;
-  defm _rot270 : VectorComplexAddPred<halving, (u32 1)>;
+multiclass VectorComplexAddAngle<dag not_halving> {
+  defm _rot90 : VectorComplexAddPred<not_halving, (u32 0)>;
+  defm _rot270 : VectorComplexAddPred<not_halving, (u32 1)>;
 }
 
 multiclass VectorComplexMulAngle {
@@ -675,10 +675,10 @@
 }
 
 let params = T.Usual in
-defm vcaddq : VectorComplexAddAngle<(u32 0)>;
+defm vcaddq : VectorComplexAddAngle<(u32 1)>;
 
 let params = T.Signed in
-defm vhcaddq : VectorComplexAddAngle<(u32 1)>;
+defm vhcaddq : VectorComplexAddAngle<(u32 0)>;
 
 let params = T.Float in {
 defm vcmulq : VectorComplexMulAngle;
Index: clang/test/CodeGen/arm-mve-intrinsics/vcaddq.c
===================================================================
--- clang/test/CodeGen/arm-mve-intrinsics/vcaddq.c
+++ clang/test/CodeGen/arm-mve-intrinsics/vcaddq.c
@@ -6,7 +6,7 @@
 
 // CHECK-LABEL: @test_vcaddq_rot90_u8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
 //
 uint8x16_t test_vcaddq_rot90_u8(uint8x16_t a, uint8x16_t b)
@@ -20,7 +20,7 @@
 
 // CHECK-LABEL: @test_vcaddq_rot90_u16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
 //
 uint16x8_t test_vcaddq_rot90_u16(uint16x8_t a, uint16x8_t b)
@@ -34,7 +34,7 @@
 
 // CHECK-LABEL: @test_vcaddq_rot90_u32(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
 //
 uint32x4_t test_vcaddq_rot90_u32(uint32x4_t a, uint32x4_t b)
@@ -48,7 +48,7 @@
 
 // CHECK-LABEL: @test_vcaddq_rot90_s8(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
 //
 int8x16_t test_vcaddq_rot90_s8(int8x16_t a, int8x16_t b)
@@ -62,7 +62,7 @@
 
 // CHECK-LABEL: @test_vcaddq_rot90_s16(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16>
[[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) // CHECK-NEXT: ret <8 x i16> [[TMP0]] // int16x8_t test_vcaddq_rot90_s16(int16x8_t a, int16x8_t b) @@ -76,7 +76,7 @@ // CHECK-LABEL: @test_vcaddq_rot90_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) // CHECK-NEXT: ret <4 x i32> [[TMP0]] // int32x4_t test_vcaddq_rot90_s32(int32x4_t a, int32x4_t b) @@ -90,7 +90,7 @@ // CHECK-LABEL: @test_vcaddq_rot90_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 0, i32 0, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 1, i32 0, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]]) // CHECK-NEXT: ret <8 x half> [[TMP0]] // float16x8_t test_vcaddq_rot90_f16(float16x8_t a, float16x8_t b) @@ -104,7 +104,7 @@ // CHECK-LABEL: @test_vcaddq_rot90_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 0, i32 0, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 0, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]]) // CHECK-NEXT: ret <4 x float> [[TMP0]] // float32x4_t test_vcaddq_rot90_f32(float32x4_t a, float32x4_t b) @@ -118,7 +118,7 @@ // CHECK-LABEL: @test_vcaddq_rot270_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) // CHECK-NEXT: ret <16 x i8> [[TMP0]] // uint8x16_t test_vcaddq_rot270_u8(uint8x16_t a, uint8x16_t b) @@ -132,7 +132,7 @@ // CHECK-LABEL: @test_vcaddq_rot270_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) // CHECK-NEXT: ret <8 x i16> [[TMP0]] // uint16x8_t test_vcaddq_rot270_u16(uint16x8_t a, uint16x8_t b) @@ -146,7 +146,7 @@ // CHECK-LABEL: @test_vcaddq_rot270_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) // CHECK-NEXT: ret <4 x i32> [[TMP0]] // uint32x4_t test_vcaddq_rot270_u32(uint32x4_t a, uint32x4_t b) @@ -160,7 +160,7 @@ // CHECK-LABEL: @test_vcaddq_rot270_s8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) // CHECK-NEXT: ret <16 x i8> [[TMP0]] // int8x16_t test_vcaddq_rot270_s8(int8x16_t a, int8x16_t b) @@ -174,7 +174,7 @@ // CHECK-LABEL: @test_vcaddq_rot270_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> [[A:%.*]], <8 x i16> 
[[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) // CHECK-NEXT: ret <8 x i16> [[TMP0]] // int16x8_t test_vcaddq_rot270_s16(int16x8_t a, int16x8_t b) @@ -188,7 +188,7 @@ // CHECK-LABEL: @test_vcaddq_rot270_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) // CHECK-NEXT: ret <4 x i32> [[TMP0]] // int32x4_t test_vcaddq_rot270_s32(int32x4_t a, int32x4_t b) @@ -202,7 +202,7 @@ // CHECK-LABEL: @test_vcaddq_rot270_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 0, i32 1, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 1, i32 1, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]]) // CHECK-NEXT: ret <8 x half> [[TMP0]] // float16x8_t test_vcaddq_rot270_f16(float16x8_t a, float16x8_t b) @@ -216,7 +216,7 @@ // CHECK-LABEL: @test_vcaddq_rot270_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 0, i32 1, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 1, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]]) // CHECK-NEXT: ret <4 x float> [[TMP0]] // float32x4_t test_vcaddq_rot270_f32(float32x4_t a, float32x4_t b) @@ -233,7 +233,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) // CHECK-NEXT: ret <16 x i8> [[TMP2]] // uint8x16_t test_vcaddq_rot90_m_u8(uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p) @@ -249,7 +249,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x i16> [[TMP2]] // uint16x8_t test_vcaddq_rot90_m_u16(uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p) @@ -265,7 +265,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x 
i32> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x i32> [[TMP2]] // uint32x4_t test_vcaddq_rot90_m_u32(uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p) @@ -281,7 +281,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) // CHECK-NEXT: ret <16 x i8> [[TMP2]] // int8x16_t test_vcaddq_rot90_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p) @@ -297,7 +297,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x i16> [[TMP2]] // int16x8_t test_vcaddq_rot90_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) @@ -313,7 +313,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x i32> [[TMP2]] // int32x4_t test_vcaddq_rot90_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) @@ -329,7 +329,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 0, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 0, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x half> [[TMP2]] // float16x8_t test_vcaddq_rot90_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p) @@ -345,7 +345,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 0, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 0, <4 x float> [[INACTIVE:%.*]], <4 x float> 
[[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x float> [[TMP2]] // float32x4_t test_vcaddq_rot90_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p) @@ -361,7 +361,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) // CHECK-NEXT: ret <16 x i8> [[TMP2]] // uint8x16_t test_vcaddq_rot270_m_u8(uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p) @@ -377,7 +377,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x i16> [[TMP2]] // uint16x8_t test_vcaddq_rot270_m_u16(uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p) @@ -393,7 +393,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x i32> [[TMP2]] // uint32x4_t test_vcaddq_rot270_m_u32(uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p) @@ -409,7 +409,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) // CHECK-NEXT: ret <16 x i8> [[TMP2]] // int8x16_t test_vcaddq_rot270_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p) @@ -425,7 +425,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> [[INACTIVE:%.*]], 
<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x i16> [[TMP2]] // int16x8_t test_vcaddq_rot270_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) @@ -441,7 +441,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x i32> [[TMP2]] // int32x4_t test_vcaddq_rot270_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) @@ -457,7 +457,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 1, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 1, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x half> [[TMP2]] // float16x8_t test_vcaddq_rot270_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p) @@ -473,7 +473,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 1, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 1, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x float> [[TMP2]] // float32x4_t test_vcaddq_rot270_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p) @@ -489,7 +489,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) // CHECK-NEXT: ret <16 x i8> [[TMP2]] // uint8x16_t test_vcaddq_rot90_x_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p) @@ -505,7 +505,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> 
[[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x i16> [[TMP2]] // uint16x8_t test_vcaddq_rot90_x_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p) @@ -521,7 +521,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x i32> [[TMP2]] // uint32x4_t test_vcaddq_rot90_x_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p) @@ -537,7 +537,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) // CHECK-NEXT: ret <16 x i8> [[TMP2]] // int8x16_t test_vcaddq_rot90_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) @@ -553,7 +553,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x i16> [[TMP2]] // int16x8_t test_vcaddq_rot90_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) @@ -569,7 +569,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x i32> [[TMP2]] // int32x4_t test_vcaddq_rot90_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) @@ -585,7 +585,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 0, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 0, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x half> [[TMP2]] // float16x8_t test_vcaddq_rot90_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p) @@ -601,7 +601,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: 
[[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 0, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 0, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x float> [[TMP2]] // float32x4_t test_vcaddq_rot90_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p) @@ -617,7 +617,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) // CHECK-NEXT: ret <16 x i8> [[TMP2]] // uint8x16_t test_vcaddq_rot270_x_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p) @@ -633,7 +633,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x i16> [[TMP2]] // uint16x8_t test_vcaddq_rot270_x_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p) @@ -649,7 +649,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x i32> [[TMP2]] // uint32x4_t test_vcaddq_rot270_x_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p) @@ -665,7 +665,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) // CHECK-NEXT: ret <16 x i8> [[TMP2]] // int8x16_t test_vcaddq_rot270_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) @@ -681,7 +681,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> 
@llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x i16> [[TMP2]] // int16x8_t test_vcaddq_rot270_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) @@ -697,7 +697,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x i32> [[TMP2]] // int32x4_t test_vcaddq_rot270_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) @@ -713,7 +713,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 1, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 1, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x half> [[TMP2]] // float16x8_t test_vcaddq_rot270_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p) @@ -729,7 +729,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 1, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 1, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x float> [[TMP2]] // float32x4_t test_vcaddq_rot270_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p) Index: clang/test/CodeGen/arm-mve-intrinsics/vhcaddq.c =================================================================== --- clang/test/CodeGen/arm-mve-intrinsics/vhcaddq.c +++ clang/test/CodeGen/arm-mve-intrinsics/vhcaddq.c @@ -6,7 +6,7 @@ // CHECK-LABEL: @test_vhcaddq_rot90_s8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) // CHECK-NEXT: ret <16 x i8> [[TMP0]] // int8x16_t test_vhcaddq_rot90_s8(int8x16_t a, int8x16_t b) @@ -20,7 +20,7 @@ // CHECK-LABEL: @test_vhcaddq_rot90_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) // CHECK-NEXT: ret <8 x i16> [[TMP0]] // int16x8_t test_vhcaddq_rot90_s16(int16x8_t a, 
int16x8_t b) @@ -34,7 +34,7 @@ // CHECK-LABEL: @test_vhcaddq_rot90_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) // CHECK-NEXT: ret <4 x i32> [[TMP0]] // int32x4_t test_vhcaddq_rot90_s32(int32x4_t a, int32x4_t b) @@ -48,7 +48,7 @@ // CHECK-LABEL: @test_vhcaddq_rot270_s8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) // CHECK-NEXT: ret <16 x i8> [[TMP0]] // int8x16_t test_vhcaddq_rot270_s8(int8x16_t a, int8x16_t b) @@ -62,7 +62,7 @@ // CHECK-LABEL: @test_vhcaddq_rot270_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) // CHECK-NEXT: ret <8 x i16> [[TMP0]] // int16x8_t test_vhcaddq_rot270_s16(int16x8_t a, int16x8_t b) @@ -76,7 +76,7 @@ // CHECK-LABEL: @test_vhcaddq_rot270_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) // CHECK-NEXT: ret <4 x i32> [[TMP0]] // int32x4_t test_vhcaddq_rot270_s32(int32x4_t a, int32x4_t b) @@ -92,7 +92,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) // CHECK-NEXT: ret <16 x i8> [[TMP2]] // int8x16_t test_vhcaddq_rot90_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) @@ -108,7 +108,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x i16> [[TMP2]] // int16x8_t test_vhcaddq_rot90_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) @@ -124,7 +124,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> 
@llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x i32> [[TMP2]] // int32x4_t test_vhcaddq_rot90_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) @@ -140,7 +140,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) // CHECK-NEXT: ret <16 x i8> [[TMP2]] // int8x16_t test_vhcaddq_rot270_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) @@ -156,7 +156,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x i16> [[TMP2]] // int16x8_t test_vhcaddq_rot270_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) @@ -172,7 +172,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x i32> [[TMP2]] // int32x4_t test_vhcaddq_rot270_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) @@ -188,7 +188,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) // CHECK-NEXT: ret <16 x i8> [[TMP2]] // int8x16_t test_vhcaddq_rot90_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p) @@ -204,7 +204,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 
x i16> [[TMP2]] // int16x8_t test_vhcaddq_rot90_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) @@ -220,7 +220,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x i32> [[TMP2]] // int32x4_t test_vhcaddq_rot90_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) @@ -236,7 +236,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]]) // CHECK-NEXT: ret <16 x i8> [[TMP2]] // int8x16_t test_vhcaddq_rot270_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p) @@ -252,7 +252,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]]) // CHECK-NEXT: ret <8 x i16> [[TMP2]] // int16x8_t test_vhcaddq_rot270_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) @@ -268,7 +268,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]]) // CHECK-NEXT: ret <4 x i32> [[TMP2]] // int32x4_t test_vhcaddq_rot270_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) Index: llvm/include/llvm/IR/IntrinsicsARM.td =================================================================== --- llvm/include/llvm/IR/IntrinsicsARM.td +++ llvm/include/llvm/IR/IntrinsicsARM.td @@ -942,7 +942,9 @@ } // The first two parameters are compile-time constants: -// * Halving: is the a halving (vhcaddq) or non-halving (vcaddq) instruction +// * Halving: 0 means halving (vhcaddq), 1 means non-halving (vcaddq) +// instruction. 
Note: the flag is inverted to match the corresponding
+//   bit in the instruction encoding
 // * Rotation angle: 0 means 90 deg, 1 means 270 deg
 defm int_arm_mve_vcaddq : MVEMXPredicated<
   [llvm_anyvector_ty],
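For illustration only (not part of the patch): after this change, VCADD and VHCADD are both reached through the same polymorphic intrinsic, distinguished by the first compile-time i32 argument. A minimal LLVM IR sketch, with flag values taken from the updated CHECK lines in the clang tests above:

; non-halving vcaddq with 90-degree rotation: halving flag = 1, angle = 0
%r0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> %a, <16 x i8> %b)

; halving vhcaddq with 270-degree rotation: halving flag = 0, angle = 1
%r1 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> %a, <16 x i8> %b)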
Index: llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -233,22 +233,6 @@
   void SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
                          uint16_t OpcodeWithNoCarry, bool Add, bool Predicated);
 
-  /// Select MVE complex vector addition intrinsic
-  /// OpcodesInt are opcodes for non-halving addition of complex integer vectors
-  /// OpcodesHInt are opcodes for halving addition of complex integer vectors
-  /// OpcodesFP are opcodes for addition of complex floating point vectors
-  void SelectMVE_VCADD(SDNode *N, const uint16_t *OpcodesInt,
-                       const uint16_t *OpcodesHInt, const uint16_t *OpcodesFP,
-                       bool Predicated);
-
-  /// Select MVE complex vector multiplication intrinsic
-  void SelectMVE_VCMUL(SDNode *N, uint16_t OpcodeF16, uint16_t OpcodeF32,
-                       bool Predicated);
-
-  /// Select MVE complex vector multiply-add intrinsic
-  void SelectMVE_VCMLA(SDNode *N, uint16_t OpcodeF16, uint16_t OpcodeF32,
-                       bool Predicated);
-
   /// SelectMVE_VLD - Select MVE interleaving load intrinsics. NumVecs
   /// should be 2 or 4. The opcode array specifies the instructions
   /// used for 8, 16 and 32-bit lane sizes respectively, and each
@@ -2533,138 +2517,6 @@
   CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
 }
 
-/// Convert an SDValue to a boolean value. SDVal must be a compile-time constant
-static bool SDValueToConstBool(SDValue SDVal) {
-  ConstantSDNode *SDValConstant = dyn_cast<ConstantSDNode>(SDVal);
-  assert(SDValConstant && "expected a compile-time constant");
-  uint64_t Value = SDValConstant->getZExtValue();
-  assert((Value == 0 || Value == 1) && "expected value 0 or 1");
-  return Value;
-}
-
-/// Select an opcode based on a floating point vector type. One opcode
-/// corresponds to 16-bit floating point element type, the other to two 32-bit
-/// element type.
-/// Other types are not allowed
-static uint16_t SelectFPOpcode(EVT VT, uint16_t OpcodeF16, uint16_t OpcodeF32) {
-  assert(VT.isFloatingPoint() && VT.isVector() &&
-         "expected a floating-point vector");
-  switch (VT.getVectorElementType().getSizeInBits()) {
-  case 16:
-    return OpcodeF16;
-  case 32:
-    return OpcodeF32;
-  default:
-    llvm_unreachable("bad vector element size");
-  }
-}
-
-void ARMDAGToDAGISel::SelectMVE_VCADD(SDNode *N, const uint16_t *OpcodesInt,
-                                      const uint16_t *OpcodesHInt,
-                                      const uint16_t *OpcodesFP,
-                                      bool Predicated) {
-  EVT VT = N->getValueType(0);
-  SDLoc Loc(N);
-
-  bool IsHalved = SDValueToConstBool(N->getOperand(1));
-  bool IsAngle270 = SDValueToConstBool(N->getOperand(2));
-  bool IsFP = VT.isFloatingPoint();
-  if (IsHalved)
-    assert(!IsFP && "vhcaddq requires integer vector type");
-
-  uint16_t Opcode;
-  if (IsFP) {
-    Opcode = SelectFPOpcode(VT, OpcodesFP[0], OpcodesFP[1]);
-  } else {
-    const uint16_t *Opcodes = IsHalved ? OpcodesHInt : OpcodesInt;
-    switch (VT.getVectorElementType().getSizeInBits()) {
-    case 8:
-      Opcode = Opcodes[0];
-      break;
-    case 16:
-      Opcode = Opcodes[1];
-      break;
-    case 32:
-      Opcode = Opcodes[2];
-      break;
-    default:
-      llvm_unreachable("bad vector element size");
-    }
-  }
-
-  int FirstInputOp = Predicated ? 4 : 3;
-  SmallVector<SDValue, 8> Ops;
-  // Vectors
-  Ops.push_back(N->getOperand(FirstInputOp));
-  Ops.push_back(N->getOperand(FirstInputOp + 1));
-  // Rotation
-  Ops.push_back(CurDAG->getTargetConstant(IsAngle270, Loc, MVT::i32));
-
-  if (Predicated)
-    AddMVEPredicateToOps(Ops, Loc,
-                         N->getOperand(FirstInputOp + 2),  // predicate
-                         N->getOperand(FirstInputOp - 1)); // inactive
-  else
-    AddEmptyMVEPredicateToOps(Ops, Loc, VT);
-
-  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
-}
-
-static uint32_t GetCMulRotation(SDValue V) {
-  const ConstantSDNode *RotConstant = dyn_cast<ConstantSDNode>(V);
-  assert(RotConstant && "expected a compile-time constant");
-  uint64_t RotValue = RotConstant->getZExtValue();
-  assert(RotValue < 4 && "expected value in range [0, 3]");
-  return RotValue;
-}
-
-void ARMDAGToDAGISel::SelectMVE_VCMUL(SDNode *N, uint16_t OpcodeF16,
-                                      uint16_t OpcodeF32, bool Predicated) {
-  EVT VT = N->getValueType(0);
-  SDLoc Loc(N);
-
-  int FirstInputOp = Predicated ? 3 : 2;
-  SmallVector<SDValue, 8> Ops;
-  // Vectors
-  Ops.push_back(N->getOperand(FirstInputOp));
-  Ops.push_back(N->getOperand(FirstInputOp + 1));
-  // Rotation
-  uint32_t RotValue = GetCMulRotation(N->getOperand(1));
-  Ops.push_back(CurDAG->getTargetConstant(RotValue, Loc, MVT::i32));
-
-  if (Predicated)
-    AddMVEPredicateToOps(Ops, Loc,
-                         N->getOperand(FirstInputOp + 2),  // predicate
-                         N->getOperand(FirstInputOp - 1)); // inactive
-  else
-    AddEmptyMVEPredicateToOps(Ops, Loc, VT);
-
-  uint16_t Opcode = SelectFPOpcode(VT, OpcodeF16, OpcodeF32);
-  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
-}
-
-void ARMDAGToDAGISel::SelectMVE_VCMLA(SDNode *N, uint16_t OpcodeF16,
-                                      uint16_t OpcodeF32, bool Predicated) {
-  SDLoc Loc(N);
-
-  SmallVector<SDValue, 8> Ops;
-  // The 3 vector operands
-  for (int i = 2; i < 5; ++i)
-    Ops.push_back(N->getOperand(i));
-  // Rotation
-  uint32_t RotValue = GetCMulRotation(N->getOperand(1));
-  Ops.push_back(CurDAG->getTargetConstant(RotValue, Loc, MVT::i32));
-
-  if (Predicated)
-    AddMVEPredicateToOps(Ops, Loc, N->getOperand(5));
-  else
-    AddEmptyMVEPredicateToOps(Ops, Loc);
-
-  EVT VT = N->getValueType(0);
-  uint16_t Opcode = SelectFPOpcode(VT, OpcodeF16, OpcodeF32);
-  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
-}
-
 void ARMDAGToDAGISel::SelectMVE_VLD(SDNode *N, unsigned NumVecs,
                                     const uint16_t *const *Opcodes) {
   EVT VT = N->getValueType(0);
@@ -4510,35 +4362,6 @@
                      IntNo == Intrinsic::arm_mve_vadc_predicated);
     return;
 
-  case Intrinsic::arm_mve_vcaddq:
-  case Intrinsic::arm_mve_vcaddq_predicated: {
-    static const uint16_t OpcodesInt[] = {
-      ARM::MVE_VCADDi8, ARM::MVE_VCADDi16, ARM::MVE_VCADDi32,
-    };
-    static const uint16_t OpcodesHInt[] = {
-      ARM::MVE_VHCADDs8, ARM::MVE_VHCADDs16, ARM::MVE_VHCADDs32,
-    };
-    static const uint16_t OpcodesFP[] = {
-      ARM::MVE_VCADDf16, ARM::MVE_VCADDf32,
-    };
-
-    SelectMVE_VCADD(N, OpcodesInt, OpcodesHInt,
-                    OpcodesFP, IntNo == Intrinsic::arm_mve_vcaddq_predicated);
-    return;
-  }
-
-  case Intrinsic::arm_mve_vcmulq:
-  case Intrinsic::arm_mve_vcmulq_predicated:
-    SelectMVE_VCMUL(N, ARM::MVE_VCMULf16, ARM::MVE_VCMULf32,
-                    IntNo == Intrinsic::arm_mve_vcmulq_predicated);
-    return;
-
-  case Intrinsic::arm_mve_vcmlaq:
-  case Intrinsic::arm_mve_vcmlaq_predicated:
-    SelectMVE_VCMLA(N, ARM::MVE_VCMLAf16, ARM::MVE_VCMLAf32,
-                    IntNo == Intrinsic::arm_mve_vcmlaq_predicated);
-    return;
-
   }
   break;
 }
Index: llvm/lib/Target/ARM/ARMInstrMVE.td
===================================================================
--- llvm/lib/Target/ARM/ARMInstrMVE.td
+++ llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -2845,10 +2845,10 @@
 defm MVE_VMULf32 : MVE_VMUL_fp_m<MVE_v4f32>;
 defm MVE_VMULf16 : MVE_VMUL_fp_m<MVE_v8f16>;
 
-class MVE_VCMLA<string suffix, bit size, list<dag> pattern=[]>
+class MVE_VCMLA<string suffix, bit size>
   : MVEFloatArithNeon<"vcmla", suffix, size, (outs MQPR:$Qd),
          (ins MQPR:$Qd_src, MQPR:$Qn, MQPR:$Qm, complexrotateop:$rot),
-         "$Qd, $Qn, $Qm, $rot", vpred_n, "$Qd = $Qd_src", pattern> {
+         "$Qd, $Qn, $Qm, $rot", vpred_n, "$Qd = $Qd_src", []> {
   bits<4> Qd;
   bits<4> Qn;
   bits<2> rot;
@@ -2865,8 +2865,32 @@
   let Inst{4} = 0b0;
 }
 
-def MVE_VCMLAf16 : MVE_VCMLA<"f16", 0b0>;
-def MVE_VCMLAf32 : MVE_VCMLA<"f32", 0b1>;
+multiclass MVE_VCMLA_m<MVEVectorVTInfo VTI, bit size> {
+  def "" : MVE_VCMLA<VTI.Suffix, size>;
+
+  let Predicates = [HasMVEFloat] in {
+    def : Pat<(VTI.Vec (int_arm_mve_vcmlaq
+                            imm:$rot, (VTI.Vec MQPR:$Qd_src),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qd_src),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot))>;
+
+    def : Pat<(VTI.Vec (int_arm_mve_vcmlaq_predicated
+                            imm:$rot, (VTI.Vec MQPR:$Qd_src),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            (VTI.Pred VCCR:$mask))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qd_src), (VTI.Vec MQPR:$Qn),
+                            (VTI.Vec MQPR:$Qm), imm:$rot,
+                            ARMVCCThen, (VTI.Pred VCCR:$mask)))>;
+
+  }
+}
+
+defm MVE_VCMLAf16 : MVE_VCMLA_m<MVE_v8f16, 0b0>;
+defm MVE_VCMLAf32 : MVE_VCMLA_m<MVE_v4f32, 0b1>;
 
 class MVE_VADDSUBFMA_fp;
 defm MVE_VSUBf16 : MVE_VSUB_fp_m<MVE_v8f16>;
 
-class MVE_VCADD<string suffix, bit size, string cstr="", list<dag> pattern=[]>
+class MVE_VCADD<string suffix, bit size, string cstr="">
   : MVEFloatArithNeon<"vcadd", suffix, size, (outs MQPR:$Qd),
          (ins MQPR:$Qn, MQPR:$Qm, complexrotateopodd:$rot),
-         "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, pattern> {
+         "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, []> {
   bits<4> Qd;
   bits<4> Qn;
   bit rot;
@@ -2962,8 +2986,31 @@
   let Inst{4} = 0b0;
 }
 
-def MVE_VCADDf16 : MVE_VCADD<"f16", 0b0>;
-def MVE_VCADDf32 : MVE_VCADD<"f32", 0b1, "@earlyclobber $Qd">;
+multiclass MVE_VCADD_m<MVEVectorVTInfo VTI, bit size, string cstr=""> {
+  def "" : MVE_VCADD<VTI.Suffix, size, cstr>;
+
+  let Predicates = [HasMVEFloat] in {
+    def : Pat<(VTI.Vec (int_arm_mve_vcaddq (i32 1),
+                            imm:$rot, (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot))>;
+
+    def : Pat<(VTI.Vec (int_arm_mve_vcaddq_predicated (i32 1),
+                            imm:$rot, (VTI.Vec MQPR:$inactive),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            (VTI.Pred VCCR:$mask))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot,
+                            ARMVCCThen, (VTI.Pred VCCR:$mask),
+                            (VTI.Vec MQPR:$inactive)))>;
+
+  }
+}
+
+defm MVE_VCADDf16 : MVE_VCADD_m<MVE_v8f16, 0b0>;
+defm MVE_VCADDf32 : MVE_VCADD_m<MVE_v4f32, 0b1, "@earlyclobber $Qd">;
 
 class MVE_VABD_fp<string suffix, bit size>
   : MVE_float<"vabd", suffix, (outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm),
@@ -3575,10 +3622,10 @@
 defm MVE_VQRDMLSDH  : MVE_VQxDMLxDH_multi<"vqrdmlsdh", 0b0, 0b1, 0b1>;
 defm MVE_VQRDMLSDHX : MVE_VQxDMLxDH_multi<"vqrdmlsdhx", 0b1, 0b1, 0b1>;
 
-class MVE_VCMUL<string iname, string suffix, bit size,
-                string cstr="", list<dag> pattern=[]>
+class MVE_VCMUL<string iname, string suffix, bit size,
+                string cstr="">
   : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                    (ins MQPR:$Qn, MQPR:$Qm, complexrotateop:$rot),
-                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, pattern> {
+                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, []> {
   bits<4> Qn;
   bits<2> rot;
@@ -3594,8 +3641,33 @@
   let Predicates = [HasMVEFloat];
 }
 
-def MVE_VCMULf16 : MVE_VCMUL<"vcmul", "f16", 0b0>;
-def MVE_VCMULf32 : MVE_VCMUL<"vcmul", "f32", 0b1, "@earlyclobber $Qd">;
+multiclass MVE_VCMUL_m<string iname, MVEVectorVTInfo VTI,
+                       bit size, string cstr=""> {
+  def "" : MVE_VCMUL<iname, VTI.Suffix, size, cstr>;
+
+
+  let Predicates = [HasMVEFloat] in {
+    def : Pat<(VTI.Vec (int_arm_mve_vcmulq
+                            imm:$rot, (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot))>;
+
+    def : Pat<(VTI.Vec (int_arm_mve_vcmulq_predicated
+                            imm:$rot, (VTI.Vec MQPR:$inactive),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            (VTI.Pred VCCR:$mask))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot,
+                            ARMVCCThen, (VTI.Pred VCCR:$mask),
+                            (VTI.Vec MQPR:$inactive)))>;
+
+  }
+}
+
+defm MVE_VCMULf16 : MVE_VCMUL_m<"vcmul", MVE_v8f16, 0b0>;
+defm MVE_VCMULf32 : MVE_VCMUL_m<"vcmul", MVE_v4f32, 0b1, "@earlyclobber $Qd">;
 
 class MVE_VMULL<string iname, string suffix, bit bit_28, bits<2> bits_21_20,
                 bit T, string cstr, list<dag> pattern=[]>
@@ -3776,10 +3848,10 @@
 defm MVE_VCVTf32f16th : MVE_VCVT_h2f_m<"vcvtt", 0b1>;
 
 class MVE_VxCADD<string iname, string suffix, bits<2> size, bit halve,
-                 string cstr="", list<dag> pattern=[]>
+                 string cstr="">
   : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                    (ins MQPR:$Qn, MQPR:$Qm, complexrotateopodd:$rot),
-                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, pattern> {
+                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, []> {
   bits<4> Qn;
   bit rot;
@@ -3793,13 +3865,37 @@
   let Inst{0} = 0b0;
 }
 
-def MVE_VCADDi8  : MVE_VxCADD<"vcadd", "i8",  0b00, 0b1>;
-def MVE_VCADDi16 : MVE_VxCADD<"vcadd", "i16", 0b01, 0b1>;
-def MVE_VCADDi32 : MVE_VxCADD<"vcadd", "i32", 0b10, 0b1, "@earlyclobber $Qd">;
+multiclass MVE_VxCADD_m<string iname, MVEVectorVTInfo VTI,
+                        bit halve, string cstr=""> {
+  def "" : MVE_VxCADD<iname, VTI.Suffix, VTI.Size, halve, cstr>;
+
+  let Predicates = [HasMVEInt] in {
+    def : Pat<(VTI.Vec (int_arm_mve_vcaddq halve,
+                            imm:$rot, (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot))>;
+
+    def : Pat<(VTI.Vec (int_arm_mve_vcaddq_predicated halve,
+                            imm:$rot, (VTI.Vec MQPR:$inactive),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            (VTI.Pred VCCR:$mask))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot,
+                            ARMVCCThen, (VTI.Pred VCCR:$mask),
+                            (VTI.Vec MQPR:$inactive)))>;
+
+  }
+}
+
+defm MVE_VCADDi8  : MVE_VxCADD_m<"vcadd", MVE_v16i8, 0b1>;
+defm MVE_VCADDi16 : MVE_VxCADD_m<"vcadd", MVE_v8i16, 0b1>;
+defm MVE_VCADDi32 : MVE_VxCADD_m<"vcadd", MVE_v4i32, 0b1, "@earlyclobber $Qd">;
 
-def MVE_VHCADDs8  : MVE_VxCADD<"vhcadd", "s8",  0b00, 0b0>;
-def MVE_VHCADDs16 : MVE_VxCADD<"vhcadd", "s16", 0b01, 0b0>;
-def MVE_VHCADDs32 : MVE_VxCADD<"vhcadd", "s32", 0b10, 0b0, "@earlyclobber $Qd">;
+defm MVE_VHCADDs8  : MVE_VxCADD_m<"vhcadd", MVE_v16s8, 0b0>;
+defm MVE_VHCADDs16 : MVE_VxCADD_m<"vhcadd", MVE_v8s16, 0b0>;
+defm MVE_VHCADDs32 : MVE_VxCADD_m<"vhcadd", MVE_v4s32, 0b0, "@earlyclobber $Qd">;
 
 class MVE_VADCSBC<string iname, bit I, bit subtract, dag carryin, list<dag> pattern=[]>
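For illustration only (not part of the patch): the Pat<> definitions above take over from the deleted C++ selection code, keying on the constant halving operand of the intrinsic. A sketch of the resulting IR-to-assembly mapping; the vcadd line matches the vcaddq.ll CHECK lines below, while the vhcadd mnemonic is an assumption based on the corresponding vhcaddq tests, which this excerpt does not include:

; halving flag = 1 on <16 x i8> selects MVE_VCADDi8:
;   %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> %a, <16 x i8> %b)
;   -> vcadd.i8 q0, q0, q1, #90
; halving flag = 0 selects MVE_VHCADDs8 (assumed spelling):
;   %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> %a, <16 x i8> %b)
;   -> vhcadd.s8 q0, q0, q1, #90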
Index: llvm/test/CodeGen/Thumb2/mve-intrinsics/vcaddq.ll
===================================================================
--- llvm/test/CodeGen/Thumb2/mve-intrinsics/vcaddq.ll
+++ llvm/test/CodeGen/Thumb2/mve-intrinsics/vcaddq.ll
@@ -23,7 +23,7 @@
 ; CHECK-NEXT: vcadd.i8 q0, q0, q1, #90
 ; CHECK-NEXT: bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -33,7 +33,7 @@
 ; CHECK-NEXT: vcadd.i16 q0, q0, q1, #90
 ; CHECK-NEXT: bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -44,7 +44,7 @@
 ; CHECK-NEXT: vmov q0, q2
 ; CHECK-NEXT: bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -54,7 +54,7 @@
 ; CHECK-NEXT: vcadd.i8 q0, q0, q1, #90
 ; CHECK-NEXT: bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -64,7 +64,7 @@
 ; CHECK-NEXT: vcadd.i16 q0, q0, q1, #90
 ; CHECK-NEXT: bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16>
@llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> %a, <8 x i16> %b) ret <8 x i16> %0 } @@ -75,7 +75,7 @@ ; CHECK-NEXT: vmov q0, q2 ; CHECK-NEXT: bx lr entry: - %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> %a, <4 x i32> %b) + %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> %a, <4 x i32> %b) ret <4 x i32> %0 } @@ -85,7 +85,7 @@ ; CHECK-NEXT: vcadd.f16 q0, q0, q1, #90 ; CHECK-NEXT: bx lr entry: - %0 = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 0, i32 0, <8 x half> %a, <8 x half> %b) + %0 = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 1, i32 0, <8 x half> %a, <8 x half> %b) ret <8 x half> %0 } @@ -96,7 +96,7 @@ ; CHECK-NEXT: vmov q0, q2 ; CHECK-NEXT: bx lr entry: - %0 = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 0, i32 0, <4 x float> %a, <4 x float> %b) + %0 = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 0, <4 x float> %a, <4 x float> %b) ret <4 x float> %0 } @@ -106,7 +106,7 @@ ; CHECK-NEXT: vcadd.i8 q0, q0, q1, #270 ; CHECK-NEXT: bx lr entry: - %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> %a, <16 x i8> %b) + %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> %a, <16 x i8> %b) ret <16 x i8> %0 } @@ -116,7 +116,7 @@ ; CHECK-NEXT: vcadd.i16 q0, q0, q1, #270 ; CHECK-NEXT: bx lr entry: - %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> %a, <8 x i16> %b) + %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> %a, <8 x i16> %b) ret <8 x i16> %0 } @@ -127,7 +127,7 @@ ; CHECK-NEXT: vmov q0, q2 ; CHECK-NEXT: bx lr entry: - %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> %a, <4 x i32> %b) + %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> %a, <4 x i32> %b) ret <4 x i32> %0 } @@ -137,7 +137,7 @@ ; CHECK-NEXT: vcadd.i8 q0, q0, q1, #270 ; CHECK-NEXT: bx lr entry: - %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> %a, <16 x i8> %b) + %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> %a, <16 x i8> %b) ret <16 x i8> %0 } @@ -147,7 +147,7 @@ ; CHECK-NEXT: vcadd.i16 q0, q0, q1, #270 ; CHECK-NEXT: bx lr entry: - %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> %a, <8 x i16> %b) + %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> %a, <8 x i16> %b) ret <8 x i16> %0 } @@ -158,7 +158,7 @@ ; CHECK-NEXT: vmov q0, q2 ; CHECK-NEXT: bx lr entry: - %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> %a, <4 x i32> %b) + %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> %a, <4 x i32> %b) ret <4 x i32> %0 } @@ -168,7 +168,7 @@ ; CHECK-NEXT: vcadd.f16 q0, q0, q1, #270 ; CHECK-NEXT: bx lr entry: - %0 = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 0, i32 1, <8 x half> %a, <8 x half> %b) + %0 = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 1, i32 1, <8 x half> %a, <8 x half> %b) ret <8 x half> %0 } @@ -179,7 +179,7 @@ ; CHECK-NEXT: vmov q0, q2 ; CHECK-NEXT: bx lr entry: - %0 = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 0, i32 1, <4 x float> %a, <4 x float> %b) + %0 = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 1, <4 x float> %a, <4 x float> %b) ret <4 x float> %0 } @@ -193,7 +193,7 @@ entry: %0 = zext i16 %p to i32 %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) - %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1) + %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 
@@ -207,7 +207,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -221,7 +221,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -235,7 +235,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -249,7 +249,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -263,7 +263,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -277,7 +277,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 0, <8 x half> %inactive, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 0, <8 x half> %inactive, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
   ret <8 x half> %2
 }
 
@@ -291,7 +291,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 0, <4 x float> %inactive, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 0, <4 x float> %inactive, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
   ret <4 x float> %2
 }
 
@@ -305,7 +305,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -319,7 +319,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -333,7 +333,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -347,7 +347,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -361,7 +361,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -375,7 +375,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -389,7 +389,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 1, <8 x half> %inactive, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 1, <8 x half> %inactive, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
   ret <8 x half> %2
 }
 
@@ -403,7 +403,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 1, <4 x float> %inactive, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 1, <4 x float> %inactive, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
   ret <4 x float> %2
 }
 
@@ -417,7 +417,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -431,7 +431,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -446,7 +446,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -460,7 +460,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -474,7 +474,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -489,7 +489,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -503,7 +503,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 0, <8 x half> undef, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 0, <8 x half> undef, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
   ret <8 x half> %2
 }
 
@@ -518,7 +518,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 0, <4 x float> undef, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 0, <4 x float> undef, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
   ret <4 x float> %2
 }
 
@@ -532,7 +532,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -546,7 +546,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -561,7 +561,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -575,7 +575,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -589,7 +589,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -604,7 +604,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -618,7 +618,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 1, <8 x half> undef, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 1, <8 x half> undef, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
   ret <8 x half> %2
 }
 
@@ -633,7 +633,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 1, <4 x float> undef, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 1, <4 x float> undef, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
   ret <4 x float> %2
 }
 
@@ -643,7 +643,7 @@
 ; CHECK-NEXT:    vhcadd.s8 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -653,7 +653,7 @@
 ; CHECK-NEXT:    vhcadd.s16 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -664,7 +664,7 @@
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -674,7 +674,7 @@
 ; CHECK-NEXT:    vhcadd.s8 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -684,7 +684,7 @@
 ; CHECK-NEXT:    vhcadd.s16 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -695,7 +695,7 @@
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -709,7 +709,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -723,7 +723,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -738,7 +738,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -752,7 +752,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -766,7 +766,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -781,7 +781,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -795,7 +795,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -809,7 +809,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -823,7 +823,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -837,7 +837,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -851,7 +851,7 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -865,6 +865,6 @@
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
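Closing note (not part of the patch): a self-contained sketch of the predicated form under the corrected convention, mirroring the hunks above. The function name @vhcadd_pred_demo is invented for illustration; a halving selector of 0 combined with a rotation selector of 0 should now select a predicated VHCADD.S8 with rotation #90.

declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32)
declare <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32, i32, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i1>)

define <16 x i8> @vhcadd_pred_demo(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 %p) {
entry:
  ; expand the scalar mask into a vector predicate
  %0 = zext i16 %p to i32
  %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
  ; i32 0 (halving), i32 0 (#90): expected to select VHCADDT.S8 under a VPT block
  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
  ret <16 x i8> %2
}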