diff --git a/clang/include/clang/Basic/arm_mve.td b/clang/include/clang/Basic/arm_mve.td
--- a/clang/include/clang/Basic/arm_mve.td
+++ b/clang/include/clang/Basic/arm_mve.td
@@ -1139,27 +1139,31 @@
 def lsll: LongScalarShift<u64, (args s32:$sh), (IRInt<"lsll"> $lo, $hi, $sh)>;
 def asrl: LongScalarShift<s64, (args s32:$sh), (IRInt<"asrl"> $lo, $hi, $sh)>;
 
+multiclass vadcsbc {
+  def q: Intrinsic<Vector, (args Vector:$a, Vector:$b, Ptr<uint>:$carry),
+      (seq (IRInt<NAME, [Vector]> $a, $b, (shl (load $carry), 29)):$pair,
+           (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
+           (xval $pair, 0))>;
+  def iq: Intrinsic<Vector, (args Vector:$a, Vector:$b, Ptr<uint>:$carry),
+      (seq (IRInt<NAME, [Vector]> $a, $b, 0):$pair,
+           (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
+           (xval $pair, 0))>;
+  def q_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
+                              Ptr<uint>:$carry, Predicate:$pred),
+      (seq (IRInt<NAME # "_predicated", [Vector, Predicate]> $inactive, $a, $b,
+               (shl (load $carry), 29), $pred):$pair,
+           (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
+           (xval $pair, 0))>;
+  def iq_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
+                               Ptr<uint>:$carry, Predicate:$pred),
+      (seq (IRInt<NAME # "_predicated", [Vector, Predicate]> $inactive, $a, $b,
+               0, $pred):$pair,
+           (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
+           (xval $pair, 0))>;
+}
 let params = T.Int32 in {
-def vadcq: Intrinsic<Vector, (args Vector:$a, Vector:$b, Ptr<uint>:$carry),
-    (seq (IRInt<"vadc", [Vector]> $a, $b, (shl (load $carry), 29)):$pair,
-         (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
-         (xval $pair, 0))>;
-def vadciq: Intrinsic<Vector, (args Vector:$a, Vector:$b, Ptr<uint>:$carry),
-    (seq (IRInt<"vadc", [Vector]> $a, $b, 0):$pair,
-         (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
-         (xval $pair, 0))>;
-def vadcq_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
-                                Ptr<uint>:$carry, Predicate:$pred),
-    (seq (IRInt<"vadc_predicated", [Vector, Predicate]> $inactive, $a, $b,
-             (shl (load $carry), 29), $pred):$pair,
-         (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
-         (xval $pair, 0))>;
-def vadciq_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
-                                 Ptr<uint>:$carry, Predicate:$pred),
-    (seq (IRInt<"vadc_predicated", [Vector, Predicate]> $inactive, $a, $b,
-             0, $pred):$pair,
-         (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
-         (xval $pair, 0))>;
+  defm vadc: vadcsbc;
+  defm vsbc: vadcsbc;
 }
 
 multiclass VectorComplexAddPred {
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/vadc.c b/clang/test/CodeGen/arm-mve-intrinsics/vadc.c
--- a/clang/test/CodeGen/arm-mve-intrinsics/vadc.c
+++ b/clang/test/CodeGen/arm-mve-intrinsics/vadc.c
@@ -87,3 +87,172 @@
   return vadcq_m_s32(inactive, a, b, carry, p);
 #endif /* POLYMORPHIC */
 }
+
+// CHECK-LABEL: @test_vsbciq_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0)
+// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP0]], 1
+// CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 29
+// CHECK-NEXT:    [[TMP3:%.*]] = and i32 1, [[TMP2]]
+// CHECK-NEXT:    store i32 [[TMP3]], i32* [[CARRY_OUT:%.*]], align 4
+// CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP0]], 0
+// CHECK-NEXT:    ret <4 x i32> [[TMP4]]
+//
+int32x4_t test_vsbciq_s32(int32x4_t a, int32x4_t b, unsigned *carry_out)
+{
+#ifdef POLYMORPHIC
+  return vsbciq(a, b, carry_out);
+#else /* POLYMORPHIC */
+  return vsbciq_s32(a, b, carry_out);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vsbciq_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0)
+// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP0]], 1
+// CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 29
+// CHECK-NEXT:    [[TMP3:%.*]] = and i32 1, [[TMP2]]
+// CHECK-NEXT:    store i32 [[TMP3]], i32* [[CARRY_OUT:%.*]], align 4
+// CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP0]], 0
+// CHECK-NEXT:    ret <4 x i32> [[TMP4]]
+//
+uint32x4_t test_vsbciq_u32(uint32x4_t a, uint32x4_t b, unsigned *carry_out)
+{
+#ifdef POLYMORPHIC
+  return vsbciq(a, b, carry_out);
+#else /* POLYMORPHIC */
+  return vsbciq_u32(a, b, carry_out);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vsbcq_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CARRY:%.*]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 29
+// CHECK-NEXT:    [[TMP2:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[TMP1]])
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 1
+// CHECK-NEXT:    [[TMP4:%.*]] = lshr i32 [[TMP3]], 29
+// CHECK-NEXT:    [[TMP5:%.*]] = and i32 1, [[TMP4]]
+// CHECK-NEXT:    store i32 [[TMP5]], i32* [[CARRY]], align 4
+// CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 0
+// CHECK-NEXT:    ret <4 x i32> [[TMP6]]
+//
+int32x4_t test_vsbcq_s32(int32x4_t a, int32x4_t b, unsigned *carry)
+{
+#ifdef POLYMORPHIC
+  return vsbcq(a, b, carry);
+#else /* POLYMORPHIC */
+  return vsbcq_s32(a, b, carry);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vsbcq_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CARRY:%.*]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 29
+// CHECK-NEXT:    [[TMP2:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[TMP1]])
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 1
+// CHECK-NEXT:    [[TMP4:%.*]] = lshr i32 [[TMP3]], 29
+// CHECK-NEXT:    [[TMP5:%.*]] = and i32 1, [[TMP4]]
+// CHECK-NEXT:    store i32 [[TMP5]], i32* [[CARRY]], align 4
+// CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 0
+// CHECK-NEXT:    ret <4 x i32> [[TMP6]]
+//
+uint32x4_t test_vsbcq_u32(uint32x4_t a, uint32x4_t b, unsigned *carry)
+{
+#ifdef POLYMORPHIC
+  return vsbcq(a, b, carry);
+#else /* POLYMORPHIC */
+  return vsbcq_u32(a, b, carry);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vsbciq_m_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 1
+// CHECK-NEXT:    [[TMP4:%.*]] = lshr i32 [[TMP3]], 29
+// CHECK-NEXT:    [[TMP5:%.*]] = and i32 1, [[TMP4]]
+// CHECK-NEXT:    store i32 [[TMP5]], i32* [[CARRY_OUT:%.*]], align 4
+// CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 0
+// CHECK-NEXT:    ret <4 x i32> [[TMP6]]
+//
+int32x4_t test_vsbciq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned *carry_out, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+  return vsbciq_m(inactive, a, b, carry_out, p);
+#else /* POLYMORPHIC */
+  return vsbciq_m_s32(inactive, a, b, carry_out, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vsbciq_m_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 1
+// CHECK-NEXT:    [[TMP4:%.*]] = lshr i32 [[TMP3]], 29
+// CHECK-NEXT:    [[TMP5:%.*]] = and i32 1, [[TMP4]]
+// CHECK-NEXT:    store i32 [[TMP5]], i32* [[CARRY_OUT:%.*]], align 4
+// CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 0
+// CHECK-NEXT:    ret <4 x i32> [[TMP6]]
+//
+uint32x4_t test_vsbciq_m_u32(uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned *carry_out, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+  return vsbciq_m(inactive, a, b, carry_out, p);
+#else /* POLYMORPHIC */
+  return vsbciq_m_u32(inactive, a, b, carry_out, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vsbcq_m_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CARRY:%.*]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 29
+// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
+// CHECK-NEXT:    [[TMP4:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[TMP1]], <4 x i1> [[TMP3]])
+// CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP4]], 1
+// CHECK-NEXT:    [[TMP6:%.*]] = lshr i32 [[TMP5]], 29
+// CHECK-NEXT:    [[TMP7:%.*]] = and i32 1, [[TMP6]]
+// CHECK-NEXT:    store i32 [[TMP7]], i32* [[CARRY]], align 4
+// CHECK-NEXT:    [[TMP8:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP4]], 0
+// CHECK-NEXT:    ret <4 x i32> [[TMP8]]
+//
+int32x4_t test_vsbcq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned *carry, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+  return vsbcq_m(inactive, a, b, carry, p);
+#else /* POLYMORPHIC */
+  return vsbcq_m_s32(inactive, a, b, carry, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vsbcq_m_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CARRY:%.*]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 29
+// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
+// CHECK-NEXT:    [[TMP4:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[TMP1]], <4 x i1> [[TMP3]])
+// CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP4]], 1
+// CHECK-NEXT:    [[TMP6:%.*]] = lshr i32 [[TMP5]], 29
+// CHECK-NEXT:    [[TMP7:%.*]] = and i32 1, [[TMP6]]
+// CHECK-NEXT:    store i32 [[TMP7]], i32* [[CARRY]], align 4
+// CHECK-NEXT:    [[TMP8:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP4]], 0
+// CHECK-NEXT:    ret <4 x i32> [[TMP8]]
+//
+uint32x4_t test_vsbcq_m_u32(uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned *carry, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+  return vsbcq_m(inactive, a, b, carry, p);
+#else /* POLYMORPHIC */
+  return vsbcq_m_u32(inactive, a, b, carry, p);
+#endif /* POLYMORPHIC */
+}
+
diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -1020,10 +1020,17 @@
 def int_arm_mve_vadc: Intrinsic<
    [llvm_anyvector_ty, llvm_i32_ty],
    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_mve_vsbc: Intrinsic<
+   [llvm_anyvector_ty, llvm_i32_ty],
+   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
 def int_arm_mve_vadc_predicated: Intrinsic<
    [llvm_anyvector_ty, llvm_i32_ty],
    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
     llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
+def int_arm_mve_vsbc_predicated: Intrinsic<
+   [llvm_anyvector_ty, llvm_i32_ty],
+   [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+    llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
 def int_arm_mve_vmulh: Intrinsic<
    [llvm_anyvector_ty],
    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -4588,6 +4588,11 @@
     SelectMVE_VADCSBC(N, ARM::MVE_VADC, ARM::MVE_VADCI, true,
                       IntNo == Intrinsic::arm_mve_vadc_predicated);
     return;
+  case Intrinsic::arm_mve_vsbc:
+  case Intrinsic::arm_mve_vsbc_predicated:
+    SelectMVE_VADCSBC(N, ARM::MVE_VSBC, ARM::MVE_VSBCI, true,
+                      IntNo == Intrinsic::arm_mve_vsbc_predicated);
+    return;
 
   case Intrinsic::arm_mve_vmlldava:
   case Intrinsic::arm_mve_vmlldava_predicated: {
diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vadc.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vadc.ll
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vadc.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vadc.ll
@@ -96,3 +96,187 @@
   %8 = extractvalue { <4 x i32>, i32 } %4, 0
   ret <4 x i32> %8
 }
+
+declare { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32>, <4 x i32>, i32)
+
+define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_s32(<4 x i32> %a, <4 x i32> %b, i32* nocapture %carry_out) {
+; CHECK-LABEL: test_vsbciq_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vsbci.i32 q0, q0, q1
+; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
+; CHECK-NEXT:    ubfx r1, r1, #29, #1
+; CHECK-NEXT:    str r1, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 0)
+  %1 = extractvalue { <4 x i32>, i32 } %0, 1
+  %2 = lshr i32 %1, 29
+  %3 = and i32 %2, 1
+  store i32 %3, i32* %carry_out, align 4
+  %4 = extractvalue { <4 x i32>, i32 } %0, 0
+  ret <4 x i32> %4
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_u32(<4 x i32> %a, <4 x i32> %b, i32* nocapture %carry_out) {
+; CHECK-LABEL: test_vsbciq_u32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vsbci.i32 q0, q0, q1
+; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
+; CHECK-NEXT:    ubfx r1, r1, #29, #1
+; CHECK-NEXT:    str r1, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 0)
+  %1 = extractvalue { <4 x i32>, i32 } %0, 1
+  %2 = lshr i32 %1, 29
+  %3 = and i32 %2, 1
+  store i32 %3, i32* %carry_out, align 4
+  %4 = extractvalue { <4 x i32>, i32 } %0, 0
+  ret <4 x i32> %4
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_s32(<4 x i32> %a, <4 x i32> %b, i32* nocapture %carry) {
+; CHECK-LABEL: test_vsbcq_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ldr r1, [r0]
+; CHECK-NEXT:    lsls r1, r1, #29
+; CHECK-NEXT:    vmsr fpscr_nzcvqc, r1
+; CHECK-NEXT:    vsbc.i32 q0, q0, q1
+; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
+; CHECK-NEXT:    ubfx r1, r1, #29, #1
+; CHECK-NEXT:    str r1, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load i32, i32* %carry, align 4
+  %1 = shl i32 %0, 29
+  %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 %1)
+  %3 = extractvalue { <4 x i32>, i32 } %2, 1
+  %4 = lshr i32 %3, 29
+  %5 = and i32 %4, 1
+  store i32 %5, i32* %carry, align 4
+  %6 = extractvalue { <4 x i32>, i32 } %2, 0
+  ret <4 x i32> %6
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_u32(<4 x i32> %a, <4 x i32> %b, i32* nocapture %carry) {
+; CHECK-LABEL: test_vsbcq_u32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ldr r1, [r0]
+; CHECK-NEXT:    lsls r1, r1, #29
+; CHECK-NEXT:    vmsr fpscr_nzcvqc, r1
+; CHECK-NEXT:    vsbc.i32 q0, q0, q1
+; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
+; CHECK-NEXT:    ubfx r1, r1, #29, #1
+; CHECK-NEXT:    str r1, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load i32, i32* %carry, align 4
+  %1 = shl i32 %0, 29
+  %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 %1)
+  %3 = extractvalue { <4 x i32>, i32 } %2, 1
+  %4 = lshr i32 %3, 29
+  %5 = and i32 %4, 1
+  store i32 %5, i32* %carry, align 4
+  %6 = extractvalue { <4 x i32>, i32 } %2, 0
+  ret <4 x i32> %6
+}
+
+declare { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i32>, i32, <4 x i1>)
+
+define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* nocapture %carry_out, i16 zeroext %p) {
+; CHECK-LABEL: test_vsbciq_m_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r1
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vsbcit.i32 q0, q1, q2
+; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
+; CHECK-NEXT:    ubfx r1, r1, #29, #1
+; CHECK-NEXT:    str r1, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, <4 x i1> %1)
+  %3 = extractvalue { <4 x i32>, i32 } %2, 1
+  %4 = lshr i32 %3, 29
+  %5 = and i32 %4, 1
+  store i32 %5, i32* %carry_out, align 4
+  %6 = extractvalue { <4 x i32>, i32 } %2, 0
+  ret <4 x i32> %6
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* nocapture %carry_out, i16 zeroext %p) {
+; CHECK-LABEL: test_vsbciq_m_u32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r1
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vsbcit.i32 q0, q1, q2
+; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
+; CHECK-NEXT:    ubfx r1, r1, #29, #1
+; CHECK-NEXT:    str r1, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, <4 x i1> %1)
+  %3 = extractvalue { <4 x i32>, i32 } %2, 1
+  %4 = lshr i32 %3, 29
+  %5 = and i32 %4, 1
+  store i32 %5, i32* %carry_out, align 4
+  %6 = extractvalue { <4 x i32>, i32 } %2, 0
+  ret <4 x i32> %6
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* nocapture %carry, i16 zeroext %p) {
+; CHECK-LABEL: test_vsbcq_m_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ldr r2, [r0]
+; CHECK-NEXT:    vmsr p0, r1
+; CHECK-NEXT:    lsls r1, r2, #29
+; CHECK-NEXT:    vmsr fpscr_nzcvqc, r1
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vsbct.i32 q0, q1, q2
+; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
+; CHECK-NEXT:    ubfx r1, r1, #29, #1
+; CHECK-NEXT:    str r1, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load i32, i32* %carry, align 4
+  %1 = shl i32 %0, 29
+  %2 = zext i16 %p to i32
+  %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2)
+  %4 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 %1, <4 x i1> %3)
+  %5 = extractvalue { <4 x i32>, i32 } %4, 1
+  %6 = lshr i32 %5, 29
+  %7 = and i32 %6, 1
+  store i32 %7, i32* %carry, align 4
+  %8 = extractvalue { <4 x i32>, i32 } %4, 0
+  ret <4 x i32> %8
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* nocapture %carry, i16 zeroext %p) {
+; CHECK-LABEL: test_vsbcq_m_u32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ldr r2, [r0]
+; CHECK-NEXT:    vmsr p0, r1
+; CHECK-NEXT:    lsls r1, r2, #29
+; CHECK-NEXT:    vmsr fpscr_nzcvqc, r1
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vsbct.i32 q0, q1, q2
+; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
+; CHECK-NEXT:    ubfx r1, r1, #29, #1
+; CHECK-NEXT:    str r1, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load i32, i32* %carry, align 4
+  %1 = shl i32 %0, 29
+  %2 = zext i16 %p to i32
+  %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2)
+  %4 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 %1, <4 x i1> %3)
+  %5 = extractvalue { <4 x i32>, i32 } %4, 1
+  %6 = lshr i32 %5, 29
+  %7 = and i32 %6, 1
+  store i32 %7, i32* %carry, align 4
+  %8 = extractvalue { <4 x i32>, i32 } %4, 0
+  ret <4 x i32> %8
+}
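
Usage note (not part of the patch): VADC/VSBC chain a single carry bit across
the four 32-bit lanes of a Q register, so a uint32x4_t effectively behaves as
one 128-bit integer. Below is a minimal sketch of how the new intrinsics
compose into a 256-bit subtraction; the function name sub256 and its operand
layout are illustrative only, not part of this change.

#include <arm_mve.h>

/* Computes {r_lo, r_hi} = {a_lo, a_hi} - {b_lo, b_hi}, where each half is a
   128-bit quantity held in a vector of four 32-bit lanes. */
void sub256(uint32x4_t a_lo, uint32x4_t a_hi,
            uint32x4_t b_lo, uint32x4_t b_hi,
            uint32x4_t *r_lo, uint32x4_t *r_hi)
{
    unsigned carry;
    /* vsbciq starts the chain: the incoming borrow is fixed by the
       instruction, and the borrow out of the top lane is written through
       the carry pointer. */
    *r_lo = vsbciq_u32(a_lo, b_lo, &carry);
    /* vsbcq reads that borrow back in and propagates it through the high
       half, updating carry again for any further limbs. */
    *r_hi = vsbcq_u32(a_hi, b_hi, &carry);
}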